diff --git a/ontoScorer/metrics.py b/ontoScorer/metrics.py
index 3c4936fb48fe46a58428e95835c9dafc62190402..f712fa80a0660d5b880027ed1669112358c4d701 100644
--- a/ontoScorer/metrics.py
+++ b/ontoScorer/metrics.py
@@ -36,14 +36,15 @@ class Metrics:
             },
             "taxonomic_relations": {
                 "subclass": Score(),
+                "subproperty": Score(),
                 "instanciation": Score(),
                 "synthesis": Score()  # Synthesis score for taxonomic relations axis
             },
             "non_taxonomic_relations": {
                 "object_properties": Score(),
                 "data_properties": Score(),
-                "domains": Score(),
-                "ranges": Score(),
+                # "domains": Score(),
+                # "ranges": Score(),
                 "synthesis": Score()  # Synthesis score for non-taxonomic relations axis
             },
             "axioms": {
@@ -96,6 +97,84 @@ class Metrics:
         self.scores["entities"]["synthesis"].compute(y_true_overall, y_pred_overall)
 
 
+    def compute_taxonomic_relation_scores(self, reference_ontology, generated_ontology):
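+        """
+        Computes precision, recall, and F1 for each taxonomic relation type
+        (subclass, subproperty, instanciation) and a synthesis score across all of them.
+        """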
+        relation_methods = {
+            "subclass": ("get_subclass_relations", Ontology.compare_relations),
+            "subproperty": ("get_subproperty_relations", Ontology.compare_relations),
+            "instanciation": ("get_instance_relations", Ontology.compare_relations)
+        }
+
+        y_true_overall = []
+        y_pred_overall = []
+
+        for score_name, (method_name, comparison_function) in relation_methods.items():
+            reference_relations = getattr(reference_ontology, method_name)()
+            generated_relations = getattr(generated_ontology, method_name)()
+
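+            # Build binary indicator vectors over the union of reference and generated relations.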
+            all_relations = list(set(reference_relations + generated_relations))
+            y_true = [1 if any(comparison_function(elem, ref_elem) for ref_elem in reference_relations) else 0 for elem in all_relations]
+            y_pred = [1 if any(comparison_function(elem, gen_elem) for gen_elem in generated_relations) else 0 for elem in all_relations]
+
+            self.scores["taxonomic_relations"][score_name].compute(y_true, y_pred)
+
+            y_true_overall.extend(y_true)
+            y_pred_overall.extend(y_pred)
+
+        self.scores["taxonomic_relations"]["synthesis"].compute(y_true_overall, y_pred_overall)
+
+
+    # def compute_non_taxonomic_relation_scores(self, reference_ontology, generated_ontology):
+    #     relation_methods = {
+    #         "object_properties": ("get_object_property_relations", Ontology.compare_relations),
+    #         "data_properties": ("get_data_property_relations", Ontology.compare_relations)
+    #     }
+
+    #     y_true_overall = []
+    #     y_pred_overall = []
+
+    #     for score_name, (method_name, comparison_function) in relation_methods.items():
+    #         reference_relations = getattr(reference_ontology, method_name)()
+    #         generated_relations = getattr(generated_ontology, method_name)()
+
+    #         all_relations = list(set(reference_relations + generated_relations))
+    #         y_true = [1 if rel in reference_relations else 0 for rel in all_relations]
+    #         y_pred = [1 if rel in generated_relations else 0 for rel in all_relations]
+
+    #         self.scores["non_taxonomic_relations"][score_name].compute(y_true, y_pred)
+
+    #         y_true_overall.extend(y_true)
+    #         y_pred_overall.extend(y_pred)
+
+    #     self.scores["non_taxonomic_relations"]["synthesis"].compute(y_true_overall, y_pred_overall)
+
+    # def compute_axiom_scores(self, reference_ontology, generated_ontology):
+    #     axiom_methods = {
+    #         "restriction_axioms": ("get_restriction_axioms", Ontology.compare_axioms)
+    #     }
+
+    #     y_true_overall = []
+    #     y_pred_overall = []
+
+    #     for score_name, (method_name, comparison_function) in axiom_methods.items():
+    #         reference_axioms = getattr(reference_ontology, method_name)()
+    #         generated_axioms = getattr(generated_ontology, method_name)()
+
+    #         all_axioms = list(set(reference_axioms + generated_axioms))
+    #         y_true = [1 if ax in reference_axioms else 0 for ax in all_axioms]
+    #         y_pred = [1 if ax in generated_axioms else 0 for ax in all_axioms]
+
+    #         self.scores["axioms"][score_name].compute(y_true, y_pred)
+
+    #         y_true_overall.extend(y_true)
+    #         y_pred_overall.extend(y_pred)
+
+    #     self.scores["axioms"]["synthesis"].compute(y_true_overall, y_pred_overall)
+
 
     #--------------------------------------------------------------------------
     # Printing Method(s)
@@ -105,8 +184,27 @@
         """
         Prints the scores (precision, recall, f1) for each ontology element category.
         """
+        
         entity_scores = self.scores["entities"]
         for element, score in entity_scores.items():
             print(f"Metrics for {element.capitalize()} (Entity axis):")
             print(score)
             print("----------------------------")
+            
+        taxonomic_relation_scores = self.scores["taxonomic_relations"]
+        for element, score in taxonomic_relation_scores.items():
+            print(f"Metrics for {element.capitalize()} (Taxonomic Relation axis):")
+            print(score)
+            print("----------------------------")
+            
+        # nontaxonomic_relation_scores = self.scores["non_taxonomic_relations"]
+        # for element, score in nontaxonomic_relation_scores.items():
+        #     print(f"Metrics for {element.capitalize()} (Non-Taxonomic Relation axis):")
+        #     print(score)
+        #     print("----------------------------")
+            
+        # axiom_scores = self.scores["axioms"]
+        # for element, score in axiom_scores.items():
+        #     print(f"Metrics for {element.capitalize()} (Axiom axis):")
+        #     print(score)
+        #     print("----------------------------")
diff --git a/tests/test_metrics.py b/tests/test_metrics.py
index de92d2ff0caae8c8ffe106ff168cc479d7f2ef9e..bfd909bd5804af68e4c448528bf906ad9318b270 100644
--- a/tests/test_metrics.py
+++ b/tests/test_metrics.py
@@ -26,10 +26,25 @@ class TestMetrics(unittest.TestCase):
         self.onto2 = Ontology(self.ontology2_path)
         self.metrics = Metrics()
 
-
-    def test_computes_scores(self):
+    def test_computes_entity_scores(self):
         self.metrics.compute_entity_scores(self.onto1, self.onto2)
-        for element, score in self.metrics.scores.items():
+        self.verify_scores(self.metrics.scores["entities"])
+
+    def test_computes_taxonomic_relation_scores(self):
+        self.metrics.compute_taxonomic_relation_scores(self.onto1, self.onto2)
+        self.verify_scores(self.metrics.scores["taxonomic_relations"])
+
+    # def test_computes_non_taxonomic_relation_scores(self):
+    #     self.metrics.compute_non_taxonomic_relation_scores(self.onto1, self.onto2)
+    #     self.verify_scores(self.metrics.scores["non_taxonomic_relations"])
+
+    # def test_computes_axiom_scores(self):
+    #     self.metrics.compute_axiom_scores(self.onto1, self.onto2)
+    #     self.verify_scores(self.metrics.scores["axioms"])
+
+    def verify_scores(self, score_category):
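+        """Checks that each Score has valid precision, recall, and F1 values (or None when no elements are present)."""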
+        for element, score in score_category.items():
             if score.total_elements == 0:
                 self.assertIsNone(score.precision, f"Precision for {element} should be None when no elements are present")
                 self.assertIsNone(score.recall, f"Recall for {element} should be None when no elements are present")
@@ -39,9 +54,11 @@
                 self.assertTrue(0 <= score.recall <= 1, f"Invalid recall for {element}")
                 self.assertTrue(0 <= score.f1 <= 1, f"Invalid F1 score for {element}")
 
-
     def test_print_scores(self):
         self.metrics.compute_entity_scores(self.onto1, self.onto2)
+        self.metrics.compute_taxonomic_relation_scores(self.onto1, self.onto2)
+        # self.metrics.compute_non_taxonomic_relation_scores(self.onto1, self.onto2)
+        # self.metrics.compute_axiom_scores(self.onto1, self.onto2)
         print()
         self.metrics.print_scores()