Commit 1530e185 authored by Aurélien Lamercerie
Update the Metrics module: add taxonomic relation scores (subclass, subproperty, instantiation) and extend the tests

parent ce88ae14
@@ -36,14 +36,15 @@ class Metrics:
            },
            "taxonomic_relations": {
                "subclass": Score(),
                "subproperty": Score(),
                "instanciation": Score(),
                "synthesis": Score()  # Synthesis score for taxonomic relations axis
            },
            "non_taxonomic_relations": {
                "object_properties": Score(),
                "data_properties": Score(),
                # "domains": Score(),
                # "ranges": Score(),
                "synthesis": Score()  # Synthesis score for non-taxonomic relations axis
            },
            "axioms": {
@@ -96,6 +97,79 @@ class Metrics:
        self.scores["entities"]["synthesis"].compute(y_true_overall, y_pred_overall)

    def compute_taxonomic_relation_scores(self, reference_ontology, generated_ontology):
        relation_methods = {
            "subclass": ("get_subclass_relations", Ontology.compare_relations),
            "subproperty": ("get_subproperty_relations", Ontology.compare_relations),
            "instanciation": ("get_instance_relations", Ontology.compare_relations)
        }
        y_true_overall = []
        y_pred_overall = []
        for score_name, (method_name, comparison_function) in relation_methods.items():
            reference_relations = getattr(reference_ontology, method_name)()
            generated_relations = getattr(generated_ontology, method_name)()
            all_relations = list(set(reference_relations + generated_relations))
            y_true = [1 if any([comparison_function(elem, ref_elem) for ref_elem in reference_relations]) else 0 for elem in all_relations]
            y_pred = [1 if any([comparison_function(elem, gen_elem) for gen_elem in generated_relations]) else 0 for elem in all_relations]
            self.scores["taxonomic_relations"][score_name].compute(y_true, y_pred)
            y_true_overall.extend(y_true)
            y_pred_overall.extend(y_pred)
        self.scores["taxonomic_relations"]["synthesis"].compute(y_true_overall, y_pred_overall)
    # def compute_non_taxonomic_relation_scores(self, reference_ontology, generated_ontology):
    #     relation_methods = {
    #         "object_properties": ("get_object_property_relations", Ontology.compare_relations),
    #         "data_properties": ("get_data_property_relations", Ontology.compare_relations)
    #     }
    #     y_true_overall = []
    #     y_pred_overall = []
    #     for score_name, (method_name, comparison_function) in relation_methods.items():
    #         reference_relations = getattr(reference_ontology, method_name)()
    #         generated_relations = getattr(generated_ontology, method_name)()
    #         all_relations = list(set(reference_relations + generated_relations))
    #         y_true = [1 if rel in reference_relations else 0 for rel in all_relations]
    #         y_pred = [1 if rel in generated_relations else 0 for rel in all_relations]
    #         self.scores["non_taxonomic_relations"][score_name].compute(y_true, y_pred)
    #         y_true_overall.extend(y_true)
    #         y_pred_overall.extend(y_pred)
    #     self.scores["non_taxonomic_relations"]["synthesis"].compute(y_true_overall, y_pred_overall)
    # def compute_axiom_scores(self, reference_ontology, generated_ontology):
    #     axiom_methods = {
    #         "restriction_axioms": ("get_restriction_axioms", Ontology.compare_axioms)
    #     }
    #     y_true_overall = []
    #     y_pred_overall = []
    #     for score_name, (method_name, comparison_function) in axiom_methods.items():
    #         reference_axioms = getattr(reference_ontology, method_name)()
    #         generated_axioms = getattr(generated_ontology, method_name)()
    #         all_axioms = list(set(reference_axioms + generated_axioms))
    #         y_true = [1 if ax in reference_axioms else 0 for ax in all_axioms]
    #         y_pred = [1 if ax in generated_axioms else 0 for ax in all_axioms]
    #         self.scores["axioms"][score_name].compute(y_true, y_pred)
    #         y_true_overall.extend(y_true)
    #         y_pred_overall.extend(y_pred)
    #     self.scores["axioms"]["synthesis"].compute(y_true_overall, y_pred_overall)
    #--------------------------------------------------------------------------
    # Printing Method(s)
@@ -105,8 +179,29 @@ class Metrics:
        """
        Prints the scores (precision, recall, f1) for each ontology element category.
        """
        entity_scores = self.scores["entities"]
        for element, score in entity_scores.items():
            print(f"Metrics for {element.capitalize()} (Entity axis):")
            print(score)
            print("----------------------------")
        taxonomic_relation_scores = self.scores["taxonomic_relations"]
        for element, score in taxonomic_relation_scores.items():
            print(f"Metrics for {element.capitalize()} (Taxonomic Relation axis):")
            print(score)
            print("----------------------------")

        # nontaxonomic_relation_scores = self.scores["non_taxonomic_relations"]
        # for element, score in nontaxonomic_relation_scores.items():
        #     print(f"Metrics for {element.capitalize()} (Non-Taxonomic Relation axis):")
        #     print(score)
        #     print("----------------------------")

        # axiom_scores = self.scores["axioms"]
        # for element, score in axiom_scores.items():
        #     print(f"Metrics for {element.capitalize()} (Axiom axis):")
        #     print(score)
        #     print("----------------------------")
@@ -26,10 +26,24 @@ class TestMetrics(unittest.TestCase):
        self.onto2 = Ontology(self.ontology2_path)
        self.metrics = Metrics()

    def test_computes_entity_scores(self):
        self.metrics.compute_entity_scores(self.onto1, self.onto2)
        self.verify_scores(self.metrics.scores["entities"])

    def test_computes_taxonomic_relation_scores(self):
        self.metrics.compute_taxonomic_relation_scores(self.onto1, self.onto2)
        self.verify_scores(self.metrics.scores["taxonomic_relations"])

    # def test_computes_non_taxonomic_relation_scores(self):
    #     self.metrics.compute_non_taxonomic_relation_scores(self.onto1, self.onto2)
    #     self.verify_scores(self.metrics.scores["non_taxonomic_relations"])

    # def test_computes_axiom_scores(self):
    #     self.metrics.compute_axiom_scores(self.onto1, self.onto2)
    #     self.verify_scores(self.metrics.scores["axioms"])

    def verify_scores(self, score_category):
        for element, score in score_category.items():
            if score.total_elements == 0:
                self.assertIsNone(score.precision, f"Precision for {element} should be None when no elements are present")
                self.assertIsNone(score.recall, f"Recall for {element} should be None when no elements are present")
@@ -39,9 +53,11 @@ class TestMetrics(unittest.TestCase):
                self.assertTrue(0 <= score.recall <= 1, f"Invalid recall for {element}")
                self.assertTrue(0 <= score.f1 <= 1, f"Invalid F1 score for {element}")

    def test_print_scores(self):
        self.metrics.compute_entity_scores(self.onto1, self.onto2)
        self.metrics.compute_taxonomic_relation_scores(self.onto1, self.onto2)
        # self.metrics.compute_non_taxonomic_relation_scores(self.onto1, self.onto2)
        # self.metrics.compute_axiom_scores(self.onto1, self.onto2)
        print()
        self.metrics.print_scores()
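The updated tests use the standard unittest runner and can be executed with its discovery mode, for example from the test directory:

python -m unittest discover -v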