Commit 067f000f authored by Aurélien Lamercerie

Add metric_score (metrics submodule)

parent ba217b0f
# -*-coding:Utf-8 -*
"""
metric_score: Ontology Scoring Helper Module
------------------------------------------------------------------------------
This module defines the Score class, which helps to encapsulate and compute
individual scoring metrics such as precision, recall, and F1 score. It also
keeps track of the total number of elements and the number of matched elements.
"""
from sklearn.metrics import precision_score, recall_score, f1_score


class Score:
    """Class to encapsulate individual scoring metrics."""

    #--------------------------------------------------------------------------
    # Constructor(s)
    #--------------------------------------------------------------------------

    def __init__(self):
        """Initialize an empty score object with default metrics."""
        self.precision = None
        self.recall = None
        self.f1 = None
        self.total_elements = 0
        self.matched_elements = 0

    #--------------------------------------------------------------------------
    # Computing Method(s)
    #--------------------------------------------------------------------------

    def compute(self, y_true, y_pred):
        """
        Compute and update the precision, recall, and F1 score based on true
        and predicted labels.

        Args:
            y_true (list[int]): List of ground truth (correct) labels.
            y_pred (list[int]): List of predicted labels.

        Returns:
            None
        """
        self.precision = precision_score(y_true, y_pred)
        self.recall = recall_score(y_true, y_pred)
        self.f1 = f1_score(y_true, y_pred)
        self.total_elements = len(y_true)
        self.matched_elements = sum(1 for true, pred in zip(y_true, y_pred) if true == pred)

    #--------------------------------------------------------------------------
    # Printing Method(s)
    #--------------------------------------------------------------------------

    def _format_metric(self, metric_value):
        return f"{metric_value:.4f}" if metric_value is not None else "NA"

    def __str__(self):
        metrics = [
            f"\tPrecision: {self._format_metric(self.precision)}",
            f"\tRecall: {self._format_metric(self.recall)}",
            f"\tF1 Score: {self._format_metric(self.f1)}",
            f"\tTotal Elements: {self.total_elements}",
            f"\tMatched Elements: {self.matched_elements}"
        ]
        return "\n".join(metrics)
#!/usr/bin/python3.10
# -*-coding:Utf-8 -*
#==============================================================================
# ontoScorer: Ontology Scoring Module
#------------------------------------------------------------------------------
# This module provides metrics to evaluate and compare different ontologies.
# It calculates precision, recall, and F1 score for various ontology elements
# such as classes, object properties, data properties, restrictions, individuals,
# and annotations. It also computes an overall score taking into account all
# the ontology elements. The comparison is performed between a reference ontology
# and a generated ontology, allowing users to evaluate how well the generated
# ontology matches the reference.
#==============================================================================
from sklearn.metrics import precision_score, recall_score, f1_score
"""
ontoScorer: Ontology Scoring Module
------------------------------------------------------------------------------
This module provides metrics to evaluate and compare different ontologies. It
calculates precision, recall, and F1 score for various ontology elements such
as classes, object properties, data properties, restrictions, individuals, and
annotations. The comparison is performed between a reference ontology and a
generated ontology, allowing users to evaluate how well the generated ontology
matches the reference.
"""
from ontoScorer.ontology import Ontology
from ontoScorer.metric_score import Score
class Metrics:
    """
    The Metrics class provides functionality to compute scores for ontology
    elements based on a reference and a generated ontology.
    """
    #--------------------------------------------------------------------------
    # Constructor(s)
    #--------------------------------------------------------------------------
    def __init__(self):
        """
        Initializes score categories for various ontology elements.
        """
        self.scores = {
-           "class": {"precision": 0, "recall": 0, "f1": 0},
-           "object_property": {"precision": 0, "recall": 0, "f1": 0},
-           "data_property": {"precision": 0, "recall": 0, "f1": 0},
-           "restriction": {"precision": 0, "recall": 0, "f1": 0},
-           "individual": {"precision": 0, "recall": 0, "f1": 0},
-           "annotation": {"precision": 0, "recall": 0, "f1": 0},
-           "overall": {"precision": 0, "recall": 0, "f1": 0}
+           "class": Score(),
+           "object_property": Score(),
+           "data_property": Score(),
+           "restriction": Score(),
+           "individual": Score(),
+           "annotation": Score(),
+           "overall": Score()
        }
@@ -40,9 +45,17 @@ class Metrics:
    #--------------------------------------------------------------------------
    def calculate(self, reference_ontology, generated_ontology):
        """
        Compute scores (precision, recall, f1) for each ontology element category.

        Args:
        - reference_ontology: Ontology object representing the reference ontology.
        - generated_ontology: Ontology object representing the generated ontology.
        """
        methods = [
            ("class", "get_classes"),
            ("object_property", "get_object_properties"),
            # Additional methods can be uncommented as needed
            #("data_property", "get_data_properties"),
            #("restriction", "get_restrictions"),
            ("individual", "get_individuals"),
@@ -60,16 +73,13 @@
            y_true = [1 if elem in reference_elements else 0 for elem in all_elements]
            y_pred = [1 if elem in generated_elements else 0 for elem in all_elements]
-           self.scores[score_name]["precision"] = precision_score(y_true, y_pred)
-           self.scores[score_name]["recall"] = recall_score(y_true, y_pred)
-           self.scores[score_name]["f1"] = f1_score(y_true, y_pred)
+           self.scores[score_name].compute(y_true, y_pred)
            y_true_overall.extend(y_true)
            y_pred_overall.extend(y_pred)
-       self.scores["overall"]["precision"] = precision_score(y_true_overall, y_pred_overall)
-       self.scores["overall"]["recall"] = recall_score(y_true_overall, y_pred_overall)
-       self.scores["overall"]["f1"] = f1_score(y_true_overall, y_pred_overall)
+       self.scores["overall"].compute(y_true_overall, y_pred_overall)
    #--------------------------------------------------------------------------
@@ -77,9 +87,10 @@
    #--------------------------------------------------------------------------
    def print_scores(self):
-       for element, scores in self.scores.items():
+       """
+       Prints the scores (precision, recall, f1) for each ontology element category.
+       """
+       for element, score in self.scores.items():
            print(f"Metrics for {element.capitalize()}:")
-           print(f"\tPrecision: {scores['precision']:.4f}")
-           print(f"\tRecall: {scores['recall']:.4f}")
-           print(f"\tF1 Score: {scores['f1']:.4f}")
+           print(score)
            print("----------------------------")
#!/usr/bin/python3.10
# -*-coding:Utf-8 -*
#==============================================================================
# test_metrics: Metrics Testing Module
#------------------------------------------------------------------------------
# Contains tests for verifying functionality of the Metrics class.
#==============================================================================
"""
test_metrics: Metrics Testing Module
------------------------------------------------------------------------------
Contains tests for verifying functionality of the Metrics class.
"""
import unittest
import os
@@ -26,16 +26,23 @@ class TestMetrics(unittest.TestCase):
        self.onto2 = Ontology(self.ontology2_path)
        self.metrics = Metrics()
    def test_calculate_scores(self):
        self.metrics.calculate(self.onto1, self.onto2)
-       for key in self.metrics.scores:
-           self.assertTrue(0 <= self.metrics.scores[key]["precision"] <= 1)
-           self.assertTrue(0 <= self.metrics.scores[key]["recall"] <= 1)
-           self.assertTrue(0 <= self.metrics.scores[key]["f1"] <= 1)
+       for element, score in self.metrics.scores.items():
+           if score.total_elements == 0:
+               self.assertIsNone(score.precision, f"Precision for {element} should be None when no elements are present")
+               self.assertIsNone(score.recall, f"Recall for {element} should be None when no elements are present")
+               self.assertIsNone(score.f1, f"F1 score for {element} should be None when no elements are present")
+           else:
+               self.assertTrue(0 <= score.precision <= 1, f"Invalid precision for {element}")
+               self.assertTrue(0 <= score.recall <= 1, f"Invalid recall for {element}")
+               self.assertTrue(0 <= score.f1 <= 1, f"Invalid F1 score for {element}")
    def test_print_scores(self):
        self.metrics.calculate(self.onto1, self.onto2)
        print()
        self.metrics.print_scores()