ontoScorer (Tetras MARS), commit 067f000f

Add metric_score (metrics submodule)

Authored 1 year ago by Aurélien Lamercerie
Parent: ba217b0f
No related tags or merge requests found.
Changes: 3 changed files, 134 additions and 48 deletions

  ontoScorer/metric_score.py   +68  −0
  ontoScorer/metrics.py        +50  −39
  tests/test_metrics.py        +16  −9
ontoScorer/metric_score.py (new file, mode 100644, +68 −0)
# -*-coding:Utf-8 -*
"""
metric_score: Ontology Scoring Helper Module
------------------------------------------------------------------------------
This module defines the Score class, which helps to encapsulate and compute
individual scoring metrics such as precision, recall, and F1 score. It also
keeps track of the number of total elements and matched elements.
"""

from sklearn.metrics import precision_score, recall_score, f1_score


class Score:
    """
    Class to encapsulate individual scoring metrics.
    """

    #--------------------------------------------------------------------------
    # Constructor(s)
    #--------------------------------------------------------------------------

    def __init__(self):
        """
        Initialize an empty score object with default metrics.
        """
        self.precision = None
        self.recall = None
        self.f1 = None
        self.total_elements = 0
        self.matched_elements = 0

    #--------------------------------------------------------------------------
    # Computing Method(s)
    #--------------------------------------------------------------------------

    def compute(self, y_true, y_pred):
        """
        Compute and update the precision, recall, and F1 score based on true
        and predicted labels.

        Args:
            y_true (list[int]): List of ground truth (correct) labels.
            y_pred (list[int]): List of predicted labels.

        Returns:
            None
        """
        self.precision = precision_score(y_true, y_pred)
        self.recall = recall_score(y_true, y_pred)
        self.f1 = f1_score(y_true, y_pred)
        self.total_elements = len(y_true)
        self.matched_elements = sum([1 for true, pred in zip(y_true, y_pred) if true == pred])

    #--------------------------------------------------------------------------
    # Printing Method(s)
    #--------------------------------------------------------------------------

    def _format_metric(self, metric_value):
        return f"{metric_value:.4f}" if metric_value is not None else "NA"

    def __str__(self):
        metrics = [
            f"\tPrecision: {self._format_metric(self.precision)}",
            f"\tRecall: {self._format_metric(self.recall)}",
            f"\tF1 Score: {self._format_metric(self.f1)}",
            f"\tTotal Elements: {self.total_elements}",
            f"\tMatched Elements: {self.matched_elements}"
        ]
        return "\n".join(metrics)
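
As a quick check of the new helper, here is a minimal usage sketch; the label lists are illustrative, and scikit-learn must be available, as the module itself already assumes:

from ontoScorer.metric_score import Score

score = Score()
score.compute([1, 0, 1, 1], [1, 1, 1, 0])   # illustrative y_true, y_pred
print(score)
# Expected output (tab-indented by __str__):
#     Precision: 0.6667
#     Recall: 0.6667
#     F1 Score: 0.6667
#     Total Elements: 4
#     Matched Elements: 2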
ontoScorer/metrics.py (+50 −39)
#!/usr/bin/python3.10
# -*-coding:Utf-8 -*
- #==============================================================================
- # ontoScorer: Ontology Scoring Module
- #------------------------------------------------------------------------------
- # This module provides metrics to evaluate and compare different ontologies.
- # It calculates precision, recall, and F1 score for various ontology elements
- # such as classes, object properties, data properties, restrictions, individuals,
- # and annotations. It also computes an overall score taking into account all
- # the ontology elements. The comparison is performed between a reference ontology
- # and a generated ontology, allowing users to evaluate how well the generated
- # ontology matches the reference.
- #==============================================================================
- from sklearn.metrics import precision_score, recall_score, f1_score
+ """
+ ontoScorer: Ontology Scoring Module
+ ------------------------------------------------------------------------------
+ This module provides metrics to evaluate and compare different ontologies. It
+ calculates precision, recall, and F1 score for various ontology elements such
+ as classes, object properties, data properties, restrictions, individuals, and
+ annotations. The comparison is performed between a reference ontology and a
+ generated ontology, allowing users to evaluate how well the generated ontology
+ matches the reference.
+ """
+ from ontoScorer.ontology import Ontology
+ from ontoScorer.metric_score import Score


class Metrics:
    """
    Metrics class provides functionalities to compute scores for ontology
    elements based on a reference and generated ontology.
    """

    #--------------------------------------------------------------------------
    # Constructor(s)
    #--------------------------------------------------------------------------

    def __init__(self):
        """
        Initializes score categories for various ontology elements.
        """
        self.scores = {
-             "class": {"precision": 0, "recall": 0, "f1": 0},
-             "object_property": {"precision": 0, "recall": 0, "f1": 0},
-             "data_property": {"precision": 0, "recall": 0, "f1": 0},
-             "restriction": {"precision": 0, "recall": 0, "f1": 0},
-             "individual": {"precision": 0, "recall": 0, "f1": 0},
-             "annotation": {"precision": 0, "recall": 0, "f1": 0},
-             "overall": {"precision": 0, "recall": 0, "f1": 0}
+             "class": Score(),
+             "object_property": Score(),
+             "data_property": Score(),
+             "restriction": Score(),
+             "individual": Score(),
+             "annotation": Score(),
+             "overall": Score()
        }
...
...
@@ -40,9 +45,17 @@ class Metrics:
    #--------------------------------------------------------------------------
    def calculate(self, reference_ontology, generated_ontology):
+         """
+         Compute scores (precision, recall, f1) for each ontology element category.
+         Args:
+         - reference_ontology: Ontology object representing the reference ontology.
+         - generated_ontology: Ontology object representing the generated ontology.
+         """
        methods = [
            ("class", "get_classes"),
            ("object_property", "get_object_properties"),
+             # Additional methods can be uncommented as needed
            #("data_property", "get_data_properties"),
            #("restriction", "get_restrictions"),
            ("individual", "get_individuals"),
...
...
@@ -60,16 +73,13 @@ class Metrics:
            y_true = [1 if elem in reference_elements else 0 for elem in all_elements]
            y_pred = [1 if elem in generated_elements else 0 for elem in all_elements]
-             self.scores[score_name]["precision"] = precision_score(y_true, y_pred)
-             self.scores[score_name]["recall"] = recall_score(y_true, y_pred)
-             self.scores[score_name]["f1"] = f1_score(y_true, y_pred)
+             self.scores[score_name].compute(y_true, y_pred)
            y_true_overall.extend(y_true)
            y_pred_overall.extend(y_pred)

-         self.scores["overall"]["precision"] = precision_score(y_true_overall, y_pred_overall)
-         self.scores["overall"]["recall"] = recall_score(y_true_overall, y_pred_overall)
-         self.scores["overall"]["f1"] = f1_score(y_true_overall, y_pred_overall)
+         self.scores["overall"].compute(y_true_overall, y_pred_overall)
#--------------------------------------------------------------------------
...
...
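
The hunk above keeps the same membership-vector construction and simply delegates the metric computation to Score.compute. A small worked sketch of that construction, with illustrative element names and assuming all_elements is the union of both sets:

reference_elements = {"Person", "Organization", "Event"}   # illustrative
generated_elements = {"Person", "Event", "Place"}          # illustrative
all_elements = sorted(reference_elements | generated_elements)

y_true = [1 if elem in reference_elements else 0 for elem in all_elements]
y_pred = [1 if elem in generated_elements else 0 for elem in all_elements]
# all_elements -> ['Event', 'Organization', 'Person', 'Place']
# y_true       -> [1, 1, 1, 0]
# y_pred       -> [1, 0, 1, 1]
# precision = recall = 2/3: two of the three generated elements match the reference,
# and two of the three reference elements are recovered.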
@@ -77,9 +87,10 @@ class Metrics:
    #--------------------------------------------------------------------------
    def print_scores(self):
-         for element, scores in self.scores.items():
+         """
+         Prints the scores (precision, recall, f1) for each ontology element category.
+         """
+         for element, score in self.scores.items():
            print(f"Metrics for {element.capitalize()}:")
-             print(f"\tPrecision: {scores['precision']:.4f}")
-             print(f"\tRecall: {scores['recall']:.4f}")
-             print(f"\tF1 Score: {scores['f1']:.4f}")
+             print(score)
            print("----------------------------")
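
Taken together, a hedged end-to-end sketch of how the reworked class is meant to be driven; the ontology file paths are placeholders, and Ontology comes from ontoScorer.ontology as imported above:

from ontoScorer.ontology import Ontology
from ontoScorer.metrics import Metrics

reference = Ontology("path/to/reference_ontology.owl")   # placeholder path
generated = Ontology("path/to/generated_ontology.owl")   # placeholder path

metrics = Metrics()
metrics.calculate(reference, generated)   # each category now goes through Score.compute
metrics.print_scores()                    # prints one Score block per element category

# After this commit, categories are Score objects rather than plain dicts:
class_score = metrics.scores["class"]
print(class_score.precision, class_score.recall, class_score.matched_elements)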
tests/test_metrics.py (+16 −9)
#!/usr/bin/python3.10
# -*-coding:Utf-8 -*
- #==============================================================================
- # test_metrics: Metrics Testing Module
- #------------------------------------------------------------------------------
- # Contains tests for verifying functionality of the Metrics class.
- #==============================================================================
+ """
+ test_metrics: Metrics Testing Module
+ ------------------------------------------------------------------------------
+ Contains tests for verifying functionality of the Metrics class.
+ """

import unittest
import os
...
...
@@ -26,16 +26,23 @@ class TestMetrics(unittest.TestCase):
        self.onto2 = Ontology(self.ontology2_path)
        self.metrics = Metrics()

    def test_calculate_scores(self):
        self.metrics.calculate(self.onto1, self.onto2)
-         for key in self.metrics.scores:
-             self.assertTrue(0 <= self.metrics.scores[key]["precision"] <= 1)
-             self.assertTrue(0 <= self.metrics.scores[key]["recall"] <= 1)
-             self.assertTrue(0 <= self.metrics.scores[key]["f1"] <= 1)
+         for element, score in self.metrics.scores.items():
+             if score.total_elements == 0:
+                 self.assertIsNone(score.precision, f"Precision for {element} should be None when no elements are present")
+                 self.assertIsNone(score.recall, f"Recall for {element} should be None when no elements are present")
+                 self.assertIsNone(score.f1, f"F1 score for {element} should be None when no elements are present")
+             else:
+                 self.assertTrue(0 <= score.precision <= 1, f"Invalid precision for {element}")
+                 self.assertTrue(0 <= score.recall <= 1, f"Invalid recall for {element}")
+                 self.assertTrue(0 <= score.f1 <= 1, f"Invalid F1 score for {element}")

    def test_print_scores(self):
        self.metrics.calculate(self.onto1, self.onto2)
        print()
        self.metrics.print_scores()
...
...
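
The updated assertions distinguish categories that were never scored (their precision stays None) from computed ones. A minimal sketch of the invariant they rely on, using only the Score defaults and compute shown earlier; the labels are illustrative:

from ontoScorer.metric_score import Score

untouched = Score()          # e.g. a category whose getter is still commented out in calculate()
assert untouched.total_elements == 0
assert untouched.precision is None and untouched.recall is None and untouched.f1 is None

computed = Score()
computed.compute([1, 0, 1], [1, 0, 0])   # illustrative labels
assert 0 <= computed.precision <= 1 and 0 <= computed.recall <= 1 and 0 <= computed.f1 <= 1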
Aurélien Lamercerie @alam mentioned in issue #2 (closed) · 1 year ago