Commit 1530e185 (Tetras MARS / ontoScorer)
Authored 1 year ago by Aurélien Lamercerie

Update module Metrics to upgrade some metrics

Parent: ce88ae14
No related branches, tags, or merge requests.

Showing 2 changed files, with 117 additions and 6 deletions:

  ontoScorer/metrics.py    +97, -2
  tests/test_metrics.py    +20, -4

ontoScorer/metrics.py (+97, -2)
@@ -36,14 +36,15 @@ class Metrics:
             },
             "taxonomic_relations": {
                 "subclass": Score(),
+                "subproperty": Score(),
                 "instanciation": Score(),
                 "synthesis": Score()  # Synthesis score for taxonomic relations axis
             },
             "non_taxonomic_relations": {
                 "object_properties": Score(),
                 "data_properties": Score(),
-                "domains": Score(),
-                "ranges": Score(),
+                # "domains": Score(),
+                # "ranges": Score(),
                 "synthesis": Score()  # Synthesis score for non-taxonomic relations axis
             },
             "axioms": {
@@ -96,6 +97,79 @@ class Metrics:
         self.scores["entities"]["synthesis"].compute(y_true_overall, y_pred_overall)
 
+    def compute_taxonomic_relation_scores(self, reference_ontology, generated_ontology):
+        relation_methods = {
+            "subclass": ("get_subclass_relations", Ontology.compare_relations),
+            "subproperty": ("get_subproperty_relations", Ontology.compare_relations),
+            "instanciation": ("get_instance_relations", Ontology.compare_relations)
+        }
+
+        y_true_overall = []
+        y_pred_overall = []
+
+        for score_name, (method_name, comparison_function) in relation_methods.items():
+            reference_relations = getattr(reference_ontology, method_name)()
+            generated_relations = getattr(generated_ontology, method_name)()
+
+            all_relations = list(set(reference_relations + generated_relations))
+
+            y_true = [1 if any([comparison_function(elem, ref_elem) for ref_elem in reference_relations]) else 0 for elem in all_relations]
+            y_pred = [1 if any([comparison_function(elem, gen_elem) for gen_elem in generated_relations]) else 0 for elem in all_relations]
+
+            self.scores["taxonomic_relations"][score_name].compute(y_true, y_pred)
+
+            y_true_overall.extend(y_true)
+            y_pred_overall.extend(y_pred)
+
+        self.scores["taxonomic_relations"]["synthesis"].compute(y_true_overall, y_pred_overall)
+
+    # def compute_non_taxonomic_relation_scores(self, reference_ontology, generated_ontology):
+    #     relation_methods = {
+    #         "object_properties": ("get_object_property_relations", Ontology.compare_relations),
+    #         "data_properties": ("get_data_property_relations", Ontology.compare_relations)
+    #     }
+    #     y_true_overall = []
+    #     y_pred_overall = []
+    #     for score_name, (method_name, comparison_function) in relation_methods.items():
+    #         reference_relations = getattr(reference_ontology, method_name)()
+    #         generated_relations = getattr(generated_ontology, method_name)()
+    #         all_relations = list(set(reference_relations + generated_relations))
+    #         y_true = [1 if rel in reference_relations else 0 for rel in all_relations]
+    #         y_pred = [1 if rel in generated_relations else 0 for rel in all_relations]
+    #         self.scores["non_taxonomic_relations"][score_name].compute(y_true, y_pred)
+    #         y_true_overall.extend(y_true)
+    #         y_pred_overall.extend(y_pred)
+    #     self.scores["non_taxonomic_relations"]["synthesis"].compute(y_true_overall, y_pred_overall)
+
+    # def compute_axiom_scores(self, reference_ontology, generated_ontology):
+    #     axiom_methods = {
+    #         "restriction_axioms": ("get_restriction_axioms", Ontology.compare_axioms)
+    #     }
+    #     y_true_overall = []
+    #     y_pred_overall = []
+    #     for score_name, (method_name, comparison_function) in axiom_methods.items():
+    #         reference_axioms = getattr(reference_ontology, method_name)()
+    #         generated_axioms = getattr(generated_ontology, method_name)()
+    #         all_axioms = list(set(reference_axioms + generated_axioms))
+    #         y_true = [1 if ax in reference_axioms else 0 for ax in all_axioms]
+    #         y_pred = [1 if ax in generated_axioms else 0 for ax in all_axioms]
+    #         self.scores["axioms"][score_name].compute(y_true, y_pred)
+    #         y_true_overall.extend(y_true)
+    #         y_pred_overall.extend(y_pred)
+    #     self.scores["axioms"]["synthesis"].compute(y_true_overall, y_pred_overall)
+
     #--------------------------------------------------------------------------
     # Printing Method(s)
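
To make the encoding used by compute_taxonomic_relation_scores concrete: for each relation type, the method scores the union of reference and generated relations, marking each element's membership on both sides. Below is a minimal sketch of that construction, with toy relation tuples and a plain equality check standing in for Ontology.get_*_relations() and Ontology.compare_relations (those names are taken from the diff; the data is invented for illustration).

    # Illustrative data only; in the module these come from the two ontologies.
    reference_relations = [("Car", "Vehicle"), ("Bike", "Vehicle")]
    generated_relations = [("Car", "Vehicle"), ("Boat", "Vehicle")]

    def compare(a, b):
        # Stand-in for Ontology.compare_relations in this sketch.
        return a == b

    all_relations = list(set(reference_relations + generated_relations))
    y_true = [1 if any(compare(elem, r) for r in reference_relations) else 0 for elem in all_relations]
    y_pred = [1 if any(compare(elem, g) for g in generated_relations) else 0 for elem in all_relations]

    # ("Car", "Vehicle") is in both ontologies (true positive), ("Bike", "Vehicle")
    # only in the reference (false negative), ("Boat", "Vehicle") only in the
    # generated ontology (false positive), so precision = recall = f1 = 0.5.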

@@ -105,8 +179,29 @@ class Metrics:
"""
"""
Prints the scores (precision, recall, f1) for each ontology element category.
Prints the scores (precision, recall, f1) for each ontology element category.
"""
"""
entity_scores
=
self
.
scores
[
"
entities
"
]
entity_scores
=
self
.
scores
[
"
entities
"
]
for
element
,
score
in
entity_scores
.
items
():
for
element
,
score
in
entity_scores
.
items
():
print
(
f
"
Metrics for
{
element
.
capitalize
()
}
(Entity axis):
"
)
print
(
f
"
Metrics for
{
element
.
capitalize
()
}
(Entity axis):
"
)
print
(
score
)
print
(
score
)
print
(
"
----------------------------
"
)
print
(
"
----------------------------
"
)
taxonomic_relation_scores
=
self
.
scores
[
"
taxonomic_relations
"
]
for
element
,
score
in
taxonomic_relation_scores
.
items
():
print
(
f
"
Metrics for
{
element
.
capitalize
()
}
(Taxonomic Relation axis):
"
)
print
(
score
)
print
(
"
----------------------------
"
)
# nontaxonomic_relation_scores = self.scores["non_taxonomic_relations"]
# for element, score in nontaxonomic_relation_scores.items():
# print(f"Metrics for {element.capitalize()} (Non-Taxonomic Relation axis):")
# print(score)
# print("----------------------------")
# axiom_scores = self.scores["axioms"]
# for element, score in axiom_scores.items():
# print(f"Metrics for {element.capitalize()} (Axiom axis):")
# print(score)
# print("----------------------------")

tests/test_metrics.py (+20, -4)
@@ -26,10 +26,24 @@ class TestMetrics(unittest.TestCase):
         self.onto2 = Ontology(self.ontology2_path)
         self.metrics = Metrics()
 
-    def test_computes_scores(self):
+    def test_computes_entity_scores(self):
         self.metrics.compute_entity_scores(self.onto1, self.onto2)
-        for element, score in self.metrics.scores.items():
+        self.verify_scores(self.metrics.scores["entities"])
+
+    def test_computes_taxonomic_relation_scores(self):
+        self.metrics.compute_taxonomic_relation_scores(self.onto1, self.onto2)
+        self.verify_scores(self.metrics.scores["taxonomic_relations"])
+
+    # def test_computes_non_taxonomic_relation_scores(self):
+    #     self.metrics.compute_non_taxonomic_relation_scores(self.onto1, self.onto2)
+    #     self.verify_scores(self.metrics.scores["non_taxonomic_relations"])
+
+    # def test_computes_axiom_scores(self):
+    #     self.metrics.compute_axiom_scores(self.onto1, self.onto2)
+    #     self.verify_scores(self.metrics.scores["axioms"])
+
+    def verify_scores(self, score_category):
+        for element, score in score_category.items():
             if score.total_elements == 0:
                 self.assertIsNone(score.precision, f"Precision for {element} should be None when no elements are present")
                 self.assertIsNone(score.recall, f"Recall for {element} should be None when no elements are present")
@@ -39,9 +53,11 @@ class TestMetrics(unittest.TestCase):
             self.assertTrue(0 <= score.recall <= 1, f"Invalid recall for {element}")
             self.assertTrue(0 <= score.f1 <= 1, f"Invalid F1 score for {element}")
 
     def test_print_scores(self):
         self.metrics.compute_entity_scores(self.onto1, self.onto2)
+        self.metrics.compute_taxonomic_relation_scores(self.onto1, self.onto2)
+        # self.metrics.compute_non_taxonomic_relation_scores(self.onto1, self.onto2)
+        # self.metrics.compute_axiom_scores(self.onto1, self.onto2)
         print()
         self.metrics.print_scores()
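
The test module is a plain unittest.TestCase suite, so it can be run with standard unittest discovery. A sketch, assuming the tests/ directory sits at the repository root as the paths above suggest and is importable from there:

    # Discover and run the updated metrics tests with the standard library runner.
    import unittest

    suite = unittest.defaultTestLoader.discover("tests", pattern="test_metrics.py")
    unittest.TextTestRunner(verbosity=2).run(suite)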

Aurélien Lamercerie (@alam) mentioned this commit in issue #2 (closed) · 1 year ago