diff --git a/ontoScorer/__pycache__/__init__.cpython-311.pyc b/ontoScorer/__pycache__/__init__.cpython-311.pyc
deleted file mode 100644
index 15c9d4b5e58309a39ad2614703ebff6ad3759740..0000000000000000000000000000000000000000
Binary files a/ontoScorer/__pycache__/__init__.cpython-311.pyc and /dev/null differ
diff --git a/ontoScorer/__pycache__/metrics.cpython-311.pyc b/ontoScorer/__pycache__/metrics.cpython-311.pyc
deleted file mode 100644
index 261155446af384b3fa8a8d952545d43dda6c6dec..0000000000000000000000000000000000000000
Binary files a/ontoScorer/__pycache__/metrics.cpython-311.pyc and /dev/null differ
diff --git a/ontoScorer/__pycache__/ontology.cpython-311.pyc b/ontoScorer/__pycache__/ontology.cpython-311.pyc
deleted file mode 100644
index b71e1d0f3cad1e8ed6574796cb86610805e54287..0000000000000000000000000000000000000000
Binary files a/ontoScorer/__pycache__/ontology.cpython-311.pyc and /dev/null differ
diff --git a/ontoScorer/__pycache__/report.cpython-311.pyc b/ontoScorer/__pycache__/report.cpython-311.pyc
deleted file mode 100644
index 3ac64f032d3c2903f9d0664b49ed1fe36c6d71ac..0000000000000000000000000000000000000000
Binary files a/ontoScorer/__pycache__/report.cpython-311.pyc and /dev/null differ
diff --git a/ontoScorer/__pycache__/scorer.cpython-311.pyc b/ontoScorer/__pycache__/scorer.cpython-311.pyc
deleted file mode 100644
index 7696fe257f510bead13598ef2ee71e027b9b4612..0000000000000000000000000000000000000000
Binary files a/ontoScorer/__pycache__/scorer.cpython-311.pyc and /dev/null differ
diff --git a/ontoScorer/metrics.py b/ontoScorer/metrics.py
index 73fd413c38cce339f00800388902305f355985e0..feedd3327057aa10299e37b6698be1c4cf9f1f2f 100644
--- a/ontoScorer/metrics.py
+++ b/ontoScorer/metrics.py
@@ -2,28 +2,84 @@
 # -*-coding:Utf-8 -*
 
 #==============================================================================
-# ontoScorer: [brief description of the module]
+# ontoScorer: Ontology Scoring Module
 #------------------------------------------------------------------------------
-# Detailed module description
+# This module provides metrics for evaluating and comparing ontologies.
+# It computes precision, recall, and F1 score for each kind of ontology element
+# (classes, object properties, data properties, restrictions, individuals,
+# and annotations), as well as an overall score aggregated over all element
+# types. The comparison is performed between a reference ontology and a
+# generated ontology, so users can evaluate how closely the generated
+# ontology matches the reference.
 #==============================================================================
 
+
 from sklearn.metrics import precision_score, recall_score, f1_score
 from ontoScorer.ontology import Ontology
 
 class Metrics:
+    
+    #--------------------------------------------------------------------------
+    # Constructor(s)
+    #--------------------------------------------------------------------------
+    
     def __init__(self):
-        self.precision = 0
-        self.recall = 0
-        self.f1 = 0
+        self.scores = {
+            "class": {"precision": 0, "recall": 0, "f1": 0},
+            "object_property": {"precision": 0, "recall": 0, "f1": 0},
+            "data_property": {"precision": 0, "recall": 0, "f1": 0},
+            "restriction": {"precision": 0, "recall": 0, "f1": 0},
+            "individual": {"precision": 0, "recall": 0, "f1": 0},
+            "annotation": {"precision": 0, "recall": 0, "f1": 0},
+            "overall": {"precision": 0, "recall": 0, "f1": 0}
+        }
+
+
+    #--------------------------------------------------------------------------
+    # Computing Method(s)
+    #--------------------------------------------------------------------------
 
     def calculate(self, reference_ontology, generated_ontology):
-        reference_classes = set([cls.name() for cls in reference_ontology.get_classes()])
-        generated_classes = set([cls.name() for cls in generated_ontology.get_classes()])
+        methods = [
+            ("class", "get_classes"),
+            ("object_property", "get_object_properties"),
+            ("data_property", "get_data_properties"),
+            ("restriction", "get_restrictions"),
+            ("individual", "get_individuals"),
+            #("annotation", "get_annotations")
+        ]
+
+        y_true_overall = []
+        y_pred_overall = []
+
+        for score_name, method_name in methods:
+            reference_elements = {elem.name() for elem in getattr(reference_ontology, method_name)()}
+            generated_elements = {elem.name() for elem in getattr(generated_ontology, method_name)()}
+
+            all_elements = reference_elements.union(generated_elements)
+            y_true = [1 if elem in reference_elements else 0 for elem in all_elements]
+            y_pred = [1 if elem in generated_elements else 0 for elem in all_elements]
+
+            self.scores[score_name]["precision"] = precision_score(y_true, y_pred, zero_division=0)
+            self.scores[score_name]["recall"] = recall_score(y_true, y_pred, zero_division=0)
+            self.scores[score_name]["f1"] = f1_score(y_true, y_pred, zero_division=0)
 
-        all_classes = reference_classes.union(generated_classes)
-        y_true = [1 if cls in reference_classes else 0 for cls in all_classes]
-        y_pred = [1 if cls in generated_classes else 0 for cls in all_classes]
+            y_true_overall.extend(y_true)
+            y_pred_overall.extend(y_pred)
 
-        self.precision = precision_score(y_true, y_pred)
-        self.recall = recall_score(y_true, y_pred)
-        self.f1 = f1_score(y_true, y_pred)
\ No newline at end of file
+        self.scores["overall"]["precision"] = precision_score(y_true_overall, y_pred_overall, zero_division=0)
+        self.scores["overall"]["recall"] = recall_score(y_true_overall, y_pred_overall, zero_division=0)
+        self.scores["overall"]["f1"] = f1_score(y_true_overall, y_pred_overall, zero_division=0)
+        
+        
+    #--------------------------------------------------------------------------
+    # Printing Method(s)
+    #--------------------------------------------------------------------------
+    
+    def print_scores(self):
+        for element, scores in self.scores.items():
+            print(f"Metrics for {element.replace('_', ' ').title()}:")
+            print(f"\tPrecision: {scores['precision']:.4f}")
+            print(f"\tRecall: {scores['recall']:.4f}")
+            print(f"\tF1 Score: {scores['f1']:.4f}")
+            print("----------------------------")
\ No newline at end of file
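
Note on the metric computation above: the following is a minimal sketch (not part of the diff) of the set-based precision/recall/F1 calculation that Metrics.calculate() applies to each element category; the element names are invented for illustration.

# Toy example mirroring calculate(): score a set of generated class names
# against a reference set over the union of both.
from sklearn.metrics import precision_score, recall_score, f1_score

reference_elements = {"Planet", "Star", "Moon"}    # hypothetical reference class names
generated_elements = {"Planet", "Star", "Comet"}   # hypothetical generated class names

all_elements = reference_elements | generated_elements
y_true = [1 if e in reference_elements else 0 for e in all_elements]
y_pred = [1 if e in generated_elements else 0 for e in all_elements]

# Two of the three generated names are correct and two of the three reference
# names are recovered, so precision = recall = F1 = 2/3 here.
print(precision_score(y_true, y_pred, zero_division=0))
print(recall_score(y_true, y_pred, zero_division=0))
print(f1_score(y_true, y_pred, zero_division=0))
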
diff --git a/ontoScorer/ontology.py b/ontoScorer/ontology.py
index 90d432950bd7f96755e8036d5c8a71e86cd3b717..cb2d501cd78090b846f8c42725596f5994bd4388 100644
--- a/ontoScorer/ontology.py
+++ b/ontoScorer/ontology.py
@@ -114,17 +114,21 @@ class Ontology:
         return self._get_elements_of_type(OWL.Restriction)
 
     def get_individuals(self) -> list:
-        """Extract all individuals from the ontology."""
-        all_types = set(self.graph.subjects(RDF.type))
-        non_individuals = {element.reference for element in 
-                           self.get_classes() + 
-                           self.get_object_properties() + 
-                           self.get_data_properties()}
+        """Extract all individuals from the ontology, including elements explicitly typed as owl:Individual,
+        owl:NamedIndividual, or as one of the ontology's classes."""
         
-        individuals = all_types - non_individuals
-        
-        return [NamedElement(i, self.graph) if isinstance(i, URIRef) else BlankElement(i, self.graph) 
-                for i in individuals]
+        # Getting elements of type owl:NamedIndividual and owl:Individual
+        individuals = self._get_elements_of_type(OWL.NamedIndividual)
+        individuals += self._get_elements_of_type(URIRef("http://www.w3.org/2002/07/owl#Individual"))
+    
+        # Getting all elements typed as one of the classes
+        all_classes = {cls.reference for cls in self.get_classes()}
+        for cls in all_classes:
+            individuals += [NamedElement(s, self.graph) if isinstance(s, URIRef) else BlankElement(s, self.graph)
+                            for s in self.graph.subjects(RDF.type, cls)]
+    
+        return list(set(individuals))  # Ensuring uniqueness
+
 
     def get_annotations(self):
         """Extract all annotation comments from the ontology."""
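
For reference, the revised get_individuals() above now picks up both explicitly declared individuals and subjects typed by one of the ontology's classes. The snippet below is a rough, self-contained sketch of those two lookup patterns using rdflib directly, with invented URIs and without the project's NamedElement/BlankElement wrappers.

# Tiny graph with one declared individual and one individual typed only by a class.
from rdflib import Graph, Namespace
from rdflib.namespace import RDF, OWL

EX = Namespace("http://example.org/")   # hypothetical namespace
g = Graph()
g.add((EX.PlanetarySystem, RDF.type, OWL.Class))
g.add((EX.Galaxy, RDF.type, OWL.Class))
g.add((EX.SolarSystem, RDF.type, OWL.NamedIndividual))  # found by the owl:NamedIndividual pass
g.add((EX.MilkyWay, RDF.type, EX.Galaxy))               # found by the "typed by a class" pass

named = set(g.subjects(RDF.type, OWL.NamedIndividual))
class_typed = {s for c in g.subjects(RDF.type, OWL.Class) for s in g.subjects(RDF.type, c)}
print(named | class_typed)  # EX.SolarSystem and EX.MilkyWay
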
diff --git a/ontoScorer/report.py b/ontoScorer/report.py
index 30f7659bb493a623975d29a418ae5f42f48bf32a..a5df58a36f0422d36b97f5f5a6cd3425f8bd78d1 100644
--- a/ontoScorer/report.py
+++ b/ontoScorer/report.py
@@ -33,9 +33,9 @@ class Report:
             report_str += "The generated ontology and the reference ontology have the same number of classes."
 
         report_str += "\n\nEvaluation Metrics:"
-        report_str += f"\nPrecision: {self.metrics.precision}"
-        report_str += f"\nRecall: {self.metrics.recall}"
-        report_str += f"\nF1 Score: {self.metrics.f1}"
+        report_str += f'\nPrecision: {self.metrics.scores["overall"]["precision"]}'
+        report_str += f'\nRecall: {self.metrics.scores["overall"]["recall"]}'
+        report_str += f'\nF1 Score: {self.metrics.scores["overall"]["f1"]}'
 
         return report_str
 
diff --git a/tests/test_metrics.py b/tests/test_metrics.py
index 4415d78eaa2ad835966f1b350fdefbdf3e9d9d61..9dc65011609dc3e73c66cfba0a52ac1402f00440 100644
--- a/tests/test_metrics.py
+++ b/tests/test_metrics.py
@@ -2,9 +2,39 @@
 # -*-coding:Utf-8 -*
 
 #==============================================================================
-# ontoScorer: [brief description of the module]
+# test_metrics: Metrics Testing Module
 #------------------------------------------------------------------------------
-# Detailed module description, if needed
+# Contains tests verifying the functionality of the Metrics class.
 #==============================================================================
 
-# TODO
+import unittest
+import os
+from context import ontoScorer
+from ontoScorer.ontology import Ontology
+from ontoScorer.metrics import Metrics
+
+class TestMetrics(unittest.TestCase):
+
+    def setUp(self):
+        DATA_FOLDER_PATH = f'{os.path.dirname(os.path.abspath(__file__))}/test_data'
+        self.ontology1_path = f"{DATA_FOLDER_PATH}/ontology_a.ttl"
+        self.ontology2_path = f"{DATA_FOLDER_PATH}/ontology_b.ttl"
+        self.onto1 = Ontology(self.ontology1_path)
+        self.onto2 = Ontology(self.ontology2_path)
+        self.metrics = Metrics()
+
+    def test_calculate_scores(self):
+        self.metrics.calculate(self.onto1, self.onto2)
+        for key in self.metrics.scores:
+            self.assertTrue(0 <= self.metrics.scores[key]["precision"] <= 1)
+            self.assertTrue(0 <= self.metrics.scores[key]["recall"] <= 1)
+            self.assertTrue(0 <= self.metrics.scores[key]["f1"] <= 1)
+
+
+    def test_print_scores(self):
+        self.metrics.calculate(self.onto1, self.onto2)
+        self.metrics.print_scores()
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/tests/test_ontology.py b/tests/test_ontology.py
index 8da3b2eea6aad01ab668e23029d6fcf0e27fb401..da89d127abbd6f1dc7701f9fd0922c030e497281 100644
--- a/tests/test_ontology.py
+++ b/tests/test_ontology.py
@@ -68,7 +68,7 @@ class TestOntology(unittest.TestCase):
 
     def test_get_individuals(self):
         individuals_names = {ind.name() for ind in self.onto1.get_individuals()}
-        
+        self.assertEqual(len(individuals_names), 1)
         self.assertIn("SolarSystem", individuals_names)
         self.assertNotIn("gravitation", individuals_names)