From 45880b17e8c192bbd0c5b9003d226ce88533a163 Mon Sep 17 00:00:00 2001
From: daxid <david.rouquet@tetras-libre.fr>
Date: Thu, 22 Jun 2023 16:26:29 +0000
Subject: [PATCH] Update Config to take a config_dict as the single parameter
 of its constructor

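Config (tenet/extraction/config.py) and __set_config (tenet/main.py)
now take a single config_dict instead of separate positional and
keyword arguments, so the same plain, picklable dict can be copied,
given a per-sentence index, and passed through multiprocessing.Pool.map
to the worker processes. Worker results are collected from the return
value of Pool.map instead of a shared multiprocessing.Queue. The
multiprocessing entry point is renamed from
create_ontology_from_amrld_dir_with_multiprocessing to
create_ontology_from_amrld_dir_multi_cpu, exported from
tenet/__init__.py, and now defaults to multiprocessing.cpu_count()-1
worker processes.

Sketch of the new calling convention (key names as used by the call
sites in tenet/main.py; the corpus path is a placeholder):

    config_dict = {
        'config_file_path': OWL_CONFIG_FILE_PATH,  # as defined in tenet/main.py
        'source_type': 'amr',
        'source_corpus': 'path/to/amrld_dir/',     # placeholder
        'onto_prefix': 'DefaultId',
        'base_output_dir': None,
        'technical_dir_path': None,
    }
    process_config = config.Config(config_dict)    # tenet.extraction.config
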
---
 tenet/__init__.py                |   1 +
 tenet/extraction/config.py       | 170 ++++++++++++++++-------------
 tenet/extraction/process.py      |  10 +-
 tenet/extraction/structure.py    |   2 +-
 tenet/main.py                    | 176 ++++++++++++++++++++++---------
 5 files changed, 230 insertions(+), 129 deletions(-)

diff --git a/tenet/__init__.py b/tenet/__init__.py
index 3665b4dc..bd883646 100644
--- a/tenet/__init__.py
+++ b/tenet/__init__.py
@@ -8,6 +8,7 @@ sys.path.insert(0, os.path.abspath(LIB_PATH))
 # -- Main Methods
 from tenet.main import create_ontology_from_amrld_file
 from tenet.main import create_ontology_from_amrld_dir 
+from tenet.main import create_ontology_from_amrld_dir_multi_cpu
 from tenet.main import generate_odrl_from_amrld_file
 from tenet.main import generate_odrl_from_amrld_dir 
 #from main import create_ontology_from_unlrdf_file
\ No newline at end of file
diff --git a/tenet/extraction/config.py b/tenet/extraction/config.py
index 304f04cb..851484c8 100644
--- a/tenet/extraction/config.py
+++ b/tenet/extraction/config.py
@@ -36,13 +36,26 @@ class Config:
     # Constructor
     #-------------------------------------------------
     
-    def __init__(self, 
-                 config_file, 
-                 uuid_str, source_corpus, target_ref='base',
-                 base_dir=None,
-                 base_output_dir=None,
-                 technical_dir_path=None):
+    # def __init__(self, 
+    #              config_file, 
+    #              uuid_str, source_corpus, target_ref='base',
+    #              base_dir=None,
+    #              base_output_dir=None,
+    #              technical_dir_path=None,
+    #              source_type=None
+    #             ):
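+
+    # Expected keys in config_dict (as read below and set by tenet/main.py):
+    #   required: config_file_path, onto_prefix, source_corpus
+    #   optional: base_dir, target_ref (default 'base'), base_output_dir,
+    #             technical_dir_path, source_type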
+    def __init__(self, config_dict):
+        config_file = config_dict['config_file_path']
+        uuid_str = config_dict['onto_prefix']
+        source_corpus = config_dict['source_corpus']
+        base_dir = config_dict.get('base_dir')
+        target_ref = config_dict.get('target_ref', 'base')
+        base_output_dir = config_dict.get('base_output_dir')
+        technical_dir_path = config_dict.get('technical_dir_path')
+        source_type = config_dict.get('source_type')
         
         # -- Config XML Tree
         config_tree = etree.parse(config_file)
         
@@ -54,7 +67,11 @@ class Config:
         self.source_corpus = source_corpus
         self.target_ref = target_ref
         self._process_level = c_base.get("process_level")
-        self.source_type = c_base.get("source_type")
+        #self.source_type = c_base.get("source_type")
+        if source_type is None:
+            self.source_type = c_base.get("source_type")
+        else:
+            self.source_type = source_type
         self.extraction_scheme = c_base.get("extraction_scheme")
         
         # # -- CTS Reference
@@ -227,11 +244,17 @@ class Config:
         config_tree = etree.parse(self.config_file)
         c_file = config_tree.xpath("file")[0]
         self._schema_file = self.structure_dir
+
         if self.source_type == 'amr':
             self._schema_file += c_file.get("amr_input_data_schema")
         if self.source_type == 'unl':
             self._schema_file += c_file.get("unl_input_data_schema")
-        self._schema_file += schema_file_cmpl + ".ttl"
+
+        if not schema_file_cmpl.startswith("./structure"):
+            self._schema_file += schema_file_cmpl
+
+        if not self._schema_file.endswith(".ttl"):
+            self._schema_file += ".ttl"
     
     schema_file = property(_get_schema_file, _set_schema_file) 
             
@@ -243,7 +266,10 @@ class Config:
     
     def _set_source_sentence_file(self, source_sentence_file_cmpl):
         self._source_sentence_file = self.input_doc_dir + self.source_corpus
-        self._source_sentence_file += source_sentence_file_cmpl + '.ttl'
+        self._source_sentence_file += source_sentence_file_cmpl
+
+        if not self._source_sentence_file.endswith(".ttl"):
+            self._source_sentence_file += ".ttl"
     
     source_sentence_file = property(_get_source_sentence_file, 
                                      _set_source_sentence_file) 
@@ -341,69 +367,69 @@ class Config:
         return config_str
     
     
-    def to_dict(self):
-        config_dict = {
-            "config_file": self.config_file,
-            "uuid_str": self.uuid_str,
-            "source_corpus": self.source_corpus,
-            "target_ref": self.target_ref,
-            "base_dir": self.base_dir,
-            "structure_dir": self.structure_dir,
-            "cts_dir": self.cts_dir,
-            "target_frame_dir": self.target_frame_dir,
-            "input_doc_dir": self.input_doc_dir,
-            "base_output_dir": self.base_output_dir,
-            "output_dir": self.output_dir,
-            "technical_dir_path": self.technical_dir_path,
-            "sentence_output_dir": self.sentence_output_dir,
-            "process_level": self.process_level,
-            "source_type": self.source_type,
-            "extraction_scheme": self.extraction_scheme,
-            "config_param_file": self.config_param_file,
-            "base_ontology_file": self.base_ontology_file,
-            "cts_file": self.cts_file,
-            "base_uri": self.base_uri,
-            "onto_suffix": self.onto_suffix,
-            "onto_seed_suffix": self.onto_seed_suffix,
-            "source_sentence_file": self.source_sentence_file,
-            "frame_ontology_file": self.frame_ontology_file,
-            "frame_ontology_seed_file": self.frame_ontology_seed_file,
-            "output_ontology_namespace": self.output_ontology_namespace,
-            "output_file": self.output_file,
-            "input_doc_dir": self.input_doc_dir,
-            "schema_file": self.schema_file,
-        }
-        return config_dict
+    # def to_dict(self):
+    #     config_dict = {
+    #         "config_file": self.config_file,
+    #         "uuid_str": self.uuid_str,
+    #         "source_corpus": self.source_corpus,
+    #         "target_ref": self.target_ref,
+    #         "base_dir": self.base_dir,
+    #         "structure_dir": self.structure_dir,
+    #         "cts_dir": self.cts_dir,
+    #         "target_frame_dir": self.target_frame_dir,
+    #         "input_doc_dir": self.input_doc_dir,
+    #         "base_output_dir": self.base_output_dir,
+    #         "output_dir": self.output_dir,
+    #         "technical_dir_path": self.technical_dir_path,
+    #         "sentence_output_dir": self.sentence_output_dir,
+    #         "process_level": self.process_level,
+    #         "source_type": self.source_type,
+    #         "extraction_scheme": self.extraction_scheme,
+    #         "config_param_file": self.config_param_file,
+    #         "base_ontology_file": self.base_ontology_file,
+    #         "cts_file": self.cts_file,
+    #         "base_uri": self.base_uri,
+    #         "onto_suffix": self.onto_suffix,
+    #         "onto_seed_suffix": self.onto_seed_suffix,
+    #         "source_sentence_file": self.source_sentence_file,
+    #         "frame_ontology_file": self.frame_ontology_file,
+    #         "frame_ontology_seed_file": self.frame_ontology_seed_file,
+    #         "output_ontology_namespace": self.output_ontology_namespace,
+    #         "output_file": self.output_file,
+    #         "input_doc_dir": self.input_doc_dir,
+    #         "schema_file": self.schema_file,
+    #     }
+    #     return config_dict
             
         
-    def update_from_dict(self, config_dict):
-        self.config_file = config_dict.get("config_file")
-        self.uuid_str = config_dict.get("uuid_str")
-        self.source_corpus = config_dict.get("source_corpus")
-        self.target_ref = config_dict.get("target_ref")
-        self.base_dir = config_dict.get("base_dir")
-        self.structure_dir = config_dict.get("structure_dir")
-        self.cts_dir = config_dict.get("cts_dir")
-        self.target_frame_dir = config_dict.get("target_frame_dir")
-        self.input_doc_dir = config_dict.get("input_doc_dir")
-        self.base_output_dir = config_dict.get("base_output_dir")
-        self.output_dir = config_dict.get("output_dir")
-        self.technical_dir_path = config_dict.get("technical_dir_path")
-        self.sentence_output_dir = config_dict.get("sentence_output_dir")
-        self.process_level = config_dict.get("process_level")
-        self.source_type = config_dict.get("source_type")
-        self.extraction_scheme = config_dict.get("extraction_scheme")
-        self.config_param_file = config_dict.get("config_param_file")
-        self.base_ontology_file = config_dict.get("base_ontology_file")
-        self.cts_file = config_dict.get("cts_file")
-        self.base_uri = config_dict.get("base_uri")
-        self.onto_suffix = config_dict.get("onto_suffix")
-        self.onto_seed_suffix = config_dict.get("onto_seed_suffix")
-        self.source_sentence_file = config_dict.get("source_sentence_file")
-        self.frame_ontology_file = config_dict.get("frame_ontology_file")
-        self.frame_ontology_seed_file = config_dict.get("frame_ontology_seed_file")
-        self.output_ontology_namespace = config_dict.get("output_ontology_namespace")
-        self.output_file = config_dict.get("output_file")
-        self.input_doc_dir = config_dict.get("input_doc_dir")
-        self.schema_file = config_dict.get("schema_file")
+    # def update_from_dict(self, config_dict):
+    #     self.config_file = config_dict.get("config_file")
+    #     self.uuid_str = config_dict.get("uuid_str")
+    #     self.source_corpus = config_dict.get("source_corpus")
+    #     self.target_ref = config_dict.get("target_ref")
+    #     self.base_dir = config_dict.get("base_dir")
+    #     self.structure_dir = config_dict.get("structure_dir")
+    #     self.cts_dir = config_dict.get("cts_dir")
+    #     self.target_frame_dir = config_dict.get("target_frame_dir")
+    #     self.input_doc_dir = config_dict.get("input_doc_dir")
+    #     self.base_output_dir = config_dict.get("base_output_dir")
+    #     self.output_dir = config_dict.get("output_dir")
+    #     self.technical_dir_path = config_dict.get("technical_dir_path")
+    #     self.sentence_output_dir = config_dict.get("sentence_output_dir")
+    #     self.process_level = config_dict.get("process_level")
+    #     self.source_type = config_dict.get("source_type")
+    #     self.extraction_scheme = config_dict.get("extraction_scheme")
+    #     self.config_param_file = config_dict.get("config_param_file")
+    #     self.base_ontology_file = config_dict.get("base_ontology_file")
+    #     self.cts_file = config_dict.get("cts_file")
+    #     self.base_uri = config_dict.get("base_uri")
+    #     self.onto_suffix = config_dict.get("onto_suffix")
+    #     self.onto_seed_suffix = config_dict.get("onto_seed_suffix")
+    #     self.source_sentence_file = config_dict.get("source_sentence_file")
+    #     self.frame_ontology_file = config_dict.get("frame_ontology_file")
+    #     self.frame_ontology_seed_file = config_dict.get("frame_ontology_seed_file")
+    #     self.output_ontology_namespace = config_dict.get("output_ontology_namespace")
+    #     self.output_file = config_dict.get("output_file")
+    #     self.input_doc_dir = config_dict.get("input_doc_dir")
+    #     self.schema_file = config_dict.get("schema_file")
         
\ No newline at end of file
diff --git a/tenet/extraction/process.py b/tenet/extraction/process.py
index 06c00332..6b0b255a 100644
--- a/tenet/extraction/process.py
+++ b/tenet/extraction/process.py
@@ -228,7 +228,7 @@ def apply_step(config, graph, rule_set, step_number, step_name, step_sequence_de
         for sequence_def in step_sequence_def:
             graph, triple_list = _apply_sequence(graph, sequence_def)
             step_triple_list.extend(triple_list)
-         
+        
         # -- Serialize the working graph updated during the step
         if config.technical_dir_path is not None:
             os.makedirs(config.sentence_output_dir, exist_ok=True)
@@ -238,7 +238,7 @@ def apply_step(config, graph, rule_set, step_number, step_name, step_sequence_de
         str = "----- {0} triples extracted during {1} step"
         new_triple_count = len(graph) - graph_length_before_step
         logger.info(str.format(new_triple_count, step_name))
-         
+        
         return graph, step_triple_list
     
     except AssertionError:
@@ -265,7 +265,7 @@ def apply(config, graph):
         logger.info(f"-- Loading Extraction Scheme ({config.extraction_scheme})")
         rule_dir, prefix_list, scheme = load_cts(config)
         logger.debug("----- Step number: {0}".format(len(scheme)))
-            
+        
         # -- Loading Extraction Rules
         logger.info("-- Loading Extraction Rules ({0}*)".format(rule_dir))
         rule_set = load_rule_set(config, rule_dir, prefix_list)
@@ -278,7 +278,7 @@ def apply(config, graph):
             step_number += 1
             graph, new_triple_list = apply_step(config, graph, rule_set,
                                                 step_number, step_name, step_sequence_def)
-    
+        
         # -- Result: file containing only the factoids (last step result)
         if config.technical_dir_path is not None:
             os.makedirs(config.sentence_output_dir, exist_ok=True)
@@ -293,7 +293,7 @@ def apply(config, graph):
             factoid_graph.serialize(destination=factoid_file, 
                                     base=base_ref, 
                                     format='turtle')
-                                
+                
         return graph, new_triple_list   
     
     except:
diff --git a/tenet/extraction/structure.py b/tenet/extraction/structure.py
index bd02ac21..d50c8637 100644
--- a/tenet/extraction/structure.py
+++ b/tenet/extraction/structure.py
@@ -173,7 +173,7 @@ def prepare_work_graph(config, sentence_file):
             load_sentence(config, work_graph, sentence_file)
         else: # process_level == 'document'
             load_all_sentences(config, work_graph)
-    
+        
         # -- Result
         if config.technical_dir_path is not None:
             os.makedirs(config.sentence_output_dir, exist_ok=True)
diff --git a/tenet/main.py b/tenet/main.py
index 28fa3a5e..d34ceb9b 100644
--- a/tenet/main.py
+++ b/tenet/main.py
@@ -40,11 +40,42 @@ def __set_context():
     os.chdir(LIB_PATH)
 
 
-def __set_config(
-        config_file_path,
-        source_type, source_corpus, onto_prefix, 
-        base_output_dir, technical_dir_path):
-    
+# def __set_config(
+#         config_file_path,
+#         source_type, source_corpus, onto_prefix, 
+#         base_output_dir, technical_dir_path):
+    
+#     logger.info("-- Process Setting ")
+#     logger.info(f'----- Corpus source: {source_corpus} ({source_type})')
+#     logger.info(f'----- Base output dir: {base_output_dir}')
+#     logger.info(f'----- technical dir path: {technical_dir_path}')
+#     logger.info(f'----- Ontology target (id): {onto_prefix}')
+#     logger.info(f'----- Current path: {os.getcwd()}')
+#     logger.debug(f'----- Config file: {config_file_path}')
+    
+#     process_config = config.Config(config_file_path, 
+#                                    onto_prefix, 
+#                                    source_corpus, 
+#                                    base_output_dir = base_output_dir,
+#                                    technical_dir_path = technical_dir_path,
+#                                    source_type = source_type
+#                                    )
+#     #process_config.source_type = source_type
+#     # config.output_ontology_namespace = target_ontology_namespace
+    
+#     logger.debug(process_config.get_full_config())
+    
+#     return process_config
+
+
+def __set_config(config_dict):
+    config_file_path = config_dict['config_file_path']
+    source_type = config_dict.get('source_type')
+    source_corpus = config_dict['source_corpus']
+    onto_prefix = config_dict['onto_prefix']
+    base_output_dir = config_dict.get('base_output_dir')
+    technical_dir_path = config_dict.get('technical_dir_path')
+
     logger.info("-- Process Setting ")
     logger.info(f'----- Corpus source: {source_corpus} ({source_type})')
     logger.info(f'----- Base output dir: {base_output_dir}')
@@ -52,19 +83,20 @@ def __set_config(
     logger.info(f'----- Ontology target (id): {onto_prefix}')
     logger.info(f'----- Current path: {os.getcwd()}')
     logger.debug(f'----- Config file: {config_file_path}')
-    
-    process_config = config.Config(config_file_path, 
-                                   onto_prefix, 
-                                   source_corpus, 
-                                   base_output_dir = base_output_dir,
-                                   technical_dir_path = technical_dir_path
-                                   )
-    process_config.source_type = source_type
-    # config.output_ontology_namespace = target_ontology_namespace
-    
-    logger.debug(process_config.get_full_config())
-    
-    return process_config
+
+    # process_config = config.Config(
+    #     config_file_path,
+    #     onto_prefix,
+    #     source_corpus,
+    #     base_output_dir=base_output_dir,
+    #     technical_dir_path=technical_dir_path,
+    #     source_type=source_type
+    # )
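+    # Build the Config directly from the dict; the same dict (not the Config
+    # object) is what the multiprocessing path copies and sends to the workers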
+    base_config = config.Config(config_dict)
+
+    logger.debug(base_config.get_full_config())
+
+    return base_config
 
 
 def __count_number_of_graph(config):   
@@ -80,6 +112,7 @@ def __apply_extraction(config, sentence_file):
     if config.technical_dir_path is not None:
         os.makedirs(config.sentence_output_dir, exist_ok=True)
     work_graph = structure.prepare_sentence_work(config, sentence_file)
     _, new_triple_list = process.apply(config, work_graph)
     return new_triple_list
     
@@ -144,9 +177,22 @@ def create_ontology_from_amrld_file(amrld_file_path,
     __set_context()
     if onto_prefix is None: onto_prefix = 'DefaultId'
     base_output_dir = os.path.dirname(out_file_path) if out_file_path is not None else None
-    config = __set_config(OWL_CONFIG_FILE_PATH,
-                          'amr', amrld_file_path, onto_prefix, 
-                          base_output_dir, technical_dir_path)
+
+    config_dict = {
+        'config_file_path': OWL_CONFIG_FILE_PATH,
+        'source_type': 'amr',
+        'source_corpus': amrld_file_path,
+        'onto_prefix': onto_prefix,
+        'base_output_dir': base_output_dir,
+        'technical_dir_path': technical_dir_path
+    }
+
+    config = __set_config(config_dict)
+
+    # config = __set_config(OWL_CONFIG_FILE_PATH,
+    #                       'amr', amrld_file_path, onto_prefix,
+    #                       base_output_dir, technical_dir_path)
+
     assert os.path.exists(amrld_file_path), f'input file does not exists ({amrld_file_path})'
     
     # -- Extraction Processing   
@@ -201,9 +247,22 @@ def create_ontology_from_amrld_dir(amrld_dir_path,
     __set_context()
     if onto_prefix is None: onto_prefix = 'DefaultId'
     base_output_dir = os.path.dirname(out_file_path) if out_file_path is not None else None
-    config = __set_config(OWL_CONFIG_FILE_PATH,
-                          'amr', amrld_dir_path, onto_prefix, 
-                          base_output_dir, technical_dir_path)
+
+    config_dict = {
+        'config_file_path': OWL_CONFIG_FILE_PATH,
+        'source_type': 'amr',
+        'source_corpus': amrld_dir_path,
+        'onto_prefix': onto_prefix,
+        'base_output_dir': base_output_dir,
+        'technical_dir_path': technical_dir_path
+    }
+
+    config = __set_config(config_dict)
+
+    # config = __set_config(OWL_CONFIG_FILE_PATH,
+    #                       'amr', amrld_dir_path, onto_prefix,
+    #                       base_output_dir, technical_dir_path)
+
     assert os.path.exists(amrld_dir_path), f'input directory does not exists ({amrld_dir_path})'
     __count_number_of_graph(config)
     
@@ -238,41 +297,41 @@ def create_ontology_from_amrld_dir(amrld_dir_path,
 # AMR Main Methods (to create an ontology) - Multiprocessing
 #==============================================================================
 
-global result_triple_queue
-global sentence_file_list
+#global result_triple_queue
+#global sentence_file_list
 
 def dump_queue(q):
     q.put(None)
     return list(iter(q.get, None))
 
-def pool_function(arg_dic):
-    global result_triple_queue
-    global sentence_file_list
-    print(f'==================== TEST A')
-    process_config = config.Config(OWL_CONFIG_FILE_PATH, 'default', 'default')
-    process_config.update_from_dict(arg_dic)
-    print(f'==================== TEST B')
-    print(f'==================== process_config (1): {process_config}')
-    sentence_indice = arg_dic['sentence_list_indice']
-    print(f'==================== process_config (2): {process_config}')
+def pool_function(arg_dict):
+    #global result_triple_queue
+    #global sentence_file_list
+
+    #process_config = config.Config(OWL_CONFIG_FILE_PATH, 'default', 'default')
+    #process_config.update_from_dict(arg_dict)
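+    # Each worker rebuilds its own Config from the plain dict it received
+    # through Pool.map, rather than sharing one Config across processes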
+    process_config = config.Config(arg_dict)
+
+    sentence_indice = arg_dict['sentence_list_indice']
     sentence_file = sentence_file_list[sentence_indice]
-    print(f'==================== sentence_file: {sentence_file}')
+
     logger.info(f'     *** sentence {sentence_indice} *** ')
     process_config.sentence_output_dir = f'-{sentence_indice}'
     new_triple_list = __apply_extraction(process_config, sentence_file)
-    print(f'==================== TEST C')
+
-    # The following must handled via a global queue
-    result_triple_queue.extend(new_triple_list)
+    # Results are now returned to the parent process via Pool.map,
+    # so the shared result queue is no longer needed here
+    #result_triple_queue.extend(new_triple_list)
+
     return(new_triple_list)
     
 
 #@timed
-def create_ontology_from_amrld_dir_with_multiprocessing(amrld_dir_path, 
+def create_ontology_from_amrld_dir_multi_cpu(amrld_dir_path, 
                                                         base_ontology_path=None, 
                                                         onto_prefix=None, 
                                                         out_file_path=None, 
                                                         technical_dir_path=None,
-                                                        processes=3#multiprocessing.cpu_count()-1
+                                                        processes=multiprocessing.cpu_count()-1  # default: all but one CPU
                                                         ):
     """
     Method to create an ontology (as Turtle String) from a transduction 
@@ -302,9 +361,22 @@ def create_ontology_from_amrld_dir_with_multiprocessing(amrld_dir_path,
     __set_context()
     if onto_prefix is None: onto_prefix = 'DefaultId'
     base_output_dir = os.path.dirname(out_file_path) if out_file_path is not None else None    
-    config = __set_config(OWL_CONFIG_FILE_PATH,
-                          'amr', amrld_dir_path, onto_prefix, 
-                          base_output_dir, technical_dir_path)
+
+    config_dict = {
+        'config_file_path': OWL_CONFIG_FILE_PATH,
+        'source_type': 'amr',
+        'source_corpus': amrld_dir_path,
+        'onto_prefix': onto_prefix,
+        'base_output_dir': base_output_dir,
+        'technical_dir_path': technical_dir_path
+    }
+
+    config = __set_config(config_dict)
+
+    # config = __set_config(OWL_CONFIG_FILE_PATH,
+    #                       'amr', amrld_dir_path, onto_prefix,
+    #                       base_output_dir, technical_dir_path)
+
     assert os.path.exists(amrld_dir_path), f'input directory does not exists ({amrld_dir_path})'
     __count_number_of_graph(config)
     
@@ -314,14 +386,14 @@ def create_ontology_from_amrld_dir_with_multiprocessing(amrld_dir_path,
     sentence_count = 0
     result_triple_list = []
 
-    result_triple_queue = multiprocessing.Queue()
+    #result_triple_queue = multiprocessing.Queue()
     
     sentence_file_list = glob.glob(sentence_dir, recursive = True)
     
     # The following is for multiprocessing logging (must be exec before the pool is created
     multiprocessing_logging.install_mp_handler()
     
-    config_dict = config.to_dict()
+    # config_dict = config.to_dict()
     #star_iterable = [(i, config) for i in range(len(sentence_file_list))]
     
     mapIterable = []
@@ -329,15 +401,17 @@ def create_ontology_from_amrld_dir_with_multiprocessing(amrld_dir_path,
     for i in range(len(sentence_file_list)):
         config_dict['sentence_list_indice'] = i
         mapIterable = mapIterable + [config_dict.copy()]
-    print(config_dict)
     
     with multiprocessing.Pool(processes) as p:
-        print (f'\n mapIterable: {mapIterable}')
-        triples = p.map(pool_function, mapIterable)
+        triples_lists = p.map(pool_function, mapIterable)
+
+    # Flatten the per-sentence lists into the overall result list
+    for triple_list in triples_lists:
+        result_triple_list.extend(triple_list)
     
     # -- Final Ontology Generation (factoid_graph)
     logger.info('\n === Final Ontology Generation  === ') 
-    result_triple_list = dump_queue(result_triple_queue)
+    #result_triple_list = dump_queue(result_triple_queue)
     factoid_graph = __generate_final_ontology(result_triple_list)
     ontology_turtle_string = __serialize_factoid_graph(config, factoid_graph, out_file_path)
         
-- 
GitLab