diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..950f50f256db44189a66537501d054cbdb6f91f5 --- /dev/null +++ b/.gitignore @@ -0,0 +1,6 @@ +*.pyc +*__pycache__* +*.todo +devtemp*.py +.project +*.ttl.tbc diff --git a/asd/__pycache__/doc.cpython-310.pyc b/asd/__pycache__/doc.cpython-310.pyc deleted file mode 100644 index 85a8c8992eda54bf56db9080f47bf63f2375f1ec..0000000000000000000000000000000000000000 Binary files a/asd/__pycache__/doc.cpython-310.pyc and /dev/null differ diff --git a/grammar/doc/__pycache__/docLexer.cpython-310.pyc b/grammar/doc/__pycache__/docLexer.cpython-310.pyc deleted file mode 100644 index 997817b3aabec262a7798f35c9b0ab317b3e40f1..0000000000000000000000000000000000000000 Binary files a/grammar/doc/__pycache__/docLexer.cpython-310.pyc and /dev/null differ diff --git a/grammar/doc/__pycache__/docParser.cpython-310.pyc b/grammar/doc/__pycache__/docParser.cpython-310.pyc deleted file mode 100644 index 6356f2ab138d2e5b8efa078c6b549fa9e9274a89..0000000000000000000000000000000000000000 Binary files a/grammar/doc/__pycache__/docParser.cpython-310.pyc and /dev/null differ diff --git a/grammar/org/org.interp b/grammar/org/org.interp new file mode 100644 index 0000000000000000000000000000000000000000..cd5d6304b0eadd4658c765126d5b28aa781cea56 --- /dev/null +++ b/grammar/org/org.interp @@ -0,0 +1,16 @@ +token literal names: +null +null +null + +token symbolic names: +null +WS +ORG + +rule names: +orgPart + + +atn: +[3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 3, 4, 7, 4, 2, 9, 2, 3, 2, 3, 2, 3, 2, 2, 2, 3, 2, 2, 2, 2, 5, 2, 4, 3, 2, 2, 2, 4, 5, 7, 4, 2, 2, 5, 3, 3, 2, 2, 2, 2] \ No newline at end of file diff --git a/grammar/org/org.tokens b/grammar/org/org.tokens new file mode 100644 index 0000000000000000000000000000000000000000..42c64256cb6ccb35d39ab262e0724c20ee3f64a0 --- /dev/null +++ b/grammar/org/org.tokens @@ -0,0 +1,2 @@ +WS=1 +ORG=2 diff --git a/grammar/org/orgLexer.interp b/grammar/org/orgLexer.interp new file mode 100644 index 0000000000000000000000000000000000000000..56d77e2efa3809c0f51926b5ba6ccb022b44850c --- /dev/null +++ b/grammar/org/orgLexer.interp @@ -0,0 +1,23 @@ +token literal names: +null +null +null + +token symbolic names: +null +WS +ORG + +rule names: +WS +ORG + +channel names: +DEFAULT_TOKEN_CHANNEL +HIDDEN + +mode names: +DEFAULT_MODE + +atn: +[3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 2, 4, 36, 8, 1, 4, 2, 9, 2, 4, 3, 9, 3, 3, 2, 6, 2, 9, 10, 2, 13, 2, 14, 2, 10, 3, 2, 3, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 7, 3, 25, 10, 3, 12, 3, 14, 3, 28, 11, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 4, 3, 3, 5, 4, 3, 2, 3, 5, 2, 11, 12, 14, 15, 34, 34, 2, 37, 2, 3, 3, 2, 2, 2, 2, 5, 3, 2, 2, 2, 3, 8, 3, 2, 2, 2, 5, 14, 3, 2, 2, 2, 7, 9, 9, 2, 2, 2, 8, 7, 3, 2, 2, 2, 9, 10, 3, 2, 2, 2, 10, 8, 3, 2, 2, 2, 10, 11, 3, 2, 2, 2, 11, 12, 3, 2, 2, 2, 12, 13, 8, 2, 2, 2, 13, 4, 3, 2, 2, 2, 14, 15, 7, 125, 2, 2, 15, 16, 7, 113, 2, 2, 16, 17, 7, 116, 2, 2, 17, 18, 7, 105, 2, 2, 18, 19, 7, 60, 2, 2, 19, 20, 7, 103, 2, 2, 20, 21, 7, 112, 2, 2, 21, 22, 7, 127, 2, 2, 22, 26, 3, 2, 2, 2, 23, 25, 11, 2, 2, 2, 24, 23, 3, 2, 2, 2, 25, 28, 3, 2, 2, 2, 26, 24, 3, 2, 2, 2, 26, 27, 3, 2, 2, 2, 27, 29, 3, 2, 2, 2, 28, 26, 3, 2, 2, 2, 29, 30, 7, 125, 2, 2, 30, 31, 7, 49, 2, 2, 31, 32, 7, 113, 2, 2, 32, 33, 7, 116, 2, 2, 33, 34, 7, 105, 2, 2, 34, 35, 7, 127, 2, 2, 35, 6, 3, 2, 2, 2, 5, 2, 10, 26, 3, 8, 2, 2] \ No newline at end of file diff --git 
a/grammar/org/orgLexer.py b/grammar/org/orgLexer.py new file mode 100644 index 0000000000000000000000000000000000000000..8793cafbb1d48713a4d5f60b34f3ac300f665801 --- /dev/null +++ b/grammar/org/orgLexer.py @@ -0,0 +1,61 @@ +# Generated from grammar/org/org.g4 by ANTLR 4.9.3 +from antlr4 import * +from io import StringIO +import sys +if sys.version_info[1] > 5: + from typing import TextIO +else: + from typing.io import TextIO + + + +def serializedATN(): + with StringIO() as buf: + buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\2\4") + buf.write("$\b\1\4\2\t\2\4\3\t\3\3\2\6\2\t\n\2\r\2\16\2\n\3\2\3\2") + buf.write("\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\7\3\31\n\3\f") + buf.write("\3\16\3\34\13\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\2\2\4\3\3") + buf.write("\5\4\3\2\3\5\2\13\f\16\17\"\"\2%\2\3\3\2\2\2\2\5\3\2\2") + buf.write("\2\3\b\3\2\2\2\5\16\3\2\2\2\7\t\t\2\2\2\b\7\3\2\2\2\t") + buf.write("\n\3\2\2\2\n\b\3\2\2\2\n\13\3\2\2\2\13\f\3\2\2\2\f\r\b") + buf.write("\2\2\2\r\4\3\2\2\2\16\17\7}\2\2\17\20\7q\2\2\20\21\7t") + buf.write("\2\2\21\22\7i\2\2\22\23\7<\2\2\23\24\7g\2\2\24\25\7p\2") + buf.write("\2\25\26\7\177\2\2\26\32\3\2\2\2\27\31\13\2\2\2\30\27") + buf.write("\3\2\2\2\31\34\3\2\2\2\32\30\3\2\2\2\32\33\3\2\2\2\33") + buf.write("\35\3\2\2\2\34\32\3\2\2\2\35\36\7}\2\2\36\37\7\61\2\2") + buf.write("\37 \7q\2\2 !\7t\2\2!\"\7i\2\2\"#\7\177\2\2#\6\3\2\2\2") + buf.write("\5\2\n\32\3\b\2\2") + return buf.getvalue() + + +class orgLexer(Lexer): + + atn = ATNDeserializer().deserialize(serializedATN()) + + decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ] + + WS = 1 + ORG = 2 + + channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ] + + modeNames = [ "DEFAULT_MODE" ] + + literalNames = [ "<INVALID>", + ] + + symbolicNames = [ "<INVALID>", + "WS", "ORG" ] + + ruleNames = [ "WS", "ORG" ] + + grammarFileName = "org.g4" + + def __init__(self, input=None, output:TextIO = sys.stdout): + super().__init__(input, output) + self.checkVersion("4.9.3") + self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache()) + self._actions = None + self._predicates = None + + diff --git a/grammar/org/orgLexer.tokens b/grammar/org/orgLexer.tokens new file mode 100644 index 0000000000000000000000000000000000000000..42c64256cb6ccb35d39ab262e0724c20ee3f64a0 --- /dev/null +++ b/grammar/org/orgLexer.tokens @@ -0,0 +1,2 @@ +WS=1 +ORG=2 diff --git a/grammar/org/orgListener.py b/grammar/org/orgListener.py new file mode 100644 index 0000000000000000000000000000000000000000..0a1a034c750201a73ed2e2a1599cc377bb4e18b4 --- /dev/null +++ b/grammar/org/orgListener.py @@ -0,0 +1,21 @@ +# Generated from grammar/org/org.g4 by ANTLR 4.9.3 +from antlr4 import * +if __name__ is not None and "." in __name__: + from .orgParser import orgParser +else: + from orgParser import orgParser + +# This class defines a complete listener for a parse tree produced by orgParser. +class orgListener(ParseTreeListener): + + # Enter a parse tree produced by orgParser#orgPart. + def enterOrgPart(self, ctx:orgParser.OrgPartContext): + pass + + # Exit a parse tree produced by orgParser#orgPart. 
+ def exitOrgPart(self, ctx:orgParser.OrgPartContext): + pass + + + +del orgParser \ No newline at end of file diff --git a/grammar/org/orgParser.py b/grammar/org/orgParser.py new file mode 100644 index 0000000000000000000000000000000000000000..68db601b81e1481c25bb8ef26f4ccbec18b703b4 --- /dev/null +++ b/grammar/org/orgParser.py @@ -0,0 +1,94 @@ +# Generated from grammar/org/org.g4 by ANTLR 4.9.3 +# encoding: utf-8 +from antlr4 import * +from io import StringIO +import sys +if sys.version_info[1] > 5: + from typing import TextIO +else: + from typing.io import TextIO + + +def serializedATN(): + with StringIO() as buf: + buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\3\4") + buf.write("\7\4\2\t\2\3\2\3\2\3\2\2\2\3\2\2\2\2\5\2\4\3\2\2\2\4\5") + buf.write("\7\4\2\2\5\3\3\2\2\2\2") + return buf.getvalue() + + +class orgParser ( Parser ): + + grammarFileName = "org.g4" + + atn = ATNDeserializer().deserialize(serializedATN()) + + decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ] + + sharedContextCache = PredictionContextCache() + + literalNames = [ ] + + symbolicNames = [ "<INVALID>", "WS", "ORG" ] + + RULE_orgPart = 0 + + ruleNames = [ "orgPart" ] + + EOF = Token.EOF + WS=1 + ORG=2 + + def __init__(self, input:TokenStream, output:TextIO = sys.stdout): + super().__init__(input, output) + self.checkVersion("4.9.3") + self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache) + self._predicates = None + + + + + class OrgPartContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def ORG(self): + return self.getToken(orgParser.ORG, 0) + + def getRuleIndex(self): + return orgParser.RULE_orgPart + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterOrgPart" ): + listener.enterOrgPart(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitOrgPart" ): + listener.exitOrgPart(self) + + + + + def orgPart(self): + + localctx = orgParser.OrgPartContext(self, self._ctx, self.state) + self.enterRule(localctx, 0, self.RULE_orgPart) + try: + self.enterOuterAlt(localctx, 1) + self.state = 2 + self.match(orgParser.ORG) + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + + + diff --git a/grammar/unl/unl.g4 b/grammar/unl/unl.g4 index dac12c8a9c08bdbc1410f868779c2ad89b123f1d..925fb6749d737c59c4266951878a0ee1c3b42154 100644 --- a/grammar/unl/unl.g4 +++ b/grammar/unl/unl.g4 @@ -14,9 +14,56 @@ grammar unl; //--------------------------------------------------------- unlPart - : UNL + : '{unl}' (relationOccurrence)+ '{/unl}' ; +relationOccurrence + : universalRelation LP universalWord COMMA universalWord RP + ; + +universalWord + : headword + (LP restriction (COMMA restriction)* RP)? 
+ (attribute)* + | value + ; + +headword + : ident + ; + +restriction + : universalRelation GREATER ident + ; + +attribute + : DOT AT ident + ; + +value + : VALUE + ; + +universalRelation + : ( AND | AOJ | BEN | CNT | + EQU | ICL | OBJ | QUA ) + ; + + +//--------------------------------------------------------- +// Base Element +//--------------------------------------------------------- + +sentence : (word | punctuation | bracket)* ; + +ident : word (UNDERSCORE word)* ; + +word : LETTER | WORD ; + +punctuation : DOT | COMMA | SEMCOL | COLON | DASH ; + +bracket : LP | RP | LC | RC ; + //============================================================================= // Lexer Grammar @@ -25,6 +72,43 @@ unlPart // ignore whitespaces WS : (' '|'\n'|'\t'|'\r'|'\u000C')+ -> skip ; +// fragments +fragment LOWERCASE : [a-z] ; +fragment UPPERCASE : [A-Z] ; +fragment DIGIT : '0'..'9' ; +fragment ASCII : ~('\n'|'"'|'<'|'>'|'('|')') ; + +// punctuation +DOT : '.' ; +COMMA : ',' ; +SEMCOL : ';' ; +COLON : ':' ; +DASH : '-' ; + +// brackets +LP : '(' ; // Left parenthesis +RP : ')' ; +LC : '{' ; // Left curly bracket +RC : '}' ; + +// symbols +LESS : '<' ; +GREATER : '>' ; +AT : '@' ; +UNDERSCORE : '_' ; + +// relations +AND : 'and' ; +AOJ : 'aoj' ; +BEN : 'ben' ; +CNT : 'cnt' ; +EQU : 'equ' ; +ICL : 'icl' ; +OBJ : 'obj' ; +QUA : 'qua' ; + // other tokens -ORG : '{org:en}' (.)* '{/org}' ; -UNL : '{unl}' (.)* '{/unl}' ; +LETTER : LOWERCASE | UPPERCASE ; +WORD : (LETTER)+ ; +VALUE : (DIGIT)+ (DOT (DIGIT)+)? ; + diff --git a/grammar/unl/unl.interp b/grammar/unl/unl.interp new file mode 100644 index 0000000000000000000000000000000000000000..525ed69941a5b9c77d996dee48d06fbdda14e0af --- /dev/null +++ b/grammar/unl/unl.interp @@ -0,0 +1,78 @@ +token literal names: +null +'{unl}' +'{/unl}' +null +'.' 
+',' +';' +':' +'-' +'(' +')' +'{' +'}' +'<' +'>' +'@' +'_' +'and' +'aoj' +'ben' +'cnt' +'equ' +'icl' +'obj' +'qua' +null +null +null + +token symbolic names: +null +null +null +WS +DOT +COMMA +SEMCOL +COLON +DASH +LP +RP +LC +RC +LESS +GREATER +AT +UNDERSCORE +AND +AOJ +BEN +CNT +EQU +ICL +OBJ +QUA +LETTER +WORD +VALUE + +rule names: +unlPart +relationOccurrence +universalWord +headword +restriction +attribute +value +universalRelation +sentence +ident +word +punctuation +bracket + + +atn: +[3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 3, 29, 103, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 4, 6, 9, 6, 4, 7, 9, 7, 4, 8, 9, 8, 4, 9, 9, 9, 4, 10, 9, 10, 4, 11, 9, 11, 4, 12, 9, 12, 4, 13, 9, 13, 4, 14, 9, 14, 3, 2, 3, 2, 6, 2, 31, 10, 2, 13, 2, 14, 2, 32, 3, 2, 3, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 7, 4, 49, 10, 4, 12, 4, 14, 4, 52, 11, 4, 3, 4, 3, 4, 5, 4, 56, 10, 4, 3, 4, 7, 4, 59, 10, 4, 12, 4, 14, 4, 62, 11, 4, 3, 4, 5, 4, 65, 10, 4, 3, 5, 3, 5, 3, 6, 3, 6, 3, 6, 3, 6, 3, 7, 3, 7, 3, 7, 3, 7, 3, 8, 3, 8, 3, 9, 3, 9, 3, 10, 3, 10, 3, 10, 7, 10, 84, 10, 10, 12, 10, 14, 10, 87, 11, 10, 3, 11, 3, 11, 3, 11, 7, 11, 92, 10, 11, 12, 11, 14, 11, 95, 11, 11, 3, 12, 3, 12, 3, 13, 3, 13, 3, 14, 3, 14, 3, 14, 2, 2, 15, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 2, 6, 3, 2, 19, 26, 3, 2, 27, 28, 3, 2, 6, 10, 3, 2, 11, 14, 2, 98, 2, 28, 3, 2, 2, 2, 4, 36, 3, 2, 2, 2, 6, 64, 3, 2, 2, 2, 8, 66, 3, 2, 2, 2, 10, 68, 3, 2, 2, 2, 12, 72, 3, 2, 2, 2, 14, 76, 3, 2, 2, 2, 16, 78, 3, 2, 2, 2, 18, 85, 3, 2, 2, 2, 20, 88, 3, 2, 2, 2, 22, 96, 3, 2, 2, 2, 24, 98, 3, 2, 2, 2, 26, 100, 3, 2, 2, 2, 28, 30, 7, 3, 2, 2, 29, 31, 5, 4, 3, 2, 30, 29, 3, 2, 2, 2, 31, 32, 3, 2, 2, 2, 32, 30, 3, 2, 2, 2, 32, 33, 3, 2, 2, 2, 33, 34, 3, 2, 2, 2, 34, 35, 7, 4, 2, 2, 35, 3, 3, 2, 2, 2, 36, 37, 5, 16, 9, 2, 37, 38, 7, 11, 2, 2, 38, 39, 5, 6, 4, 2, 39, 40, 7, 7, 2, 2, 40, 41, 5, 6, 4, 2, 41, 42, 7, 12, 2, 2, 42, 5, 3, 2, 2, 2, 43, 55, 5, 8, 5, 2, 44, 45, 7, 11, 2, 2, 45, 50, 5, 10, 6, 2, 46, 47, 7, 7, 2, 2, 47, 49, 5, 10, 6, 2, 48, 46, 3, 2, 2, 2, 49, 52, 3, 2, 2, 2, 50, 48, 3, 2, 2, 2, 50, 51, 3, 2, 2, 2, 51, 53, 3, 2, 2, 2, 52, 50, 3, 2, 2, 2, 53, 54, 7, 12, 2, 2, 54, 56, 3, 2, 2, 2, 55, 44, 3, 2, 2, 2, 55, 56, 3, 2, 2, 2, 56, 60, 3, 2, 2, 2, 57, 59, 5, 12, 7, 2, 58, 57, 3, 2, 2, 2, 59, 62, 3, 2, 2, 2, 60, 58, 3, 2, 2, 2, 60, 61, 3, 2, 2, 2, 61, 65, 3, 2, 2, 2, 62, 60, 3, 2, 2, 2, 63, 65, 5, 14, 8, 2, 64, 43, 3, 2, 2, 2, 64, 63, 3, 2, 2, 2, 65, 7, 3, 2, 2, 2, 66, 67, 5, 20, 11, 2, 67, 9, 3, 2, 2, 2, 68, 69, 5, 16, 9, 2, 69, 70, 7, 16, 2, 2, 70, 71, 5, 20, 11, 2, 71, 11, 3, 2, 2, 2, 72, 73, 7, 6, 2, 2, 73, 74, 7, 17, 2, 2, 74, 75, 5, 20, 11, 2, 75, 13, 3, 2, 2, 2, 76, 77, 7, 29, 2, 2, 77, 15, 3, 2, 2, 2, 78, 79, 9, 2, 2, 2, 79, 17, 3, 2, 2, 2, 80, 84, 5, 22, 12, 2, 81, 84, 5, 24, 13, 2, 82, 84, 5, 26, 14, 2, 83, 80, 3, 2, 2, 2, 83, 81, 3, 2, 2, 2, 83, 82, 3, 2, 2, 2, 84, 87, 3, 2, 2, 2, 85, 83, 3, 2, 2, 2, 85, 86, 3, 2, 2, 2, 86, 19, 3, 2, 2, 2, 87, 85, 3, 2, 2, 2, 88, 93, 5, 22, 12, 2, 89, 90, 7, 18, 2, 2, 90, 92, 5, 22, 12, 2, 91, 89, 3, 2, 2, 2, 92, 95, 3, 2, 2, 2, 93, 91, 3, 2, 2, 2, 93, 94, 3, 2, 2, 2, 94, 21, 3, 2, 2, 2, 95, 93, 3, 2, 2, 2, 96, 97, 9, 3, 2, 2, 97, 23, 3, 2, 2, 2, 98, 99, 9, 4, 2, 2, 99, 25, 3, 2, 2, 2, 100, 101, 9, 5, 2, 2, 101, 27, 3, 2, 2, 2, 10, 32, 50, 55, 60, 64, 83, 85, 93] \ No newline at end of file diff --git a/grammar/unl/unl.tokens b/grammar/unl/unl.tokens new file mode 100644 index 
0000000000000000000000000000000000000000..c2b43c1801adaa8e5215d0b68aafc37b4ab15038 --- /dev/null +++ b/grammar/unl/unl.tokens @@ -0,0 +1,50 @@ +T__0=1 +T__1=2 +WS=3 +DOT=4 +COMMA=5 +SEMCOL=6 +COLON=7 +DASH=8 +LP=9 +RP=10 +LC=11 +RC=12 +LESS=13 +GREATER=14 +AT=15 +UNDERSCORE=16 +AND=17 +AOJ=18 +BEN=19 +CNT=20 +EQU=21 +ICL=22 +OBJ=23 +QUA=24 +LETTER=25 +WORD=26 +VALUE=27 +'{unl}'=1 +'{/unl}'=2 +'.'=4 +','=5 +';'=6 +':'=7 +'-'=8 +'('=9 +')'=10 +'{'=11 +'}'=12 +'<'=13 +'>'=14 +'@'=15 +'_'=16 +'and'=17 +'aoj'=18 +'ben'=19 +'cnt'=20 +'equ'=21 +'icl'=22 +'obj'=23 +'qua'=24 diff --git a/grammar/unl/unlLexer.interp b/grammar/unl/unlLexer.interp new file mode 100644 index 0000000000000000000000000000000000000000..bab2ee3b8c3f970b15d73ab6b9b0c5103b8dafcd --- /dev/null +++ b/grammar/unl/unlLexer.interp @@ -0,0 +1,102 @@ +token literal names: +null +'{unl}' +'{/unl}' +null +'.' +',' +';' +':' +'-' +'(' +')' +'{' +'}' +'<' +'>' +'@' +'_' +'and' +'aoj' +'ben' +'cnt' +'equ' +'icl' +'obj' +'qua' +null +null +null + +token symbolic names: +null +null +null +WS +DOT +COMMA +SEMCOL +COLON +DASH +LP +RP +LC +RC +LESS +GREATER +AT +UNDERSCORE +AND +AOJ +BEN +CNT +EQU +ICL +OBJ +QUA +LETTER +WORD +VALUE + +rule names: +T__0 +T__1 +WS +LOWERCASE +UPPERCASE +DIGIT +ASCII +DOT +COMMA +SEMCOL +COLON +DASH +LP +RP +LC +RC +LESS +GREATER +AT +UNDERSCORE +AND +AOJ +BEN +CNT +EQU +ICL +OBJ +QUA +LETTER +WORD +VALUE + +channel names: +DEFAULT_TOKEN_CHANNEL +HIDDEN + +mode names: +DEFAULT_MODE + +atn: +[3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 2, 29, 173, 8, 1, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 4, 6, 9, 6, 4, 7, 9, 7, 4, 8, 9, 8, 4, 9, 9, 9, 4, 10, 9, 10, 4, 11, 9, 11, 4, 12, 9, 12, 4, 13, 9, 13, 4, 14, 9, 14, 4, 15, 9, 15, 4, 16, 9, 16, 4, 17, 9, 17, 4, 18, 9, 18, 4, 19, 9, 19, 4, 20, 9, 20, 4, 21, 9, 21, 4, 22, 9, 22, 4, 23, 9, 23, 4, 24, 9, 24, 4, 25, 9, 25, 4, 26, 9, 26, 4, 27, 9, 27, 4, 28, 9, 28, 4, 29, 9, 29, 4, 30, 9, 30, 4, 31, 9, 31, 4, 32, 9, 32, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 6, 4, 80, 10, 4, 13, 4, 14, 4, 81, 3, 4, 3, 4, 3, 5, 3, 5, 3, 6, 3, 6, 3, 7, 3, 7, 3, 8, 3, 8, 3, 9, 3, 9, 3, 10, 3, 10, 3, 11, 3, 11, 3, 12, 3, 12, 3, 13, 3, 13, 3, 14, 3, 14, 3, 15, 3, 15, 3, 16, 3, 16, 3, 17, 3, 17, 3, 18, 3, 18, 3, 19, 3, 19, 3, 20, 3, 20, 3, 21, 3, 21, 3, 22, 3, 22, 3, 22, 3, 22, 3, 23, 3, 23, 3, 23, 3, 23, 3, 24, 3, 24, 3, 24, 3, 24, 3, 25, 3, 25, 3, 25, 3, 25, 3, 26, 3, 26, 3, 26, 3, 26, 3, 27, 3, 27, 3, 27, 3, 27, 3, 28, 3, 28, 3, 28, 3, 28, 3, 29, 3, 29, 3, 29, 3, 29, 3, 30, 3, 30, 5, 30, 154, 10, 30, 3, 31, 6, 31, 157, 10, 31, 13, 31, 14, 31, 158, 3, 32, 6, 32, 162, 10, 32, 13, 32, 14, 32, 163, 3, 32, 3, 32, 6, 32, 168, 10, 32, 13, 32, 14, 32, 169, 5, 32, 172, 10, 32, 2, 2, 33, 3, 3, 5, 4, 7, 5, 9, 2, 11, 2, 13, 2, 15, 2, 17, 6, 19, 7, 21, 8, 23, 9, 25, 10, 27, 11, 29, 12, 31, 13, 33, 14, 35, 15, 37, 16, 39, 17, 41, 18, 43, 19, 45, 20, 47, 21, 49, 22, 51, 23, 53, 24, 55, 25, 57, 26, 59, 27, 61, 28, 63, 29, 3, 2, 6, 5, 2, 11, 12, 14, 15, 34, 34, 3, 2, 99, 124, 3, 2, 67, 92, 7, 2, 12, 12, 36, 36, 42, 43, 62, 62, 64, 64, 2, 174, 2, 3, 3, 2, 2, 2, 2, 5, 3, 2, 2, 2, 2, 7, 3, 2, 2, 2, 2, 17, 3, 2, 2, 2, 2, 19, 3, 2, 2, 2, 2, 21, 3, 2, 2, 2, 2, 23, 3, 2, 2, 2, 2, 25, 3, 2, 2, 2, 2, 27, 3, 2, 2, 2, 2, 29, 3, 2, 2, 2, 2, 31, 3, 2, 2, 2, 2, 33, 3, 2, 2, 2, 2, 35, 3, 2, 2, 2, 2, 37, 3, 2, 2, 2, 2, 39, 3, 2, 2, 2, 2, 41, 3, 2, 2, 2, 2, 43, 3, 2, 2, 2, 2, 45, 3, 2, 2, 2, 2, 47, 3, 2, 2, 2, 2, 49, 3, 2, 2, 2, 2, 51, 3, 2, 2, 2, 2, 53, 3, 2, 
2, 2, 2, 55, 3, 2, 2, 2, 2, 57, 3, 2, 2, 2, 2, 59, 3, 2, 2, 2, 2, 61, 3, 2, 2, 2, 2, 63, 3, 2, 2, 2, 3, 65, 3, 2, 2, 2, 5, 71, 3, 2, 2, 2, 7, 79, 3, 2, 2, 2, 9, 85, 3, 2, 2, 2, 11, 87, 3, 2, 2, 2, 13, 89, 3, 2, 2, 2, 15, 91, 3, 2, 2, 2, 17, 93, 3, 2, 2, 2, 19, 95, 3, 2, 2, 2, 21, 97, 3, 2, 2, 2, 23, 99, 3, 2, 2, 2, 25, 101, 3, 2, 2, 2, 27, 103, 3, 2, 2, 2, 29, 105, 3, 2, 2, 2, 31, 107, 3, 2, 2, 2, 33, 109, 3, 2, 2, 2, 35, 111, 3, 2, 2, 2, 37, 113, 3, 2, 2, 2, 39, 115, 3, 2, 2, 2, 41, 117, 3, 2, 2, 2, 43, 119, 3, 2, 2, 2, 45, 123, 3, 2, 2, 2, 47, 127, 3, 2, 2, 2, 49, 131, 3, 2, 2, 2, 51, 135, 3, 2, 2, 2, 53, 139, 3, 2, 2, 2, 55, 143, 3, 2, 2, 2, 57, 147, 3, 2, 2, 2, 59, 153, 3, 2, 2, 2, 61, 156, 3, 2, 2, 2, 63, 161, 3, 2, 2, 2, 65, 66, 7, 125, 2, 2, 66, 67, 7, 119, 2, 2, 67, 68, 7, 112, 2, 2, 68, 69, 7, 110, 2, 2, 69, 70, 7, 127, 2, 2, 70, 4, 3, 2, 2, 2, 71, 72, 7, 125, 2, 2, 72, 73, 7, 49, 2, 2, 73, 74, 7, 119, 2, 2, 74, 75, 7, 112, 2, 2, 75, 76, 7, 110, 2, 2, 76, 77, 7, 127, 2, 2, 77, 6, 3, 2, 2, 2, 78, 80, 9, 2, 2, 2, 79, 78, 3, 2, 2, 2, 80, 81, 3, 2, 2, 2, 81, 79, 3, 2, 2, 2, 81, 82, 3, 2, 2, 2, 82, 83, 3, 2, 2, 2, 83, 84, 8, 4, 2, 2, 84, 8, 3, 2, 2, 2, 85, 86, 9, 3, 2, 2, 86, 10, 3, 2, 2, 2, 87, 88, 9, 4, 2, 2, 88, 12, 3, 2, 2, 2, 89, 90, 4, 50, 59, 2, 90, 14, 3, 2, 2, 2, 91, 92, 10, 5, 2, 2, 92, 16, 3, 2, 2, 2, 93, 94, 7, 48, 2, 2, 94, 18, 3, 2, 2, 2, 95, 96, 7, 46, 2, 2, 96, 20, 3, 2, 2, 2, 97, 98, 7, 61, 2, 2, 98, 22, 3, 2, 2, 2, 99, 100, 7, 60, 2, 2, 100, 24, 3, 2, 2, 2, 101, 102, 7, 47, 2, 2, 102, 26, 3, 2, 2, 2, 103, 104, 7, 42, 2, 2, 104, 28, 3, 2, 2, 2, 105, 106, 7, 43, 2, 2, 106, 30, 3, 2, 2, 2, 107, 108, 7, 125, 2, 2, 108, 32, 3, 2, 2, 2, 109, 110, 7, 127, 2, 2, 110, 34, 3, 2, 2, 2, 111, 112, 7, 62, 2, 2, 112, 36, 3, 2, 2, 2, 113, 114, 7, 64, 2, 2, 114, 38, 3, 2, 2, 2, 115, 116, 7, 66, 2, 2, 116, 40, 3, 2, 2, 2, 117, 118, 7, 97, 2, 2, 118, 42, 3, 2, 2, 2, 119, 120, 7, 99, 2, 2, 120, 121, 7, 112, 2, 2, 121, 122, 7, 102, 2, 2, 122, 44, 3, 2, 2, 2, 123, 124, 7, 99, 2, 2, 124, 125, 7, 113, 2, 2, 125, 126, 7, 108, 2, 2, 126, 46, 3, 2, 2, 2, 127, 128, 7, 100, 2, 2, 128, 129, 7, 103, 2, 2, 129, 130, 7, 112, 2, 2, 130, 48, 3, 2, 2, 2, 131, 132, 7, 101, 2, 2, 132, 133, 7, 112, 2, 2, 133, 134, 7, 118, 2, 2, 134, 50, 3, 2, 2, 2, 135, 136, 7, 103, 2, 2, 136, 137, 7, 115, 2, 2, 137, 138, 7, 119, 2, 2, 138, 52, 3, 2, 2, 2, 139, 140, 7, 107, 2, 2, 140, 141, 7, 101, 2, 2, 141, 142, 7, 110, 2, 2, 142, 54, 3, 2, 2, 2, 143, 144, 7, 113, 2, 2, 144, 145, 7, 100, 2, 2, 145, 146, 7, 108, 2, 2, 146, 56, 3, 2, 2, 2, 147, 148, 7, 115, 2, 2, 148, 149, 7, 119, 2, 2, 149, 150, 7, 99, 2, 2, 150, 58, 3, 2, 2, 2, 151, 154, 5, 9, 5, 2, 152, 154, 5, 11, 6, 2, 153, 151, 3, 2, 2, 2, 153, 152, 3, 2, 2, 2, 154, 60, 3, 2, 2, 2, 155, 157, 5, 59, 30, 2, 156, 155, 3, 2, 2, 2, 157, 158, 3, 2, 2, 2, 158, 156, 3, 2, 2, 2, 158, 159, 3, 2, 2, 2, 159, 62, 3, 2, 2, 2, 160, 162, 5, 13, 7, 2, 161, 160, 3, 2, 2, 2, 162, 163, 3, 2, 2, 2, 163, 161, 3, 2, 2, 2, 163, 164, 3, 2, 2, 2, 164, 171, 3, 2, 2, 2, 165, 167, 5, 17, 9, 2, 166, 168, 5, 13, 7, 2, 167, 166, 3, 2, 2, 2, 168, 169, 3, 2, 2, 2, 169, 167, 3, 2, 2, 2, 169, 170, 3, 2, 2, 2, 170, 172, 3, 2, 2, 2, 171, 165, 3, 2, 2, 2, 171, 172, 3, 2, 2, 2, 172, 64, 3, 2, 2, 2, 9, 2, 81, 153, 158, 163, 169, 171, 3, 8, 2, 2] \ No newline at end of file diff --git a/grammar/unl/unlLexer.py b/grammar/unl/unlLexer.py new file mode 100644 index 0000000000000000000000000000000000000000..58e84e73c355631f444b54ba9dcd1ef8066116d2 --- /dev/null +++ b/grammar/unl/unlLexer.py @@ -0,0 +1,145 @@ +# 
Generated from grammar/unl/unl.g4 by ANTLR 4.9.3 +from antlr4 import * +from io import StringIO +import sys +if sys.version_info[1] > 5: + from typing import TextIO +else: + from typing.io import TextIO + + + +def serializedATN(): + with StringIO() as buf: + buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\2\35") + buf.write("\u00ad\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7") + buf.write("\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r") + buf.write("\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23") + buf.write("\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30") + buf.write("\4\31\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36") + buf.write("\t\36\4\37\t\37\4 \t \3\2\3\2\3\2\3\2\3\2\3\2\3\3\3\3") + buf.write("\3\3\3\3\3\3\3\3\3\3\3\4\6\4P\n\4\r\4\16\4Q\3\4\3\4\3") + buf.write("\5\3\5\3\6\3\6\3\7\3\7\3\b\3\b\3\t\3\t\3\n\3\n\3\13\3") + buf.write("\13\3\f\3\f\3\r\3\r\3\16\3\16\3\17\3\17\3\20\3\20\3\21") + buf.write("\3\21\3\22\3\22\3\23\3\23\3\24\3\24\3\25\3\25\3\26\3\26") + buf.write("\3\26\3\26\3\27\3\27\3\27\3\27\3\30\3\30\3\30\3\30\3\31") + buf.write("\3\31\3\31\3\31\3\32\3\32\3\32\3\32\3\33\3\33\3\33\3\33") + buf.write("\3\34\3\34\3\34\3\34\3\35\3\35\3\35\3\35\3\36\3\36\5\36") + buf.write("\u009a\n\36\3\37\6\37\u009d\n\37\r\37\16\37\u009e\3 \6") + buf.write(" \u00a2\n \r \16 \u00a3\3 \3 \6 \u00a8\n \r \16 \u00a9") + buf.write("\5 \u00ac\n \2\2!\3\3\5\4\7\5\t\2\13\2\r\2\17\2\21\6\23") + buf.write("\7\25\b\27\t\31\n\33\13\35\f\37\r!\16#\17%\20\'\21)\22") + buf.write("+\23-\24/\25\61\26\63\27\65\30\67\319\32;\33=\34?\35\3") + buf.write("\2\6\5\2\13\f\16\17\"\"\3\2c|\3\2C\\\7\2\f\f$$*+>>@@\2") + buf.write("\u00ae\2\3\3\2\2\2\2\5\3\2\2\2\2\7\3\2\2\2\2\21\3\2\2") + buf.write("\2\2\23\3\2\2\2\2\25\3\2\2\2\2\27\3\2\2\2\2\31\3\2\2\2") + buf.write("\2\33\3\2\2\2\2\35\3\2\2\2\2\37\3\2\2\2\2!\3\2\2\2\2#") + buf.write("\3\2\2\2\2%\3\2\2\2\2\'\3\2\2\2\2)\3\2\2\2\2+\3\2\2\2") + buf.write("\2-\3\2\2\2\2/\3\2\2\2\2\61\3\2\2\2\2\63\3\2\2\2\2\65") + buf.write("\3\2\2\2\2\67\3\2\2\2\29\3\2\2\2\2;\3\2\2\2\2=\3\2\2\2") + buf.write("\2?\3\2\2\2\3A\3\2\2\2\5G\3\2\2\2\7O\3\2\2\2\tU\3\2\2") + buf.write("\2\13W\3\2\2\2\rY\3\2\2\2\17[\3\2\2\2\21]\3\2\2\2\23_") + buf.write("\3\2\2\2\25a\3\2\2\2\27c\3\2\2\2\31e\3\2\2\2\33g\3\2\2") + buf.write("\2\35i\3\2\2\2\37k\3\2\2\2!m\3\2\2\2#o\3\2\2\2%q\3\2\2") + buf.write("\2\'s\3\2\2\2)u\3\2\2\2+w\3\2\2\2-{\3\2\2\2/\177\3\2\2") + buf.write("\2\61\u0083\3\2\2\2\63\u0087\3\2\2\2\65\u008b\3\2\2\2") + buf.write("\67\u008f\3\2\2\29\u0093\3\2\2\2;\u0099\3\2\2\2=\u009c") + buf.write("\3\2\2\2?\u00a1\3\2\2\2AB\7}\2\2BC\7w\2\2CD\7p\2\2DE\7") + buf.write("n\2\2EF\7\177\2\2F\4\3\2\2\2GH\7}\2\2HI\7\61\2\2IJ\7w") + buf.write("\2\2JK\7p\2\2KL\7n\2\2LM\7\177\2\2M\6\3\2\2\2NP\t\2\2") + buf.write("\2ON\3\2\2\2PQ\3\2\2\2QO\3\2\2\2QR\3\2\2\2RS\3\2\2\2S") + buf.write("T\b\4\2\2T\b\3\2\2\2UV\t\3\2\2V\n\3\2\2\2WX\t\4\2\2X\f") + buf.write("\3\2\2\2YZ\4\62;\2Z\16\3\2\2\2[\\\n\5\2\2\\\20\3\2\2\2") + buf.write("]^\7\60\2\2^\22\3\2\2\2_`\7.\2\2`\24\3\2\2\2ab\7=\2\2") + buf.write("b\26\3\2\2\2cd\7<\2\2d\30\3\2\2\2ef\7/\2\2f\32\3\2\2\2") + buf.write("gh\7*\2\2h\34\3\2\2\2ij\7+\2\2j\36\3\2\2\2kl\7}\2\2l ") + buf.write("\3\2\2\2mn\7\177\2\2n\"\3\2\2\2op\7>\2\2p$\3\2\2\2qr\7") + buf.write("@\2\2r&\3\2\2\2st\7B\2\2t(\3\2\2\2uv\7a\2\2v*\3\2\2\2") + buf.write("wx\7c\2\2xy\7p\2\2yz\7f\2\2z,\3\2\2\2{|\7c\2\2|}\7q\2") + buf.write("\2}~\7l\2\2~.\3\2\2\2\177\u0080\7d\2\2\u0080\u0081\7g") + buf.write("\2\2\u0081\u0082\7p\2\2\u0082\60\3\2\2\2\u0083\u0084\7") + 
buf.write("e\2\2\u0084\u0085\7p\2\2\u0085\u0086\7v\2\2\u0086\62\3") + buf.write("\2\2\2\u0087\u0088\7g\2\2\u0088\u0089\7s\2\2\u0089\u008a") + buf.write("\7w\2\2\u008a\64\3\2\2\2\u008b\u008c\7k\2\2\u008c\u008d") + buf.write("\7e\2\2\u008d\u008e\7n\2\2\u008e\66\3\2\2\2\u008f\u0090") + buf.write("\7q\2\2\u0090\u0091\7d\2\2\u0091\u0092\7l\2\2\u00928\3") + buf.write("\2\2\2\u0093\u0094\7s\2\2\u0094\u0095\7w\2\2\u0095\u0096") + buf.write("\7c\2\2\u0096:\3\2\2\2\u0097\u009a\5\t\5\2\u0098\u009a") + buf.write("\5\13\6\2\u0099\u0097\3\2\2\2\u0099\u0098\3\2\2\2\u009a") + buf.write("<\3\2\2\2\u009b\u009d\5;\36\2\u009c\u009b\3\2\2\2\u009d") + buf.write("\u009e\3\2\2\2\u009e\u009c\3\2\2\2\u009e\u009f\3\2\2\2") + buf.write("\u009f>\3\2\2\2\u00a0\u00a2\5\r\7\2\u00a1\u00a0\3\2\2") + buf.write("\2\u00a2\u00a3\3\2\2\2\u00a3\u00a1\3\2\2\2\u00a3\u00a4") + buf.write("\3\2\2\2\u00a4\u00ab\3\2\2\2\u00a5\u00a7\5\21\t\2\u00a6") + buf.write("\u00a8\5\r\7\2\u00a7\u00a6\3\2\2\2\u00a8\u00a9\3\2\2\2") + buf.write("\u00a9\u00a7\3\2\2\2\u00a9\u00aa\3\2\2\2\u00aa\u00ac\3") + buf.write("\2\2\2\u00ab\u00a5\3\2\2\2\u00ab\u00ac\3\2\2\2\u00ac@") + buf.write("\3\2\2\2\t\2Q\u0099\u009e\u00a3\u00a9\u00ab\3\b\2\2") + return buf.getvalue() + + +class unlLexer(Lexer): + + atn = ATNDeserializer().deserialize(serializedATN()) + + decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ] + + T__0 = 1 + T__1 = 2 + WS = 3 + DOT = 4 + COMMA = 5 + SEMCOL = 6 + COLON = 7 + DASH = 8 + LP = 9 + RP = 10 + LC = 11 + RC = 12 + LESS = 13 + GREATER = 14 + AT = 15 + UNDERSCORE = 16 + AND = 17 + AOJ = 18 + BEN = 19 + CNT = 20 + EQU = 21 + ICL = 22 + OBJ = 23 + QUA = 24 + LETTER = 25 + WORD = 26 + VALUE = 27 + + channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ] + + modeNames = [ "DEFAULT_MODE" ] + + literalNames = [ "<INVALID>", + "'{unl}'", "'{/unl}'", "'.'", "','", "';'", "':'", "'-'", "'('", + "')'", "'{'", "'}'", "'<'", "'>'", "'@'", "'_'", "'and'", "'aoj'", + "'ben'", "'cnt'", "'equ'", "'icl'", "'obj'", "'qua'" ] + + symbolicNames = [ "<INVALID>", + "WS", "DOT", "COMMA", "SEMCOL", "COLON", "DASH", "LP", "RP", + "LC", "RC", "LESS", "GREATER", "AT", "UNDERSCORE", "AND", "AOJ", + "BEN", "CNT", "EQU", "ICL", "OBJ", "QUA", "LETTER", "WORD", + "VALUE" ] + + ruleNames = [ "T__0", "T__1", "WS", "LOWERCASE", "UPPERCASE", "DIGIT", + "ASCII", "DOT", "COMMA", "SEMCOL", "COLON", "DASH", "LP", + "RP", "LC", "RC", "LESS", "GREATER", "AT", "UNDERSCORE", + "AND", "AOJ", "BEN", "CNT", "EQU", "ICL", "OBJ", "QUA", + "LETTER", "WORD", "VALUE" ] + + grammarFileName = "unl.g4" + + def __init__(self, input=None, output:TextIO = sys.stdout): + super().__init__(input, output) + self.checkVersion("4.9.3") + self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache()) + self._actions = None + self._predicates = None + + diff --git a/grammar/unl/unlLexer.tokens b/grammar/unl/unlLexer.tokens new file mode 100644 index 0000000000000000000000000000000000000000..c2b43c1801adaa8e5215d0b68aafc37b4ab15038 --- /dev/null +++ b/grammar/unl/unlLexer.tokens @@ -0,0 +1,50 @@ +T__0=1 +T__1=2 +WS=3 +DOT=4 +COMMA=5 +SEMCOL=6 +COLON=7 +DASH=8 +LP=9 +RP=10 +LC=11 +RC=12 +LESS=13 +GREATER=14 +AT=15 +UNDERSCORE=16 +AND=17 +AOJ=18 +BEN=19 +CNT=20 +EQU=21 +ICL=22 +OBJ=23 +QUA=24 +LETTER=25 +WORD=26 +VALUE=27 +'{unl}'=1 +'{/unl}'=2 +'.'=4 +','=5 +';'=6 +':'=7 +'-'=8 +'('=9 +')'=10 +'{'=11 +'}'=12 +'<'=13 +'>'=14 +'@'=15 +'_'=16 +'and'=17 +'aoj'=18 +'ben'=19 +'cnt'=20 +'equ'=21 +'icl'=22 +'obj'=23 +'qua'=24 diff --git 
a/grammar/unl/unlListener.py b/grammar/unl/unlListener.py new file mode 100644 index 0000000000000000000000000000000000000000..d107a9cb01d483adc942656ba49b4da61f867ae1 --- /dev/null +++ b/grammar/unl/unlListener.py @@ -0,0 +1,129 @@ +# Generated from grammar/unl/unl.g4 by ANTLR 4.9.3 +from antlr4 import * +if __name__ is not None and "." in __name__: + from .unlParser import unlParser +else: + from unlParser import unlParser + +# This class defines a complete listener for a parse tree produced by unlParser. +class unlListener(ParseTreeListener): + + # Enter a parse tree produced by unlParser#unlPart. + def enterUnlPart(self, ctx:unlParser.UnlPartContext): + pass + + # Exit a parse tree produced by unlParser#unlPart. + def exitUnlPart(self, ctx:unlParser.UnlPartContext): + pass + + + # Enter a parse tree produced by unlParser#relationOccurrence. + def enterRelationOccurrence(self, ctx:unlParser.RelationOccurrenceContext): + pass + + # Exit a parse tree produced by unlParser#relationOccurrence. + def exitRelationOccurrence(self, ctx:unlParser.RelationOccurrenceContext): + pass + + + # Enter a parse tree produced by unlParser#universalWord. + def enterUniversalWord(self, ctx:unlParser.UniversalWordContext): + pass + + # Exit a parse tree produced by unlParser#universalWord. + def exitUniversalWord(self, ctx:unlParser.UniversalWordContext): + pass + + + # Enter a parse tree produced by unlParser#headword. + def enterHeadword(self, ctx:unlParser.HeadwordContext): + pass + + # Exit a parse tree produced by unlParser#headword. + def exitHeadword(self, ctx:unlParser.HeadwordContext): + pass + + + # Enter a parse tree produced by unlParser#restriction. + def enterRestriction(self, ctx:unlParser.RestrictionContext): + pass + + # Exit a parse tree produced by unlParser#restriction. + def exitRestriction(self, ctx:unlParser.RestrictionContext): + pass + + + # Enter a parse tree produced by unlParser#attribute. + def enterAttribute(self, ctx:unlParser.AttributeContext): + pass + + # Exit a parse tree produced by unlParser#attribute. + def exitAttribute(self, ctx:unlParser.AttributeContext): + pass + + + # Enter a parse tree produced by unlParser#value. + def enterValue(self, ctx:unlParser.ValueContext): + pass + + # Exit a parse tree produced by unlParser#value. + def exitValue(self, ctx:unlParser.ValueContext): + pass + + + # Enter a parse tree produced by unlParser#universalRelation. + def enterUniversalRelation(self, ctx:unlParser.UniversalRelationContext): + pass + + # Exit a parse tree produced by unlParser#universalRelation. + def exitUniversalRelation(self, ctx:unlParser.UniversalRelationContext): + pass + + + # Enter a parse tree produced by unlParser#sentence. + def enterSentence(self, ctx:unlParser.SentenceContext): + pass + + # Exit a parse tree produced by unlParser#sentence. + def exitSentence(self, ctx:unlParser.SentenceContext): + pass + + + # Enter a parse tree produced by unlParser#ident. + def enterIdent(self, ctx:unlParser.IdentContext): + pass + + # Exit a parse tree produced by unlParser#ident. + def exitIdent(self, ctx:unlParser.IdentContext): + pass + + + # Enter a parse tree produced by unlParser#word. + def enterWord(self, ctx:unlParser.WordContext): + pass + + # Exit a parse tree produced by unlParser#word. + def exitWord(self, ctx:unlParser.WordContext): + pass + + + # Enter a parse tree produced by unlParser#punctuation. + def enterPunctuation(self, ctx:unlParser.PunctuationContext): + pass + + # Exit a parse tree produced by unlParser#punctuation. 
+ def exitPunctuation(self, ctx:unlParser.PunctuationContext): + pass + + + # Enter a parse tree produced by unlParser#bracket. + def enterBracket(self, ctx:unlParser.BracketContext): + pass + + # Exit a parse tree produced by unlParser#bracket. + def exitBracket(self, ctx:unlParser.BracketContext): + pass + + + +del unlParser \ No newline at end of file diff --git a/grammar/unl/unlParser.py b/grammar/unl/unlParser.py new file mode 100644 index 0000000000000000000000000000000000000000..6609289b4790a1245628350b0461329789a45be1 --- /dev/null +++ b/grammar/unl/unlParser.py @@ -0,0 +1,945 @@ +# Generated from grammar/unl/unl.g4 by ANTLR 4.9.3 +# encoding: utf-8 +from antlr4 import * +from io import StringIO +import sys +if sys.version_info[1] > 5: + from typing import TextIO +else: + from typing.io import TextIO + + +def serializedATN(): + with StringIO() as buf: + buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\3\35") + buf.write("g\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7\4\b") + buf.write("\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r\4\16\t") + buf.write("\16\3\2\3\2\6\2\37\n\2\r\2\16\2 \3\2\3\2\3\3\3\3\3\3\3") + buf.write("\3\3\3\3\3\3\3\3\4\3\4\3\4\3\4\3\4\7\4\61\n\4\f\4\16\4") + buf.write("\64\13\4\3\4\3\4\5\48\n\4\3\4\7\4;\n\4\f\4\16\4>\13\4") + buf.write("\3\4\5\4A\n\4\3\5\3\5\3\6\3\6\3\6\3\6\3\7\3\7\3\7\3\7") + buf.write("\3\b\3\b\3\t\3\t\3\n\3\n\3\n\7\nT\n\n\f\n\16\nW\13\n\3") + buf.write("\13\3\13\3\13\7\13\\\n\13\f\13\16\13_\13\13\3\f\3\f\3") + buf.write("\r\3\r\3\16\3\16\3\16\2\2\17\2\4\6\b\n\f\16\20\22\24\26") + buf.write("\30\32\2\6\3\2\23\32\3\2\33\34\3\2\6\n\3\2\13\16\2b\2") + buf.write("\34\3\2\2\2\4$\3\2\2\2\6@\3\2\2\2\bB\3\2\2\2\nD\3\2\2") + buf.write("\2\fH\3\2\2\2\16L\3\2\2\2\20N\3\2\2\2\22U\3\2\2\2\24X") + buf.write("\3\2\2\2\26`\3\2\2\2\30b\3\2\2\2\32d\3\2\2\2\34\36\7\3") + buf.write("\2\2\35\37\5\4\3\2\36\35\3\2\2\2\37 \3\2\2\2 \36\3\2\2") + buf.write("\2 !\3\2\2\2!\"\3\2\2\2\"#\7\4\2\2#\3\3\2\2\2$%\5\20\t") + buf.write("\2%&\7\13\2\2&\'\5\6\4\2\'(\7\7\2\2()\5\6\4\2)*\7\f\2") + buf.write("\2*\5\3\2\2\2+\67\5\b\5\2,-\7\13\2\2-\62\5\n\6\2./\7\7") + buf.write("\2\2/\61\5\n\6\2\60.\3\2\2\2\61\64\3\2\2\2\62\60\3\2\2") + buf.write("\2\62\63\3\2\2\2\63\65\3\2\2\2\64\62\3\2\2\2\65\66\7\f") + buf.write("\2\2\668\3\2\2\2\67,\3\2\2\2\678\3\2\2\28<\3\2\2\29;\5") + buf.write("\f\7\2:9\3\2\2\2;>\3\2\2\2<:\3\2\2\2<=\3\2\2\2=A\3\2\2") + buf.write("\2><\3\2\2\2?A\5\16\b\2@+\3\2\2\2@?\3\2\2\2A\7\3\2\2\2") + buf.write("BC\5\24\13\2C\t\3\2\2\2DE\5\20\t\2EF\7\20\2\2FG\5\24\13") + buf.write("\2G\13\3\2\2\2HI\7\6\2\2IJ\7\21\2\2JK\5\24\13\2K\r\3\2") + buf.write("\2\2LM\7\35\2\2M\17\3\2\2\2NO\t\2\2\2O\21\3\2\2\2PT\5") + buf.write("\26\f\2QT\5\30\r\2RT\5\32\16\2SP\3\2\2\2SQ\3\2\2\2SR\3") + buf.write("\2\2\2TW\3\2\2\2US\3\2\2\2UV\3\2\2\2V\23\3\2\2\2WU\3\2") + buf.write("\2\2X]\5\26\f\2YZ\7\22\2\2Z\\\5\26\f\2[Y\3\2\2\2\\_\3") + buf.write("\2\2\2][\3\2\2\2]^\3\2\2\2^\25\3\2\2\2_]\3\2\2\2`a\t\3") + buf.write("\2\2a\27\3\2\2\2bc\t\4\2\2c\31\3\2\2\2de\t\5\2\2e\33\3") + buf.write("\2\2\2\n \62\67<@SU]") + return buf.getvalue() + + +class unlParser ( Parser ): + + grammarFileName = "unl.g4" + + atn = ATNDeserializer().deserialize(serializedATN()) + + decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ] + + sharedContextCache = PredictionContextCache() + + literalNames = [ "<INVALID>", "'{unl}'", "'{/unl}'", "<INVALID>", "'.'", + "','", "';'", "':'", "'-'", "'('", "')'", "'{'", "'}'", + "'<'", "'>'", "'@'", "'_'", "'and'", "'aoj'", "'ben'", + "'cnt'", 
"'equ'", "'icl'", "'obj'", "'qua'" ] + + symbolicNames = [ "<INVALID>", "<INVALID>", "<INVALID>", "WS", "DOT", + "COMMA", "SEMCOL", "COLON", "DASH", "LP", "RP", "LC", + "RC", "LESS", "GREATER", "AT", "UNDERSCORE", "AND", + "AOJ", "BEN", "CNT", "EQU", "ICL", "OBJ", "QUA", "LETTER", + "WORD", "VALUE" ] + + RULE_unlPart = 0 + RULE_relationOccurrence = 1 + RULE_universalWord = 2 + RULE_headword = 3 + RULE_restriction = 4 + RULE_attribute = 5 + RULE_value = 6 + RULE_universalRelation = 7 + RULE_sentence = 8 + RULE_ident = 9 + RULE_word = 10 + RULE_punctuation = 11 + RULE_bracket = 12 + + ruleNames = [ "unlPart", "relationOccurrence", "universalWord", "headword", + "restriction", "attribute", "value", "universalRelation", + "sentence", "ident", "word", "punctuation", "bracket" ] + + EOF = Token.EOF + T__0=1 + T__1=2 + WS=3 + DOT=4 + COMMA=5 + SEMCOL=6 + COLON=7 + DASH=8 + LP=9 + RP=10 + LC=11 + RC=12 + LESS=13 + GREATER=14 + AT=15 + UNDERSCORE=16 + AND=17 + AOJ=18 + BEN=19 + CNT=20 + EQU=21 + ICL=22 + OBJ=23 + QUA=24 + LETTER=25 + WORD=26 + VALUE=27 + + def __init__(self, input:TokenStream, output:TextIO = sys.stdout): + super().__init__(input, output) + self.checkVersion("4.9.3") + self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache) + self._predicates = None + + + + + class UnlPartContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def relationOccurrence(self, i:int=None): + if i is None: + return self.getTypedRuleContexts(unlParser.RelationOccurrenceContext) + else: + return self.getTypedRuleContext(unlParser.RelationOccurrenceContext,i) + + + def getRuleIndex(self): + return unlParser.RULE_unlPart + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterUnlPart" ): + listener.enterUnlPart(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitUnlPart" ): + listener.exitUnlPart(self) + + + + + def unlPart(self): + + localctx = unlParser.UnlPartContext(self, self._ctx, self.state) + self.enterRule(localctx, 0, self.RULE_unlPart) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 26 + self.match(unlParser.T__0) + self.state = 28 + self._errHandler.sync(self) + _la = self._input.LA(1) + while True: + self.state = 27 + self.relationOccurrence() + self.state = 30 + self._errHandler.sync(self) + _la = self._input.LA(1) + if not ((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << unlParser.AND) | (1 << unlParser.AOJ) | (1 << unlParser.BEN) | (1 << unlParser.CNT) | (1 << unlParser.EQU) | (1 << unlParser.ICL) | (1 << unlParser.OBJ) | (1 << unlParser.QUA))) != 0)): + break + + self.state = 32 + self.match(unlParser.T__1) + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class RelationOccurrenceContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def universalRelation(self): + return self.getTypedRuleContext(unlParser.UniversalRelationContext,0) + + + def LP(self): + return self.getToken(unlParser.LP, 0) + + def universalWord(self, i:int=None): + if i is None: + return self.getTypedRuleContexts(unlParser.UniversalWordContext) + else: + 
return self.getTypedRuleContext(unlParser.UniversalWordContext,i) + + + def COMMA(self): + return self.getToken(unlParser.COMMA, 0) + + def RP(self): + return self.getToken(unlParser.RP, 0) + + def getRuleIndex(self): + return unlParser.RULE_relationOccurrence + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterRelationOccurrence" ): + listener.enterRelationOccurrence(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitRelationOccurrence" ): + listener.exitRelationOccurrence(self) + + + + + def relationOccurrence(self): + + localctx = unlParser.RelationOccurrenceContext(self, self._ctx, self.state) + self.enterRule(localctx, 2, self.RULE_relationOccurrence) + try: + self.enterOuterAlt(localctx, 1) + self.state = 34 + self.universalRelation() + self.state = 35 + self.match(unlParser.LP) + self.state = 36 + self.universalWord() + self.state = 37 + self.match(unlParser.COMMA) + self.state = 38 + self.universalWord() + self.state = 39 + self.match(unlParser.RP) + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class UniversalWordContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def headword(self): + return self.getTypedRuleContext(unlParser.HeadwordContext,0) + + + def LP(self): + return self.getToken(unlParser.LP, 0) + + def restriction(self, i:int=None): + if i is None: + return self.getTypedRuleContexts(unlParser.RestrictionContext) + else: + return self.getTypedRuleContext(unlParser.RestrictionContext,i) + + + def RP(self): + return self.getToken(unlParser.RP, 0) + + def attribute(self, i:int=None): + if i is None: + return self.getTypedRuleContexts(unlParser.AttributeContext) + else: + return self.getTypedRuleContext(unlParser.AttributeContext,i) + + + def COMMA(self, i:int=None): + if i is None: + return self.getTokens(unlParser.COMMA) + else: + return self.getToken(unlParser.COMMA, i) + + def value(self): + return self.getTypedRuleContext(unlParser.ValueContext,0) + + + def getRuleIndex(self): + return unlParser.RULE_universalWord + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterUniversalWord" ): + listener.enterUniversalWord(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitUniversalWord" ): + listener.exitUniversalWord(self) + + + + + def universalWord(self): + + localctx = unlParser.UniversalWordContext(self, self._ctx, self.state) + self.enterRule(localctx, 4, self.RULE_universalWord) + self._la = 0 # Token type + try: + self.state = 62 + self._errHandler.sync(self) + token = self._input.LA(1) + if token in [unlParser.LETTER, unlParser.WORD]: + self.enterOuterAlt(localctx, 1) + self.state = 41 + self.headword() + self.state = 53 + self._errHandler.sync(self) + _la = self._input.LA(1) + if _la==unlParser.LP: + self.state = 42 + self.match(unlParser.LP) + self.state = 43 + self.restriction() + self.state = 48 + self._errHandler.sync(self) + _la = self._input.LA(1) + while _la==unlParser.COMMA: + self.state = 44 + self.match(unlParser.COMMA) + self.state = 45 + self.restriction() + self.state = 50 + self._errHandler.sync(self) + _la = self._input.LA(1) + + self.state = 51 + self.match(unlParser.RP) + + + self.state = 58 + self._errHandler.sync(self) + _la = 
self._input.LA(1) + while _la==unlParser.DOT: + self.state = 55 + self.attribute() + self.state = 60 + self._errHandler.sync(self) + _la = self._input.LA(1) + + pass + elif token in [unlParser.VALUE]: + self.enterOuterAlt(localctx, 2) + self.state = 61 + self.value() + pass + else: + raise NoViableAltException(self) + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class HeadwordContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def ident(self): + return self.getTypedRuleContext(unlParser.IdentContext,0) + + + def getRuleIndex(self): + return unlParser.RULE_headword + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterHeadword" ): + listener.enterHeadword(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitHeadword" ): + listener.exitHeadword(self) + + + + + def headword(self): + + localctx = unlParser.HeadwordContext(self, self._ctx, self.state) + self.enterRule(localctx, 6, self.RULE_headword) + try: + self.enterOuterAlt(localctx, 1) + self.state = 64 + self.ident() + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class RestrictionContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def universalRelation(self): + return self.getTypedRuleContext(unlParser.UniversalRelationContext,0) + + + def GREATER(self): + return self.getToken(unlParser.GREATER, 0) + + def ident(self): + return self.getTypedRuleContext(unlParser.IdentContext,0) + + + def getRuleIndex(self): + return unlParser.RULE_restriction + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterRestriction" ): + listener.enterRestriction(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitRestriction" ): + listener.exitRestriction(self) + + + + + def restriction(self): + + localctx = unlParser.RestrictionContext(self, self._ctx, self.state) + self.enterRule(localctx, 8, self.RULE_restriction) + try: + self.enterOuterAlt(localctx, 1) + self.state = 66 + self.universalRelation() + self.state = 67 + self.match(unlParser.GREATER) + self.state = 68 + self.ident() + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class AttributeContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def DOT(self): + return self.getToken(unlParser.DOT, 0) + + def AT(self): + return self.getToken(unlParser.AT, 0) + + def ident(self): + return self.getTypedRuleContext(unlParser.IdentContext,0) + + + def getRuleIndex(self): + return unlParser.RULE_attribute + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterAttribute" ): + listener.enterAttribute(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, 
"exitAttribute" ): + listener.exitAttribute(self) + + + + + def attribute(self): + + localctx = unlParser.AttributeContext(self, self._ctx, self.state) + self.enterRule(localctx, 10, self.RULE_attribute) + try: + self.enterOuterAlt(localctx, 1) + self.state = 70 + self.match(unlParser.DOT) + self.state = 71 + self.match(unlParser.AT) + self.state = 72 + self.ident() + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class ValueContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def VALUE(self): + return self.getToken(unlParser.VALUE, 0) + + def getRuleIndex(self): + return unlParser.RULE_value + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterValue" ): + listener.enterValue(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitValue" ): + listener.exitValue(self) + + + + + def value(self): + + localctx = unlParser.ValueContext(self, self._ctx, self.state) + self.enterRule(localctx, 12, self.RULE_value) + try: + self.enterOuterAlt(localctx, 1) + self.state = 74 + self.match(unlParser.VALUE) + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class UniversalRelationContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def AND(self): + return self.getToken(unlParser.AND, 0) + + def AOJ(self): + return self.getToken(unlParser.AOJ, 0) + + def BEN(self): + return self.getToken(unlParser.BEN, 0) + + def CNT(self): + return self.getToken(unlParser.CNT, 0) + + def EQU(self): + return self.getToken(unlParser.EQU, 0) + + def ICL(self): + return self.getToken(unlParser.ICL, 0) + + def OBJ(self): + return self.getToken(unlParser.OBJ, 0) + + def QUA(self): + return self.getToken(unlParser.QUA, 0) + + def getRuleIndex(self): + return unlParser.RULE_universalRelation + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterUniversalRelation" ): + listener.enterUniversalRelation(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitUniversalRelation" ): + listener.exitUniversalRelation(self) + + + + + def universalRelation(self): + + localctx = unlParser.UniversalRelationContext(self, self._ctx, self.state) + self.enterRule(localctx, 14, self.RULE_universalRelation) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 76 + _la = self._input.LA(1) + if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << unlParser.AND) | (1 << unlParser.AOJ) | (1 << unlParser.BEN) | (1 << unlParser.CNT) | (1 << unlParser.EQU) | (1 << unlParser.ICL) | (1 << unlParser.OBJ) | (1 << unlParser.QUA))) != 0)): + self._errHandler.recoverInline(self) + else: + self._errHandler.reportMatch(self) + self.consume() + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class SentenceContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, 
parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def word(self, i:int=None): + if i is None: + return self.getTypedRuleContexts(unlParser.WordContext) + else: + return self.getTypedRuleContext(unlParser.WordContext,i) + + + def punctuation(self, i:int=None): + if i is None: + return self.getTypedRuleContexts(unlParser.PunctuationContext) + else: + return self.getTypedRuleContext(unlParser.PunctuationContext,i) + + + def bracket(self, i:int=None): + if i is None: + return self.getTypedRuleContexts(unlParser.BracketContext) + else: + return self.getTypedRuleContext(unlParser.BracketContext,i) + + + def getRuleIndex(self): + return unlParser.RULE_sentence + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterSentence" ): + listener.enterSentence(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitSentence" ): + listener.exitSentence(self) + + + + + def sentence(self): + + localctx = unlParser.SentenceContext(self, self._ctx, self.state) + self.enterRule(localctx, 16, self.RULE_sentence) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 83 + self._errHandler.sync(self) + _la = self._input.LA(1) + while (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << unlParser.DOT) | (1 << unlParser.COMMA) | (1 << unlParser.SEMCOL) | (1 << unlParser.COLON) | (1 << unlParser.DASH) | (1 << unlParser.LP) | (1 << unlParser.RP) | (1 << unlParser.LC) | (1 << unlParser.RC) | (1 << unlParser.LETTER) | (1 << unlParser.WORD))) != 0): + self.state = 81 + self._errHandler.sync(self) + token = self._input.LA(1) + if token in [unlParser.LETTER, unlParser.WORD]: + self.state = 78 + self.word() + pass + elif token in [unlParser.DOT, unlParser.COMMA, unlParser.SEMCOL, unlParser.COLON, unlParser.DASH]: + self.state = 79 + self.punctuation() + pass + elif token in [unlParser.LP, unlParser.RP, unlParser.LC, unlParser.RC]: + self.state = 80 + self.bracket() + pass + else: + raise NoViableAltException(self) + + self.state = 85 + self._errHandler.sync(self) + _la = self._input.LA(1) + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class IdentContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def word(self, i:int=None): + if i is None: + return self.getTypedRuleContexts(unlParser.WordContext) + else: + return self.getTypedRuleContext(unlParser.WordContext,i) + + + def UNDERSCORE(self, i:int=None): + if i is None: + return self.getTokens(unlParser.UNDERSCORE) + else: + return self.getToken(unlParser.UNDERSCORE, i) + + def getRuleIndex(self): + return unlParser.RULE_ident + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterIdent" ): + listener.enterIdent(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitIdent" ): + listener.exitIdent(self) + + + + + def ident(self): + + localctx = unlParser.IdentContext(self, self._ctx, self.state) + self.enterRule(localctx, 18, self.RULE_ident) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 86 + self.word() + self.state = 91 + self._errHandler.sync(self) + _la = self._input.LA(1) + while _la==unlParser.UNDERSCORE: + self.state = 87 + 
self.match(unlParser.UNDERSCORE) + self.state = 88 + self.word() + self.state = 93 + self._errHandler.sync(self) + _la = self._input.LA(1) + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class WordContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def LETTER(self): + return self.getToken(unlParser.LETTER, 0) + + def WORD(self): + return self.getToken(unlParser.WORD, 0) + + def getRuleIndex(self): + return unlParser.RULE_word + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterWord" ): + listener.enterWord(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitWord" ): + listener.exitWord(self) + + + + + def word(self): + + localctx = unlParser.WordContext(self, self._ctx, self.state) + self.enterRule(localctx, 20, self.RULE_word) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 94 + _la = self._input.LA(1) + if not(_la==unlParser.LETTER or _la==unlParser.WORD): + self._errHandler.recoverInline(self) + else: + self._errHandler.reportMatch(self) + self.consume() + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class PunctuationContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def DOT(self): + return self.getToken(unlParser.DOT, 0) + + def COMMA(self): + return self.getToken(unlParser.COMMA, 0) + + def SEMCOL(self): + return self.getToken(unlParser.SEMCOL, 0) + + def COLON(self): + return self.getToken(unlParser.COLON, 0) + + def DASH(self): + return self.getToken(unlParser.DASH, 0) + + def getRuleIndex(self): + return unlParser.RULE_punctuation + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterPunctuation" ): + listener.enterPunctuation(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitPunctuation" ): + listener.exitPunctuation(self) + + + + + def punctuation(self): + + localctx = unlParser.PunctuationContext(self, self._ctx, self.state) + self.enterRule(localctx, 22, self.RULE_punctuation) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 96 + _la = self._input.LA(1) + if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << unlParser.DOT) | (1 << unlParser.COMMA) | (1 << unlParser.SEMCOL) | (1 << unlParser.COLON) | (1 << unlParser.DASH))) != 0)): + self._errHandler.recoverInline(self) + else: + self._errHandler.reportMatch(self) + self.consume() + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class BracketContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def LP(self): + return self.getToken(unlParser.LP, 0) + + def RP(self): + return self.getToken(unlParser.RP, 0) + + def LC(self): + return 
self.getToken(unlParser.LC, 0) + + def RC(self): + return self.getToken(unlParser.RC, 0) + + def getRuleIndex(self): + return unlParser.RULE_bracket + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterBracket" ): + listener.enterBracket(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitBracket" ): + listener.exitBracket(self) + + + + + def bracket(self): + + localctx = unlParser.BracketContext(self, self._ctx, self.state) + self.enterRule(localctx, 24, self.RULE_bracket) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 98 + _la = self._input.LA(1) + if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << unlParser.LP) | (1 << unlParser.RP) | (1 << unlParser.LC) | (1 << unlParser.RC))) != 0)): + self._errHandler.recoverInline(self) + else: + self._errHandler.reportMatch(self) + self.consume() + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + + + diff --git a/input/r1.txt b/input/r1.txt new file mode 100644 index 0000000000000000000000000000000000000000..d1728b7e8127b8cf579baead9b739cda23059811 --- /dev/null +++ b/input/r1.txt @@ -0,0 +1,18 @@ +[D] +[S:R1] +{org:en} +The system allows a radio channel to take on two states: Listening and Traffic. +{/org} +{unl} +aoj( allow(icl>be, aoj>thing, ben>thing, obj>uw, equ>make_possible).@entry, system(icl>group).@def ) +obj( allow(icl>be, aoj>thing, ben>thing, obj>uw, equ>make_possible).@entry, take_on(aoj>thing, equ>assume,icl>change, obj>thing) ) +ben( allow(icl>be, aoj>thing, ben>thing, obj>uw, equ>make_possible).@entry, channel(icl>radiowave).@indef) +aoj( take_on(aoj>thing, equ>assume, icl>change, obj>thing), channel(icl>radiowave).@indef ) +obj( take_on(aoj>thing, equ>assume, icl>change, obj>thing), state(icl>attribute).@plu ) +qua( state(icl>attribute).@plu, 2 ) +cnt( state(icl>attribute).@plu, listening(icl>sensing) ) +and( listening(icl>sensing),traffic(icl>communication) ) +{/unl} +[/S] +[/D] + diff --git a/parse.py b/parse.py index fa573f047d1eb13cf4972ebc3610ba28c71235b8..174ba842da2325c51935d16f4869d41ba80b130d 100644 --- a/parse.py +++ b/parse.py @@ -85,11 +85,35 @@ def parse_document(input): def parse_org(input): - pass + + # -- Create python lexer and parser + create_lexer_parser_with_antlr(org_grammar) + + # -- Import Lexer/Parser (after creation by ANTLR4) + from grammar.org.orgLexer import orgLexer + from grammar.org.orgParser import orgParser + + # -- Parse ORG part + parser = instantiate_lexer_parser(input, orgLexer, orgParser) + print("--- Parse origin sentence") + tree = parser.orgPart() + print("----- resulting tree:\n" + tree.toStringTree(recog=parser)) def parse_unl(input): - pass + + # -- Create python lexer and parser + create_lexer_parser_with_antlr(unl_grammar) + + # -- Import Lexer/Parser (after creation by ANTLR4) + from grammar.unl.unlLexer import unlLexer + from grammar.unl.unlParser import unlParser + + # -- Parse UNL part + parser = instantiate_lexer_parser(input, unlLexer, unlParser) + print("--- Parse UNL representation") + tree = parser.unlPart() + print("----- resulting tree:\n" + tree.toStringTree(recog=parser)) #============================================================================== @@ -109,6 +133,14 @@ def main(argv): unl_part = document.sentence.unl_part.to_string() print("----- org_part:\n" + org_part) print("----- unl_part:\n" + unl_part) + + # -- ORG
Parsing (Sentence Original Part) + print("-- ORG Parsing (Origin Sentence)") + parse_org(InputStream(org_part)) + + # -- UNL Parsing (Sentence UNL Part) + print("-- UNL Parsing (UNL Representation)") + parse_unl(InputStream(unl_part)) if __name__ == '__main__':
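A minimal usage sketch for the new parse_unl entry point introduced in the parse.py hunk above. It assumes the antlr4-python3-runtime matching ANTLR 4.9.3 is installed and that parse.py's helpers (create_lexer_parser_with_antlr, instantiate_lexer_parser, unl_grammar) behave as shown in the hunk; the sample relation is borrowed from input/r1.txt.

    # sketch.py -- hypothetical driver, not part of the patch
    from antlr4 import InputStream
    from parse import parse_unl

    # One {unl}...{/unl} block with a single relation occurrence;
    # the unlPart rule requires at least one relationOccurrence.
    unl_text = "{unl}\nqua( state(icl>attribute).@plu, 2 )\n{/unl}"

    # parse_unl regenerates the lexer/parser with ANTLR, parses the
    # block via unlPart(), and prints the resulting parse tree.
    parse_unl(InputStream(unl_text))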