From ac19a74eb56b55a79b0379342ec9050d073f0dcf Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Aur=C3=A9lien=20Lamercerie?=
 <aurelien.lamercerie@tetras-libre.fr>
Date: Thu, 7 Apr 2022 23:06:39 +0200
Subject: [PATCH] ORG and UNL parsing (Concrete syntax only)

---
 .gitignore                                    |   6 +
 asd/__pycache__/doc.cpython-310.pyc           | Bin 1692 -> 0 bytes
 .../doc/__pycache__/docLexer.cpython-310.pyc  | Bin 2912 -> 0 bytes
 .../doc/__pycache__/docParser.cpython-310.pyc | Bin 8112 -> 0 bytes
 grammar/org/org.interp                        |  16 +
 grammar/org/org.tokens                        |   2 +
 grammar/org/orgLexer.interp                   |  23 +
 grammar/org/orgLexer.py                       |  61 ++
 grammar/org/orgLexer.tokens                   |   2 +
 grammar/org/orgListener.py                    |  21 +
 grammar/org/orgParser.py                      |  94 ++
 grammar/unl/unl.g4                            |  90 +-
 grammar/unl/unl.interp                        |  78 ++
 grammar/unl/unl.tokens                        |  50 +
 grammar/unl/unlLexer.interp                   | 102 ++
 grammar/unl/unlLexer.py                       | 145 +++
 grammar/unl/unlLexer.tokens                   |  50 +
 grammar/unl/unlListener.py                    | 129 +++
 grammar/unl/unlParser.py                      | 945 ++++++++++++++++++
 input/r1.txt                                  |  18 +
 parse.py                                      |  36 +-
 21 files changed, 1863 insertions(+), 5 deletions(-)
 create mode 100644 .gitignore
 delete mode 100644 asd/__pycache__/doc.cpython-310.pyc
 delete mode 100644 grammar/doc/__pycache__/docLexer.cpython-310.pyc
 delete mode 100644 grammar/doc/__pycache__/docParser.cpython-310.pyc
 create mode 100644 grammar/org/org.interp
 create mode 100644 grammar/org/org.tokens
 create mode 100644 grammar/org/orgLexer.interp
 create mode 100644 grammar/org/orgLexer.py
 create mode 100644 grammar/org/orgLexer.tokens
 create mode 100644 grammar/org/orgListener.py
 create mode 100644 grammar/org/orgParser.py
 create mode 100644 grammar/unl/unl.interp
 create mode 100644 grammar/unl/unl.tokens
 create mode 100644 grammar/unl/unlLexer.interp
 create mode 100644 grammar/unl/unlLexer.py
 create mode 100644 grammar/unl/unlLexer.tokens
 create mode 100644 grammar/unl/unlListener.py
 create mode 100644 grammar/unl/unlParser.py
 create mode 100644 input/r1.txt

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..950f50f
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,6 @@
+*.pyc
+*__pycache__*
+*.todo
+devtemp*.py
+.project
+*.ttl.tbc
diff --git a/asd/__pycache__/doc.cpython-310.pyc b/asd/__pycache__/doc.cpython-310.pyc
deleted file mode 100644
index 85a8c8992eda54bf56db9080f47bf63f2375f1ec..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 1692
zcmd1j<>g{vU|_J`=9i?)&cN^(#6iZ)3=9ko3=9m#DGUq@DGVu$ISf%Cnkk1dmnn)V
zmpO`=5yWTCVaa8UV$EfXVq;`*XGmd5VQpbZVQprLVs~dqVM}3eVMt+bW{To)XGq~l
z;cQ_@;cRA#;&f+7;Y#6dVMyU_W{ToUVGL%_<ar6Q-%pe27HdF$L2BMDmH^KHS52l{
zTyB{;slg>hsfoF_1fBD9bMx~;^0QO(AVSF?UC5XT>hjMr3=9mZ3{i|J3{gxej44bl
z3{lJ}%qc7_3{fm8ticSLY_~XE@{>z*Q}araLAqe3fY>1H%)r3#85B%4j5Q4L3@Hr3
z3@aJ^G#PJk6sP8uq~;~3YBJqoDNfBvD*~xn$xy_@z`*d!KtCftH&s6;F*h|YD^ou_
zzbL!7ATc>rKP0uJD6v?-G%v?7uS7qwI7L4tKUuG!@)k#Yd}dx|NqoFA$VQOWT#QwM
zFt_Q!WRn?TE@WU}0I@+TL5?g2Szp7D#jt>JAww`j5hnu!gC-N$QMWit^5ct3iZb)k
zS27lHGcZ6L33h1_FNiC^z`&peV(>FCFjNV_ZH4ReTgeJ?VP0ZxYJB`HuK4)e{FKrh
z5Su4HzOXbg2O?9%2Qma?8pLo=SU^}{YuFhW7!*O?V`E@o;9%fj;$bcViQ$aDB9KB*
z90fxII~kNFVIcrw!@Uo(6cR@@Of?MgjG&l;2Er|l{G#;ug2bYdTO6f%IbfD1GbA7+
zKn|7!5om4&`9uel_*obj7}yv=kpXiA5s@Lpz`y{Dj2eb4rYuHrc%(2Ffh`3&xCm^s
zAQtOEvIZdQK^_!htP+A-j);>YkS&nlMRUA30|SE=NH-|_zyZxt1QH_{(Cq$2=>dsF
zxbs302gtFY#I=&~7He5zPHE~&CP+|%@*0wBz%dRAMKh2NaKaLRnvOdFK`9!Pnm>aa
z4hleMN-mNH8G<XIEJ0}s6i{r8Re~@(hzTbt1_lNbkjY{o0>mR6LZNv%M1~N=(Xt?;
z(c%XbEcPIANU%VSBpfV7av)8h(1)0fD?pq;%H%<LSrBG6F~On0z`$S&(h3d^5Koi)
z7ISfB@hz_6(xif-{N&W);v!H$7lHELEw;qGlAI!wB2X47Vg+dg<z{e7D*^>(5y%Ln
lBmoWuP`bFqVFM{Z>_8D-3<?eoJ^@AnMm_;PPChOnP5{{>B*p*$

diff --git a/grammar/doc/__pycache__/docLexer.cpython-310.pyc b/grammar/doc/__pycache__/docLexer.cpython-310.pyc
deleted file mode 100644
index 997817b3aabec262a7798f35c9b0ab317b3e40f1..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 2912
zcmd1j<>g{vU|=Xz^iR6a!@%$u#6iX^3=9ko3=9m#ISdR8DGVu$ISe5nnkk1dmnn*g
z5yWQBVUA)>WzJ$rVUc7=Va;YKl1X7mVas97Ws72CWXNL!^VlK06owS`9FAPhC{9L(
z6pj?m7KSJ;cZL+M6z&#=6z){+X67iK6y{(CO`ew^cW5%+V${-Pyu}e*Qk0pO?&<IM
zl97Rd;Uy~r1A`{xEw+%<iV{zMO~zZyDf!9CARRCaQpUl+z~BsWy%!?`LkUAOL;s9g
zh8l(ij46x@85togCOC^3&SHVHSm7)-IEx+5;()U_;VdpViyO}3fwOqwEIv4kAI=hh
zvjpKRAvjAI&JuyMMBywkI7=MPl7O=$;VdaQOB&9SfwGtuGK4cQF{CiCFt9K*Gcz&-
zGZ-=y&nstSWB|cP22gwlGib{Cl}a-(Ffc!B>V7q6)r*$Rua~cV(z@sI`sL5(wLM+B
zgo(3CI*fyng^81ig_)C?g@u!ag_V<)g^iPqg`JbVN}7d(lY^NF3Yp=6g_(u9N*Y8$
zFe@_*vazvgaIx`LNwe{>X)?1hvoW({z$$5Wc6JLcb{=*<c57}l`6_8<4rIvA;myUt
z!@<Yl$IZc9C50l##Kg(W%*w*f%E`vfUM0!P!OzLe#LUXX&BMp9q{I}##LUdZ#Kgo}
zB?YG0!6YY`<OY+x%uH31OiWDtARe<Ih-4K7k?bHHRnknHiXa}hI*8=e1(E#DAW~4M
zN|HU6iAhM9-HnM!ScE;EiAjV7BqJ)uUL_IB#3UxpZpg$WE&-C4kOav~vN2alFflPn
zNrQAs%dm$sG0DiX+b}W7%CT3;gfKD5$+H_XG07`{R4Z_ROjA?>sa2{{PzIT%tOC-b
zq6(5%<pRm8X>c+zF{!IDSIILmF==Rm#5BRK)zkv9wX{K`whjkdl^he34iAW@tH)l;
z#H6RsUdY6xZ@^x{#AIN|o(XcAp%KV6M#k*vOiacm>;+6rCZ_E5RZ>h$rj{U`W)|E`
zOiboxAhv}yh^$huGzZa^Rv^;K21Hte)!NvCY_YYgk_Wlm&K_i&y#vTL2S@fyCMHKG
z_If5JCqCvXMJ6UDXBUua7gzRjCMH)mkj-xH?0HN~?j9gjRSF(HAd@}4LH2ujg4o`E
zAkxPRO!`(SfM{QT5a|b2>mL9zF(43RdteZIIVgaFf<Yz+hk#5D2?d!PDge^K#-wTK
zQ^mr}!NjD=T*S=4z;KJTyeP9I^%h5ZYDrmQPH8Hr<Vq?{Tggzw&A`C$%T_-lKQ~oB
zCowlQFDp|&JijQrxF9h(RX-%Pq$sgizceq$F|R~Fy(lp^H?c@RB|jO2eNrn@i}VUA
zZ}AqV7G);pWLBl7IEMJ~gGxR-1_lNWMlLcT8>7nOB3=dt29%-|RK$V`U=UqAiGhJ3
zl_82Tg&~S5mARQ=0ZS@tGs8m0D7F;F6s8u2RQ6Q1ERJSI7lvlWD9#k-D6SNiDDD*2
zD4rCyDBcwID83YqDE<`AC;?C<kj@k(n8K6J7$ua>kixr&F-ka{A%$-dW0XibLkj;Q
z#wgJgfhe&Q!Bp`qi8Q7Zp;XB<rgnxj#uVWckrv)4sdfeyhA8P^22Igh94Yz9;E+iM
zRRtiof-oo+KyC+D3sMXW4CxFt46&RsOtp-)OeG98Of`(nOcR+3nSvov0*W9PH%Cq8
zTTFTew-_^{m{N+taRniGK#|I2lapColANDgU}pjfb0!7`1}TOrVVDE-5b{N!3iTyO
z!Ap=!UV^wqAosjv166D6Amc<`T-_W)eL~_x{JmZM;+;Jl{rp^gZn1fIy12Od-Qs~s
z`}(`MYO?;~wDI%{bM*0av8!TLk9LVwuVPb=)(6oX>e0bgL55I~;8=A{mLgEOeTykP
z_!hH&kozs>P(L3{_FF6=@$m*=(hy7<fl1>c83qQ1B3Te2$H2f)#g>wvte0-$cZ)5*
zw4|W4BpKudP@sY^hz;@xh%N@DI#7MPfU$<LhG`)axUOY_)Vd|iH4HTjSu8aSS*#^&
z%?!;fH4O3WDGW6X@f={1Gle0TL6ga^iq%BVQqNeE>lSNqX+dhyEsps3%)HE!`1o5q
z$r-81*<q<g#hLkew*<k_>=@z~oS9pilUR~pbc;E$B<~hqN@{W@NJVi-zKfgVEwO;2
z)RfHRlFa-(=lr~q)QS@4#N>?BTkP?fc_pbu1-Cfj6Tz~@QQYwbV6}-Qsl}Siw^)i(
zbJA|HX66-?mJ}&7FfgoSyu}$GpPZ9eTpS;dND4)&3=9lDpfmt#R`D@%F^Vy=F>*0h
z3BuC?%nzC}Aphhg=BCES-{Ojo&&^LM%>l7_;^PZT6LX+4{Ei`hE~)T*U37~(1&IgB
znnfC*M8lbySDKqzlvt8_iy!9gko@41#FEq^O;8NVg9rr>p$H<BKm=<1M)4$PB<AI%
z=J+M%rWQwW=H{oQf|<8?axzO&ixMFcw|I*ybCdFOGLs>aoJFNMP<8y!g2^otB65ql
zxU%>bTX9KBerZV&D20Oq=oT+D=#o=&a$vy+D*uY~L9PPjsUkiG1_lFAK!O~~!NA8P
z!o<SF!iazzOl(XXOrUz3g-MACtX`A%7F%LoNluZ;EvC$TaDiK70}33TGDu2^&&*59
zzr|KkS&*5R4t7rzCxoMynIFZRSe#M>%7Ne#?-naKRe=qLL@?NeU;^aATO2l!D6<0<
aFU3OOa+ZgYgOQJskCBIohna&#L=gZfc}PA0

diff --git a/grammar/doc/__pycache__/docParser.cpython-310.pyc b/grammar/doc/__pycache__/docParser.cpython-310.pyc
deleted file mode 100644
index 6356f2ab138d2e5b8efa078c6b549fa9e9274a89..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 8112
zcmd1j<>g{vU|=Xz^iQgjXJB{?;vi!d1_lNP1_p-W90mr46owSW9EK1O&6LBK%M``L
z2x2qmFh?<`GH0=*ut+kbux7Iq$)qr(u;sAkvPH2mGUTy=dF&8g3PTEe4o5C$6elA?
z3P%cO3qur_J3|Ur3U>=b3U?}ZGjkMA3Ue@nCeKTdJ2V+@F=}Zt-r@)@Day=C_w@IB
z$;iOK@RF5*fkBh;7F$SaMTw`sCgUyUl>FpmkPa9IDdS*ZU~mSxzK4N<p@gBCq2IEW
zp@v}rV+!L!Mn(vW3C?1MvsmCPRyd0d&SHnNIN&T!D2r(!LpTEyLka^60}DelGb2MV
zgCRq)eK{i|0|-VkfI=*oL6gg`RGNW-f%#ce_p3RpUbJj}y?pJH);*8cFMmF-?dj4b
z%$!wHGAvA-Of1Zt%q%RNEG(>?tjtVMSS8KGj0{+qL6DUh467uWnAn(D*f^M&n8ca5
zn3<TESolFCn`o6Jh~|(0kzCyD%uGz&Jgh9tOgy~oRT3;rOuT#?OpHu?%piGw0d`g<
zCILa#DseUzCP5)q4pt^1VRkkqCSef{W=19v)+(?$qGIeEOiW_p94w4X;vgfLBqXZ@
z**Te*B&9f58JVOwL8dWjG8Zv3FfiO=EicL}Nxj99o?23tm{Xbxin^rIw3Q4++zbp1
zzwGog@^e%5a}sk?^RhDa!}E)>iwhEyQ}shqONtVU^-J?|9P>)_(~A;wa}$g7Q}UBR
zI3Tg8IJHQxpz;=PacWU!VoqjNYKmisA3rGWB^VeOI2gGwARD8~w<2B!1_qR514=%i
z#08>@nHU)uQW>HcQy8L{QW&F{Q(2lB7O<wWH8U(^jABn=N?~qcNaaXn&*E%mbYW;_
zjN(e=Zf1z$NoPo5S;QE{o6eBJx`;7~FNH0NKZQL?AcZ4JFoiQpD4ijNYY}6VaH>d_
zXq0FQdz4rTN0fL9XOu(=ca&rbPn1*&Z<KTjUzAL$Y?fRaV+wz&e2PG-LK;guLmFd>
zV2V%+Z<L}tLyB;UNDD)XNUBmZbChz5Xo^@1LzId;LyCBcL<>WT1Xx@(MKVRIg&|7K
zogqa!MW%%zMFuRco+6tf*TN8`;m(jEpQ6yhkfHz<*Gy4NQEFj`(rRa5VTjTWX3$i=
z#hH?y3<=~awv_y2y>ydgQ2qi15(tB078GdUye7rKz>v;R!w}0E!&J*y%T&Tp!&JlA
z%rudykSQ1v&7eqkadXsUzQv?xaEmcBiYcWS9RCo42b55_Y;rP-OOo?*3+$9Z(a6NW
zz#zp?B?5Pe9zua8>n~0lProonA5Ry%DpvJqmss^GHuY$I5Y3?;9c&e32o(vARoCPw
z5@ujvz((9+3J<=;>>uQQi#gQKrwHW7B2W?d5@h>JkT*41ZgHgKCzs}?=9S#yC{E2Q
zNzF@6y~Unilpc^+RC0^GG%p9lcnQ*01PYXwY@h;*9poq0%)Elql3Q%~r6mQWCCMQ3
zK*0yXAT~%fh%UB(7FP=xYZz;o7BYd0DkeywRl;1uP{WYLQp1qNTEmdV*344F5YL{%
zP{R<<k-`wnpvmM{#cHBwsb{Rod5g8Uv>>(U7Ds%1W?p7VeEcn*<c!qh?6B0L;>`TK
zTSAaHbqw(f&de>%Ni4}Py2YGWl6Q+QB{ew{q@*|`-^I=GmQZm<Vo_>}bADb)YDI~2
zVsb|6E%x}#ypq(Sf?M421x2YTnaPPIsl}Siw^)i(bJB_wKq0Kiz`(GQ@fK%%d~!}=
zadCV+B7TcNk!k~qSWqF&$H>Jf#>mB3C4`7xn458C&(9GIsM#|`Foh+RC51JW6`E1l
zQrKH~qu8MJ3_GNjiQ-7%N##sqO5si6Yhj7vg7WxN1X@_4xWS2@CzwG~s7eyy78huY
zLj&LzTLHMl@q79I|NsAB9w<5RX6BXUXJ_W62bUz4q$Yz>04Qof7{mr)Q1pIQK}0W8
z4Kp};8EY8gnZS|A3?^AXQMr=QPm{Gs2^5l`&??dgr6K10(vn+@#Vc8gv_TReu_8kd
z%Lqj1fCx}YS_H~O;6V0ZU|{$R31l`#Hc%id;q;guJkUW&ALMWl2C+eo2W9Wid<+br
zij$#)F$<I;nSvQsGWmgXiYD_d;q=s!kjjG8l%Ud_RA^uofppyB4~AwHsOU;Yi2K2A
z1ruNgg9;Bnkb^-Pj*qblk5gfJ52YjmITeJ#P6cJW5{4|sV1^<SP<w$1;uPMXP#@QL
zSSBvAg$Ehj8Mk=SQ%gWz@XSj|tq6j-hmEmH4R_EXYq`Y%D!fwjQj3y79sva+2!q(5
z6aaRe3^*0kFs3jzGZpdFFfL%MVF0xvKrs)(noNGTctL?y1WSFIOt;uG5{nZ{N{WhH
zL0O9l5{4k-AUO-+15U6`kRL)p&Ibhr7b71d%l|56++Kj|!RhKE7jQuO-Qr2D$SgrP
ztjG-%ie4bX9YlZ~0wzGwdW!?38swNrkfbLA0|UO8g((IlECY~zyr2>+FEKYYKK>S0
ze0*+xN@)&=%@ZGASelpvmEnw!FV4v?DUOf7B?u|lKsG|t+b!nIyb^GJC<3LiB1ceO
zf+!0qN=^02EG_{>R}m<;6nTRrd7-6Na%xTvtmFdw2%N{j-r|P^0Mrstq!fdaI|l;`
zqXZKN8wU#?vlvSeC<;)LDJT(u8s4CoD}Dw}2%zY%VaQ^tVaQ@$$Xv@<!?1t_lsIY_
zvRD@~Gcr`gRB6>PEMTi)Oaa#jDa<tt@ys<03)o8-vcL_|6i`h765(9PP{SC{mBNao
zf*X{yN_ZCV)-YyqW$`U!tYM7j3unkvW??AduZ(A8sAXbgs1gTN)$sx~3|WE;glZUI
zbx)Sy0$~_0oFS4Sg`t+Ijwz1^LYFg^GlYZ61d)Xdj0`o5!Dzf1h6P}mat2K{NJ#;y
zQ$c0YOHNSb2nq^KQE=`Aht(~X_~ep`Tdc*PaugCJpiT%hJNTEDq!u~ml-y#?O)N>y
zxWy6@A8!DORWne;f%t|+;0l)m7C}X(AQ_RM)a3m1yv&l!{5;o+<kSKX;}&NsoXH)Z
zT2$nbn3s~1T6BxMD77HJsKm9XD8J|ydr@j~epzY}IBl$C24}om969;Pi8&yrG8Ltw
z<-j6I1_p)%Pz47{!a_^}j4X^ijBH@c1*Sz9`54(4`52`bIU1P$GX3XZV_{_a%fZCL
z$n_t&B7~JxI4i<pkTuBd)D&T2E5a1s7KSL!6uwlhG^P}26$s^lTA3_SJm3nEH<&?F
z1i3;)E6u@GlV1_Yr-;f3X9ZZS1y1qJ3=0@*@Kk^`4Dqa>f)G-8v!PXlkQimmzr|Qu
zWDY7-z}XU<K|y&FoYgBq!4FDYpn%4#{Lnl>xbiEq1X*MSDw3FstU+v0X1~Sf4{7{C
zO95;(N)f2H*8tLC2P*s&5oIvCBcKgQl;Q$Zqroa*Ty6-3v^(*+0aW9(5aR}D8y3S2
zAa{U@tRj1mDVTLCtV+fj0-zGN1EkLh6aq*UDS8MXSENNC>j>9Ika`kkkR}tPI?4sv
zhNU_JmnhvJNpMYsR3XAlfQF(TsTCqjKg<PrAWN{h093>DgCxN&Kx*Z{On|$fA`{#a
zDe?iOI=&)bkS0(IsmLG13IGv-AOh4%0VjHJQY!+*HMo*13I@r-tF@vK5Eq<Hzyvtg
zfHMrJF<u7BFan?sGo%7z=U{<WVIWzQ^ng-@{Q+laf>l_0l_{B37&o{|<4Ivds?vDD
zRT|#{esGm0P{WwTlO?c_sfICL09vsLR%W48Y(g~*S;7lMU{x8ICko@?so2C&DmFN;
zhG7AiUCyA%?pGuZ3RqAseF+*=cnK;QUV;W7UV_3?Qyfyefr7CJ+z5&SDF;VO4u};E
zB4R+56sY<#f<$5lNDv&8nJ^h+XvGFgC*WeR2vh+SC4y8Zfe27dQIrB=fznw~Du@M+
zZ7>0jZBQd<8Ys3wxfru{g49q_MAlHSd=Ii0)Xs%tP>&YGDbfR_UZE-pM7f7nW`g=L
zL{}h1#vmVoT#nH{f`kV+;BeIn8$p^uUF<5PVi?sqq*Vpr(X5dC?9@C+B#VM9z|yTK
z0!eHKNrEc@8C-#(=O5&blGhj+7#KhpR2X8Gy*%Ll47A?BS@iA#*$XauRdL!&M#;+q
zE+V0!2P^g}Kz3m(_CV=<FGv!-*n=7XFZDnp3!v6J;i8`#)LVdRhuIIR_lvOE56TV)
z;r8Q>evp|U>mj8sFEOPqC~iqBaI+W~7}kM0XP}}2-eX_}_ZUEOC`kahxcz*ExZ*ar
zN)caiTf-R7S;Dn|yM{4~Glij)p-3l%A&(iv%i;m|4|t))C|_k5N-@e`!;mGgKoC}N
zf_d=X0-j=27^N76^J*9tfZ62?nk;@rvY_Yz7o(s-I!zHsmI8$bdbwE+QVB0N!I`TF
zobka;e|Bh^xWxh*vVs%~q!*K*X4?r+9BD8xFd&LaK1L;cMWY}hhe5L?DJ3IXOOCoF
zBd8+5*<QX23Q_cu5!E@Qm5fE81_dMr#Xx3bD-=Nzpn0bv^g<EM;!r;ylpFyn5J4DU
zp$Kir;w%(FUGF0FLJ`egvI|A1Ij};h5flK}3MEjX2%79FLN64d29R4QLbb!}Z^C6i
zs8D<hk_6k2R476XfZLDK6eX%q1jQ|Bg<>|Sp#ySE6^H=wP$C_<7%Zv=H7Z#AisV2_
z!9@nhK3I_f(u-bXfO}wwA_Jtn2%IAzMF!NvMRgzpK_doaG^{|)wJ#w3^`LGJQb_<!
zi<&xUwaG1h=xml-W=^VKVs7d!e#a0$msI#nR?#i)6eJ#~by=ha3MS6fywcp%qQsKa
zTl}yA`;dI_;B%2WNP$=Ycwinh$^nf;@X&n`s4%<5lapDJT9lXrva<LVZ*gUAQhrWm
zGMHNg8g3~9H%!5$EGXP=aTb+A99Vpd72Mjn#q8?uRul{JY#fLv1`#D7LIgyBYeY~G
z-(oJVEWX87T#^DByXFS#4K684P0TF<jY~p$E#OWJXrK%{a8d+rD1#cQMc|?zR1Oz`
zV-ys=xH~bRsuWa%6oWeH91JW>d`u!tY)l+X984_CEQ}z?0h%2Jvp_r$mSGa$<d7B+
z<4_Qg;E)m!5|9EHS(?1J*b?(fa*9lDF=gh1r!R{pfP$5$3^IQbpP84Ie~Yc8vLG`r
z9qg4TP6$UYGyfKIVsQ$%u@0VOyu}6?k_4NKG*kp{k>BF5fsAX~feN)^Ay6lbiHDJc
Rk&ls&k%x(gnS)t`4*)kBu(1FD

diff --git a/grammar/org/org.interp b/grammar/org/org.interp
new file mode 100644
index 0000000..cd5d630
--- /dev/null
+++ b/grammar/org/org.interp
@@ -0,0 +1,16 @@
+token literal names:
+null
+null
+null
+
+token symbolic names:
+null
+WS
+ORG
+
+rule names:
+orgPart
+
+
+atn:
+[3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 3, 4, 7, 4, 2, 9, 2, 3, 2, 3, 2, 3, 2, 2, 2, 3, 2, 2, 2, 2, 5, 2, 4, 3, 2, 2, 2, 4, 5, 7, 4, 2, 2, 5, 3, 3, 2, 2, 2, 2]
\ No newline at end of file
diff --git a/grammar/org/org.tokens b/grammar/org/org.tokens
new file mode 100644
index 0000000..42c6425
--- /dev/null
+++ b/grammar/org/org.tokens
@@ -0,0 +1,2 @@
+WS=1
+ORG=2
diff --git a/grammar/org/orgLexer.interp b/grammar/org/orgLexer.interp
new file mode 100644
index 0000000..56d77e2
--- /dev/null
+++ b/grammar/org/orgLexer.interp
@@ -0,0 +1,23 @@
+token literal names:
+null
+null
+null
+
+token symbolic names:
+null
+WS
+ORG
+
+rule names:
+WS
+ORG
+
+channel names:
+DEFAULT_TOKEN_CHANNEL
+HIDDEN
+
+mode names:
+DEFAULT_MODE
+
+atn:
+[3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 2, 4, 36, 8, 1, 4, 2, 9, 2, 4, 3, 9, 3, 3, 2, 6, 2, 9, 10, 2, 13, 2, 14, 2, 10, 3, 2, 3, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 7, 3, 25, 10, 3, 12, 3, 14, 3, 28, 11, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 4, 3, 3, 5, 4, 3, 2, 3, 5, 2, 11, 12, 14, 15, 34, 34, 2, 37, 2, 3, 3, 2, 2, 2, 2, 5, 3, 2, 2, 2, 3, 8, 3, 2, 2, 2, 5, 14, 3, 2, 2, 2, 7, 9, 9, 2, 2, 2, 8, 7, 3, 2, 2, 2, 9, 10, 3, 2, 2, 2, 10, 8, 3, 2, 2, 2, 10, 11, 3, 2, 2, 2, 11, 12, 3, 2, 2, 2, 12, 13, 8, 2, 2, 2, 13, 4, 3, 2, 2, 2, 14, 15, 7, 125, 2, 2, 15, 16, 7, 113, 2, 2, 16, 17, 7, 116, 2, 2, 17, 18, 7, 105, 2, 2, 18, 19, 7, 60, 2, 2, 19, 20, 7, 103, 2, 2, 20, 21, 7, 112, 2, 2, 21, 22, 7, 127, 2, 2, 22, 26, 3, 2, 2, 2, 23, 25, 11, 2, 2, 2, 24, 23, 3, 2, 2, 2, 25, 28, 3, 2, 2, 2, 26, 24, 3, 2, 2, 2, 26, 27, 3, 2, 2, 2, 27, 29, 3, 2, 2, 2, 28, 26, 3, 2, 2, 2, 29, 30, 7, 125, 2, 2, 30, 31, 7, 49, 2, 2, 31, 32, 7, 113, 2, 2, 32, 33, 7, 116, 2, 2, 33, 34, 7, 105, 2, 2, 34, 35, 7, 127, 2, 2, 35, 6, 3, 2, 2, 2, 5, 2, 10, 26, 3, 8, 2, 2]
\ No newline at end of file
diff --git a/grammar/org/orgLexer.py b/grammar/org/orgLexer.py
new file mode 100644
index 0000000..8793caf
--- /dev/null
+++ b/grammar/org/orgLexer.py
@@ -0,0 +1,61 @@
+# Generated from grammar/org/org.g4 by ANTLR 4.9.3
+from antlr4 import *
+from io import StringIO
+import sys
+if sys.version_info[1] > 5:
+    from typing import TextIO
+else:
+    from typing.io import TextIO
+
+
+
+def serializedATN():
+    with StringIO() as buf:
+        buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\2\4")
+        buf.write("$\b\1\4\2\t\2\4\3\t\3\3\2\6\2\t\n\2\r\2\16\2\n\3\2\3\2")
+        buf.write("\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\7\3\31\n\3\f")
+        buf.write("\3\16\3\34\13\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\2\2\4\3\3")
+        buf.write("\5\4\3\2\3\5\2\13\f\16\17\"\"\2%\2\3\3\2\2\2\2\5\3\2\2")
+        buf.write("\2\3\b\3\2\2\2\5\16\3\2\2\2\7\t\t\2\2\2\b\7\3\2\2\2\t")
+        buf.write("\n\3\2\2\2\n\b\3\2\2\2\n\13\3\2\2\2\13\f\3\2\2\2\f\r\b")
+        buf.write("\2\2\2\r\4\3\2\2\2\16\17\7}\2\2\17\20\7q\2\2\20\21\7t")
+        buf.write("\2\2\21\22\7i\2\2\22\23\7<\2\2\23\24\7g\2\2\24\25\7p\2")
+        buf.write("\2\25\26\7\177\2\2\26\32\3\2\2\2\27\31\13\2\2\2\30\27")
+        buf.write("\3\2\2\2\31\34\3\2\2\2\32\30\3\2\2\2\32\33\3\2\2\2\33")
+        buf.write("\35\3\2\2\2\34\32\3\2\2\2\35\36\7}\2\2\36\37\7\61\2\2")
+        buf.write("\37 \7q\2\2 !\7t\2\2!\"\7i\2\2\"#\7\177\2\2#\6\3\2\2\2")
+        buf.write("\5\2\n\32\3\b\2\2")
+        return buf.getvalue()
+
+
+class orgLexer(Lexer):
+
+    atn = ATNDeserializer().deserialize(serializedATN())
+
+    decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
+
+    WS = 1
+    ORG = 2
+
+    channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ]
+
+    modeNames = [ "DEFAULT_MODE" ]
+
+    literalNames = [ "<INVALID>",
+ ]
+
+    symbolicNames = [ "<INVALID>",
+            "WS", "ORG" ]
+
+    ruleNames = [ "WS", "ORG" ]
+
+    grammarFileName = "org.g4"
+
+    def __init__(self, input=None, output:TextIO = sys.stdout):
+        super().__init__(input, output)
+        self.checkVersion("4.9.3")
+        self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
+        self._actions = None
+        self._predicates = None
+
+
diff --git a/grammar/org/orgLexer.tokens b/grammar/org/orgLexer.tokens
new file mode 100644
index 0000000..42c6425
--- /dev/null
+++ b/grammar/org/orgLexer.tokens
@@ -0,0 +1,2 @@
+WS=1
+ORG=2
diff --git a/grammar/org/orgListener.py b/grammar/org/orgListener.py
new file mode 100644
index 0000000..0a1a034
--- /dev/null
+++ b/grammar/org/orgListener.py
@@ -0,0 +1,21 @@
+# Generated from grammar/org/org.g4 by ANTLR 4.9.3
+from antlr4 import *
+if __name__ is not None and "." in __name__:
+    from .orgParser import orgParser
+else:
+    from orgParser import orgParser
+
+# This class defines a complete listener for a parse tree produced by orgParser.
+class orgListener(ParseTreeListener):
+
+    # Enter a parse tree produced by orgParser#orgPart.
+    def enterOrgPart(self, ctx:orgParser.OrgPartContext):
+        pass
+
+    # Exit a parse tree produced by orgParser#orgPart.
+    def exitOrgPart(self, ctx:orgParser.OrgPartContext):
+        pass
+
+
+
+del orgParser
\ No newline at end of file
diff --git a/grammar/org/orgParser.py b/grammar/org/orgParser.py
new file mode 100644
index 0000000..68db601
--- /dev/null
+++ b/grammar/org/orgParser.py
@@ -0,0 +1,94 @@
+# Generated from grammar/org/org.g4 by ANTLR 4.9.3
+# encoding: utf-8
+from antlr4 import *
+from io import StringIO
+import sys
+if sys.version_info[1] > 5:
+	from typing import TextIO
+else:
+	from typing.io import TextIO
+
+
+def serializedATN():
+    with StringIO() as buf:
+        buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\3\4")
+        buf.write("\7\4\2\t\2\3\2\3\2\3\2\2\2\3\2\2\2\2\5\2\4\3\2\2\2\4\5")
+        buf.write("\7\4\2\2\5\3\3\2\2\2\2")
+        return buf.getvalue()
+
+
+class orgParser ( Parser ):
+
+    grammarFileName = "org.g4"
+
+    atn = ATNDeserializer().deserialize(serializedATN())
+
+    decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
+
+    sharedContextCache = PredictionContextCache()
+
+    literalNames = [  ]
+
+    symbolicNames = [ "<INVALID>", "WS", "ORG" ]
+
+    RULE_orgPart = 0
+
+    ruleNames =  [ "orgPart" ]
+
+    EOF = Token.EOF
+    WS=1
+    ORG=2
+
+    def __init__(self, input:TokenStream, output:TextIO = sys.stdout):
+        super().__init__(input, output)
+        self.checkVersion("4.9.3")
+        self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)
+        self._predicates = None
+
+
+
+
+    class OrgPartContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def ORG(self):
+            return self.getToken(orgParser.ORG, 0)
+
+        def getRuleIndex(self):
+            return orgParser.RULE_orgPart
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterOrgPart" ):
+                listener.enterOrgPart(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitOrgPart" ):
+                listener.exitOrgPart(self)
+
+
+
+
+    def orgPart(self):
+
+        localctx = orgParser.OrgPartContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 0, self.RULE_orgPart)
+        try:
+            self.enterOuterAlt(localctx, 1)
+            self.state = 2
+            self.match(orgParser.ORG)
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
+
+
+
+
+
diff --git a/grammar/unl/unl.g4 b/grammar/unl/unl.g4
index dac12c8..925fb67 100644
--- a/grammar/unl/unl.g4
+++ b/grammar/unl/unl.g4
@@ -14,9 +14,56 @@ grammar unl;
 //---------------------------------------------------------
 
 unlPart
-  : UNL
+  : '{unl}' (relationOccurrence)+ '{/unl}'
   ;
 
+relationOccurrence
+  : universalRelation LP universalWord COMMA universalWord RP
+  ;
+
+universalWord
+  : headword
+    (LP restriction (COMMA restriction)* RP)?
+    (attribute)*
+  | value
+  ;
+
+headword
+  : ident
+  ;
+
+restriction
+  : universalRelation GREATER ident
+  ;
+
+attribute
+  : DOT AT ident
+  ;
+
+value
+  : VALUE
+  ;
+
+universalRelation
+  : ( AND | AOJ | BEN | CNT |
+      EQU | ICL | OBJ | QUA )
+  ;
+
+
+//---------------------------------------------------------
+// Base Element
+//---------------------------------------------------------
+
+sentence : (word | punctuation | bracket)* ;
+
+ident : word (UNDERSCORE word)* ;
+
+word : LETTER | WORD ;
+
+punctuation : DOT | COMMA | SEMCOL | COLON | DASH ;
+
+bracket : LP | RP | LC | RC ;
+
 
 //=============================================================================
 // Lexer Grammar
@@ -25,6 +72,43 @@ unlPart
 // ignore whitespaces
 WS              : (' '|'\n'|'\t'|'\r'|'\u000C')+ -> skip ;
 
+// fragments
+fragment LOWERCASE : [a-z] ;
+fragment UPPERCASE : [A-Z] ;
+fragment DIGIT  : '0'..'9' ;
+fragment ASCII  : ~('\n'|'"'|'<'|'>'|'('|')') ;
+
+// punctuation
+DOT             : '.' ;
+COMMA           : ',' ;
+SEMCOL          : ';' ;
+COLON           : ':' ;
+DASH            : '-' ;
+
+// brackets
+LP              : '(' ; // Left parenthesis
+RP              : ')' ;
+LC              : '{' ; // Left curly bracket
+RC              : '}' ;
+
+// symbols
+LESS            : '<' ;
+GREATER         : '>' ;
+AT              : '@' ;
+UNDERSCORE      : '_' ;
+
+// relations
+AND             : 'and' ;
+AOJ             : 'aoj' ;
+BEN             : 'ben' ;
+CNT             : 'cnt' ;
+EQU             : 'equ' ;
+ICL             : 'icl' ;
+OBJ             : 'obj' ;
+QUA             : 'qua' ;
+
 // other tokens
-ORG             : '{org:en}' (.)* '{/org}' ;
-UNL             : '{unl}' (.)* '{/unl}' ;
+LETTER          : LOWERCASE | UPPERCASE ;
+WORD            : (LETTER)+ ;
+VALUE           : (DIGIT)+ (DOT (DIGIT)+)? ;
+
diff --git a/grammar/unl/unl.interp b/grammar/unl/unl.interp
new file mode 100644
index 0000000..525ed69
--- /dev/null
+++ b/grammar/unl/unl.interp
@@ -0,0 +1,78 @@
+token literal names:
+null
+'{unl}'
+'{/unl}'
+null
+'.'
+','
+';'
+':'
+'-'
+'('
+')'
+'{'
+'}'
+'<'
+'>'
+'@'
+'_'
+'and'
+'aoj'
+'ben'
+'cnt'
+'equ'
+'icl'
+'obj'
+'qua'
+null
+null
+null
+
+token symbolic names:
+null
+null
+null
+WS
+DOT
+COMMA
+SEMCOL
+COLON
+DASH
+LP
+RP
+LC
+RC
+LESS
+GREATER
+AT
+UNDERSCORE
+AND
+AOJ
+BEN
+CNT
+EQU
+ICL
+OBJ
+QUA
+LETTER
+WORD
+VALUE
+
+rule names:
+unlPart
+relationOccurrence
+universalWord
+headword
+restriction
+attribute
+value
+universalRelation
+sentence
+ident
+word
+punctuation
+bracket
+
+
+atn:
+[3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 3, 29, 103, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 4, 6, 9, 6, 4, 7, 9, 7, 4, 8, 9, 8, 4, 9, 9, 9, 4, 10, 9, 10, 4, 11, 9, 11, 4, 12, 9, 12, 4, 13, 9, 13, 4, 14, 9, 14, 3, 2, 3, 2, 6, 2, 31, 10, 2, 13, 2, 14, 2, 32, 3, 2, 3, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 7, 4, 49, 10, 4, 12, 4, 14, 4, 52, 11, 4, 3, 4, 3, 4, 5, 4, 56, 10, 4, 3, 4, 7, 4, 59, 10, 4, 12, 4, 14, 4, 62, 11, 4, 3, 4, 5, 4, 65, 10, 4, 3, 5, 3, 5, 3, 6, 3, 6, 3, 6, 3, 6, 3, 7, 3, 7, 3, 7, 3, 7, 3, 8, 3, 8, 3, 9, 3, 9, 3, 10, 3, 10, 3, 10, 7, 10, 84, 10, 10, 12, 10, 14, 10, 87, 11, 10, 3, 11, 3, 11, 3, 11, 7, 11, 92, 10, 11, 12, 11, 14, 11, 95, 11, 11, 3, 12, 3, 12, 3, 13, 3, 13, 3, 14, 3, 14, 3, 14, 2, 2, 15, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 2, 6, 3, 2, 19, 26, 3, 2, 27, 28, 3, 2, 6, 10, 3, 2, 11, 14, 2, 98, 2, 28, 3, 2, 2, 2, 4, 36, 3, 2, 2, 2, 6, 64, 3, 2, 2, 2, 8, 66, 3, 2, 2, 2, 10, 68, 3, 2, 2, 2, 12, 72, 3, 2, 2, 2, 14, 76, 3, 2, 2, 2, 16, 78, 3, 2, 2, 2, 18, 85, 3, 2, 2, 2, 20, 88, 3, 2, 2, 2, 22, 96, 3, 2, 2, 2, 24, 98, 3, 2, 2, 2, 26, 100, 3, 2, 2, 2, 28, 30, 7, 3, 2, 2, 29, 31, 5, 4, 3, 2, 30, 29, 3, 2, 2, 2, 31, 32, 3, 2, 2, 2, 32, 30, 3, 2, 2, 2, 32, 33, 3, 2, 2, 2, 33, 34, 3, 2, 2, 2, 34, 35, 7, 4, 2, 2, 35, 3, 3, 2, 2, 2, 36, 37, 5, 16, 9, 2, 37, 38, 7, 11, 2, 2, 38, 39, 5, 6, 4, 2, 39, 40, 7, 7, 2, 2, 40, 41, 5, 6, 4, 2, 41, 42, 7, 12, 2, 2, 42, 5, 3, 2, 2, 2, 43, 55, 5, 8, 5, 2, 44, 45, 7, 11, 2, 2, 45, 50, 5, 10, 6, 2, 46, 47, 7, 7, 2, 2, 47, 49, 5, 10, 6, 2, 48, 46, 3, 2, 2, 2, 49, 52, 3, 2, 2, 2, 50, 48, 3, 2, 2, 2, 50, 51, 3, 2, 2, 2, 51, 53, 3, 2, 2, 2, 52, 50, 3, 2, 2, 2, 53, 54, 7, 12, 2, 2, 54, 56, 3, 2, 2, 2, 55, 44, 3, 2, 2, 2, 55, 56, 3, 2, 2, 2, 56, 60, 3, 2, 2, 2, 57, 59, 5, 12, 7, 2, 58, 57, 3, 2, 2, 2, 59, 62, 3, 2, 2, 2, 60, 58, 3, 2, 2, 2, 60, 61, 3, 2, 2, 2, 61, 65, 3, 2, 2, 2, 62, 60, 3, 2, 2, 2, 63, 65, 5, 14, 8, 2, 64, 43, 3, 2, 2, 2, 64, 63, 
3, 2, 2, 2, 65, 7, 3, 2, 2, 2, 66, 67, 5, 20, 11, 2, 67, 9, 3, 2, 2, 2, 68, 69, 5, 16, 9, 2, 69, 70, 7, 16, 2, 2, 70, 71, 5, 20, 11, 2, 71, 11, 3, 2, 2, 2, 72, 73, 7, 6, 2, 2, 73, 74, 7, 17, 2, 2, 74, 75, 5, 20, 11, 2, 75, 13, 3, 2, 2, 2, 76, 77, 7, 29, 2, 2, 77, 15, 3, 2, 2, 2, 78, 79, 9, 2, 2, 2, 79, 17, 3, 2, 2, 2, 80, 84, 5, 22, 12, 2, 81, 84, 5, 24, 13, 2, 82, 84, 5, 26, 14, 2, 83, 80, 3, 2, 2, 2, 83, 81, 3, 2, 2, 2, 83, 82, 3, 2, 2, 2, 84, 87, 3, 2, 2, 2, 85, 83, 3, 2, 2, 2, 85, 86, 3, 2, 2, 2, 86, 19, 3, 2, 2, 2, 87, 85, 3, 2, 2, 2, 88, 93, 5, 22, 12, 2, 89, 90, 7, 18, 2, 2, 90, 92, 5, 22, 12, 2, 91, 89, 3, 2, 2, 2, 92, 95, 3, 2, 2, 2, 93, 91, 3, 2, 2, 2, 93, 94, 3, 2, 2, 2, 94, 21, 3, 2, 2, 2, 95, 93, 3, 2, 2, 2, 96, 97, 9, 3, 2, 2, 97, 23, 3, 2, 2, 2, 98, 99, 9, 4, 2, 2, 99, 25, 3, 2, 2, 2, 100, 101, 9, 5, 2, 2, 101, 27, 3, 2, 2, 2, 10, 32, 50, 55, 60, 64, 83, 85, 93]
\ No newline at end of file
diff --git a/grammar/unl/unl.tokens b/grammar/unl/unl.tokens
new file mode 100644
index 0000000..c2b43c1
--- /dev/null
+++ b/grammar/unl/unl.tokens
@@ -0,0 +1,50 @@
+T__0=1
+T__1=2
+WS=3
+DOT=4
+COMMA=5
+SEMCOL=6
+COLON=7
+DASH=8
+LP=9
+RP=10
+LC=11
+RC=12
+LESS=13
+GREATER=14
+AT=15
+UNDERSCORE=16
+AND=17
+AOJ=18
+BEN=19
+CNT=20
+EQU=21
+ICL=22
+OBJ=23
+QUA=24
+LETTER=25
+WORD=26
+VALUE=27
+'{unl}'=1
+'{/unl}'=2
+'.'=4
+','=5
+';'=6
+':'=7
+'-'=8
+'('=9
+')'=10
+'{'=11
+'}'=12
+'<'=13
+'>'=14
+'@'=15
+'_'=16
+'and'=17
+'aoj'=18
+'ben'=19
+'cnt'=20
+'equ'=21
+'icl'=22
+'obj'=23
+'qua'=24
diff --git a/grammar/unl/unlLexer.interp b/grammar/unl/unlLexer.interp
new file mode 100644
index 0000000..bab2ee3
--- /dev/null
+++ b/grammar/unl/unlLexer.interp
@@ -0,0 +1,102 @@
+token literal names:
+null
+'{unl}'
+'{/unl}'
+null
+'.'
+','
+';'
+':'
+'-'
+'('
+')'
+'{'
+'}'
+'<'
+'>'
+'@'
+'_'
+'and'
+'aoj'
+'ben'
+'cnt'
+'equ'
+'icl'
+'obj'
+'qua'
+null
+null
+null
+
+token symbolic names:
+null
+null
+null
+WS
+DOT
+COMMA
+SEMCOL
+COLON
+DASH
+LP
+RP
+LC
+RC
+LESS
+GREATER
+AT
+UNDERSCORE
+AND
+AOJ
+BEN
+CNT
+EQU
+ICL
+OBJ
+QUA
+LETTER
+WORD
+VALUE
+
+rule names:
+T__0
+T__1
+WS
+LOWERCASE
+UPPERCASE
+DIGIT
+ASCII
+DOT
+COMMA
+SEMCOL
+COLON
+DASH
+LP
+RP
+LC
+RC
+LESS
+GREATER
+AT
+UNDERSCORE
+AND
+AOJ
+BEN
+CNT
+EQU
+ICL
+OBJ
+QUA
+LETTER
+WORD
+VALUE
+
+channel names:
+DEFAULT_TOKEN_CHANNEL
+HIDDEN
+
+mode names:
+DEFAULT_MODE
+
+atn:
+[3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 2, 29, 173, 8, 1, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 4, 6, 9, 6, 4, 7, 9, 7, 4, 8, 9, 8, 4, 9, 9, 9, 4, 10, 9, 10, 4, 11, 9, 11, 4, 12, 9, 12, 4, 13, 9, 13, 4, 14, 9, 14, 4, 15, 9, 15, 4, 16, 9, 16, 4, 17, 9, 17, 4, 18, 9, 18, 4, 19, 9, 19, 4, 20, 9, 20, 4, 21, 9, 21, 4, 22, 9, 22, 4, 23, 9, 23, 4, 24, 9, 24, 4, 25, 9, 25, 4, 26, 9, 26, 4, 27, 9, 27, 4, 28, 9, 28, 4, 29, 9, 29, 4, 30, 9, 30, 4, 31, 9, 31, 4, 32, 9, 32, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 6, 4, 80, 10, 4, 13, 4, 14, 4, 81, 3, 4, 3, 4, 3, 5, 3, 5, 3, 6, 3, 6, 3, 7, 3, 7, 3, 8, 3, 8, 3, 9, 3, 9, 3, 10, 3, 10, 3, 11, 3, 11, 3, 12, 3, 12, 3, 13, 3, 13, 3, 14, 3, 14, 3, 15, 3, 15, 3, 16, 3, 16, 3, 17, 3, 17, 3, 18, 3, 18, 3, 19, 3, 19, 3, 20, 3, 20, 3, 21, 3, 21, 3, 22, 3, 22, 3, 22, 3, 22, 3, 23, 3, 23, 3, 23, 3, 23, 3, 24, 3, 24, 3, 24, 3, 24, 3, 25, 3, 25, 3, 25, 3, 25, 3, 26, 3, 26, 3, 26, 3, 26, 3, 27, 3, 27, 3, 27, 3, 27, 3, 28, 3, 28, 3, 28, 3, 28, 3, 29, 3, 29, 3, 29, 3, 29, 3, 30, 3, 30, 5, 30, 154, 10, 30, 3, 31, 6, 31, 157, 10, 31, 13, 31, 14, 31, 158, 3, 32, 6, 32, 162, 10, 32, 13, 32, 14, 32, 163, 3, 32, 3, 32, 6, 32, 168, 10, 32, 13, 32, 14, 32, 169, 5, 32, 172, 10, 32, 2, 2, 33, 3, 3, 5, 4, 7, 5, 9, 2, 11, 2, 13, 2, 15, 2, 17, 6, 19, 7, 21, 8, 23, 9, 25, 10, 27, 11, 29, 12, 31, 13, 33, 14, 35, 15, 37, 16, 39, 17, 41, 18, 43, 19, 45, 20, 47, 21, 49, 22, 51, 23, 53, 24, 55, 25, 57, 26, 59, 27, 61, 28, 63, 29, 3, 2, 6, 5, 2, 11, 12, 14, 15, 34, 34, 3, 2, 99, 124, 3, 2, 67, 92, 7, 2, 12, 12, 36, 36, 42, 43, 62, 62, 64, 64, 2, 174, 2, 3, 3, 2, 2, 2, 2, 5, 3, 2, 2, 2, 2, 7, 3, 2, 2, 2, 2, 17, 3, 2, 2, 2, 2, 19, 3, 2, 2, 2, 2, 21, 3, 2, 2, 2, 2, 23, 3, 2, 2, 2, 2, 25, 3, 2, 2, 2, 2, 27, 3, 2, 2, 2, 2, 29, 3, 2, 2, 2, 2, 31, 3, 2, 2, 2, 2, 33, 3, 2, 2, 2, 2, 35, 3, 2, 2, 2, 2, 37, 3, 2, 2, 2, 2, 39, 3, 2, 2, 2, 2, 41, 3, 2, 2, 2, 2, 43, 3, 2, 2, 2, 2, 45, 3, 2, 2, 2, 2, 47, 3, 
2, 2, 2, 2, 49, 3, 2, 2, 2, 2, 51, 3, 2, 2, 2, 2, 53, 3, 2, 2, 2, 2, 55, 3, 2, 2, 2, 2, 57, 3, 2, 2, 2, 2, 59, 3, 2, 2, 2, 2, 61, 3, 2, 2, 2, 2, 63, 3, 2, 2, 2, 3, 65, 3, 2, 2, 2, 5, 71, 3, 2, 2, 2, 7, 79, 3, 2, 2, 2, 9, 85, 3, 2, 2, 2, 11, 87, 3, 2, 2, 2, 13, 89, 3, 2, 2, 2, 15, 91, 3, 2, 2, 2, 17, 93, 3, 2, 2, 2, 19, 95, 3, 2, 2, 2, 21, 97, 3, 2, 2, 2, 23, 99, 3, 2, 2, 2, 25, 101, 3, 2, 2, 2, 27, 103, 3, 2, 2, 2, 29, 105, 3, 2, 2, 2, 31, 107, 3, 2, 2, 2, 33, 109, 3, 2, 2, 2, 35, 111, 3, 2, 2, 2, 37, 113, 3, 2, 2, 2, 39, 115, 3, 2, 2, 2, 41, 117, 3, 2, 2, 2, 43, 119, 3, 2, 2, 2, 45, 123, 3, 2, 2, 2, 47, 127, 3, 2, 2, 2, 49, 131, 3, 2, 2, 2, 51, 135, 3, 2, 2, 2, 53, 139, 3, 2, 2, 2, 55, 143, 3, 2, 2, 2, 57, 147, 3, 2, 2, 2, 59, 153, 3, 2, 2, 2, 61, 156, 3, 2, 2, 2, 63, 161, 3, 2, 2, 2, 65, 66, 7, 125, 2, 2, 66, 67, 7, 119, 2, 2, 67, 68, 7, 112, 2, 2, 68, 69, 7, 110, 2, 2, 69, 70, 7, 127, 2, 2, 70, 4, 3, 2, 2, 2, 71, 72, 7, 125, 2, 2, 72, 73, 7, 49, 2, 2, 73, 74, 7, 119, 2, 2, 74, 75, 7, 112, 2, 2, 75, 76, 7, 110, 2, 2, 76, 77, 7, 127, 2, 2, 77, 6, 3, 2, 2, 2, 78, 80, 9, 2, 2, 2, 79, 78, 3, 2, 2, 2, 80, 81, 3, 2, 2, 2, 81, 79, 3, 2, 2, 2, 81, 82, 3, 2, 2, 2, 82, 83, 3, 2, 2, 2, 83, 84, 8, 4, 2, 2, 84, 8, 3, 2, 2, 2, 85, 86, 9, 3, 2, 2, 86, 10, 3, 2, 2, 2, 87, 88, 9, 4, 2, 2, 88, 12, 3, 2, 2, 2, 89, 90, 4, 50, 59, 2, 90, 14, 3, 2, 2, 2, 91, 92, 10, 5, 2, 2, 92, 16, 3, 2, 2, 2, 93, 94, 7, 48, 2, 2, 94, 18, 3, 2, 2, 2, 95, 96, 7, 46, 2, 2, 96, 20, 3, 2, 2, 2, 97, 98, 7, 61, 2, 2, 98, 22, 3, 2, 2, 2, 99, 100, 7, 60, 2, 2, 100, 24, 3, 2, 2, 2, 101, 102, 7, 47, 2, 2, 102, 26, 3, 2, 2, 2, 103, 104, 7, 42, 2, 2, 104, 28, 3, 2, 2, 2, 105, 106, 7, 43, 2, 2, 106, 30, 3, 2, 2, 2, 107, 108, 7, 125, 2, 2, 108, 32, 3, 2, 2, 2, 109, 110, 7, 127, 2, 2, 110, 34, 3, 2, 2, 2, 111, 112, 7, 62, 2, 2, 112, 36, 3, 2, 2, 2, 113, 114, 7, 64, 2, 2, 114, 38, 3, 2, 2, 2, 115, 116, 7, 66, 2, 2, 116, 40, 3, 2, 2, 2, 117, 118, 7, 97, 2, 2, 118, 42, 3, 2, 2, 2, 119, 120, 7, 99, 2, 2, 120, 121, 7, 
112, 2, 2, 121, 122, 7, 102, 2, 2, 122, 44, 3, 2, 2, 2, 123, 124, 7, 99, 2, 2, 124, 125, 7, 113, 2, 2, 125, 126, 7, 108, 2, 2, 126, 46, 3, 2, 2, 2, 127, 128, 7, 100, 2, 2, 128, 129, 7, 103, 2, 2, 129, 130, 7, 112, 2, 2, 130, 48, 3, 2, 2, 2, 131, 132, 7, 101, 2, 2, 132, 133, 7, 112, 2, 2, 133, 134, 7, 118, 2, 2, 134, 50, 3, 2, 2, 2, 135, 136, 7, 103, 2, 2, 136, 137, 7, 115, 2, 2, 137, 138, 7, 119, 2, 2, 138, 52, 3, 2, 2, 2, 139, 140, 7, 107, 2, 2, 140, 141, 7, 101, 2, 2, 141, 142, 7, 110, 2, 2, 142, 54, 3, 2, 2, 2, 143, 144, 7, 113, 2, 2, 144, 145, 7, 100, 2, 2, 145, 146, 7, 108, 2, 2, 146, 56, 3, 2, 2, 2, 147, 148, 7, 115, 2, 2, 148, 149, 7, 119, 2, 2, 149, 150, 7, 99, 2, 2, 150, 58, 3, 2, 2, 2, 151, 154, 5, 9, 5, 2, 152, 154, 5, 11, 6, 2, 153, 151, 3, 2, 2, 2, 153, 152, 3, 2, 2, 2, 154, 60, 3, 2, 2, 2, 155, 157, 5, 59, 30, 2, 156, 155, 3, 2, 2, 2, 157, 158, 3, 2, 2, 2, 158, 156, 3, 2, 2, 2, 158, 159, 3, 2, 2, 2, 159, 62, 3, 2, 2, 2, 160, 162, 5, 13, 7, 2, 161, 160, 3, 2, 2, 2, 162, 163, 3, 2, 2, 2, 163, 161, 3, 2, 2, 2, 163, 164, 3, 2, 2, 2, 164, 171, 3, 2, 2, 2, 165, 167, 5, 17, 9, 2, 166, 168, 5, 13, 7, 2, 167, 166, 3, 2, 2, 2, 168, 169, 3, 2, 2, 2, 169, 167, 3, 2, 2, 2, 169, 170, 3, 2, 2, 2, 170, 172, 3, 2, 2, 2, 171, 165, 3, 2, 2, 2, 171, 172, 3, 2, 2, 2, 172, 64, 3, 2, 2, 2, 9, 2, 81, 153, 158, 163, 169, 171, 3, 8, 2, 2]
\ No newline at end of file
diff --git a/grammar/unl/unlLexer.py b/grammar/unl/unlLexer.py
new file mode 100644
index 0000000..58e84e7
--- /dev/null
+++ b/grammar/unl/unlLexer.py
@@ -0,0 +1,145 @@
+# Generated from grammar/unl/unl.g4 by ANTLR 4.9.3
+from antlr4 import *
+from io import StringIO
+import sys
+if sys.version_info[1] > 5:
+    from typing import TextIO
+else:
+    from typing.io import TextIO
+
+
+
+def serializedATN():
+    with StringIO() as buf:
+        buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\2\35")
+        buf.write("\u00ad\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7")
+        buf.write("\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r")
+        buf.write("\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23")
+        buf.write("\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30")
+        buf.write("\4\31\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36")
+        buf.write("\t\36\4\37\t\37\4 \t \3\2\3\2\3\2\3\2\3\2\3\2\3\3\3\3")
+        buf.write("\3\3\3\3\3\3\3\3\3\3\3\4\6\4P\n\4\r\4\16\4Q\3\4\3\4\3")
+        buf.write("\5\3\5\3\6\3\6\3\7\3\7\3\b\3\b\3\t\3\t\3\n\3\n\3\13\3")
+        buf.write("\13\3\f\3\f\3\r\3\r\3\16\3\16\3\17\3\17\3\20\3\20\3\21")
+        buf.write("\3\21\3\22\3\22\3\23\3\23\3\24\3\24\3\25\3\25\3\26\3\26")
+        buf.write("\3\26\3\26\3\27\3\27\3\27\3\27\3\30\3\30\3\30\3\30\3\31")
+        buf.write("\3\31\3\31\3\31\3\32\3\32\3\32\3\32\3\33\3\33\3\33\3\33")
+        buf.write("\3\34\3\34\3\34\3\34\3\35\3\35\3\35\3\35\3\36\3\36\5\36")
+        buf.write("\u009a\n\36\3\37\6\37\u009d\n\37\r\37\16\37\u009e\3 \6")
+        buf.write(" \u00a2\n \r \16 \u00a3\3 \3 \6 \u00a8\n \r \16 \u00a9")
+        buf.write("\5 \u00ac\n \2\2!\3\3\5\4\7\5\t\2\13\2\r\2\17\2\21\6\23")
+        buf.write("\7\25\b\27\t\31\n\33\13\35\f\37\r!\16#\17%\20\'\21)\22")
+        buf.write("+\23-\24/\25\61\26\63\27\65\30\67\319\32;\33=\34?\35\3")
+        buf.write("\2\6\5\2\13\f\16\17\"\"\3\2c|\3\2C\\\7\2\f\f$$*+>>@@\2")
+        buf.write("\u00ae\2\3\3\2\2\2\2\5\3\2\2\2\2\7\3\2\2\2\2\21\3\2\2")
+        buf.write("\2\2\23\3\2\2\2\2\25\3\2\2\2\2\27\3\2\2\2\2\31\3\2\2\2")
+        buf.write("\2\33\3\2\2\2\2\35\3\2\2\2\2\37\3\2\2\2\2!\3\2\2\2\2#")
+        buf.write("\3\2\2\2\2%\3\2\2\2\2\'\3\2\2\2\2)\3\2\2\2\2+\3\2\2\2")
+        buf.write("\2-\3\2\2\2\2/\3\2\2\2\2\61\3\2\2\2\2\63\3\2\2\2\2\65")
+        buf.write("\3\2\2\2\2\67\3\2\2\2\29\3\2\2\2\2;\3\2\2\2\2=\3\2\2\2")
+        buf.write("\2?\3\2\2\2\3A\3\2\2\2\5G\3\2\2\2\7O\3\2\2\2\tU\3\2\2")
+        buf.write("\2\13W\3\2\2\2\rY\3\2\2\2\17[\3\2\2\2\21]\3\2\2\2\23_")
+        buf.write("\3\2\2\2\25a\3\2\2\2\27c\3\2\2\2\31e\3\2\2\2\33g\3\2\2")
+        buf.write("\2\35i\3\2\2\2\37k\3\2\2\2!m\3\2\2\2#o\3\2\2\2%q\3\2\2")
+        buf.write("\2\'s\3\2\2\2)u\3\2\2\2+w\3\2\2\2-{\3\2\2\2/\177\3\2\2")
+        buf.write("\2\61\u0083\3\2\2\2\63\u0087\3\2\2\2\65\u008b\3\2\2\2")
+        buf.write("\67\u008f\3\2\2\29\u0093\3\2\2\2;\u0099\3\2\2\2=\u009c")
+        buf.write("\3\2\2\2?\u00a1\3\2\2\2AB\7}\2\2BC\7w\2\2CD\7p\2\2DE\7")
+        buf.write("n\2\2EF\7\177\2\2F\4\3\2\2\2GH\7}\2\2HI\7\61\2\2IJ\7w")
+        buf.write("\2\2JK\7p\2\2KL\7n\2\2LM\7\177\2\2M\6\3\2\2\2NP\t\2\2")
+        buf.write("\2ON\3\2\2\2PQ\3\2\2\2QO\3\2\2\2QR\3\2\2\2RS\3\2\2\2S")
+        buf.write("T\b\4\2\2T\b\3\2\2\2UV\t\3\2\2V\n\3\2\2\2WX\t\4\2\2X\f")
+        buf.write("\3\2\2\2YZ\4\62;\2Z\16\3\2\2\2[\\\n\5\2\2\\\20\3\2\2\2")
+        buf.write("]^\7\60\2\2^\22\3\2\2\2_`\7.\2\2`\24\3\2\2\2ab\7=\2\2")
+        buf.write("b\26\3\2\2\2cd\7<\2\2d\30\3\2\2\2ef\7/\2\2f\32\3\2\2\2")
+        buf.write("gh\7*\2\2h\34\3\2\2\2ij\7+\2\2j\36\3\2\2\2kl\7}\2\2l ")
+        buf.write("\3\2\2\2mn\7\177\2\2n\"\3\2\2\2op\7>\2\2p$\3\2\2\2qr\7")
+        buf.write("@\2\2r&\3\2\2\2st\7B\2\2t(\3\2\2\2uv\7a\2\2v*\3\2\2\2")
+        buf.write("wx\7c\2\2xy\7p\2\2yz\7f\2\2z,\3\2\2\2{|\7c\2\2|}\7q\2")
+        buf.write("\2}~\7l\2\2~.\3\2\2\2\177\u0080\7d\2\2\u0080\u0081\7g")
+        buf.write("\2\2\u0081\u0082\7p\2\2\u0082\60\3\2\2\2\u0083\u0084\7")
+        buf.write("e\2\2\u0084\u0085\7p\2\2\u0085\u0086\7v\2\2\u0086\62\3")
+        buf.write("\2\2\2\u0087\u0088\7g\2\2\u0088\u0089\7s\2\2\u0089\u008a")
+        buf.write("\7w\2\2\u008a\64\3\2\2\2\u008b\u008c\7k\2\2\u008c\u008d")
+        buf.write("\7e\2\2\u008d\u008e\7n\2\2\u008e\66\3\2\2\2\u008f\u0090")
+        buf.write("\7q\2\2\u0090\u0091\7d\2\2\u0091\u0092\7l\2\2\u00928\3")
+        buf.write("\2\2\2\u0093\u0094\7s\2\2\u0094\u0095\7w\2\2\u0095\u0096")
+        buf.write("\7c\2\2\u0096:\3\2\2\2\u0097\u009a\5\t\5\2\u0098\u009a")
+        buf.write("\5\13\6\2\u0099\u0097\3\2\2\2\u0099\u0098\3\2\2\2\u009a")
+        buf.write("<\3\2\2\2\u009b\u009d\5;\36\2\u009c\u009b\3\2\2\2\u009d")
+        buf.write("\u009e\3\2\2\2\u009e\u009c\3\2\2\2\u009e\u009f\3\2\2\2")
+        buf.write("\u009f>\3\2\2\2\u00a0\u00a2\5\r\7\2\u00a1\u00a0\3\2\2")
+        buf.write("\2\u00a2\u00a3\3\2\2\2\u00a3\u00a1\3\2\2\2\u00a3\u00a4")
+        buf.write("\3\2\2\2\u00a4\u00ab\3\2\2\2\u00a5\u00a7\5\21\t\2\u00a6")
+        buf.write("\u00a8\5\r\7\2\u00a7\u00a6\3\2\2\2\u00a8\u00a9\3\2\2\2")
+        buf.write("\u00a9\u00a7\3\2\2\2\u00a9\u00aa\3\2\2\2\u00aa\u00ac\3")
+        buf.write("\2\2\2\u00ab\u00a5\3\2\2\2\u00ab\u00ac\3\2\2\2\u00ac@")
+        buf.write("\3\2\2\2\t\2Q\u0099\u009e\u00a3\u00a9\u00ab\3\b\2\2")
+        return buf.getvalue()
+
+
+class unlLexer(Lexer):
+
+    atn = ATNDeserializer().deserialize(serializedATN())
+
+    decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
+
+    T__0 = 1
+    T__1 = 2
+    WS = 3
+    DOT = 4
+    COMMA = 5
+    SEMCOL = 6
+    COLON = 7
+    DASH = 8
+    LP = 9
+    RP = 10
+    LC = 11
+    RC = 12
+    LESS = 13
+    GREATER = 14
+    AT = 15
+    UNDERSCORE = 16
+    AND = 17
+    AOJ = 18
+    BEN = 19
+    CNT = 20
+    EQU = 21
+    ICL = 22
+    OBJ = 23
+    QUA = 24
+    LETTER = 25
+    WORD = 26
+    VALUE = 27
+
+    channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ]
+
+    modeNames = [ "DEFAULT_MODE" ]
+
+    literalNames = [ "<INVALID>",
+            "'{unl}'", "'{/unl}'", "'.'", "','", "';'", "':'", "'-'", "'('", 
+            "')'", "'{'", "'}'", "'<'", "'>'", "'@'", "'_'", "'and'", "'aoj'", 
+            "'ben'", "'cnt'", "'equ'", "'icl'", "'obj'", "'qua'" ]
+
+    symbolicNames = [ "<INVALID>",
+            "WS", "DOT", "COMMA", "SEMCOL", "COLON", "DASH", "LP", "RP", 
+            "LC", "RC", "LESS", "GREATER", "AT", "UNDERSCORE", "AND", "AOJ", 
+            "BEN", "CNT", "EQU", "ICL", "OBJ", "QUA", "LETTER", "WORD", 
+            "VALUE" ]
+
+    ruleNames = [ "T__0", "T__1", "WS", "LOWERCASE", "UPPERCASE", "DIGIT", 
+                  "ASCII", "DOT", "COMMA", "SEMCOL", "COLON", "DASH", "LP", 
+                  "RP", "LC", "RC", "LESS", "GREATER", "AT", "UNDERSCORE", 
+                  "AND", "AOJ", "BEN", "CNT", "EQU", "ICL", "OBJ", "QUA", 
+                  "LETTER", "WORD", "VALUE" ]
+
+    grammarFileName = "unl.g4"
+
+    def __init__(self, input=None, output:TextIO = sys.stdout):
+        super().__init__(input, output)
+        self.checkVersion("4.9.3")
+        self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
+        self._actions = None
+        self._predicates = None
+
+
diff --git a/grammar/unl/unlLexer.tokens b/grammar/unl/unlLexer.tokens
new file mode 100644
index 0000000..c2b43c1
--- /dev/null
+++ b/grammar/unl/unlLexer.tokens
@@ -0,0 +1,50 @@
+T__0=1
+T__1=2
+WS=3
+DOT=4
+COMMA=5
+SEMCOL=6
+COLON=7
+DASH=8
+LP=9
+RP=10
+LC=11
+RC=12
+LESS=13
+GREATER=14
+AT=15
+UNDERSCORE=16
+AND=17
+AOJ=18
+BEN=19
+CNT=20
+EQU=21
+ICL=22
+OBJ=23
+QUA=24
+LETTER=25
+WORD=26
+VALUE=27
+'{unl}'=1
+'{/unl}'=2
+'.'=4
+','=5
+';'=6
+':'=7
+'-'=8
+'('=9
+')'=10
+'{'=11
+'}'=12
+'<'=13
+'>'=14
+'@'=15
+'_'=16
+'and'=17
+'aoj'=18
+'ben'=19
+'cnt'=20
+'equ'=21
+'icl'=22
+'obj'=23
+'qua'=24
diff --git a/grammar/unl/unlListener.py b/grammar/unl/unlListener.py
new file mode 100644
index 0000000..d107a9c
--- /dev/null
+++ b/grammar/unl/unlListener.py
@@ -0,0 +1,129 @@
+# Generated from grammar/unl/unl.g4 by ANTLR 4.9.3
+from antlr4 import *
+if __name__ is not None and "." in __name__:
+    from .unlParser import unlParser
+else:
+    from unlParser import unlParser
+
+# This class defines a complete listener for a parse tree produced by unlParser.
+class unlListener(ParseTreeListener):
+
+    # Enter a parse tree produced by unlParser#unlPart.
+    def enterUnlPart(self, ctx:unlParser.UnlPartContext):
+        pass
+
+    # Exit a parse tree produced by unlParser#unlPart.
+    def exitUnlPart(self, ctx:unlParser.UnlPartContext):
+        pass
+
+
+    # Enter a parse tree produced by unlParser#relationOccurrence.
+    def enterRelationOccurrence(self, ctx:unlParser.RelationOccurrenceContext):
+        pass
+
+    # Exit a parse tree produced by unlParser#relationOccurrence.
+    def exitRelationOccurrence(self, ctx:unlParser.RelationOccurrenceContext):
+        pass
+
+
+    # Enter a parse tree produced by unlParser#universalWord.
+    def enterUniversalWord(self, ctx:unlParser.UniversalWordContext):
+        pass
+
+    # Exit a parse tree produced by unlParser#universalWord.
+    def exitUniversalWord(self, ctx:unlParser.UniversalWordContext):
+        pass
+
+
+    # Enter a parse tree produced by unlParser#headword.
+    def enterHeadword(self, ctx:unlParser.HeadwordContext):
+        pass
+
+    # Exit a parse tree produced by unlParser#headword.
+    def exitHeadword(self, ctx:unlParser.HeadwordContext):
+        pass
+
+
+    # Enter a parse tree produced by unlParser#restriction.
+    def enterRestriction(self, ctx:unlParser.RestrictionContext):
+        pass
+
+    # Exit a parse tree produced by unlParser#restriction.
+    def exitRestriction(self, ctx:unlParser.RestrictionContext):
+        pass
+
+
+    # Enter a parse tree produced by unlParser#attribute.
+    def enterAttribute(self, ctx:unlParser.AttributeContext):
+        pass
+
+    # Exit a parse tree produced by unlParser#attribute.
+    def exitAttribute(self, ctx:unlParser.AttributeContext):
+        pass
+
+
+    # Enter a parse tree produced by unlParser#value.
+    def enterValue(self, ctx:unlParser.ValueContext):
+        pass
+
+    # Exit a parse tree produced by unlParser#value.
+    def exitValue(self, ctx:unlParser.ValueContext):
+        pass
+
+
+    # Enter a parse tree produced by unlParser#universalRelation.
+    def enterUniversalRelation(self, ctx:unlParser.UniversalRelationContext):
+        pass
+
+    # Exit a parse tree produced by unlParser#universalRelation.
+    def exitUniversalRelation(self, ctx:unlParser.UniversalRelationContext):
+        pass
+
+
+    # Enter a parse tree produced by unlParser#sentence.
+    def enterSentence(self, ctx:unlParser.SentenceContext):
+        pass
+
+    # Exit a parse tree produced by unlParser#sentence.
+    def exitSentence(self, ctx:unlParser.SentenceContext):
+        pass
+
+
+    # Enter a parse tree produced by unlParser#ident.
+    def enterIdent(self, ctx:unlParser.IdentContext):
+        pass
+
+    # Exit a parse tree produced by unlParser#ident.
+    def exitIdent(self, ctx:unlParser.IdentContext):
+        pass
+
+
+    # Enter a parse tree produced by unlParser#word.
+    def enterWord(self, ctx:unlParser.WordContext):
+        pass
+
+    # Exit a parse tree produced by unlParser#word.
+    def exitWord(self, ctx:unlParser.WordContext):
+        pass
+
+
+    # Enter a parse tree produced by unlParser#punctuation.
+    def enterPunctuation(self, ctx:unlParser.PunctuationContext):
+        pass
+
+    # Exit a parse tree produced by unlParser#punctuation.
+    def exitPunctuation(self, ctx:unlParser.PunctuationContext):
+        pass
+
+
+    # Enter a parse tree produced by unlParser#bracket.
+    def enterBracket(self, ctx:unlParser.BracketContext):
+        pass
+
+    # Exit a parse tree produced by unlParser#bracket.
+    def exitBracket(self, ctx:unlParser.BracketContext):
+        pass
+
+
+
+del unlParser
\ No newline at end of file
diff --git a/grammar/unl/unlParser.py b/grammar/unl/unlParser.py
new file mode 100644
index 0000000..6609289
--- /dev/null
+++ b/grammar/unl/unlParser.py
@@ -0,0 +1,945 @@
+# Generated from grammar/unl/unl.g4 by ANTLR 4.9.3
+# encoding: utf-8
+from antlr4 import *
+from io import StringIO
+import sys
+if sys.version_info[1] > 5:
+	from typing import TextIO
+else:
+	from typing.io import TextIO
+
+
+def serializedATN():
+    with StringIO() as buf:
+        buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\3\35")
+        buf.write("g\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7\4\b")
+        buf.write("\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r\4\16\t")
+        buf.write("\16\3\2\3\2\6\2\37\n\2\r\2\16\2 \3\2\3\2\3\3\3\3\3\3\3")
+        buf.write("\3\3\3\3\3\3\3\3\4\3\4\3\4\3\4\3\4\7\4\61\n\4\f\4\16\4")
+        buf.write("\64\13\4\3\4\3\4\5\48\n\4\3\4\7\4;\n\4\f\4\16\4>\13\4")
+        buf.write("\3\4\5\4A\n\4\3\5\3\5\3\6\3\6\3\6\3\6\3\7\3\7\3\7\3\7")
+        buf.write("\3\b\3\b\3\t\3\t\3\n\3\n\3\n\7\nT\n\n\f\n\16\nW\13\n\3")
+        buf.write("\13\3\13\3\13\7\13\\\n\13\f\13\16\13_\13\13\3\f\3\f\3")
+        buf.write("\r\3\r\3\16\3\16\3\16\2\2\17\2\4\6\b\n\f\16\20\22\24\26")
+        buf.write("\30\32\2\6\3\2\23\32\3\2\33\34\3\2\6\n\3\2\13\16\2b\2")
+        buf.write("\34\3\2\2\2\4$\3\2\2\2\6@\3\2\2\2\bB\3\2\2\2\nD\3\2\2")
+        buf.write("\2\fH\3\2\2\2\16L\3\2\2\2\20N\3\2\2\2\22U\3\2\2\2\24X")
+        buf.write("\3\2\2\2\26`\3\2\2\2\30b\3\2\2\2\32d\3\2\2\2\34\36\7\3")
+        buf.write("\2\2\35\37\5\4\3\2\36\35\3\2\2\2\37 \3\2\2\2 \36\3\2\2")
+        buf.write("\2 !\3\2\2\2!\"\3\2\2\2\"#\7\4\2\2#\3\3\2\2\2$%\5\20\t")
+        buf.write("\2%&\7\13\2\2&\'\5\6\4\2\'(\7\7\2\2()\5\6\4\2)*\7\f\2")
+        buf.write("\2*\5\3\2\2\2+\67\5\b\5\2,-\7\13\2\2-\62\5\n\6\2./\7\7")
+        buf.write("\2\2/\61\5\n\6\2\60.\3\2\2\2\61\64\3\2\2\2\62\60\3\2\2")
+        buf.write("\2\62\63\3\2\2\2\63\65\3\2\2\2\64\62\3\2\2\2\65\66\7\f")
+        buf.write("\2\2\668\3\2\2\2\67,\3\2\2\2\678\3\2\2\28<\3\2\2\29;\5")
+        buf.write("\f\7\2:9\3\2\2\2;>\3\2\2\2<:\3\2\2\2<=\3\2\2\2=A\3\2\2")
+        buf.write("\2><\3\2\2\2?A\5\16\b\2@+\3\2\2\2@?\3\2\2\2A\7\3\2\2\2")
+        buf.write("BC\5\24\13\2C\t\3\2\2\2DE\5\20\t\2EF\7\20\2\2FG\5\24\13")
+        buf.write("\2G\13\3\2\2\2HI\7\6\2\2IJ\7\21\2\2JK\5\24\13\2K\r\3\2")
+        buf.write("\2\2LM\7\35\2\2M\17\3\2\2\2NO\t\2\2\2O\21\3\2\2\2PT\5")
+        buf.write("\26\f\2QT\5\30\r\2RT\5\32\16\2SP\3\2\2\2SQ\3\2\2\2SR\3")
+        buf.write("\2\2\2TW\3\2\2\2US\3\2\2\2UV\3\2\2\2V\23\3\2\2\2WU\3\2")
+        buf.write("\2\2X]\5\26\f\2YZ\7\22\2\2Z\\\5\26\f\2[Y\3\2\2\2\\_\3")
+        buf.write("\2\2\2][\3\2\2\2]^\3\2\2\2^\25\3\2\2\2_]\3\2\2\2`a\t\3")
+        buf.write("\2\2a\27\3\2\2\2bc\t\4\2\2c\31\3\2\2\2de\t\5\2\2e\33\3")
+        buf.write("\2\2\2\n \62\67<@SU]")
+        return buf.getvalue()
+
+
+class unlParser ( Parser ):
+
+    grammarFileName = "unl.g4"
+
+    atn = ATNDeserializer().deserialize(serializedATN())
+
+    decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
+
+    sharedContextCache = PredictionContextCache()
+
+    literalNames = [ "<INVALID>", "'{unl}'", "'{/unl}'", "<INVALID>", "'.'", 
+                     "','", "';'", "':'", "'-'", "'('", "')'", "'{'", "'}'", 
+                     "'<'", "'>'", "'@'", "'_'", "'and'", "'aoj'", "'ben'", 
+                     "'cnt'", "'equ'", "'icl'", "'obj'", "'qua'" ]
+
+    symbolicNames = [ "<INVALID>", "<INVALID>", "<INVALID>", "WS", "DOT", 
+                      "COMMA", "SEMCOL", "COLON", "DASH", "LP", "RP", "LC", 
+                      "RC", "LESS", "GREATER", "AT", "UNDERSCORE", "AND", 
+                      "AOJ", "BEN", "CNT", "EQU", "ICL", "OBJ", "QUA", "LETTER", 
+                      "WORD", "VALUE" ]
+
+    RULE_unlPart = 0
+    RULE_relationOccurrence = 1
+    RULE_universalWord = 2
+    RULE_headword = 3
+    RULE_restriction = 4
+    RULE_attribute = 5
+    RULE_value = 6
+    RULE_universalRelation = 7
+    RULE_sentence = 8
+    RULE_ident = 9
+    RULE_word = 10
+    RULE_punctuation = 11
+    RULE_bracket = 12
+
+    ruleNames =  [ "unlPart", "relationOccurrence", "universalWord", "headword", 
+                   "restriction", "attribute", "value", "universalRelation", 
+                   "sentence", "ident", "word", "punctuation", "bracket" ]
+
+    EOF = Token.EOF
+    T__0=1
+    T__1=2
+    WS=3
+    DOT=4
+    COMMA=5
+    SEMCOL=6
+    COLON=7
+    DASH=8
+    LP=9
+    RP=10
+    LC=11
+    RC=12
+    LESS=13
+    GREATER=14
+    AT=15
+    UNDERSCORE=16
+    AND=17
+    AOJ=18
+    BEN=19
+    CNT=20
+    EQU=21
+    ICL=22
+    OBJ=23
+    QUA=24
+    LETTER=25
+    WORD=26
+    VALUE=27
+
+    def __init__(self, input:TokenStream, output:TextIO = sys.stdout):
+        super().__init__(input, output)
+        self.checkVersion("4.9.3")
+        self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)
+        self._predicates = None
+
+
+
+
+    class UnlPartContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def relationOccurrence(self, i:int=None):
+            if i is None:
+                return self.getTypedRuleContexts(unlParser.RelationOccurrenceContext)
+            else:
+                return self.getTypedRuleContext(unlParser.RelationOccurrenceContext,i)
+
+
+        def getRuleIndex(self):
+            return unlParser.RULE_unlPart
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterUnlPart" ):
+                listener.enterUnlPart(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitUnlPart" ):
+                listener.exitUnlPart(self)
+
+
+
+
+    def unlPart(self):
+
+        localctx = unlParser.UnlPartContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 0, self.RULE_unlPart)
+        self._la = 0 # Token type
+        try:
+            self.enterOuterAlt(localctx, 1)
+            self.state = 26
+            self.match(unlParser.T__0)
+            self.state = 28 
+            self._errHandler.sync(self)
+            _la = self._input.LA(1)
+            while True:
+                self.state = 27
+                self.relationOccurrence()
+                self.state = 30 
+                self._errHandler.sync(self)
+                _la = self._input.LA(1)
+                if not ((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << unlParser.AND) | (1 << unlParser.AOJ) | (1 << unlParser.BEN) | (1 << unlParser.CNT) | (1 << unlParser.EQU) | (1 << unlParser.ICL) | (1 << unlParser.OBJ) | (1 << unlParser.QUA))) != 0)):
+                    break
+
+            self.state = 32
+            self.match(unlParser.T__1)
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
+
+
+    class RelationOccurrenceContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def universalRelation(self):
+            return self.getTypedRuleContext(unlParser.UniversalRelationContext,0)
+
+
+        def LP(self):
+            return self.getToken(unlParser.LP, 0)
+
+        def universalWord(self, i:int=None):
+            if i is None:
+                return self.getTypedRuleContexts(unlParser.UniversalWordContext)
+            else:
+                return self.getTypedRuleContext(unlParser.UniversalWordContext,i)
+
+
+        def COMMA(self):
+            return self.getToken(unlParser.COMMA, 0)
+
+        def RP(self):
+            return self.getToken(unlParser.RP, 0)
+
+        def getRuleIndex(self):
+            return unlParser.RULE_relationOccurrence
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterRelationOccurrence" ):
+                listener.enterRelationOccurrence(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitRelationOccurrence" ):
+                listener.exitRelationOccurrence(self)
+
+
+
+
+    def relationOccurrence(self):
+
+        localctx = unlParser.RelationOccurrenceContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 2, self.RULE_relationOccurrence)
+        try:
+            self.enterOuterAlt(localctx, 1)
+            self.state = 34
+            self.universalRelation()
+            self.state = 35
+            self.match(unlParser.LP)
+            self.state = 36
+            self.universalWord()
+            self.state = 37
+            self.match(unlParser.COMMA)
+            self.state = 38
+            self.universalWord()
+            self.state = 39
+            self.match(unlParser.RP)
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
+
+
+    class UniversalWordContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def headword(self):
+            return self.getTypedRuleContext(unlParser.HeadwordContext,0)
+
+
+        def LP(self):
+            return self.getToken(unlParser.LP, 0)
+
+        def restriction(self, i:int=None):
+            if i is None:
+                return self.getTypedRuleContexts(unlParser.RestrictionContext)
+            else:
+                return self.getTypedRuleContext(unlParser.RestrictionContext,i)
+
+
+        def RP(self):
+            return self.getToken(unlParser.RP, 0)
+
+        def attribute(self, i:int=None):
+            if i is None:
+                return self.getTypedRuleContexts(unlParser.AttributeContext)
+            else:
+                return self.getTypedRuleContext(unlParser.AttributeContext,i)
+
+
+        def COMMA(self, i:int=None):
+            if i is None:
+                return self.getTokens(unlParser.COMMA)
+            else:
+                return self.getToken(unlParser.COMMA, i)
+
+        def value(self):
+            return self.getTypedRuleContext(unlParser.ValueContext,0)
+
+
+        def getRuleIndex(self):
+            return unlParser.RULE_universalWord
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterUniversalWord" ):
+                listener.enterUniversalWord(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitUniversalWord" ):
+                listener.exitUniversalWord(self)
+
+
+
+
+    def universalWord(self):
+
+        localctx = unlParser.UniversalWordContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 4, self.RULE_universalWord)
+        self._la = 0 # Token type
+        try:
+            self.state = 62
+            self._errHandler.sync(self)
+            token = self._input.LA(1)
+            if token in [unlParser.LETTER, unlParser.WORD]:
+                self.enterOuterAlt(localctx, 1)
+                self.state = 41
+                self.headword()
+                self.state = 53
+                self._errHandler.sync(self)
+                _la = self._input.LA(1)
+                if _la==unlParser.LP:
+                    self.state = 42
+                    self.match(unlParser.LP)
+                    self.state = 43
+                    self.restriction()
+                    self.state = 48
+                    self._errHandler.sync(self)
+                    _la = self._input.LA(1)
+                    while _la==unlParser.COMMA:
+                        self.state = 44
+                        self.match(unlParser.COMMA)
+                        self.state = 45
+                        self.restriction()
+                        self.state = 50
+                        self._errHandler.sync(self)
+                        _la = self._input.LA(1)
+
+                    self.state = 51
+                    self.match(unlParser.RP)
+
+
+                self.state = 58
+                self._errHandler.sync(self)
+                _la = self._input.LA(1)
+                while _la==unlParser.DOT:
+                    self.state = 55
+                    self.attribute()
+                    self.state = 60
+                    self._errHandler.sync(self)
+                    _la = self._input.LA(1)
+
+                pass
+            elif token in [unlParser.VALUE]:
+                self.enterOuterAlt(localctx, 2)
+                self.state = 61
+                self.value()
+                pass
+            else:
+                raise NoViableAltException(self)
+
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
+
+
+    class HeadwordContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def ident(self):
+            return self.getTypedRuleContext(unlParser.IdentContext,0)
+
+
+        def getRuleIndex(self):
+            return unlParser.RULE_headword
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterHeadword" ):
+                listener.enterHeadword(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitHeadword" ):
+                listener.exitHeadword(self)
+
+
+
+
+    def headword(self):
+
+        localctx = unlParser.HeadwordContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 6, self.RULE_headword)
+        try:
+            self.enterOuterAlt(localctx, 1)
+            self.state = 64
+            self.ident()
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
+
+
+    class RestrictionContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def universalRelation(self):
+            return self.getTypedRuleContext(unlParser.UniversalRelationContext,0)
+
+
+        def GREATER(self):
+            return self.getToken(unlParser.GREATER, 0)
+
+        def ident(self):
+            return self.getTypedRuleContext(unlParser.IdentContext,0)
+
+
+        def getRuleIndex(self):
+            return unlParser.RULE_restriction
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterRestriction" ):
+                listener.enterRestriction(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitRestriction" ):
+                listener.exitRestriction(self)
+
+
+
+
+    def restriction(self):
+
+        localctx = unlParser.RestrictionContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 8, self.RULE_restriction)
+        try:
+            self.enterOuterAlt(localctx, 1)
+            self.state = 66
+            self.universalRelation()
+            self.state = 67
+            self.match(unlParser.GREATER)
+            self.state = 68
+            self.ident()
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
+
+
+    class AttributeContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def DOT(self):
+            return self.getToken(unlParser.DOT, 0)
+
+        def AT(self):
+            return self.getToken(unlParser.AT, 0)
+
+        def ident(self):
+            return self.getTypedRuleContext(unlParser.IdentContext,0)
+
+
+        def getRuleIndex(self):
+            return unlParser.RULE_attribute
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterAttribute" ):
+                listener.enterAttribute(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitAttribute" ):
+                listener.exitAttribute(self)
+
+
+
+
+    def attribute(self):
+
+        localctx = unlParser.AttributeContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 10, self.RULE_attribute)
+        try:
+            self.enterOuterAlt(localctx, 1)
+            self.state = 70
+            self.match(unlParser.DOT)
+            self.state = 71
+            self.match(unlParser.AT)
+            self.state = 72
+            self.ident()
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
+
+
+    class ValueContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def VALUE(self):
+            return self.getToken(unlParser.VALUE, 0)
+
+        def getRuleIndex(self):
+            return unlParser.RULE_value
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterValue" ):
+                listener.enterValue(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitValue" ):
+                listener.exitValue(self)
+
+
+
+
+    def value(self):
+
+        localctx = unlParser.ValueContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 12, self.RULE_value)
+        try:
+            self.enterOuterAlt(localctx, 1)
+            self.state = 74
+            self.match(unlParser.VALUE)
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
+
+
+    class UniversalRelationContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def AND(self):
+            return self.getToken(unlParser.AND, 0)
+
+        def AOJ(self):
+            return self.getToken(unlParser.AOJ, 0)
+
+        def BEN(self):
+            return self.getToken(unlParser.BEN, 0)
+
+        def CNT(self):
+            return self.getToken(unlParser.CNT, 0)
+
+        def EQU(self):
+            return self.getToken(unlParser.EQU, 0)
+
+        def ICL(self):
+            return self.getToken(unlParser.ICL, 0)
+
+        def OBJ(self):
+            return self.getToken(unlParser.OBJ, 0)
+
+        def QUA(self):
+            return self.getToken(unlParser.QUA, 0)
+
+        def getRuleIndex(self):
+            return unlParser.RULE_universalRelation
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterUniversalRelation" ):
+                listener.enterUniversalRelation(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitUniversalRelation" ):
+                listener.exitUniversalRelation(self)
+
+
+
+
+    def universalRelation(self):
+
+        localctx = unlParser.UniversalRelationContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 14, self.RULE_universalRelation)
+        self._la = 0 # Token type
+        try:
+            self.enterOuterAlt(localctx, 1)
+            self.state = 76
+            _la = self._input.LA(1)
+            if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << unlParser.AND) | (1 << unlParser.AOJ) | (1 << unlParser.BEN) | (1 << unlParser.CNT) | (1 << unlParser.EQU) | (1 << unlParser.ICL) | (1 << unlParser.OBJ) | (1 << unlParser.QUA))) != 0)):
+                self._errHandler.recoverInline(self)
+            else:
+                self._errHandler.reportMatch(self)
+                self.consume()
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
+
+
+    class SentenceContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def word(self, i:int=None):
+            if i is None:
+                return self.getTypedRuleContexts(unlParser.WordContext)
+            else:
+                return self.getTypedRuleContext(unlParser.WordContext,i)
+
+
+        def punctuation(self, i:int=None):
+            if i is None:
+                return self.getTypedRuleContexts(unlParser.PunctuationContext)
+            else:
+                return self.getTypedRuleContext(unlParser.PunctuationContext,i)
+
+
+        def bracket(self, i:int=None):
+            if i is None:
+                return self.getTypedRuleContexts(unlParser.BracketContext)
+            else:
+                return self.getTypedRuleContext(unlParser.BracketContext,i)
+
+
+        def getRuleIndex(self):
+            return unlParser.RULE_sentence
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterSentence" ):
+                listener.enterSentence(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitSentence" ):
+                listener.exitSentence(self)
+
+
+
+
+    def sentence(self):
+
+        localctx = unlParser.SentenceContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 16, self.RULE_sentence)
+        self._la = 0 # Token type
+        try:
+            self.enterOuterAlt(localctx, 1)
+            self.state = 83
+            self._errHandler.sync(self)
+            _la = self._input.LA(1)
+            while (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << unlParser.DOT) | (1 << unlParser.COMMA) | (1 << unlParser.SEMCOL) | (1 << unlParser.COLON) | (1 << unlParser.DASH) | (1 << unlParser.LP) | (1 << unlParser.RP) | (1 << unlParser.LC) | (1 << unlParser.RC) | (1 << unlParser.LETTER) | (1 << unlParser.WORD))) != 0):
+                self.state = 81
+                self._errHandler.sync(self)
+                token = self._input.LA(1)
+                if token in [unlParser.LETTER, unlParser.WORD]:
+                    self.state = 78
+                    self.word()
+                    pass
+                elif token in [unlParser.DOT, unlParser.COMMA, unlParser.SEMCOL, unlParser.COLON, unlParser.DASH]:
+                    self.state = 79
+                    self.punctuation()
+                    pass
+                elif token in [unlParser.LP, unlParser.RP, unlParser.LC, unlParser.RC]:
+                    self.state = 80
+                    self.bracket()
+                    pass
+                else:
+                    raise NoViableAltException(self)
+
+                self.state = 85
+                self._errHandler.sync(self)
+                _la = self._input.LA(1)
+
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
+
+
+    class IdentContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def word(self, i:int=None):
+            if i is None:
+                return self.getTypedRuleContexts(unlParser.WordContext)
+            else:
+                return self.getTypedRuleContext(unlParser.WordContext,i)
+
+
+        def UNDERSCORE(self, i:int=None):
+            if i is None:
+                return self.getTokens(unlParser.UNDERSCORE)
+            else:
+                return self.getToken(unlParser.UNDERSCORE, i)
+
+        def getRuleIndex(self):
+            return unlParser.RULE_ident
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterIdent" ):
+                listener.enterIdent(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitIdent" ):
+                listener.exitIdent(self)
+
+
+
+
+    def ident(self):
+
+        localctx = unlParser.IdentContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 18, self.RULE_ident)
+        self._la = 0 # Token type
+        try:
+            self.enterOuterAlt(localctx, 1)
+            self.state = 86
+            self.word()
+            self.state = 91
+            self._errHandler.sync(self)
+            _la = self._input.LA(1)
+            while _la==unlParser.UNDERSCORE:
+                self.state = 87
+                self.match(unlParser.UNDERSCORE)
+                self.state = 88
+                self.word()
+                self.state = 93
+                self._errHandler.sync(self)
+                _la = self._input.LA(1)
+
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
+
+
+    class WordContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def LETTER(self):
+            return self.getToken(unlParser.LETTER, 0)
+
+        def WORD(self):
+            return self.getToken(unlParser.WORD, 0)
+
+        def getRuleIndex(self):
+            return unlParser.RULE_word
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterWord" ):
+                listener.enterWord(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitWord" ):
+                listener.exitWord(self)
+
+
+
+
+    def word(self):
+
+        localctx = unlParser.WordContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 20, self.RULE_word)
+        self._la = 0 # Token type
+        try:
+            self.enterOuterAlt(localctx, 1)
+            self.state = 94
+            _la = self._input.LA(1)
+            if not(_la==unlParser.LETTER or _la==unlParser.WORD):
+                self._errHandler.recoverInline(self)
+            else:
+                self._errHandler.reportMatch(self)
+                self.consume()
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
+
+
+    class PunctuationContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def DOT(self):
+            return self.getToken(unlParser.DOT, 0)
+
+        def COMMA(self):
+            return self.getToken(unlParser.COMMA, 0)
+
+        def SEMCOL(self):
+            return self.getToken(unlParser.SEMCOL, 0)
+
+        def COLON(self):
+            return self.getToken(unlParser.COLON, 0)
+
+        def DASH(self):
+            return self.getToken(unlParser.DASH, 0)
+
+        def getRuleIndex(self):
+            return unlParser.RULE_punctuation
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterPunctuation" ):
+                listener.enterPunctuation(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitPunctuation" ):
+                listener.exitPunctuation(self)
+
+
+
+
+    def punctuation(self):
+
+        localctx = unlParser.PunctuationContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 22, self.RULE_punctuation)
+        self._la = 0 # Token type
+        try:
+            self.enterOuterAlt(localctx, 1)
+            self.state = 96
+            _la = self._input.LA(1)
+            if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << unlParser.DOT) | (1 << unlParser.COMMA) | (1 << unlParser.SEMCOL) | (1 << unlParser.COLON) | (1 << unlParser.DASH))) != 0)):
+                self._errHandler.recoverInline(self)
+            else:
+                self._errHandler.reportMatch(self)
+                self.consume()
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
+
+
+    class BracketContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def LP(self):
+            return self.getToken(unlParser.LP, 0)
+
+        def RP(self):
+            return self.getToken(unlParser.RP, 0)
+
+        def LC(self):
+            return self.getToken(unlParser.LC, 0)
+
+        def RC(self):
+            return self.getToken(unlParser.RC, 0)
+
+        def getRuleIndex(self):
+            return unlParser.RULE_bracket
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterBracket" ):
+                listener.enterBracket(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitBracket" ):
+                listener.exitBracket(self)
+
+
+
+
+    def bracket(self):
+
+        localctx = unlParser.BracketContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 24, self.RULE_bracket)
+        self._la = 0 # Token type
+        try:
+            self.enterOuterAlt(localctx, 1)
+            self.state = 98
+            _la = self._input.LA(1)
+            if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << unlParser.LP) | (1 << unlParser.RP) | (1 << unlParser.LC) | (1 << unlParser.RC))) != 0)):
+                self._errHandler.recoverInline(self)
+            else:
+                self._errHandler.reportMatch(self)
+                self.consume()
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
+
+
+
+
+
diff --git a/input/r1.txt b/input/r1.txt
new file mode 100644
index 0000000..d1728b7
--- /dev/null
+++ b/input/r1.txt
@@ -0,0 +1,18 @@
+[D]
+[S:R1]
+{org:en}
+The system allows a radio channel to take on two states: Listening and Traffic.
+{/org}
+{unl}
+aoj( allow(icl>be, aoj>thing, ben>thing, obj>uw, equ>make_possible).@entry, system(icl>group).@def )
+obj( allow(icl>be, aoj>thing, ben>thing, obj>uw, equ>make_possible).@entry, take_on(aoj>thing, equ>assume,icl>change, obj>thing) )
+ben( allow(icl>be, aoj>thing, ben>thing, obj>uw, equ>make_possible).@entry, channel(icl>radiowave).@indef)
+aoj( take_on(aoj>thing, equ>assume, icl>change, obj>thing), channel(icl>radiowave).@indef )
+obj( take_on(aoj>thing, equ>assume, icl>change, obj>thing), state(icl>attribute).@plu )
+qua( state(icl>attribute).@plu, 2 )
+cnt( state(icl>attribute).@plu, listening(icl>sensing) )
+and( listening(icl>sensing),traffic(icl>communication) )
+{/unl}
+[/S]
+[/D]
+
diff --git a/parse.py b/parse.py
index fa573f0..174ba84 100644
--- a/parse.py
+++ b/parse.py
@@ -85,11 +85,35 @@ def parse_document(input):
     
 
 def parse_org(input):
-    pass 
+    
+    # -- Create python lexer and parser
+    create_lexer_parser_with_antlr(org_grammar)
+    
+    # -- Import Lexer/Parser (after creation by ANTLR4)
+    from grammar.org.orgLexer import orgLexer
+    from grammar.org.orgParser import orgParser
+    
+    # -- Parse ORG part
+    parser = instantiate_lexer_parser(input, orgLexer, orgParser)
+    print("--- Parse origin sentence")
+    tree = parser.orgPart()
+    print("----- resulting tree:\n" + tree.toStringTree(recog=parser))   
     
 
 def parse_unl(input):
-    pass  
+    
+    # -- Create python lexer and parser
+    create_lexer_parser_with_antlr(unl_grammar)
+    
+    # -- Import Lexer/Parser (after creation by ANTLR4)
+    from grammar.unl.unlLexer import unlLexer
+    from grammar.unl.unlParser import unlParser
+    
+    # -- Parse UNL part
+    parser = instantiate_lexer_parser(input, unlLexer, unlParser)
+    print("--- Parse UNL representation")
+    tree = parser.unlPart()
+    print("----- resulting tree:\n" + tree.toStringTree(recog=parser))  
     
      
 #==============================================================================
@@ -109,6 +133,14 @@ def main(argv):
     unl_part = document.sentence.unl_part.to_string()
     print("----- org_part:\n" + org_part)
     print("----- unl_part:\n" + unl_part)
+    
+    # -- ORG Parsing (Sentence Original Part)
+    print("-- ORG Parsing (Origin Sentence) ")
+    parse_org(InputStream(org_part)) 
+    
+    # -- UNL Parsing (Sentence UNL Part)
+    print("-- UNL Parsing (UNL Representation) ")
+    parse_unl(InputStream(unl_part)) 
 
 
 if __name__ == '__main__':
-- 
GitLab