"
else:
s = "<" + str(t.type) + ">"
s = s.replace("\n","\\n")
s = s.replace("\r","\\r")
s = s.replace("\t","\\t")
return "'" + s + "'"
def getErrorListenerDispatch(self):
return ProxyErrorListener(self._listeners)
# subclass needs to override these if there are sempreds or actions
# that the ATN interp needs to execute
def sempred(self, localctx:RuleContext, ruleIndex:int, actionIndex:int):
return True
def precpred(self, localctx:RuleContext , precedence:int):
return True
@property
def state(self):
return self._stateNumber
# Indicate that the recognizer has changed internal state that is
# consistent with the ATN state passed in. This way we always know
# where we are in the ATN as the parser goes along. The rule
# context objects form a stack that lets us see the stack of
# invoking rules. Combined, these give us complete ATN
# configuration information.
@state.setter
def state(self, atnState:int):
self._stateNumber = atnState
del RecognitionException
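# For illustration: a generated parser typically overrides sempred/precpred
# along these lines; the rule and predicate indices below are hypothetical.
#
#   def sempred(self, localctx, ruleIndex, predIndex):
#       if ruleIndex == 0:                       # e.g. a left-recursive "expr" rule
#           return self.expr_sempred(localctx, predIndex)
#       return True
#
#   def expr_sempred(self, localctx, predIndex):
#       if predIndex == 0:
#           return self.precpred(self._ctx, 2)   # precedence check
#       return True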
# File: antlr4-python3-runtime-4.9.1/src/antlr4/RuleContext.py
# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
#/
# A rule context is a record of a single rule invocation. It knows
# which context invoked it, if any. If there is no parent context, then
# naturally the invoking state is not valid. The parent link
# provides a chain upwards from the current rule invocation to the root
# of the invocation tree, forming a stack. We actually carry no
# information about the rule associated with this context (except
# when parsing). We keep only the state number of the invoking state from
# the ATN submachine that invoked this. Contrast this with the s
# pointer inside ParserRuleContext that tracks the current state
# being "executed" for the current rule.
#
# The parent contexts are useful for computing lookahead sets and
# getting error information.
#
# These objects are used during parsing and prediction.
# For the special case of parsers, we use the subclass
# ParserRuleContext.
#
# @see ParserRuleContext
#/
from io import StringIO
from antlr4.tree.Tree import RuleNode, INVALID_INTERVAL, ParseTreeVisitor
from antlr4.tree.Trees import Trees
# need forward declarations
RuleContext = None
Parser = None
class RuleContext(RuleNode):
__slots__ = ('parentCtx', 'invokingState')
EMPTY = None
def __init__(self, parent:RuleContext=None, invokingState:int=-1):
super().__init__()
# What context invoked this rule?
self.parentCtx = parent
# What state invoked the rule associated with this context?
# The "return address" is the followState of invokingState
# If parent is null, this should be -1.
self.invokingState = invokingState
def depth(self):
n = 0
p = self
while p is not None:
p = p.parentCtx
n += 1
return n
# A context is empty if there is no invoking state, meaning nobody called
# the current context.
def isEmpty(self):
return self.invokingState == -1
# satisfy the ParseTree / SyntaxTree interface
def getSourceInterval(self):
return INVALID_INTERVAL
def getRuleContext(self):
return self
def getPayload(self):
return self
# Return the combined text of all child nodes. This method only considers
# tokens which have been added to the parse tree.
#
# Since tokens on hidden channels (e.g. whitespace or comments) are not
# added to the parse trees, they will not appear in the output of this
# method.
#/
def getText(self):
if self.getChildCount() == 0:
return ""
with StringIO() as builder:
for child in self.getChildren():
builder.write(child.getText())
return builder.getvalue()
def getRuleIndex(self):
return -1
# For the rule associated with this parse tree internal node, return
# the outer alternative number used to match the input. The default
# implementation does not compute nor store this alt num. Create
# a subclass of ParserRuleContext with a backing field and set
# option contextSuperClass to set it.
def getAltNumber(self):
        return 0 # should use ATN.INVALID_ALT_NUMBER, but importing ATN here would create a circular import
# Set the outer alternative number for this context node. The default
# implementation does nothing to avoid backing field overhead for
# trees that don't need it. Create a subclass of ParserRuleContext
# with a backing field and set option contextSuperClass.
def setAltNumber(self, altNumber:int):
pass
def getChild(self, i:int):
return None
def getChildCount(self):
return 0
def getChildren(self):
for c in []:
yield c
def accept(self, visitor:ParseTreeVisitor):
return visitor.visitChildren(self)
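# For illustration: a minimal visitor that relies on accept() dispatching to
# visitChildren(); MyVisitor is a hypothetical name, ParseTreeVisitor is
# imported above.
#
#   class MyVisitor(ParseTreeVisitor):
#       def visitChildren(self, node):
#           return [child.accept(self) for child in node.getChildren()]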
# # Call this method to view a parse tree in a dialog box visually.#/
# public Future<JDialog> inspect(@Nullable Parser parser) {
# List<String> ruleNames = parser != null ? Arrays.asList(parser.getRuleNames()) : null;
# return inspect(ruleNames);
# }
#
# public Future<JDialog> inspect(@Nullable List<String> ruleNames) {
# TreeViewer viewer = new TreeViewer(ruleNames, this);
# return viewer.open();
# }
#
# # Save this tree in a postscript file#/
# public void save(@Nullable Parser parser, String fileName)
# throws IOException, PrintException
# {
# List<String> ruleNames = parser != null ? Arrays.asList(parser.getRuleNames()) : null;
# save(ruleNames, fileName);
# }
#
# # Save this tree in a postscript file using a particular font name and size#/
# public void save(@Nullable Parser parser, String fileName,
# String fontName, int fontSize)
# throws IOException
# {
# List<String> ruleNames = parser != null ? Arrays.asList(parser.getRuleNames()) : null;
# save(ruleNames, fileName, fontName, fontSize);
# }
#
# # Save this tree in a postscript file#/
# public void save(@Nullable List<String> ruleNames, String fileName)
# throws IOException, PrintException
# {
# Trees.writePS(this, ruleNames, fileName);
# }
#
# # Save this tree in a postscript file using a particular font name and size#/
# public void save(@Nullable List<String> ruleNames, String fileName,
# String fontName, int fontSize)
# throws IOException
# {
# Trees.writePS(this, ruleNames, fileName, fontName, fontSize);
# }
#
# # Print out a whole tree, not just a node, in LISP format
# # (root child1 .. childN). Print just a node if this is a leaf.
# # We have to know the recognizer so we can get rule names.
# #/
# @Override
# public String toStringTree(@Nullable Parser recog) {
# return Trees.toStringTree(this, recog);
# }
#
# Print out a whole tree, not just a node, in LISP format
# (root child1 .. childN). Print just a node if this is a leaf.
#
def toStringTree(self, ruleNames:list=None, recog:Parser=None):
return Trees.toStringTree(self, ruleNames=ruleNames, recog=recog)
# }
#
# @Override
# public String toStringTree() {
# return toStringTree((List<String>)null);
# }
#
def __str__(self):
return self.toString(None, None)
# @Override
# public String toString() {
# return toString((List<String>)null, (RuleContext)null);
# }
#
# public final String toString(@Nullable Recognizer<?,?> recog) {
# return toString(recog, ParserRuleContext.EMPTY);
# }
#
# public final String toString(@Nullable List<String> ruleNames) {
# return toString(ruleNames, null);
# }
#
# // recog null unless ParserRuleContext, in which case we use subclass toString(...)
# public String toString(@Nullable Recognizer<?,?> recog, @Nullable RuleContext stop) {
# String[] ruleNames = recog != null ? recog.getRuleNames() : null;
# List<String> ruleNamesList = ruleNames != null ? Arrays.asList(ruleNames) : null;
# return toString(ruleNamesList, stop);
# }
def toString(self, ruleNames:list, stop:RuleContext)->str:
with StringIO() as buf:
p = self
buf.write("[")
while p is not None and p is not stop:
if ruleNames is None:
if not p.isEmpty():
buf.write(str(p.invokingState))
else:
ri = p.getRuleIndex()
ruleName = ruleNames[ri] if ri >= 0 and ri < len(ruleNames) else str(ri)
buf.write(ruleName)
if p.parentCtx is not None and (ruleNames is not None or not p.parentCtx.isEmpty()):
buf.write(" ")
p = p.parentCtx
buf.write("]")
return buf.getvalue()
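# For illustration (hypothetical parser and rule names): with ruleNames
# supplied, toString() renders the invocation stack innermost-first, e.g.
#
#   ctx.toString(parser.ruleNames, None)   # -> "[atom expr prog]"
#   ctx.toString(None, None)               # -> "[10 6]" (invoking state numbers)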
# File: antlr4-python3-runtime-4.9.1/src/antlr4/StdinStream.py
import codecs
import sys
from antlr4.InputStream import InputStream
class StdinStream(InputStream):
def __init__(self, encoding:str='ascii', errors:str='strict') -> None:
bytes = sys.stdin.buffer.read()
data = codecs.decode(bytes, encoding, errors)
super().__init__(data)
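# Usage sketch (MyLexer is a hypothetical generated lexer; CommonTokenStream
# would need to be imported):
#
#   stream = StdinStream(encoding='utf-8')   # reads and decodes all of stdin
#   lexer = MyLexer(stream)
#   tokens = CommonTokenStream(lexer)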
# File: antlr4-python3-runtime-4.9.1/src/antlr4/Token.py
# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
#
# A token has properties: text, type, line, character position in the line
# (so we can ignore tabs), token channel, index, and source from which
# we obtained this token.
from io import StringIO
class Token (object):
__slots__ = ('source', 'type', 'channel', 'start', 'stop', 'tokenIndex', 'line', 'column', '_text')
INVALID_TYPE = 0
# During lookahead operations, this "token" signifies we hit the rule-end
# ATN state and did not follow it despite needing to.
EPSILON = -2
MIN_USER_TOKEN_TYPE = 1
EOF = -1
# All tokens go to the parser (unless skip() is called in that rule)
# on a particular "channel". The parser tunes to a particular channel
# so that whitespace etc... can go to the parser on a "hidden" channel.
DEFAULT_CHANNEL = 0
# Anything on a channel other than DEFAULT_CHANNEL is not parsed
# by the parser.
HIDDEN_CHANNEL = 1
def __init__(self):
self.source = None
self.type = None # token type of the token
self.channel = None # The parser ignores everything not on DEFAULT_CHANNEL
self.start = None # optional; return -1 if not implemented.
self.stop = None # optional; return -1 if not implemented.
self.tokenIndex = None # from 0..n-1 of the token object in the input stream
self.line = None # line=1..n of the 1st character
        self.column = None # char position within the line at which it occurs, 0..n-1
self._text = None # text of the token.
@property
def text(self):
return self._text
# Explicitly set the text for this token. If {@code text} is not
# {@code null}, then {@link #getText} will return this value rather than
# extracting the text from the input.
#
# @param text The explicit text of the token, or {@code null} if the text
# should be obtained from the input along with the start and stop indexes
# of the token.
@text.setter
def text(self, text:str):
self._text = text
def getTokenSource(self):
return self.source[0]
def getInputStream(self):
return self.source[1]
class CommonToken(Token):
# An empty {@link Pair} which is used as the default value of
# {@link #source} for tokens that do not have a source.
EMPTY_SOURCE = (None, None)
def __init__(self, source:tuple = EMPTY_SOURCE, type:int = None, channel:int=Token.DEFAULT_CHANNEL, start:int=-1, stop:int=-1):
super().__init__()
self.source = source
self.type = type
self.channel = channel
self.start = start
self.stop = stop
self.tokenIndex = -1
if source[0] is not None:
self.line = source[0].line
self.column = source[0].column
else:
self.column = -1
# Constructs a new {@link CommonToken} as a copy of this token.
#
# The newly constructed token shares a reference to the {@link #text}
# field and the {@link Pair} stored in {@link #source}; all other fields
# are copied by value.
#
def clone(self):
t = CommonToken(self.source, self.type, self.channel, self.start, self.stop)
t.tokenIndex = self.tokenIndex
t.line = self.line
t.column = self.column
t.text = self.text
return t
@property
def text(self):
if self._text is not None:
return self._text
input = self.getInputStream()
if input is None:
return None
n = input.size
if self.start < n and self.stop < n:
return input.getText(self.start, self.stop)
else:
return ""
@text.setter
def text(self, text:str):
self._text = text
def __str__(self):
with StringIO() as buf:
buf.write("[@")
buf.write(str(self.tokenIndex))
buf.write(",")
buf.write(str(self.start))
buf.write(":")
buf.write(str(self.stop))
buf.write("='")
txt = self.text
if txt is not None:
txt = txt.replace("\n","\\n")
txt = txt.replace("\r","\\r")
txt = txt.replace("\t","\\t")
else:
txt = ""
buf.write(txt)
buf.write("',<")
buf.write(str(self.type))
buf.write(">")
if self.channel > 0:
buf.write(",channel=")
buf.write(str(self.channel))
buf.write(",")
buf.write(str(self.line))
buf.write(":")
buf.write(str(self.column))
buf.write("]")
return buf.getvalue()
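# For illustration, with assumed values: a CommonToken prints as
#
#   [@5,12:14='foo',<7>,2:0]
#
# i.e. token index 5, character range 12..14, text 'foo', type 7, line 2,
# column 0; ",channel=N" is appended only for non-default channels.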
# File: antlr4-python3-runtime-4.9.1/src/antlr4/TokenStreamRewriter.py
#
# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
#
from io import StringIO
from antlr4.Token import Token
from antlr4.CommonTokenStream import CommonTokenStream
class TokenStreamRewriter(object):
__slots__ = ('tokens', 'programs', 'lastRewriteTokenIndexes')
DEFAULT_PROGRAM_NAME = "default"
PROGRAM_INIT_SIZE = 100
MIN_TOKEN_INDEX = 0
def __init__(self, tokens):
"""
:type tokens: antlr4.BufferedTokenStream.BufferedTokenStream
:param tokens:
:return:
"""
super(TokenStreamRewriter, self).__init__()
self.tokens = tokens
self.programs = {self.DEFAULT_PROGRAM_NAME: []}
self.lastRewriteTokenIndexes = {}
def getTokenStream(self):
return self.tokens
def rollback(self, instruction_index, program_name):
ins = self.programs.get(program_name, None)
if ins:
self.programs[program_name] = ins[self.MIN_TOKEN_INDEX: instruction_index]
def deleteProgram(self, program_name=DEFAULT_PROGRAM_NAME):
self.rollback(self.MIN_TOKEN_INDEX, program_name)
def insertAfterToken(self, token, text, program_name=DEFAULT_PROGRAM_NAME):
self.insertAfter(token.tokenIndex, text, program_name)
def insertAfter(self, index, text, program_name=DEFAULT_PROGRAM_NAME):
op = self.InsertAfterOp(self.tokens, index + 1, text)
rewrites = self.getProgram(program_name)
op.instructionIndex = len(rewrites)
rewrites.append(op)
def insertBeforeIndex(self, index, text):
self.insertBefore(self.DEFAULT_PROGRAM_NAME, index, text)
def insertBeforeToken(self, token, text, program_name=DEFAULT_PROGRAM_NAME):
self.insertBefore(program_name, token.tokenIndex, text)
def insertBefore(self, program_name, index, text):
op = self.InsertBeforeOp(self.tokens, index, text)
rewrites = self.getProgram(program_name)
op.instructionIndex = len(rewrites)
rewrites.append(op)
def replaceIndex(self, index, text):
self.replace(self.DEFAULT_PROGRAM_NAME, index, index, text)
def replaceRange(self, from_idx, to_idx, text):
self.replace(self.DEFAULT_PROGRAM_NAME, from_idx, to_idx, text)
def replaceSingleToken(self, token, text):
self.replace(self.DEFAULT_PROGRAM_NAME, token.tokenIndex, token.tokenIndex, text)
def replaceRangeTokens(self, from_token, to_token, text, program_name=DEFAULT_PROGRAM_NAME):
self.replace(program_name, from_token.tokenIndex, to_token.tokenIndex, text)
def replace(self, program_name, from_idx, to_idx, text):
if any((from_idx > to_idx, from_idx < 0, to_idx < 0, to_idx >= len(self.tokens.tokens))):
raise ValueError(
'replace: range invalid: {}..{}(size={})'.format(from_idx, to_idx, len(self.tokens.tokens)))
op = self.ReplaceOp(from_idx, to_idx, self.tokens, text)
rewrites = self.getProgram(program_name)
op.instructionIndex = len(rewrites)
rewrites.append(op)
def deleteToken(self, token):
self.delete(self.DEFAULT_PROGRAM_NAME, token, token)
def deleteIndex(self, index):
self.delete(self.DEFAULT_PROGRAM_NAME, index, index)
def delete(self, program_name, from_idx, to_idx):
if isinstance(from_idx, Token):
self.replace(program_name, from_idx.tokenIndex, to_idx.tokenIndex, "")
else:
self.replace(program_name, from_idx, to_idx, "")
def lastRewriteTokenIndex(self, program_name=DEFAULT_PROGRAM_NAME):
return self.lastRewriteTokenIndexes.get(program_name, -1)
def setLastRewriteTokenIndex(self, program_name, i):
self.lastRewriteTokenIndexes[program_name] = i
def getProgram(self, program_name):
return self.programs.setdefault(program_name, [])
def getDefaultText(self):
return self.getText(self.DEFAULT_PROGRAM_NAME, 0, len(self.tokens.tokens) - 1)
def getText(self, program_name, start:int, stop:int):
"""
:return: the text in tokens[start, stop](closed interval)
"""
rewrites = self.programs.get(program_name)
# ensure start/end are in range
if stop > len(self.tokens.tokens) - 1:
stop = len(self.tokens.tokens) - 1
if start < 0:
start = 0
# if no instructions to execute
if not rewrites: return self.tokens.getText(start, stop)
buf = StringIO()
indexToOp = self._reduceToSingleOperationPerIndex(rewrites)
i = start
while all((i <= stop, i < len(self.tokens.tokens))):
op = indexToOp.pop(i, None)
token = self.tokens.get(i)
if op is None:
if token.type != Token.EOF: buf.write(token.text)
i += 1
else:
i = op.execute(buf)
if stop == len(self.tokens.tokens)-1:
for op in indexToOp.values():
if op.index >= len(self.tokens.tokens)-1: buf.write(op.text)
return buf.getvalue()
def _reduceToSingleOperationPerIndex(self, rewrites):
# Walk replaces
for i, rop in enumerate(rewrites):
if any((rop is None, not isinstance(rop, TokenStreamRewriter.ReplaceOp))):
continue
# Wipe prior inserts within range
inserts = [op for op in rewrites[:i] if isinstance(op, TokenStreamRewriter.InsertBeforeOp)]
for iop in inserts:
if iop.index == rop.index:
rewrites[iop.instructionIndex] = None
rop.text = '{}{}'.format(iop.text, rop.text)
elif all((iop.index > rop.index, iop.index <= rop.last_index)):
rewrites[iop.instructionIndex] = None
# Drop any prior replaces contained within
prevReplaces = [op for op in rewrites[:i] if isinstance(op, TokenStreamRewriter.ReplaceOp)]
for prevRop in prevReplaces:
if all((prevRop.index >= rop.index, prevRop.last_index <= rop.last_index)):
rewrites[prevRop.instructionIndex] = None
continue
                isDisjoint = any((prevRop.last_index < rop.index, prevRop.index > rop.last_index))
if all((prevRop.text is None, rop.text is None, not isDisjoint)):
rewrites[prevRop.instructionIndex] = None
                    rop.index = min(prevRop.index, rop.index)
                    rop.last_index = max(prevRop.last_index, rop.last_index)
elif (not(isDisjoint)):
raise ValueError("replace op boundaries of {} overlap with previous {}".format(rop, prevRop))
# Walk inserts
for i, iop in enumerate(rewrites):
if any((iop is None, not isinstance(iop, TokenStreamRewriter.InsertBeforeOp))):
continue
prevInserts = [op for op in rewrites[:i] if isinstance(op, TokenStreamRewriter.InsertBeforeOp)]
            for prevIop in prevInserts:
                if prevIop.index == iop.index and type(prevIop) is TokenStreamRewriter.InsertBeforeOp:
                    iop.text += prevIop.text
                    rewrites[prevIop.instructionIndex] = None
                elif prevIop.index == iop.index and type(prevIop) is TokenStreamRewriter.InsertAfterOp:
                    iop.text = prevIop.text + iop.text
                    rewrites[prevIop.instructionIndex] = None
# look for replaces where iop.index is in range; error
prevReplaces = [op for op in rewrites[:i] if isinstance(op, TokenStreamRewriter.ReplaceOp)]
for rop in prevReplaces:
if iop.index == rop.index:
rop.text = iop.text + rop.text
rewrites[i] = None
continue
if all((iop.index >= rop.index, iop.index <= rop.last_index)):
raise ValueError("insert op {} within boundaries of previous {}".format(iop, rop))
reduced = {}
for i, op in enumerate(rewrites):
if op is None: continue
if reduced.get(op.index): raise ValueError('should be only one op per index')
reduced[op.index] = op
return reduced
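    # Worked example (assumed ops, for illustration): given
    #   rewrites = [InsertBeforeOp(index=2, text="x"), ReplaceOp(2..4, text="y")]
    # the replace absorbs the prior insert at the same index, so the reduced
    # map holds a single op per index: {2: ReplaceOp(2..4, text="xy")}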
class RewriteOperation(object):
__slots__ = ('tokens', 'index', 'text', 'instructionIndex')
def __init__(self, tokens, index, text=""):
"""
:type tokens: CommonTokenStream
:param tokens:
:param index:
:param text:
:return:
"""
self.tokens = tokens
self.index = index
self.text = text
self.instructionIndex = 0
def execute(self, buf):
"""
:type buf: StringIO.StringIO
:param buf:
:return:
"""
return self.index
def __str__(self):
return '<{}@{}:"{}">'.format(self.__class__.__name__, self.tokens.get(self.index), self.text)
class InsertBeforeOp(RewriteOperation):
def __init__(self, tokens, index, text=""):
super(TokenStreamRewriter.InsertBeforeOp, self).__init__(tokens, index, text)
def execute(self, buf):
buf.write(self.text)
if self.tokens.get(self.index).type != Token.EOF:
buf.write(self.tokens.get(self.index).text)
return self.index + 1
class InsertAfterOp(InsertBeforeOp):
pass
class ReplaceOp(RewriteOperation):
__slots__ = 'last_index'
def __init__(self, from_idx, to_idx, tokens, text):
super(TokenStreamRewriter.ReplaceOp, self).__init__(tokens, from_idx, text)
self.last_index = to_idx
def execute(self, buf):
if self.text:
buf.write(self.text)
return self.last_index + 1
        def __str__(self):
            if self.text:
                return '<ReplaceOp@{}..{}:"{}">'.format(self.tokens.get(self.index), self.tokens.get(self.last_index),
                                                        self.text)
            # mirror the Java runtime: a replace with no text is a delete
            return '<DeleteOp@{}..{}>'.format(self.tokens.get(self.index), self.tokens.get(self.last_index))
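# Usage sketch (MyLexer is a hypothetical generated lexer; InputStream and
# CommonTokenStream are imported elsewhere in this package):
#
#   lexer = MyLexer(InputStream("int x = 5;"))
#   tokens = CommonTokenStream(lexer)
#   tokens.fill()
#   rewriter = TokenStreamRewriter(tokens)
#   rewriter.insertBeforeIndex(0, "// generated\n")
#   rewriter.replaceIndex(3, "y")        # rewrite a single token's text
#   print(rewriter.getDefaultText())     # the underlying stream is untouched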
# File: antlr4-python3-runtime-4.9.1/src/antlr4/Utils.py
# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
#
from io import StringIO
def str_list(val):
with StringIO() as buf:
buf.write('[')
first = True
for item in val:
if not first:
buf.write(', ')
buf.write(str(item))
first = False
buf.write(']')
return buf.getvalue()
def escapeWhitespace(s:str, escapeSpaces:bool):
with StringIO() as buf:
for c in s:
if c==' ' and escapeSpaces:
buf.write('\u00B7')
elif c=='\t':
buf.write("\\t")
elif c=='\n':
buf.write("\\n")
elif c=='\r':
buf.write("\\r")
else:
buf.write(c)
return buf.getvalue()
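# For illustration:
#
#   escapeWhitespace("a\tb c", True)    # -> 'a\\tb\u00b7c' (tab escaped, space as middle dot)
#   escapeWhitespace("a\nb", False)     # -> 'a\\nb'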
# File: antlr4-python3-runtime-4.9.1/src/antlr4/__init__.py
from antlr4.Token import Token
from antlr4.InputStream import InputStream
from antlr4.FileStream import FileStream
from antlr4.StdinStream import StdinStream
from antlr4.BufferedTokenStream import TokenStream
from antlr4.CommonTokenStream import CommonTokenStream
from antlr4.Lexer import Lexer
from antlr4.Parser import Parser
from antlr4.dfa.DFA import DFA
from antlr4.atn.ATN import ATN
from antlr4.atn.ATNDeserializer import ATNDeserializer
from antlr4.atn.LexerATNSimulator import LexerATNSimulator
from antlr4.atn.ParserATNSimulator import ParserATNSimulator
from antlr4.atn.PredictionMode import PredictionMode
from antlr4.PredictionContext import PredictionContextCache
from antlr4.ParserRuleContext import RuleContext, ParserRuleContext
from antlr4.tree.Tree import ParseTreeListener, ParseTreeVisitor, ParseTreeWalker, TerminalNode, ErrorNode, RuleNode
from antlr4.error.Errors import RecognitionException, IllegalStateException, NoViableAltException
from antlr4.error.ErrorStrategy import BailErrorStrategy
from antlr4.error.DiagnosticErrorListener import DiagnosticErrorListener
from antlr4.Utils import str_list
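# A typical driver built from these exports (MyGrammarLexer, MyGrammarParser
# and startRule are hypothetical generated names):
#
#   def main(path):
#       input = FileStream(path, encoding='utf-8')
#       lexer = MyGrammarLexer(input)
#       tokens = CommonTokenStream(lexer)
#       parser = MyGrammarParser(tokens)
#       tree = parser.startRule()
#       print(tree.toStringTree(recog=parser))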
# File: antlr4-python3-runtime-4.9.1/src/antlr4/atn/ATN.py
# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
#/
from antlr4.IntervalSet import IntervalSet
from antlr4.RuleContext import RuleContext
from antlr4.Token import Token
from antlr4.atn.ATNType import ATNType
from antlr4.atn.ATNState import ATNState, DecisionState
class ATN(object):
__slots__ = (
'grammarType', 'maxTokenType', 'states', 'decisionToState',
'ruleToStartState', 'ruleToStopState', 'modeNameToStartState',
'ruleToTokenType', 'lexerActions', 'modeToStartState'
)
INVALID_ALT_NUMBER = 0
# Used for runtime deserialization of ATNs from strings#/
def __init__(self, grammarType:ATNType , maxTokenType:int ):
# The type of the ATN.
self.grammarType = grammarType
# The maximum value for any symbol recognized by a transition in the ATN.
self.maxTokenType = maxTokenType
self.states = []
# Each subrule/rule is a decision point and we must track them so we
# can go back later and build DFA predictors for them. This includes
# all the rules, subrules, optional blocks, ()+, ()* etc...
self.decisionToState = []
# Maps from rule index to starting state number.
self.ruleToStartState = []
# Maps from rule index to stop state number.
self.ruleToStopState = None
self.modeNameToStartState = dict()
# For lexer ATNs, this maps the rule index to the resulting token type.
# For parser ATNs, this maps the rule index to the generated bypass token
# type if the
# {@link ATNDeserializationOptions#isGenerateRuleBypassTransitions}
# deserialization option was specified; otherwise, this is {@code null}.
self.ruleToTokenType = None
# For lexer ATNs, this is an array of {@link LexerAction} objects which may
# be referenced by action transitions in the ATN.
self.lexerActions = None
self.modeToStartState = []
# Compute the set of valid tokens that can occur starting in state {@code s}.
# If {@code ctx} is null, the set of tokens will not include what can follow
# the rule surrounding {@code s}. In other words, the set will be
# restricted to tokens reachable staying within {@code s}'s rule.
def nextTokensInContext(self, s:ATNState, ctx:RuleContext):
from antlr4.LL1Analyzer import LL1Analyzer
anal = LL1Analyzer(self)
return anal.LOOK(s, ctx=ctx)
# Compute the set of valid tokens that can occur starting in {@code s} and
# staying in same rule. {@link Token#EPSILON} is in set if we reach end of
# rule.
def nextTokensNoContext(self, s:ATNState):
if s.nextTokenWithinRule is not None:
return s.nextTokenWithinRule
s.nextTokenWithinRule = self.nextTokensInContext(s, None)
s.nextTokenWithinRule.readOnly = True
return s.nextTokenWithinRule
def nextTokens(self, s:ATNState, ctx:RuleContext = None):
        if ctx is None:
return self.nextTokensNoContext(s)
else:
return self.nextTokensInContext(s, ctx)
def addState(self, state:ATNState):
if state is not None:
state.atn = self
state.stateNumber = len(self.states)
self.states.append(state)
def removeState(self, state:ATNState):
self.states[state.stateNumber] = None # just free mem, don't shift states in list
def defineDecisionState(self, s:DecisionState):
self.decisionToState.append(s)
s.decision = len(self.decisionToState)-1
return s.decision
def getDecisionState(self, decision:int):
if len(self.decisionToState)==0:
return None
else:
return self.decisionToState[decision]
# Computes the set of input symbols which could follow ATN state number
# {@code stateNumber} in the specified full {@code context}. This method
# considers the complete parser context, but does not evaluate semantic
# predicates (i.e. all predicates encountered during the calculation are
# assumed true). If a path in the ATN exists from the starting state to the
# {@link RuleStopState} of the outermost context without matching any
# symbols, {@link Token#EOF} is added to the returned set.
#
# If {@code context} is {@code null}, it is treated as
# {@link ParserRuleContext#EMPTY}.
#
# @param stateNumber the ATN state number
# @param context the full parse context
# @return The set of potentially valid input symbols which could follow the
# specified state in the specified context.
# @throws IllegalArgumentException if the ATN does not contain a state with
# number {@code stateNumber}
#/
def getExpectedTokens(self, stateNumber:int, ctx:RuleContext ):
if stateNumber < 0 or stateNumber >= len(self.states):
raise Exception("Invalid state number.")
s = self.states[stateNumber]
following = self.nextTokens(s)
if Token.EPSILON not in following:
return following
expected = IntervalSet()
expected.addSet(following)
expected.removeOne(Token.EPSILON)
        while ctx is not None and ctx.invokingState >= 0 and Token.EPSILON in following:
invokingState = self.states[ctx.invokingState]
rt = invokingState.transitions[0]
following = self.nextTokens(rt.followState)
expected.addSet(following)
expected.removeOne(Token.EPSILON)
ctx = ctx.parentCtx
if Token.EPSILON in following:
expected.addOne(Token.EOF)
return expected
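# Usage sketch (illustrative): error strategies use this to report what could
# have matched at the failure point, along the lines of
#
#   expected = parser._interp.atn.getExpectedTokens(parser.state, parser._ctx)
#
# where parser is a Parser instance and the result is an IntervalSet of token types.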
# File: antlr4-python3-runtime-4.9.1/src/antlr4/atn/ATNConfig.py
#
# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
#/
# A tuple: (ATN state, predicted alt, syntactic, semantic context).
# The syntactic context is a graph-structured stack node whose
# path(s) to the root is the rule invocation(s)
# chain used to arrive at the state. The semantic context is
# the tree of semantic predicates encountered before reaching
# an ATN state.
#/
from io import StringIO
from antlr4.PredictionContext import PredictionContext
from antlr4.atn.ATNState import ATNState, DecisionState
from antlr4.atn.LexerActionExecutor import LexerActionExecutor
from antlr4.atn.SemanticContext import SemanticContext
# need a forward declaration
ATNConfig = None
class ATNConfig(object):
__slots__ = (
'state', 'alt', 'context', 'semanticContext', 'reachesIntoOuterContext',
'precedenceFilterSuppressed'
)
def __init__(self, state:ATNState=None, alt:int=None, context:PredictionContext=None, semantic:SemanticContext=None, config:ATNConfig=None):
if config is not None:
if state is None:
state = config.state
if alt is None:
alt = config.alt
if context is None:
context = config.context
if semantic is None:
semantic = config.semanticContext
if semantic is None:
semantic = SemanticContext.NONE
# The ATN state associated with this configuration#/
self.state = state
# What alt (or lexer rule) is predicted by this configuration#/
self.alt = alt
# The stack of invoking states leading to the rule/states associated
# with this config. We track only those contexts pushed during
# execution of the ATN simulator.
self.context = context
self.semanticContext = semantic
# We cannot execute predicates dependent upon local context unless
# we know for sure we are in the correct context. Because there is
# no way to do this efficiently, we simply cannot evaluate
# dependent predicates unless we are in the rule that initially
# invokes the ATN simulator.
#
# closure() tracks the depth of how far we dip into the
# outer context: depth > 0. Note that it may not be totally
# accurate depth since I don't ever decrement. TODO: make it a boolean then
self.reachesIntoOuterContext = 0 if config is None else config.reachesIntoOuterContext
self.precedenceFilterSuppressed = False if config is None else config.precedenceFilterSuppressed
# An ATN configuration is equal to another if both have
# the same state, they predict the same alternative, and
# syntactic/semantic contexts are the same.
#/
def __eq__(self, other):
if self is other:
return True
elif not isinstance(other, ATNConfig):
return False
else:
return self.state.stateNumber==other.state.stateNumber \
and self.alt==other.alt \
and ((self.context is other.context) or (self.context==other.context)) \
and self.semanticContext==other.semanticContext \
and self.precedenceFilterSuppressed==other.precedenceFilterSuppressed
def __hash__(self):
return hash((self.state.stateNumber, self.alt, self.context, self.semanticContext))
def hashCodeForConfigSet(self):
return hash((self.state.stateNumber, self.alt, hash(self.semanticContext)))
def equalsForConfigSet(self, other):
if self is other:
return True
elif not isinstance(other, ATNConfig):
return False
else:
return self.state.stateNumber==other.state.stateNumber \
and self.alt==other.alt \
and self.semanticContext==other.semanticContext
def __str__(self):
with StringIO() as buf:
buf.write('(')
buf.write(str(self.state))
buf.write(",")
buf.write(str(self.alt))
if self.context is not None:
buf.write(",[")
buf.write(str(self.context))
buf.write("]")
if self.semanticContext is not None and self.semanticContext is not SemanticContext.NONE:
buf.write(",")
buf.write(str(self.semanticContext))
if self.reachesIntoOuterContext>0:
buf.write(",up=")
buf.write(str(self.reachesIntoOuterContext))
buf.write(')')
return buf.getvalue()
# need a forward declaration
LexerATNConfig = None
class LexerATNConfig(ATNConfig):
__slots__ = ('lexerActionExecutor', 'passedThroughNonGreedyDecision')
def __init__(self, state:ATNState, alt:int=None, context:PredictionContext=None, semantic:SemanticContext=SemanticContext.NONE,
lexerActionExecutor:LexerActionExecutor=None, config:LexerATNConfig=None):
super().__init__(state=state, alt=alt, context=context, semantic=semantic, config=config)
if config is not None:
if lexerActionExecutor is None:
lexerActionExecutor = config.lexerActionExecutor
# This is the backing field for {@link #getLexerActionExecutor}.
self.lexerActionExecutor = lexerActionExecutor
self.passedThroughNonGreedyDecision = False if config is None else self.checkNonGreedyDecision(config, state)
def __hash__(self):
return hash((self.state.stateNumber, self.alt, self.context,
self.semanticContext, self.passedThroughNonGreedyDecision,
self.lexerActionExecutor))
def __eq__(self, other):
if self is other:
return True
elif not isinstance(other, LexerATNConfig):
return False
if self.passedThroughNonGreedyDecision != other.passedThroughNonGreedyDecision:
return False
if not(self.lexerActionExecutor == other.lexerActionExecutor):
return False
return super().__eq__(other)
def hashCodeForConfigSet(self):
return hash(self)
def equalsForConfigSet(self, other):
return self==other
def checkNonGreedyDecision(self, source:LexerATNConfig, target:ATNState):
return source.passedThroughNonGreedyDecision \
or isinstance(target, DecisionState) and target.nonGreedy
# File: antlr4-python3-runtime-4.9.1/src/antlr4/atn/ATNConfigSet.py
#
# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
#
# Specialized {@link Set}{@code <}{@link ATNConfig}{@code >} that can track
# info about the set, with support for combining similar configurations using a
# graph-structured stack.
#/
from io import StringIO
from functools import reduce
from antlr4.PredictionContext import PredictionContext, merge
from antlr4.Utils import str_list
from antlr4.atn.ATN import ATN
from antlr4.atn.ATNConfig import ATNConfig
from antlr4.atn.SemanticContext import SemanticContext
from antlr4.error.Errors import UnsupportedOperationException, IllegalStateException
ATNSimulator = None
class ATNConfigSet(object):
__slots__ = (
'configLookup', 'fullCtx', 'readonly', 'configs', 'uniqueAlt',
'conflictingAlts', 'hasSemanticContext', 'dipsIntoOuterContext',
'cachedHashCode'
)
#
# The reason that we need this is because we don't want the hash map to use
# the standard hash code and equals. We need all configurations with the same
# {@code (s,i,_,semctx)} to be equal. Unfortunately, this key effectively doubles
# the number of objects associated with ATNConfigs. The other solution is to
# use a hash table that lets us specify the equals/hashcode operation.
def __init__(self, fullCtx:bool=True):
# All configs but hashed by (s, i, _, pi) not including context. Wiped out
# when we go readonly as this set becomes a DFA state.
self.configLookup = dict()
# Indicates that this configuration set is part of a full context
# LL prediction. It will be used to determine how to merge $. With SLL
# it's a wildcard whereas it is not for LL context merge.
self.fullCtx = fullCtx
# Indicates that the set of configurations is read-only. Do not
# allow any code to manipulate the set; DFA states will point at
# the sets and they must not change. This does not protect the other
# fields; in particular, conflictingAlts is set after
# we've made this readonly.
self.readonly = False
# Track the elements as they are added to the set; supports get(i)#/
self.configs = []
# TODO: these fields make me pretty uncomfortable but nice to pack up info together, saves recomputation
# TODO: can we track conflicts as they are added to save scanning configs later?
self.uniqueAlt = 0
self.conflictingAlts = None
# Used in parser and lexer. In lexer, it indicates we hit a pred
# while computing a closure operation. Don't make a DFA state from this.
self.hasSemanticContext = False
self.dipsIntoOuterContext = False
self.cachedHashCode = -1
def __iter__(self):
return self.configs.__iter__()
# Adding a new config means merging contexts with existing configs for
# {@code (s, i, pi, _)}, where {@code s} is the
# {@link ATNConfig#state}, {@code i} is the {@link ATNConfig#alt}, and
# {@code pi} is the {@link ATNConfig#semanticContext}. We use
# {@code (s,i,pi)} as key.
#
# This method updates {@link #dipsIntoOuterContext} and
# {@link #hasSemanticContext} when necessary.
#/
def add(self, config:ATNConfig, mergeCache=None):
if self.readonly:
raise Exception("This set is readonly")
if config.semanticContext is not SemanticContext.NONE:
self.hasSemanticContext = True
if config.reachesIntoOuterContext > 0:
self.dipsIntoOuterContext = True
existing = self.getOrAdd(config)
if existing is config:
self.cachedHashCode = -1
self.configs.append(config) # track order here
return True
# a previous (s,i,pi,_), merge with it and save result
rootIsWildcard = not self.fullCtx
merged = merge(existing.context, config.context, rootIsWildcard, mergeCache)
# no need to check for existing.context, config.context in cache
# since only way to create new graphs is "call rule" and here.
# We cache at both places.
existing.reachesIntoOuterContext = max(existing.reachesIntoOuterContext, config.reachesIntoOuterContext)
# make sure to preserve the precedence filter suppression during the merge
if config.precedenceFilterSuppressed:
existing.precedenceFilterSuppressed = True
existing.context = merged # replace context; no need to alt mapping
return True
def getOrAdd(self, config:ATNConfig):
h = config.hashCodeForConfigSet()
l = self.configLookup.get(h, None)
if l is not None:
r = next((cfg for cfg in l if config.equalsForConfigSet(cfg)), None)
if r is not None:
return r
if l is None:
l = [config]
self.configLookup[h] = l
else:
l.append(config)
return config
def getStates(self):
return set(c.state for c in self.configs)
def getPredicates(self):
return list(cfg.semanticContext for cfg in self.configs if cfg.semanticContext!=SemanticContext.NONE)
def get(self, i:int):
return self.configs[i]
def optimizeConfigs(self, interpreter:ATNSimulator):
if self.readonly:
raise IllegalStateException("This set is readonly")
if len(self.configs)==0:
return
for config in self.configs:
config.context = interpreter.getCachedContext(config.context)
def addAll(self, coll:list):
for c in coll:
self.add(c)
return False
def __eq__(self, other):
if self is other:
return True
elif not isinstance(other, ATNConfigSet):
return False
same = self.configs is not None and \
self.configs==other.configs and \
self.fullCtx == other.fullCtx and \
self.uniqueAlt == other.uniqueAlt and \
self.conflictingAlts == other.conflictingAlts and \
self.hasSemanticContext == other.hasSemanticContext and \
self.dipsIntoOuterContext == other.dipsIntoOuterContext
return same
def __hash__(self):
if self.readonly:
if self.cachedHashCode == -1:
self.cachedHashCode = self.hashConfigs()
return self.cachedHashCode
return self.hashConfigs()
def hashConfigs(self):
return reduce(lambda h, cfg: hash((h, cfg)), self.configs, 0)
def __len__(self):
return len(self.configs)
def isEmpty(self):
return len(self.configs)==0
def __contains__(self, config):
if self.configLookup is None:
raise UnsupportedOperationException("This method is not implemented for readonly sets.")
h = config.hashCodeForConfigSet()
l = self.configLookup.get(h, None)
if l is not None:
for c in l:
if config.equalsForConfigSet(c):
return True
return False
def clear(self):
if self.readonly:
raise IllegalStateException("This set is readonly")
self.configs.clear()
self.cachedHashCode = -1
self.configLookup.clear()
def setReadonly(self, readonly:bool):
self.readonly = readonly
self.configLookup = None # can't mod, no need for lookup cache
def __str__(self):
with StringIO() as buf:
buf.write(str_list(self.configs))
if self.hasSemanticContext:
buf.write(",hasSemanticContext=")
buf.write(str(self.hasSemanticContext))
if self.uniqueAlt!=ATN.INVALID_ALT_NUMBER:
buf.write(",uniqueAlt=")
buf.write(str(self.uniqueAlt))
if self.conflictingAlts is not None:
buf.write(",conflictingAlts=")
buf.write(str(self.conflictingAlts))
if self.dipsIntoOuterContext:
buf.write(",dipsIntoOuterContext")
return buf.getvalue()
class OrderedATNConfigSet(ATNConfigSet):
def __init__(self):
super().__init__()
# File: antlr4-python3-runtime-4.9.1/src/antlr4/atn/ATNDeserializationOptions.py
# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
# need a forward declaration
ATNDeserializationOptions = None
class ATNDeserializationOptions(object):
__slots__ = ('readOnly', 'verifyATN', 'generateRuleBypassTransitions')
defaultOptions = None
def __init__(self, copyFrom:ATNDeserializationOptions = None):
self.readOnly = False
self.verifyATN = True if copyFrom is None else copyFrom.verifyATN
self.generateRuleBypassTransitions = False if copyFrom is None else copyFrom.generateRuleBypassTransitions
def __setattr__(self, key, value):
if key!="readOnly" and self.readOnly:
raise Exception("The object is read only.")
super(type(self), self).__setattr__(key,value)
ATNDeserializationOptions.defaultOptions = ATNDeserializationOptions()
ATNDeserializationOptions.defaultOptions.readOnly = True
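# For illustration: defaultOptions is read-only, so take a writable copy to
# tweak settings before deserialization (assumes ATNDeserializer is imported):
#
#   opts = ATNDeserializationOptions(ATNDeserializationOptions.defaultOptions)
#   opts.verifyATN = False               # copies start out writable
#   deserializer = ATNDeserializer(opts)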
# File: antlr4-python3-runtime-4.9.1/src/antlr4/atn/ATNDeserializer.py
# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
#/
from uuid import UUID
from io import StringIO
from typing import Callable
from antlr4.Token import Token
from antlr4.atn.ATN import ATN
from antlr4.atn.ATNType import ATNType
from antlr4.atn.ATNState import *
from antlr4.atn.Transition import *
from antlr4.atn.LexerAction import *
from antlr4.atn.ATNDeserializationOptions import ATNDeserializationOptions
# This is the earliest supported serialized UUID.
BASE_SERIALIZED_UUID = UUID("AADB8D7E-AEEF-4415-AD2B-8204D6CF042E")
# This UUID indicates the serialized ATN contains two sets of
# IntervalSets, where the second set's values are encoded as
# 32-bit integers to support the full Unicode SMP range up to U+10FFFF.
ADDED_UNICODE_SMP = UUID("59627784-3BE5-417A-B9EB-8131A7286089")
# This list contains all of the currently supported UUIDs, ordered by when
# the feature first appeared in this branch.
SUPPORTED_UUIDS = [ BASE_SERIALIZED_UUID, ADDED_UNICODE_SMP ]
SERIALIZED_VERSION = 3
# This is the current serialized UUID.
SERIALIZED_UUID = ADDED_UNICODE_SMP
class ATNDeserializer (object):
__slots__ = ('deserializationOptions', 'data', 'pos', 'uuid')
def __init__(self, options : ATNDeserializationOptions = None):
if options is None:
options = ATNDeserializationOptions.defaultOptions
self.deserializationOptions = options
# Determines if a particular serialized representation of an ATN supports
# a particular feature, identified by the {@link UUID} used for serializing
# the ATN at the time the feature was first introduced.
#
# @param feature The {@link UUID} marking the first time the feature was
# supported in the serialized ATN.
# @param actualUuid The {@link UUID} of the actual serialized ATN which is
# currently being deserialized.
# @return {@code true} if the {@code actualUuid} value represents a
# serialized ATN at or after the feature identified by {@code feature} was
# introduced; otherwise, {@code false}.
def isFeatureSupported(self, feature : UUID , actualUuid : UUID ):
        # list.index() raises ValueError rather than returning -1, so test
        # membership explicitly before asking for positions
        if feature not in SUPPORTED_UUIDS:
            return False
        idx1 = SUPPORTED_UUIDS.index(feature)
        idx2 = SUPPORTED_UUIDS.index(actualUuid)
        return idx2 >= idx1
def deserialize(self, data : str):
self.reset(data)
self.checkVersion()
self.checkUUID()
atn = self.readATN()
self.readStates(atn)
self.readRules(atn)
self.readModes(atn)
sets = []
# First, read all sets with 16-bit Unicode code points <= U+FFFF.
self.readSets(atn, sets, self.readInt)
# Next, if the ATN was serialized with the Unicode SMP feature,
# deserialize sets with 32-bit arguments <= U+10FFFF.
if self.isFeatureSupported(ADDED_UNICODE_SMP, self.uuid):
self.readSets(atn, sets, self.readInt32)
self.readEdges(atn, sets)
self.readDecisions(atn)
self.readLexerActions(atn)
self.markPrecedenceDecisions(atn)
self.verifyATN(atn)
if self.deserializationOptions.generateRuleBypassTransitions \
and atn.grammarType == ATNType.PARSER:
self.generateRuleBypassTransitions(atn)
# re-verify after modification
self.verifyATN(atn)
return atn
def reset(self, data:str):
def adjust(c):
v = ord(c)
return v-2 if v>1 else v + 65533
temp = [ adjust(c) for c in data ]
# don't adjust the first value since that's the version number
temp[0] = ord(data[0])
self.data = temp
self.pos = 0
def checkVersion(self):
version = self.readInt()
if version != SERIALIZED_VERSION:
raise Exception("Could not deserialize ATN with version " + str(version) + " (expected " + str(SERIALIZED_VERSION) + ").")
def checkUUID(self):
uuid = self.readUUID()
if not uuid in SUPPORTED_UUIDS:
raise Exception("Could not deserialize ATN with UUID: " + str(uuid) + \
" (expected " + str(SERIALIZED_UUID) + " or a legacy UUID).", uuid, SERIALIZED_UUID)
self.uuid = uuid
def readATN(self):
idx = self.readInt()
grammarType = ATNType.fromOrdinal(idx)
maxTokenType = self.readInt()
return ATN(grammarType, maxTokenType)
def readStates(self, atn:ATN):
loopBackStateNumbers = []
endStateNumbers = []
nstates = self.readInt()
for i in range(0, nstates):
stype = self.readInt()
# ignore bad type of states
if stype==ATNState.INVALID_TYPE:
atn.addState(None)
continue
ruleIndex = self.readInt()
if ruleIndex == 0xFFFF:
ruleIndex = -1
s = self.stateFactory(stype, ruleIndex)
if stype == ATNState.LOOP_END: # special case
loopBackStateNumber = self.readInt()
loopBackStateNumbers.append((s, loopBackStateNumber))
elif isinstance(s, BlockStartState):
endStateNumber = self.readInt()
endStateNumbers.append((s, endStateNumber))
atn.addState(s)
# delay the assignment of loop back and end states until we know all the state instances have been initialized
for pair in loopBackStateNumbers:
pair[0].loopBackState = atn.states[pair[1]]
for pair in endStateNumbers:
pair[0].endState = atn.states[pair[1]]
numNonGreedyStates = self.readInt()
for i in range(0, numNonGreedyStates):
stateNumber = self.readInt()
atn.states[stateNumber].nonGreedy = True
numPrecedenceStates = self.readInt()
for i in range(0, numPrecedenceStates):
stateNumber = self.readInt()
atn.states[stateNumber].isPrecedenceRule = True
def readRules(self, atn:ATN):
nrules = self.readInt()
if atn.grammarType == ATNType.LEXER:
atn.ruleToTokenType = [0] * nrules
atn.ruleToStartState = [0] * nrules
for i in range(0, nrules):
s = self.readInt()
startState = atn.states[s]
atn.ruleToStartState[i] = startState
if atn.grammarType == ATNType.LEXER:
tokenType = self.readInt()
if tokenType == 0xFFFF:
tokenType = Token.EOF
atn.ruleToTokenType[i] = tokenType
atn.ruleToStopState = [0] * nrules
for state in atn.states:
if not isinstance(state, RuleStopState):
continue
atn.ruleToStopState[state.ruleIndex] = state
atn.ruleToStartState[state.ruleIndex].stopState = state
def readModes(self, atn:ATN):
nmodes = self.readInt()
for i in range(0, nmodes):
s = self.readInt()
atn.modeToStartState.append(atn.states[s])
def readSets(self, atn:ATN, sets:list, readUnicode:Callable[[], int]):
m = self.readInt()
for i in range(0, m):
iset = IntervalSet()
sets.append(iset)
n = self.readInt()
containsEof = self.readInt()
if containsEof!=0:
iset.addOne(-1)
for j in range(0, n):
i1 = readUnicode()
i2 = readUnicode()
iset.addRange(range(i1, i2 + 1)) # range upper limit is exclusive
def readEdges(self, atn:ATN, sets:list):
nedges = self.readInt()
for i in range(0, nedges):
src = self.readInt()
trg = self.readInt()
ttype = self.readInt()
arg1 = self.readInt()
arg2 = self.readInt()
arg3 = self.readInt()
trans = self.edgeFactory(atn, ttype, src, trg, arg1, arg2, arg3, sets)
srcState = atn.states[src]
srcState.addTransition(trans)
# edges for rule stop states can be derived, so they aren't serialized
for state in atn.states:
for i in range(0, len(state.transitions)):
t = state.transitions[i]
if not isinstance(t, RuleTransition):
continue
outermostPrecedenceReturn = -1
if atn.ruleToStartState[t.target.ruleIndex].isPrecedenceRule:
if t.precedence == 0:
outermostPrecedenceReturn = t.target.ruleIndex
trans = EpsilonTransition(t.followState, outermostPrecedenceReturn)
atn.ruleToStopState[t.target.ruleIndex].addTransition(trans)
for state in atn.states:
if isinstance(state, BlockStartState):
# we need to know the end state to set its start state
if state.endState is None:
raise Exception("IllegalState")
# block end states can only be associated to a single block start state
if state.endState.startState is not None:
raise Exception("IllegalState")
state.endState.startState = state
if isinstance(state, PlusLoopbackState):
for i in range(0, len(state.transitions)):
target = state.transitions[i].target
if isinstance(target, PlusBlockStartState):
target.loopBackState = state
elif isinstance(state, StarLoopbackState):
for i in range(0, len(state.transitions)):
target = state.transitions[i].target
if isinstance(target, StarLoopEntryState):
target.loopBackState = state
def readDecisions(self, atn:ATN):
ndecisions = self.readInt()
for i in range(0, ndecisions):
s = self.readInt()
decState = atn.states[s]
atn.decisionToState.append(decState)
decState.decision = i
def readLexerActions(self, atn:ATN):
if atn.grammarType == ATNType.LEXER:
count = self.readInt()
atn.lexerActions = [ None ] * count
for i in range(0, count):
actionType = self.readInt()
data1 = self.readInt()
if data1 == 0xFFFF:
data1 = -1
data2 = self.readInt()
if data2 == 0xFFFF:
data2 = -1
lexerAction = self.lexerActionFactory(actionType, data1, data2)
atn.lexerActions[i] = lexerAction
def generateRuleBypassTransitions(self, atn:ATN):
count = len(atn.ruleToStartState)
atn.ruleToTokenType = [ 0 ] * count
for i in range(0, count):
atn.ruleToTokenType[i] = atn.maxTokenType + i + 1
for i in range(0, count):
self.generateRuleBypassTransition(atn, i)
def generateRuleBypassTransition(self, atn:ATN, idx:int):
bypassStart = BasicBlockStartState()
bypassStart.ruleIndex = idx
atn.addState(bypassStart)
bypassStop = BlockEndState()
bypassStop.ruleIndex = idx
atn.addState(bypassStop)
bypassStart.endState = bypassStop
atn.defineDecisionState(bypassStart)
bypassStop.startState = bypassStart
excludeTransition = None
if atn.ruleToStartState[idx].isPrecedenceRule:
# wrap from the beginning of the rule to the StarLoopEntryState
endState = None
for state in atn.states:
if self.stateIsEndStateFor(state, idx):
endState = state
excludeTransition = state.loopBackState.transitions[0]
break
if excludeTransition is None:
raise Exception("Couldn't identify final state of the precedence rule prefix section.")
else:
endState = atn.ruleToStopState[idx]
# all non-excluded transitions that currently target end state need to target blockEnd instead
for state in atn.states:
for transition in state.transitions:
if transition == excludeTransition:
continue
if transition.target == endState:
transition.target = bypassStop
# all transitions leaving the rule start state need to leave blockStart instead
ruleToStartState = atn.ruleToStartState[idx]
count = len(ruleToStartState.transitions)
while count > 0:
bypassStart.addTransition(ruleToStartState.transitions[count-1])
del ruleToStartState.transitions[-1]
# link the new states
atn.ruleToStartState[idx].addTransition(EpsilonTransition(bypassStart))
bypassStop.addTransition(EpsilonTransition(endState))
matchState = BasicState()
atn.addState(matchState)
matchState.addTransition(AtomTransition(bypassStop, atn.ruleToTokenType[idx]))
bypassStart.addTransition(EpsilonTransition(matchState))
def stateIsEndStateFor(self, state:ATNState, idx:int):
if state.ruleIndex != idx:
return None
if not isinstance(state, StarLoopEntryState):
return None
maybeLoopEndState = state.transitions[len(state.transitions) - 1].target
if not isinstance(maybeLoopEndState, LoopEndState):
return None
if maybeLoopEndState.epsilonOnlyTransitions and \
isinstance(maybeLoopEndState.transitions[0].target, RuleStopState):
return state
else:
return None
#
# Analyze the {@link StarLoopEntryState} states in the specified ATN to set
# the {@link StarLoopEntryState#isPrecedenceDecision} field to the
# correct value.
#
# @param atn The ATN.
#
def markPrecedenceDecisions(self, atn:ATN):
for state in atn.states:
if not isinstance(state, StarLoopEntryState):
continue
# We analyze the ATN to determine if this ATN decision state is the
# decision for the closure block that determines whether a
# precedence rule should continue or complete.
#
if atn.ruleToStartState[state.ruleIndex].isPrecedenceRule:
maybeLoopEndState = state.transitions[len(state.transitions) - 1].target
if isinstance(maybeLoopEndState, LoopEndState):
if maybeLoopEndState.epsilonOnlyTransitions and \
isinstance(maybeLoopEndState.transitions[0].target, RuleStopState):
state.isPrecedenceDecision = True
def verifyATN(self, atn:ATN):
if not self.deserializationOptions.verifyATN:
return
# verify assumptions
for state in atn.states:
if state is None:
continue
self.checkCondition(state.epsilonOnlyTransitions or len(state.transitions) <= 1)
if isinstance(state, PlusBlockStartState):
self.checkCondition(state.loopBackState is not None)
if isinstance(state, StarLoopEntryState):
self.checkCondition(state.loopBackState is not None)
self.checkCondition(len(state.transitions) == 2)
if isinstance(state.transitions[0].target, StarBlockStartState):
self.checkCondition(isinstance(state.transitions[1].target, LoopEndState))
self.checkCondition(not state.nonGreedy)
elif isinstance(state.transitions[0].target, LoopEndState):
self.checkCondition(isinstance(state.transitions[1].target, StarBlockStartState))
self.checkCondition(state.nonGreedy)
else:
raise Exception("IllegalState")
if isinstance(state, StarLoopbackState):
self.checkCondition(len(state.transitions) == 1)
self.checkCondition(isinstance(state.transitions[0].target, StarLoopEntryState))
if isinstance(state, LoopEndState):
self.checkCondition(state.loopBackState is not None)
if isinstance(state, RuleStartState):
self.checkCondition(state.stopState is not None)
if isinstance(state, BlockStartState):
self.checkCondition(state.endState is not None)
if isinstance(state, BlockEndState):
self.checkCondition(state.startState is not None)
if isinstance(state, DecisionState):
self.checkCondition(len(state.transitions) <= 1 or state.decision >= 0)
else:
self.checkCondition(len(state.transitions) <= 1 or isinstance(state, RuleStopState))
def checkCondition(self, condition:bool, message=None):
if not condition:
if message is None:
message = "IllegalState"
raise Exception(message)
def readInt(self):
i = self.data[self.pos]
self.pos += 1
return i
def readInt32(self):
low = self.readInt()
high = self.readInt()
return low | (high << 16)
def readLong(self):
low = self.readInt32()
high = self.readInt32()
return (low & 0x00000000FFFFFFFF) | (high << 32)
def readUUID(self):
low = self.readLong()
high = self.readLong()
allBits = (low & 0xFFFFFFFFFFFFFFFF) | (high << 64)
return UUID(int=allBits)
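    # Worked example of the little-endian 16-bit decoding above (illustrative
    # values only, not taken from a real serialized ATN): with
    # self.data = [0x5678, 0x1234] and self.pos = 0, readInt32() returns
    # 0x5678 | (0x1234 << 16) == 0x12345678; readLong() and readUUID() apply
    # the same scheme across four and eight 16-bit words respectively.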
    edgeFactories = [ None, # type 0 is invalid; edgeFactory() reports it via the None check
lambda atn, src, trg, arg1, arg2, arg3, sets, target : EpsilonTransition(target),
lambda atn, src, trg, arg1, arg2, arg3, sets, target : \
RangeTransition(target, Token.EOF, arg2) if arg3 != 0 else RangeTransition(target, arg1, arg2),
lambda atn, src, trg, arg1, arg2, arg3, sets, target : \
RuleTransition(atn.states[arg1], arg2, arg3, target),
lambda atn, src, trg, arg1, arg2, arg3, sets, target : \
PredicateTransition(target, arg1, arg2, arg3 != 0),
lambda atn, src, trg, arg1, arg2, arg3, sets, target : \
AtomTransition(target, Token.EOF) if arg3 != 0 else AtomTransition(target, arg1),
lambda atn, src, trg, arg1, arg2, arg3, sets, target : \
ActionTransition(target, arg1, arg2, arg3 != 0),
lambda atn, src, trg, arg1, arg2, arg3, sets, target : \
SetTransition(target, sets[arg1]),
lambda atn, src, trg, arg1, arg2, arg3, sets, target : \
NotSetTransition(target, sets[arg1]),
lambda atn, src, trg, arg1, arg2, arg3, sets, target : \
WildcardTransition(target),
lambda atn, src, trg, arg1, arg2, arg3, sets, target : \
PrecedencePredicateTransition(target, arg1)
]
def edgeFactory(self, atn:ATN, type:int, src:int, trg:int, arg1:int, arg2:int, arg3:int, sets:list):
target = atn.states[trg]
        if type >= len(self.edgeFactories) or self.edgeFactories[type] is None:
raise Exception("The specified transition type: " + str(type) + " is not valid.")
else:
return self.edgeFactories[type](atn, src, trg, arg1, arg2, arg3, sets, target)
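    # Dispatch sketch (assuming the standard Transition serialization codes,
    # where Transition.ATOM == 5): edgeFactory(atn, 5, src, trg, 65, 0, 0, sets)
    # selects edgeFactories[5] and, since arg3 == 0, returns
    # AtomTransition(atn.states[trg], 65), i.e. an edge matching 'A'.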
stateFactories = [ lambda : None,
lambda : BasicState(),
lambda : RuleStartState(),
lambda : BasicBlockStartState(),
lambda : PlusBlockStartState(),
lambda : StarBlockStartState(),
lambda : TokensStartState(),
lambda : RuleStopState(),
lambda : BlockEndState(),
lambda : StarLoopbackState(),
lambda : StarLoopEntryState(),
lambda : PlusLoopbackState(),
lambda : LoopEndState()
]
def stateFactory(self, type:int, ruleIndex:int):
        if type >= len(self.stateFactories) or self.stateFactories[type] is None:
raise Exception("The specified state type " + str(type) + " is not valid.")
else:
s = self.stateFactories[type]()
if s is not None:
s.ruleIndex = ruleIndex
return s
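    # For example (sketch): stateFactory(ATNState.STAR_LOOP_ENTRY, 2) picks
    # stateFactories[10] and produces a StarLoopEntryState whose ruleIndex is 2.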
CHANNEL = 0 #The type of a {@link LexerChannelAction} action.
CUSTOM = 1 #The type of a {@link LexerCustomAction} action.
MODE = 2 #The type of a {@link LexerModeAction} action.
MORE = 3 #The type of a {@link LexerMoreAction} action.
POP_MODE = 4 #The type of a {@link LexerPopModeAction} action.
PUSH_MODE = 5 #The type of a {@link LexerPushModeAction} action.
SKIP = 6 #The type of a {@link LexerSkipAction} action.
TYPE = 7 #The type of a {@link LexerTypeAction} action.
actionFactories = [ lambda data1, data2: LexerChannelAction(data1),
lambda data1, data2: LexerCustomAction(data1, data2),
lambda data1, data2: LexerModeAction(data1),
lambda data1, data2: LexerMoreAction.INSTANCE,
lambda data1, data2: LexerPopModeAction.INSTANCE,
lambda data1, data2: LexerPushModeAction(data1),
lambda data1, data2: LexerSkipAction.INSTANCE,
lambda data1, data2: LexerTypeAction(data1)
]
def lexerActionFactory(self, type:int, data1:int, data2:int):
        if type >= len(self.actionFactories) or self.actionFactories[type] is None:
raise Exception("The specified lexer action type " + str(type) + " is not valid.")
else:
return self.actionFactories[type](data1, data2)
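    # For example (sketch): lexerActionFactory(LexerActionType.PUSH_MODE, 3, 0)
    # returns LexerPushModeAction(3), while parameterless types reuse their
    # singletons, e.g. type SKIP yields LexerSkipAction.INSTANCE.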
# ==== antlr4-python3-runtime-4.9.1/src/antlr4/atn/ATNSimulator.py ====
#
# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
#/
from antlr4.PredictionContext import PredictionContextCache, PredictionContext, getCachedPredictionContext
from antlr4.atn.ATN import ATN
from antlr4.atn.ATNConfigSet import ATNConfigSet
from antlr4.dfa.DFAState import DFAState
class ATNSimulator(object):
__slots__ = ('atn', 'sharedContextCache', '__dict__')
# Must distinguish between missing edge and edge we know leads nowhere#/
ERROR = DFAState(configs=ATNConfigSet())
ERROR.stateNumber = 0x7FFFFFFF
# The context cache maps all PredictionContext objects that are ==
# to a single cached copy. This cache is shared across all contexts
# in all ATNConfigs in all DFA states. We rebuild each ATNConfigSet
# to use only cached nodes/graphs in addDFAState(). We don't want to
# fill this during closure() since there are lots of contexts that
# pop up but are not used ever again. It also greatly slows down closure().
#
# This cache makes a huge difference in memory and a little bit in speed.
# For the Java grammar on java.*, it dropped the memory requirements
# at the end from 25M to 16M. We don't store any of the full context
# graphs in the DFA because they are limited to local context only,
# but apparently there's a lot of repetition there as well. We optimize
# the config contexts before storing the config set in the DFA states
# by literally rebuilding them with cached subgraphs only.
#
# I tried a cache for use during closure operations, that was
# whacked after each adaptivePredict(). It cost a little bit
# more time I think and doesn't save on the overall footprint
# so it's not worth the complexity.
#/
def __init__(self, atn:ATN, sharedContextCache:PredictionContextCache):
self.atn = atn
self.sharedContextCache = sharedContextCache
def getCachedContext(self, context:PredictionContext):
if self.sharedContextCache is None:
return context
visited = dict()
return getCachedPredictionContext(context, self.sharedContextCache, visited)
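# Usage sketch (illustrative; atn and ctx stand for an already-deserialized ATN
# and some PredictionContext): subclasses funnel config contexts through
# getCachedContext() before storing DFA states, so structurally equal context
# graphs collapse onto shared cached nodes.
#
#   cache = PredictionContextCache()
#   sim = ATNSimulator(atn, cache)       # in practice a Lexer/ParserATNSimulator
#   deduped = sim.getCachedContext(ctx)  # equal to ctx, built from cached nodes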
# ==== antlr4-python3-runtime-4.9.1/src/antlr4/atn/ATNState.py ====
#
# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
#
# The following documentation originally embedded diagrams showing the
# relation of states and {@link ATNState#transitions} for various grammar
# constructs; the images are omitted in this runtime, so only the captions
# survive below.
#
# - Solid edges marked with an ε indicate a required
#   {@link EpsilonTransition}.
#
# - Dashed edges indicate locations where any transition derived from
#   {@link Transition} might appear.
#
# - Dashed nodes are placeholders for either a sequence of linked
#   {@link BasicState} states or the inclusion of a block representing a nested
#   construct in one of the forms below.
#
# - Nodes showing multiple outgoing alternatives with a {@code ...} support
#   any number of alternatives (one or more). Nodes without the {@code ...} only
#   support the exact number of alternatives shown in the diagram.
#
# Diagram captions:
#
# - Basic Blocks: Rule; Block of 1 or more alternatives
#
# - Greedy Loops: Greedy Closure {@code (...)*}; Greedy Positive Closure
#   {@code (...)+}; Greedy Optional {@code (...)?}
#
# - Non-Greedy Loops: Non-Greedy Closure {@code (...)*?}; Non-Greedy Positive
#   Closure {@code (...)+?}; Non-Greedy Optional {@code (...)??}
#
from antlr4.atn.Transition import Transition
INITIAL_NUM_TRANSITIONS = 4
class ATNState(object):
__slots__ = (
'atn', 'stateNumber', 'stateType', 'ruleIndex', 'epsilonOnlyTransitions',
'transitions', 'nextTokenWithinRule',
)
# constants for serialization
INVALID_TYPE = 0
BASIC = 1
RULE_START = 2
BLOCK_START = 3
PLUS_BLOCK_START = 4
STAR_BLOCK_START = 5
TOKEN_START = 6
RULE_STOP = 7
BLOCK_END = 8
STAR_LOOP_BACK = 9
STAR_LOOP_ENTRY = 10
PLUS_LOOP_BACK = 11
LOOP_END = 12
serializationNames = [
"INVALID",
"BASIC",
"RULE_START",
"BLOCK_START",
"PLUS_BLOCK_START",
"STAR_BLOCK_START",
"TOKEN_START",
"RULE_STOP",
"BLOCK_END",
"STAR_LOOP_BACK",
"STAR_LOOP_ENTRY",
"PLUS_LOOP_BACK",
"LOOP_END" ]
INVALID_STATE_NUMBER = -1
def __init__(self):
# Which ATN are we in?
self.atn = None
self.stateNumber = ATNState.INVALID_STATE_NUMBER
self.stateType = None
self.ruleIndex = 0 # at runtime, we don't have Rule objects
self.epsilonOnlyTransitions = False
# Track the transitions emanating from this ATN state.
self.transitions = []
# Used to cache lookahead during parsing, not used during construction
self.nextTokenWithinRule = None
def __hash__(self):
return self.stateNumber
def __eq__(self, other):
return isinstance(other, ATNState) and self.stateNumber==other.stateNumber
def onlyHasEpsilonTransitions(self):
return self.epsilonOnlyTransitions
def isNonGreedyExitState(self):
return False
def __str__(self):
return str(self.stateNumber)
def addTransition(self, trans:Transition, index:int=-1):
if len(self.transitions)==0:
self.epsilonOnlyTransitions = trans.isEpsilon
elif self.epsilonOnlyTransitions != trans.isEpsilon:
self.epsilonOnlyTransitions = False
# TODO System.err.format(Locale.getDefault(), "ATN state %d has both epsilon and non-epsilon transitions.\n", stateNumber);
if index==-1:
self.transitions.append(trans)
else:
self.transitions.insert(index, trans)
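    # Example of the bookkeeping above (sketch): a state whose added transitions
    # are all EpsilonTransitions reports onlyHasEpsilonTransitions() == True;
    # the first non-epsilon transition added flips epsilonOnlyTransitions to
    # False, the mixed case flagged by the TODO above.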
class BasicState(ATNState):
def __init__(self):
super().__init__()
self.stateType = self.BASIC
class DecisionState(ATNState):
__slots__ = ('decision', 'nonGreedy')
def __init__(self):
super().__init__()
self.decision = -1
self.nonGreedy = False
# The start of a regular {@code (...)} block.
class BlockStartState(DecisionState):
__slots__ = 'endState'
def __init__(self):
super().__init__()
self.endState = None
class BasicBlockStartState(BlockStartState):
def __init__(self):
super().__init__()
self.stateType = self.BLOCK_START
# Terminal node of a simple {@code (a|b|c)} block.
class BlockEndState(ATNState):
__slots__ = 'startState'
def __init__(self):
super().__init__()
self.stateType = self.BLOCK_END
self.startState = None
# The last node in the ATN for a rule, unless that rule is the start symbol.
# In that case, there is one transition to EOF. Later, we might encode
# references to all calls to this rule to compute FOLLOW sets for
# error handling.
#
class RuleStopState(ATNState):
def __init__(self):
super().__init__()
self.stateType = self.RULE_STOP
class RuleStartState(ATNState):
__slots__ = ('stopState', 'isPrecedenceRule')
def __init__(self):
super().__init__()
self.stateType = self.RULE_START
self.stopState = None
self.isPrecedenceRule = False
# Decision state for {@code A+} and {@code (A|B)+}. It has two transitions:
# one to the loop back to start of the block and one to exit.
#
class PlusLoopbackState(DecisionState):
def __init__(self):
super().__init__()
self.stateType = self.PLUS_LOOP_BACK
# Start of {@code (A|B|...)+} loop. Technically a decision state, but
# we don't use it for code generation; somebody might need it, so I'm defining
# it for completeness. In reality, the {@link PlusLoopbackState} node is the
# real decision-making node for {@code A+}.
#
class PlusBlockStartState(BlockStartState):
__slots__ = 'loopBackState'
def __init__(self):
super().__init__()
self.stateType = self.PLUS_BLOCK_START
self.loopBackState = None
# The block that begins a closure loop.
class StarBlockStartState(BlockStartState):
def __init__(self):
super().__init__()
self.stateType = self.STAR_BLOCK_START
class StarLoopbackState(ATNState):
def __init__(self):
super().__init__()
self.stateType = self.STAR_LOOP_BACK
class StarLoopEntryState(DecisionState):
__slots__ = ('loopBackState', 'isPrecedenceDecision')
def __init__(self):
super().__init__()
self.stateType = self.STAR_LOOP_ENTRY
self.loopBackState = None
# Indicates whether this state can benefit from a precedence DFA during SLL decision making.
self.isPrecedenceDecision = None
# Mark the end of a * or + loop.
class LoopEndState(ATNState):
__slots__ = 'loopBackState'
def __init__(self):
super().__init__()
self.stateType = self.LOOP_END
self.loopBackState = None
# The Tokens rule start state linking to each lexer rule start state */
class TokensStartState(DecisionState):
def __init__(self):
super().__init__()
self.stateType = self.TOKEN_START
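# Debugging sketch: a state's serialized type code indexes the name table
# defined above, e.g. ATNState.serializationNames[StarLoopEntryState().stateType]
# yields "STAR_LOOP_ENTRY".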
# ==== antlr4-python3-runtime-4.9.1/src/antlr4/atn/ATNType.py ====
# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
#/
from enum import IntEnum
# Represents the type of recognizer an ATN applies to.
class ATNType(IntEnum):
LEXER = 0
PARSER = 1
@classmethod
def fromOrdinal(cls, i:int):
return cls._value2member_map_[i]
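# Usage sketch: the deserializer reads this ordinal from the serialized ATN
# header, so ATNType.fromOrdinal(0) is ATNType.LEXER and
# ATNType.fromOrdinal(1) is ATNType.PARSER.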
# ==== antlr4-python3-runtime-4.9.1/src/antlr4/atn/LexerATNSimulator.py ====
#
# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
#/
# When we hit an accept state in either the DFA or the ATN, we
# have to notify the character stream to start buffering characters
# via {@link IntStream#mark} and record the current state. The current sim state
# includes the current index into the input, the current line,
# and current character position in that line. Note that the Lexer is
# tracking the starting line and characterization of the token. These
# variables track the "state" of the simulator when it hits an accept state.
#
# We track these variables separately for the DFA and ATN simulation
# because the DFA simulation often has to fail over to the ATN
# simulation. If the ATN simulation fails, we need the DFA to fall
# back to its previously accepted state, if any. If the ATN succeeds,
# then the ATN does the accept and the DFA simulator that invoked it
# can simply return the predicted token type.
#/
from antlr4.PredictionContext import PredictionContextCache, SingletonPredictionContext, PredictionContext
from antlr4.InputStream import InputStream
from antlr4.Token import Token
from antlr4.atn.ATN import ATN
from antlr4.atn.ATNConfig import LexerATNConfig
from antlr4.atn.ATNSimulator import ATNSimulator
from antlr4.atn.ATNConfigSet import ATNConfigSet, OrderedATNConfigSet
from antlr4.atn.ATNState import RuleStopState, ATNState
from antlr4.atn.LexerActionExecutor import LexerActionExecutor
from antlr4.atn.Transition import Transition
from antlr4.dfa.DFAState import DFAState
from antlr4.error.Errors import LexerNoViableAltException, UnsupportedOperationException
class SimState(object):
__slots__ = ('index', 'line', 'column', 'dfaState')
def __init__(self):
self.reset()
def reset(self):
self.index = -1
self.line = 0
self.column = -1
self.dfaState = None
# need forward declaration
Lexer = None
LexerATNSimulator = None
class LexerATNSimulator(ATNSimulator):
__slots__ = (
'decisionToDFA', 'recog', 'startIndex', 'line', 'column', 'mode',
'DEFAULT_MODE', 'MAX_CHAR_VALUE', 'prevAccept'
)
debug = False
dfa_debug = False
MIN_DFA_EDGE = 0
MAX_DFA_EDGE = 127 # forces unicode to stay in ATN
ERROR = None
def __init__(self, recog:Lexer, atn:ATN, decisionToDFA:list, sharedContextCache:PredictionContextCache):
super().__init__(atn, sharedContextCache)
self.decisionToDFA = decisionToDFA
self.recog = recog
# The current token's starting index into the character stream.
# Shared across DFA to ATN simulation in case the ATN fails and the
# DFA did not have a previous accept state. In this case, we use the
# ATN-generated exception object.
self.startIndex = -1
# line number 1..n within the input#/
self.line = 1
# The index of the character relative to the beginning of the line 0..n-1#/
self.column = 0
from antlr4.Lexer import Lexer
self.mode = Lexer.DEFAULT_MODE
# Cache Lexer properties to avoid further imports
self.DEFAULT_MODE = Lexer.DEFAULT_MODE
self.MAX_CHAR_VALUE = Lexer.MAX_CHAR_VALUE
# Used during DFA/ATN exec to record the most recent accept configuration info
self.prevAccept = SimState()
def copyState(self, simulator:LexerATNSimulator ):
self.column = simulator.column
self.line = simulator.line
self.mode = simulator.mode
self.startIndex = simulator.startIndex
def match(self, input:InputStream , mode:int):
self.mode = mode
mark = input.mark()
try:
self.startIndex = input.index
self.prevAccept.reset()
dfa = self.decisionToDFA[mode]
if dfa.s0 is None:
return self.matchATN(input)
else:
return self.execATN(input, dfa.s0)
finally:
input.release(mark)
def reset(self):
self.prevAccept.reset()
self.startIndex = -1
self.line = 1
self.column = 0
self.mode = self.DEFAULT_MODE
def matchATN(self, input:InputStream):
startState = self.atn.modeToStartState[self.mode]
if LexerATNSimulator.debug:
print("matchATN mode " + str(self.mode) + " start: " + str(startState))
old_mode = self.mode
s0_closure = self.computeStartState(input, startState)
suppressEdge = s0_closure.hasSemanticContext
s0_closure.hasSemanticContext = False
next = self.addDFAState(s0_closure)
if not suppressEdge:
self.decisionToDFA[self.mode].s0 = next
predict = self.execATN(input, next)
if LexerATNSimulator.debug:
print("DFA after matchATN: " + str(self.decisionToDFA[old_mode].toLexerString()))
return predict
def execATN(self, input:InputStream, ds0:DFAState):
if LexerATNSimulator.debug:
print("start state closure=" + str(ds0.configs))
if ds0.isAcceptState:
# allow zero-length tokens
self.captureSimState(self.prevAccept, input, ds0)
t = input.LA(1)
s = ds0 # s is current/from DFA state
while True: # while more work
if LexerATNSimulator.debug:
print("execATN loop starting closure:", str(s.configs))
# As we move src->trg, src->trg, we keep track of the previous trg to
# avoid looking up the DFA state again, which is expensive.
# If the previous target was already part of the DFA, we might
# be able to avoid doing a reach operation upon t. If s!=null,
# it means that semantic predicates didn't prevent us from
# creating a DFA state. Once we know s!=null, we check to see if
# the DFA state has an edge already for t. If so, we can just reuse
            # its configuration set; there's no point in re-computing it.
# This is kind of like doing DFA simulation within the ATN
# simulation because DFA simulation is really just a way to avoid
# computing reach/closure sets. Technically, once we know that
# we have a previously added DFA state, we could jump over to
# the DFA simulator. But, that would mean popping back and forth
# a lot and making things more complicated algorithmically.
# This optimization makes a lot of sense for loops within DFA.
# A character will take us back to an existing DFA state
# that already has lots of edges out of it. e.g., .* in comments.
# print("Target for:" + str(s) + " and:" + str(t))
target = self.getExistingTargetState(s, t)
# print("Existing:" + str(target))
if target is None:
target = self.computeTargetState(input, s, t)
# print("Computed:" + str(target))
if target == self.ERROR:
break
# If this is a consumable input element, make sure to consume before
# capturing the accept state so the input index, line, and char
# position accurately reflect the state of the interpreter at the
# end of the token.
if t != Token.EOF:
self.consume(input)
if target.isAcceptState:
self.captureSimState(self.prevAccept, input, target)
if t == Token.EOF:
break
t = input.LA(1)
s = target # flip; current DFA target becomes new src/from state
return self.failOrAccept(self.prevAccept, input, s.configs, t)
# Get an existing target state for an edge in the DFA. If the target state
# for the edge has not yet been computed or is otherwise not available,
# this method returns {@code null}.
#
# @param s The current DFA state
# @param t The next input symbol
# @return The existing target DFA state for the given input symbol
# {@code t}, or {@code null} if the target state for this edge is not
# already cached
def getExistingTargetState(self, s:DFAState, t:int):
if s.edges is None or t < self.MIN_DFA_EDGE or t > self.MAX_DFA_EDGE:
return None
target = s.edges[t - self.MIN_DFA_EDGE]
if LexerATNSimulator.debug and target is not None:
print("reuse state", str(s.stateNumber), "edge to", str(target.stateNumber))
return target
# Compute a target state for an edge in the DFA, and attempt to add the
# computed state and corresponding edge to the DFA.
#
# @param input The input stream
# @param s The current DFA state
# @param t The next input symbol
#
# @return The computed target DFA state for the given input symbol
# {@code t}. If {@code t} does not lead to a valid DFA state, this method
# returns {@link #ERROR}.
def computeTargetState(self, input:InputStream, s:DFAState, t:int):
reach = OrderedATNConfigSet()
# if we don't find an existing DFA state
# Fill reach starting from closure, following t transitions
self.getReachableConfigSet(input, s.configs, reach, t)
if len(reach)==0: # we got nowhere on t from s
if not reach.hasSemanticContext:
# we got nowhere on t, don't throw out this knowledge; it'd
# cause a failover from DFA later.
                self.addDFAEdge(s, t, self.ERROR)
# stop when we can't match any more char
return self.ERROR
# Add an edge from s to target DFA found/created for reach
return self.addDFAEdge(s, t, cfgs=reach)
def failOrAccept(self, prevAccept:SimState , input:InputStream, reach:ATNConfigSet, t:int):
if self.prevAccept.dfaState is not None:
lexerActionExecutor = prevAccept.dfaState.lexerActionExecutor
self.accept(input, lexerActionExecutor, self.startIndex, prevAccept.index, prevAccept.line, prevAccept.column)
return prevAccept.dfaState.prediction
else:
# if no accept and EOF is first char, return EOF
if t==Token.EOF and input.index==self.startIndex:
return Token.EOF
raise LexerNoViableAltException(self.recog, input, self.startIndex, reach)
# Given a starting configuration set, figure out all ATN configurations
# we can reach upon input {@code t}. Parameter {@code reach} is a return
# parameter.
def getReachableConfigSet(self, input:InputStream, closure:ATNConfigSet, reach:ATNConfigSet, t:int):
# this is used to skip processing for configs which have a lower priority
# than a config that already reached an accept state for the same rule
skipAlt = ATN.INVALID_ALT_NUMBER
for cfg in closure:
currentAltReachedAcceptState = ( cfg.alt == skipAlt )
if currentAltReachedAcceptState and cfg.passedThroughNonGreedyDecision:
continue
if LexerATNSimulator.debug:
print("testing", self.getTokenName(t), "at", str(cfg))
for trans in cfg.state.transitions: # for each transition
target = self.getReachableTarget(trans, t)
if target is not None:
lexerActionExecutor = cfg.lexerActionExecutor
if lexerActionExecutor is not None:
lexerActionExecutor = lexerActionExecutor.fixOffsetBeforeMatch(input.index - self.startIndex)
treatEofAsEpsilon = (t == Token.EOF)
config = LexerATNConfig(state=target, lexerActionExecutor=lexerActionExecutor, config=cfg)
if self.closure(input, config, reach, currentAltReachedAcceptState, True, treatEofAsEpsilon):
# any remaining configs for this alt have a lower priority than
# the one that just reached an accept state.
skipAlt = cfg.alt
def accept(self, input:InputStream, lexerActionExecutor:LexerActionExecutor, startIndex:int, index:int, line:int, charPos:int):
if LexerATNSimulator.debug:
print("ACTION", lexerActionExecutor)
# seek to after last char in token
input.seek(index)
self.line = line
self.column = charPos
if lexerActionExecutor is not None and self.recog is not None:
lexerActionExecutor.execute(self.recog, input, startIndex)
def getReachableTarget(self, trans:Transition, t:int):
if trans.matches(t, 0, self.MAX_CHAR_VALUE):
return trans.target
else:
return None
def computeStartState(self, input:InputStream, p:ATNState):
initialContext = PredictionContext.EMPTY
configs = OrderedATNConfigSet()
for i in range(0,len(p.transitions)):
target = p.transitions[i].target
c = LexerATNConfig(state=target, alt=i+1, context=initialContext)
self.closure(input, c, configs, False, False, False)
return configs
# Since the alternatives within any lexer decision are ordered by
# preference, this method stops pursuing the closure as soon as an accept
# state is reached. After the first accept state is reached by depth-first
# search from {@code config}, all other (potentially reachable) states for
# this rule would have a lower priority.
#
# @return {@code true} if an accept state is reached, otherwise
# {@code false}.
def closure(self, input:InputStream, config:LexerATNConfig, configs:ATNConfigSet, currentAltReachedAcceptState:bool,
speculative:bool, treatEofAsEpsilon:bool):
if LexerATNSimulator.debug:
print("closure(" + str(config) + ")")
if isinstance( config.state, RuleStopState ):
if LexerATNSimulator.debug:
if self.recog is not None:
print("closure at", self.recog.symbolicNames[config.state.ruleIndex], "rule stop", str(config))
else:
print("closure at rule stop", str(config))
if config.context is None or config.context.hasEmptyPath():
if config.context is None or config.context.isEmpty():
configs.add(config)
return True
else:
configs.add(LexerATNConfig(state=config.state, config=config, context=PredictionContext.EMPTY))
currentAltReachedAcceptState = True
if config.context is not None and not config.context.isEmpty():
for i in range(0,len(config.context)):
if config.context.getReturnState(i) != PredictionContext.EMPTY_RETURN_STATE:
newContext = config.context.getParent(i) # "pop" return state
returnState = self.atn.states[config.context.getReturnState(i)]
c = LexerATNConfig(state=returnState, config=config, context=newContext)
currentAltReachedAcceptState = self.closure(input, c, configs,
currentAltReachedAcceptState, speculative, treatEofAsEpsilon)
return currentAltReachedAcceptState
# optimization
if not config.state.epsilonOnlyTransitions:
if not currentAltReachedAcceptState or not config.passedThroughNonGreedyDecision:
configs.add(config)
for t in config.state.transitions:
c = self.getEpsilonTarget(input, config, t, configs, speculative, treatEofAsEpsilon)
if c is not None:
currentAltReachedAcceptState = self.closure(input, c, configs, currentAltReachedAcceptState, speculative, treatEofAsEpsilon)
return currentAltReachedAcceptState
# side-effect: can alter configs.hasSemanticContext
def getEpsilonTarget(self, input:InputStream, config:LexerATNConfig, t:Transition, configs:ATNConfigSet,
speculative:bool, treatEofAsEpsilon:bool):
c = None
if t.serializationType==Transition.RULE:
newContext = SingletonPredictionContext.create(config.context, t.followState.stateNumber)
c = LexerATNConfig(state=t.target, config=config, context=newContext)
elif t.serializationType==Transition.PRECEDENCE:
raise UnsupportedOperationException("Precedence predicates are not supported in lexers.")
elif t.serializationType==Transition.PREDICATE:
# Track traversing semantic predicates. If we traverse,
# we cannot add a DFA state for this "reach" computation
# because the DFA would not test the predicate again in the
# future. Rather than creating collections of semantic predicates
# like v3 and testing them on prediction, v4 will test them on the
# fly all the time using the ATN not the DFA. This is slower but
# semantically it's not used that often. One of the key elements to
# this predicate mechanism is not adding DFA states that see
# predicates immediately afterwards in the ATN. For example,
# a : ID {p1}? | ID {p2}? ;
            # should create the start state for rule 'a' (to save start state
            # computation), but should not create target of ID state. The
# collection of ATN states the following ID references includes
# states reached by traversing predicates. Since this is when we
            # test them, we cannot cache the DFA state target of ID.
if LexerATNSimulator.debug:
print("EVAL rule "+ str(t.ruleIndex) + ":" + str(t.predIndex))
configs.hasSemanticContext = True
if self.evaluatePredicate(input, t.ruleIndex, t.predIndex, speculative):
c = LexerATNConfig(state=t.target, config=config)
elif t.serializationType==Transition.ACTION:
if config.context is None or config.context.hasEmptyPath():
# execute actions anywhere in the start rule for a token.
#
# TODO: if the entry rule is invoked recursively, some
# actions may be executed during the recursive call. The
# problem can appear when hasEmptyPath() is true but
# isEmpty() is false. In this case, the config needs to be
# split into two contexts - one with just the empty path
# and another with everything but the empty path.
# Unfortunately, the current algorithm does not allow
# getEpsilonTarget to return two configurations, so
# additional modifications are needed before we can support
# the split operation.
lexerActionExecutor = LexerActionExecutor.append(config.lexerActionExecutor,
self.atn.lexerActions[t.actionIndex])
c = LexerATNConfig(state=t.target, config=config, lexerActionExecutor=lexerActionExecutor)
else:
# ignore actions in referenced rules
c = LexerATNConfig(state=t.target, config=config)
elif t.serializationType==Transition.EPSILON:
c = LexerATNConfig(state=t.target, config=config)
elif t.serializationType in [ Transition.ATOM, Transition.RANGE, Transition.SET ]:
if treatEofAsEpsilon:
if t.matches(Token.EOF, 0, self.MAX_CHAR_VALUE):
c = LexerATNConfig(state=t.target, config=config)
return c
# Evaluate a predicate specified in the lexer.
#
# If {@code speculative} is {@code true}, this method was called before
# {@link #consume} for the matched character. This method should call
# {@link #consume} before evaluating the predicate to ensure position
# sensitive values, including {@link Lexer#getText}, {@link Lexer#getLine},
    # and {@link Lexer#getColumn}, properly reflect the current
# lexer state. This method should restore {@code input} and the simulator
# to the original state before returning (i.e. undo the actions made by the
    # call to {@link #consume}).
#
# @param input The input stream.
# @param ruleIndex The rule containing the predicate.
# @param predIndex The index of the predicate within the rule.
# @param speculative {@code true} if the current index in {@code input} is
# one character before the predicate's location.
#
# @return {@code true} if the specified predicate evaluates to
# {@code true}.
#/
def evaluatePredicate(self, input:InputStream, ruleIndex:int, predIndex:int, speculative:bool):
# assume true if no recognizer was provided
if self.recog is None:
return True
if not speculative:
return self.recog.sempred(None, ruleIndex, predIndex)
savedcolumn = self.column
savedLine = self.line
index = input.index
marker = input.mark()
try:
self.consume(input)
return self.recog.sempred(None, ruleIndex, predIndex)
finally:
self.column = savedcolumn
self.line = savedLine
input.seek(index)
input.release(marker)
def captureSimState(self, settings:SimState, input:InputStream, dfaState:DFAState):
settings.index = input.index
settings.line = self.line
settings.column = self.column
settings.dfaState = dfaState
def addDFAEdge(self, from_:DFAState, tk:int, to:DFAState=None, cfgs:ATNConfigSet=None) -> DFAState:
if to is None and cfgs is not None:
# leading to this call, ATNConfigSet.hasSemanticContext is used as a
# marker indicating dynamic predicate evaluation makes this edge
# dependent on the specific input sequence, so the static edge in the
# DFA should be omitted. The target DFAState is still created since
# execATN has the ability to resynchronize with the DFA state cache
# following the predicate evaluation step.
#
# TJP notes: next time through the DFA, we see a pred again and eval.
# If that gets us to a previously created (but dangling) DFA
# state, we can continue in pure DFA mode from there.
#/
suppressEdge = cfgs.hasSemanticContext
cfgs.hasSemanticContext = False
to = self.addDFAState(cfgs)
if suppressEdge:
return to
# add the edge
if tk < self.MIN_DFA_EDGE or tk > self.MAX_DFA_EDGE:
# Only track edges within the DFA bounds
return to
if LexerATNSimulator.debug:
print("EDGE " + str(from_) + " -> " + str(to) + " upon "+ chr(tk))
if from_.edges is None:
# make room for tokens 1..n and -1 masquerading as index 0
from_.edges = [ None ] * (self.MAX_DFA_EDGE - self.MIN_DFA_EDGE + 1)
from_.edges[tk - self.MIN_DFA_EDGE] = to # connect
return to
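    # Sketch of the bounds check above: only code points in
    # [MIN_DFA_EDGE, MAX_DFA_EDGE] == [0, 127] get a slot in DFAState.edges,
    # so ASCII-range loops stay inside the DFA while wider input (e.g. the
    # code point 0x263A) always re-enters ATN simulation for that edge.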
# Add a new DFA state if there isn't one with this set of
# configurations already. This method also detects the first
# configuration containing an ATN rule stop state. Later, when
# traversing the DFA, we will know which rule to accept.
def addDFAState(self, configs:ATNConfigSet) -> DFAState:
proposed = DFAState(configs=configs)
firstConfigWithRuleStopState = next((cfg for cfg in configs if isinstance(cfg.state, RuleStopState)), None)
if firstConfigWithRuleStopState is not None:
proposed.isAcceptState = True
proposed.lexerActionExecutor = firstConfigWithRuleStopState.lexerActionExecutor
proposed.prediction = self.atn.ruleToTokenType[firstConfigWithRuleStopState.state.ruleIndex]
dfa = self.decisionToDFA[self.mode]
existing = dfa.states.get(proposed, None)
if existing is not None:
return existing
newState = proposed
newState.stateNumber = len(dfa.states)
configs.setReadonly(True)
newState.configs = configs
dfa.states[newState] = newState
return newState
def getDFA(self, mode:int):
return self.decisionToDFA[mode]
# Get the text matched so far for the current token.
def getText(self, input:InputStream):
# index is first lookahead char, don't include.
return input.getText(self.startIndex, input.index-1)
def consume(self, input:InputStream):
curChar = input.LA(1)
if curChar==ord('\n'):
self.line += 1
self.column = 0
else:
self.column += 1
input.consume()
def getTokenName(self, t:int):
if t==-1:
return "EOF"
else:
return "'" + chr(t) + "'"
LexerATNSimulator.ERROR = DFAState(0x7FFFFFFF, ATNConfigSet())
del Lexer
# ==== antlr4-python3-runtime-4.9.1/src/antlr4/atn/LexerAction.py ====
#
# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
#
from enum import IntEnum
# need forward declaration
Lexer = None
class LexerActionType(IntEnum):
CHANNEL = 0 #The type of a {@link LexerChannelAction} action.
CUSTOM = 1 #The type of a {@link LexerCustomAction} action.
MODE = 2 #The type of a {@link LexerModeAction} action.
MORE = 3 #The type of a {@link LexerMoreAction} action.
POP_MODE = 4 #The type of a {@link LexerPopModeAction} action.
PUSH_MODE = 5 #The type of a {@link LexerPushModeAction} action.
SKIP = 6 #The type of a {@link LexerSkipAction} action.
TYPE = 7 #The type of a {@link LexerTypeAction} action.
class LexerAction(object):
__slots__ = ('actionType', 'isPositionDependent')
def __init__(self, action:LexerActionType):
self.actionType = action
self.isPositionDependent = False
def __hash__(self):
return hash(self.actionType)
def __eq__(self, other):
return self is other
#
# Implements the {@code skip} lexer action by calling {@link Lexer#skip}.
#
# The {@code skip} command does not have any parameters, so this action is
# implemented as a singleton instance exposed by {@link #INSTANCE}.
class LexerSkipAction(LexerAction):
# Provides a singleton instance of this parameterless lexer action.
INSTANCE = None
def __init__(self):
super().__init__(LexerActionType.SKIP)
def execute(self, lexer:Lexer):
lexer.skip()
def __str__(self):
return "skip"
LexerSkipAction.INSTANCE = LexerSkipAction()
# Implements the {@code type} lexer action by calling {@link Lexer#setType}
# with the assigned type.
class LexerTypeAction(LexerAction):
__slots__ = 'type'
def __init__(self, type:int):
super().__init__(LexerActionType.TYPE)
self.type = type
def execute(self, lexer:Lexer):
lexer.type = self.type
def __hash__(self):
return hash((self.actionType, self.type))
def __eq__(self, other):
if self is other:
return True
elif not isinstance(other, LexerTypeAction):
return False
else:
return self.type == other.type
def __str__(self):
return "type(" + str(self.type) + ")"
# Implements the {@code pushMode} lexer action by calling
# {@link Lexer#pushMode} with the assigned mode.
class LexerPushModeAction(LexerAction):
__slots__ = 'mode'
def __init__(self, mode:int):
super().__init__(LexerActionType.PUSH_MODE)
self.mode = mode
# This action is implemented by calling {@link Lexer#pushMode} with the
# value provided by {@link #getMode}.
def execute(self, lexer:Lexer):
lexer.pushMode(self.mode)
def __hash__(self):
return hash((self.actionType, self.mode))
def __eq__(self, other):
if self is other:
return True
elif not isinstance(other, LexerPushModeAction):
return False
else:
return self.mode == other.mode
def __str__(self):
return "pushMode(" + str(self.mode) + ")"
# Implements the {@code popMode} lexer action by calling {@link Lexer#popMode}.
#
# The {@code popMode} command does not have any parameters, so this action is
# implemented as a singleton instance exposed by {@link #INSTANCE}.
class LexerPopModeAction(LexerAction):
INSTANCE = None
def __init__(self):
super().__init__(LexerActionType.POP_MODE)
# This action is implemented by calling {@link Lexer#popMode}.
def execute(self, lexer:Lexer):
lexer.popMode()
def __str__(self):
return "popMode"
LexerPopModeAction.INSTANCE = LexerPopModeAction()
# Implements the {@code more} lexer action by calling {@link Lexer#more}.
#
# The {@code more} command does not have any parameters, so this action is
# implemented as a singleton instance exposed by {@link #INSTANCE}.
class LexerMoreAction(LexerAction):
INSTANCE = None
def __init__(self):
super().__init__(LexerActionType.MORE)
    # This action is implemented by calling {@link Lexer#more}.
def execute(self, lexer:Lexer):
lexer.more()
def __str__(self):
return "more"
LexerMoreAction.INSTANCE = LexerMoreAction()
# Implements the {@code mode} lexer action by calling {@link Lexer#mode} with
# the assigned mode.
class LexerModeAction(LexerAction):
__slots__ = 'mode'
def __init__(self, mode:int):
super().__init__(LexerActionType.MODE)
self.mode = mode
# This action is implemented by calling {@link Lexer#mode} with the
# value provided by {@link #getMode}.
def execute(self, lexer:Lexer):
lexer.mode(self.mode)
def __hash__(self):
return hash((self.actionType, self.mode))
def __eq__(self, other):
if self is other:
return True
elif not isinstance(other, LexerModeAction):
return False
else:
return self.mode == other.mode
def __str__(self):
return "mode(" + str(self.mode) + ")"
# Executes a custom lexer action by calling {@link Recognizer#action} with the
# rule and action indexes assigned to the custom action. The implementation of
# a custom action is added to the generated code for the lexer in an override
# of {@link Recognizer#action} when the grammar is compiled.
#
# This class may represent embedded actions created with the {...}
# syntax in ANTLR 4, as well as actions created for lexer commands where the
# command argument could not be evaluated when the grammar was compiled.
class LexerCustomAction(LexerAction):
__slots__ = ('ruleIndex', 'actionIndex')
# Constructs a custom lexer action with the specified rule and action
# indexes.
#
# @param ruleIndex The rule index to use for calls to
# {@link Recognizer#action}.
# @param actionIndex The action index to use for calls to
# {@link Recognizer#action}.
#/
def __init__(self, ruleIndex:int, actionIndex:int):
super().__init__(LexerActionType.CUSTOM)
self.ruleIndex = ruleIndex
self.actionIndex = actionIndex
self.isPositionDependent = True
# Custom actions are implemented by calling {@link Lexer#action} with the
# appropriate rule and action indexes.
def execute(self, lexer:Lexer):
lexer.action(None, self.ruleIndex, self.actionIndex)
def __hash__(self):
return hash((self.actionType, self.ruleIndex, self.actionIndex))
def __eq__(self, other):
if self is other:
return True
elif not isinstance(other, LexerCustomAction):
return False
else:
return self.ruleIndex == other.ruleIndex and self.actionIndex == other.actionIndex
# Implements the {@code channel} lexer action by calling
# {@link Lexer#setChannel} with the assigned channel.
class LexerChannelAction(LexerAction):
__slots__ = 'channel'
# Constructs a new {@code channel} action with the specified channel value.
# @param channel The channel value to pass to {@link Lexer#setChannel}.
def __init__(self, channel:int):
super().__init__(LexerActionType.CHANNEL)
self.channel = channel
# This action is implemented by calling {@link Lexer#setChannel} with the
# value provided by {@link #getChannel}.
def execute(self, lexer:Lexer):
lexer._channel = self.channel
def __hash__(self):
return hash((self.actionType, self.channel))
def __eq__(self, other):
if self is other:
return True
elif not isinstance(other, LexerChannelAction):
return False
else:
return self.channel == other.channel
def __str__(self):
return "channel(" + str(self.channel) + ")"
# This implementation of {@link LexerAction} is used for tracking input offsets
# for position-dependent actions within a {@link LexerActionExecutor}.
#
# This action is not serialized as part of the ATN, and is only required for
# position-dependent lexer actions which appear at a location other than the
# end of a rule. For more information about DFA optimizations employed for
# lexer actions, see {@link LexerActionExecutor#append} and
# {@link LexerActionExecutor#fixOffsetBeforeMatch}.
class LexerIndexedCustomAction(LexerAction):
__slots__ = ('offset', 'action')
# Constructs a new indexed custom action by associating a character offset
# with a {@link LexerAction}.
#
# Note: This class is only required for lexer actions for which
# {@link LexerAction#isPositionDependent} returns {@code true}.
#
# @param offset The offset into the input {@link CharStream}, relative to
# the token start index, at which the specified lexer action should be
# executed.
# @param action The lexer action to execute at a particular offset in the
# input {@link CharStream}.
def __init__(self, offset:int, action:LexerAction):
super().__init__(action.actionType)
self.offset = offset
self.action = action
self.isPositionDependent = True
# This method calls {@link #execute} on the result of {@link #getAction}
# using the provided {@code lexer}.
def execute(self, lexer:Lexer):
# assume the input stream position was properly set by the calling code
self.action.execute(lexer)
def __hash__(self):
return hash((self.actionType, self.offset, self.action))
def __eq__(self, other):
if self is other:
return True
elif not isinstance(other, LexerIndexedCustomAction):
return False
else:
return self.offset == other.offset and self.action == other.action
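# Sketch (illustrative indices): a position-dependent action that fires
# mid-token is wrapped together with its offset from the token start:
#
#   custom = LexerCustomAction(ruleIndex=1, actionIndex=0)
#   wrapped = LexerIndexedCustomAction(offset=3, action=custom)
#
# wrapped.execute(lexer) simply delegates to custom.execute(lexer); the
# surrounding LexerActionExecutor is responsible for seeking the input to
# startIndex + 3 first.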
# ==== antlr4-python3-runtime-4.9.1/src/antlr4/atn/LexerActionExecutor.py ====
#
# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
#/
# Represents an executor for a sequence of lexer actions which were traversed during
# the matching operation of a lexer rule (token).
#
# The executor tracks position information for position-dependent lexer actions
# efficiently, ensuring that actions appearing only at the end of the rule do
# not cause bloating of the {@link DFA} created for the lexer.
from antlr4.InputStream import InputStream
from antlr4.atn.LexerAction import LexerAction, LexerIndexedCustomAction
# need a forward declaration
Lexer = None
LexerActionExecutor = None
class LexerActionExecutor(object):
__slots__ = ('lexerActions', 'hashCode')
def __init__(self, lexerActions:list=list()):
self.lexerActions = lexerActions
# Caches the result of {@link #hashCode} since the hash code is an element
# of the performance-critical {@link LexerATNConfig#hashCode} operation.
self.hashCode = hash("".join([str(la) for la in lexerActions]))
# Creates a {@link LexerActionExecutor} which executes the actions for
# the input {@code lexerActionExecutor} followed by a specified
# {@code lexerAction}.
#
# @param lexerActionExecutor The executor for actions already traversed by
# the lexer while matching a token within a particular
# {@link LexerATNConfig}. If this is {@code null}, the method behaves as
# though it were an empty executor.
# @param lexerAction The lexer action to execute after the actions
# specified in {@code lexerActionExecutor}.
#
    # @return A {@link LexerActionExecutor} for executing the combined actions
# of {@code lexerActionExecutor} and {@code lexerAction}.
@staticmethod
def append(lexerActionExecutor:LexerActionExecutor , lexerAction:LexerAction ):
if lexerActionExecutor is None:
return LexerActionExecutor([ lexerAction ])
lexerActions = lexerActionExecutor.lexerActions + [ lexerAction ]
return LexerActionExecutor(lexerActions)
# Creates a {@link LexerActionExecutor} which encodes the current offset
# for position-dependent lexer actions.
#
# Normally, when the executor encounters lexer actions where
# {@link LexerAction#isPositionDependent} returns {@code true}, it calls
# {@link IntStream#seek} on the input {@link CharStream} to set the input
# position to the end of the current token. This behavior provides
# for efficient DFA representation of lexer actions which appear at the end
# of a lexer rule, even when the lexer rule matches a variable number of
# characters.
#
# Prior to traversing a match transition in the ATN, the current offset
# from the token start index is assigned to all position-dependent lexer
# actions which have not already been assigned a fixed offset. By storing
# the offsets relative to the token start index, the DFA representation of
# lexer actions which appear in the middle of tokens remains efficient due
# to sharing among tokens of the same length, regardless of their absolute
# position in the input stream.
#
# If the current executor already has offsets assigned to all
# position-dependent lexer actions, the method returns {@code this}.
#
# @param offset The current offset to assign to all position-dependent
# lexer actions which do not already have offsets assigned.
#
# @return A {@link LexerActionExecutor} which stores input stream offsets
# for all position-dependent lexer actions.
#/
def fixOffsetBeforeMatch(self, offset:int):
updatedLexerActions = None
for i in range(0, len(self.lexerActions)):
if self.lexerActions[i].isPositionDependent and not isinstance(self.lexerActions[i], LexerIndexedCustomAction):
if updatedLexerActions is None:
updatedLexerActions = [ la for la in self.lexerActions ]
updatedLexerActions[i] = LexerIndexedCustomAction(offset, self.lexerActions[i])
if updatedLexerActions is None:
return self
else:
return LexerActionExecutor(updatedLexerActions)
# Execute the actions encapsulated by this executor within the context of a
# particular {@link Lexer}.
#
# This method calls {@link IntStream#seek} to set the position of the
# {@code input} {@link CharStream} prior to calling
# {@link LexerAction#execute} on a position-dependent action. Before the
# method returns, the input position will be restored to the same position
# it was in when the method was invoked.
#
# @param lexer The lexer instance.
# @param input The input stream which is the source for the current token.
# When this method is called, the current {@link IntStream#index} for
# {@code input} should be the start of the following token, i.e. 1
# character past the end of the current token.
# @param startIndex The token start index. This value may be passed to
# {@link IntStream#seek} to set the {@code input} position to the beginning
# of the token.
#/
def execute(self, lexer:Lexer, input:InputStream, startIndex:int):
requiresSeek = False
stopIndex = input.index
try:
for lexerAction in self.lexerActions:
if isinstance(lexerAction, LexerIndexedCustomAction):
offset = lexerAction.offset
input.seek(startIndex + offset)
lexerAction = lexerAction.action
requiresSeek = (startIndex + offset) != stopIndex
elif lexerAction.isPositionDependent:
input.seek(stopIndex)
requiresSeek = False
lexerAction.execute(lexer)
finally:
if requiresSeek:
input.seek(stopIndex)
def __hash__(self):
return self.hashCode
def __eq__(self, other):
if self is other:
return True
elif not isinstance(other, LexerActionExecutor):
return False
else:
return self.hashCode == other.hashCode \
and self.lexerActions == other.lexerActions
del Lexer
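# Usage sketch: executors grow incrementally as the simulation traverses
# action transitions (the actions here come from LexerAction.py):
#
#   ex = LexerActionExecutor.append(None, LexerMoreAction.INSTANCE)
#   ex = LexerActionExecutor.append(ex, LexerSkipAction.INSTANCE)
#   # ex.lexerActions == [LexerMoreAction.INSTANCE, LexerSkipAction.INSTANCE]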
# ==== antlr4-python3-runtime-4.9.1/src/antlr4/atn/ParserATNSimulator.py ====
#
# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
#
#
# The embodiment of the adaptive LL(*), ALL(*), parsing strategy.
#
#
# The basic complexity of the adaptive strategy makes it harder to understand.
# We begin with ATN simulation to build paths in a DFA. Subsequent prediction
# requests go through the DFA first. If they reach a state without an edge for
# the current symbol, the algorithm fails over to the ATN simulation to
# complete the DFA path for the current input (until it finds a conflict state
# or uniquely predicting state).
#
#
# All of that is done without using the outer context because we want to create
# a DFA that is not dependent upon the rule invocation stack when we do a
# prediction. One DFA works in all contexts. We avoid using context not
# necessarily because it's slower, although it can be, but because of the DFA
# caching problem. The closure routine only considers the rule invocation stack
# created during prediction beginning in the decision rule. For example, if
# prediction occurs without invoking another rule's ATN, there are no context
# stacks in the configurations. When lack of context leads to a conflict, we
# don't know if it's an ambiguity or a weakness in the strong LL(*) parsing
# strategy (versus full LL(*)).
#
#
# When SLL yields a configuration set with conflict, we rewind the input and
# retry the ATN simulation, this time using full outer context without adding
# to the DFA. Configuration context stacks will be the full invocation stacks
# from the start rule. If we get a conflict using full context, then we can
# definitively say we have a true ambiguity for that input sequence. If we
# don't get a conflict, it implies that the decision is sensitive to the outer
# context. (It is not context-sensitive in the sense of context-sensitive
# grammars.)
#
#
# The next time we reach this DFA state with an SLL conflict, through DFA
# simulation, we will again retry the ATN simulation using full context mode.
# This is slow because we can't save the results and have to "interpret" the
# ATN each time we get that input.
#
#
# CACHING FULL CONTEXT PREDICTIONS
#
#
# We could cache results from full context to predicted alternative easily and
# that saves a lot of time but doesn't work in presence of predicates. The set
# of visible predicates from the ATN start state changes depending on the
# context, because closure can fall off the end of a rule. I tried to cache
# tuples (stack context, semantic context, predicted alt) but it was slower
# than interpreting and much more complicated. Also required a huge amount of
# memory. The goal is not to create the world's fastest parser anyway. I'd like
# to keep this algorithm simple. By launching multiple threads, we can improve
# the speed of parsing across a large number of files.
#
#
# There is no strict ordering between the amount of input used by SLL vs LL,
# which makes it really hard to build a cache for full context. Let's say that
# we have input A B C that leads to an SLL conflict with full context X. That
# implies that using X we might only use A B but we could also use A B C D to
# resolve conflict. Input A B C D could predict alternative 1 in one position
# in the input and A B C E could predict alternative 2 in another position in
# input. The conflicting SLL configurations could still be non-unique in the
# full context prediction, which would lead us to requiring more input than the
# original A B C. To make a prediction cache work, we have to track the exact
# input used during the previous prediction. That amounts to a cache that maps
# X to a specific DFA for that context.
#
#
# Something should be done for left-recursive expression predictions. They are
# likely LL(1) + pred eval. Easier to do the whole thing in SLL and, on error,
# retry with the full LL approach Sam uses.
#
#
# AVOIDING FULL CONTEXT PREDICTION
#
#
# We avoid doing full context retry when the outer context is empty, we did not
# dip into the outer context by falling off the end of the decision state rule,
# or when we force SLL mode.
#
#
# As an example of the "not dip into outer context" case, consider super
# constructor calls versus function calls. One grammar might look like
# this:
#
#
# ctorBody
# : '{' superCall? stat* '}'
# ;
#
#
#
# Or, you might see something like
#
#
# stat
# : superCall ';'
# | expression ';'
# | ...
# ;
#
#
#
# In both cases I believe that no closure operations will dip into the outer
# context. In the first case ctorBody in the worst case will stop at the '}'.
# In the 2nd case it should stop at the ';'. Both cases should stay within the
# entry rule and not dip into the outer context.
#
#
# PREDICATES
#
#
# Predicates, if present, are always evaluated in both SLL and LL. SLL and
# LL simulation deals with predicates differently. SLL collects predicates as
# it performs closure operations like ANTLR v3 did. It delays predicate
# evaluation until it reaches an accept state. This allows us to cache the SLL
# ATN simulation whereas, if we had evaluated predicates on-the-fly during
# closure, the DFA state configuration sets would be different and we couldn't
# build up a suitable DFA.
#
#
# When building a DFA accept state during ATN simulation, we evaluate any
# predicates and return the sole semantically valid alternative. If there is
# more than 1 alternative, we report an ambiguity. If there are 0 alternatives,
# we throw an exception. Alternatives without predicates act like they have
# true predicates. The simple way to think about it is to strip away all
# alternatives with false predicates and choose the minimum alternative that
# remains.
#
#
# When we start in the DFA and reach an accept state that's predicated, we test
# those and return the minimum semantically viable alternative. If no
# alternatives are viable, we throw an exception.
#
#
# During full LL ATN simulation, closure always evaluates predicates
# on-the-fly. This is crucial to reducing the configuration set size during
# closure. It hits a landmine when parsing with the Java grammar, for example,
# without this on-the-fly evaluation.
#
#
# SHARING DFA
#
#
# All instances of the same parser share the same decision DFAs through a
# static field. Each instance gets its own ATN simulator but they share the
# same {@link #decisionToDFA} field. They also share a
# {@link PredictionContextCache} object that makes sure that all
# {@link PredictionContext} objects are shared among the DFA states. This makes
# a big size difference.
#
#
# THREAD SAFETY
#
#
# The {@link ParserATNSimulator} locks on the {@link #decisionToDFA} field when
# it adds a new DFA object to that array. {@link #addDFAEdge}
# locks on the DFA for the current decision when setting the
# {@link DFAState#edges} field. {@link #addDFAState} locks on
# the DFA for the current decision when looking up a DFA state to see if it
# already exists. We must make sure that all requests to add DFA states that
# are equivalent result in the same shared DFA object. This is because lots of
# threads will be trying to update the DFA at once. The
# {@link #addDFAState} method also locks inside the DFA lock
# but this time on the shared context cache when it rebuilds the
# configurations' {@link PredictionContext} objects using cached
# subgraphs/nodes. No other locking occurs, even during DFA simulation. This is
# safe as long as we can guarantee that all threads referencing
# {@code s.edge[t]} get the same physical target {@link DFAState}, or
# {@code null}. Once into the DFA, the DFA simulation does not reference the
# {@link DFA#states} map. It follows the {@link DFAState#edges} field to new
# targets. The DFA simulator will either find {@link DFAState#edges} to be
# {@code null}, to be non-{@code null} and {@code dfa.edges[t]} null, or
# {@code dfa.edges[t]} to be non-null. The
# {@link #addDFAEdge} method could be racing to set the field
# but in either case the DFA simulator works; if it finds {@code null}, it
# requests ATN simulation. It could also race trying to get
# {@code dfa.edges[t]}, but either way it will work because it's not doing
# a test and set operation.
#
#
# Starting with SLL then failing over to combined SLL/LL (Two-Stage
# Parsing)
#
#
# Sam pointed out that if SLL does not give a syntax error, then there is no
# point in doing full LL, which is slower. We only have to try LL if we get a
# syntax error. For maximum speed, Sam starts the parser set to pure SLL
# mode with the {@link BailErrorStrategy}:
#
#
# parser.{@link Parser#getInterpreter() getInterpreter()}.{@link #setPredictionMode setPredictionMode}{@code (}{@link PredictionMode#SLL}{@code )};
# parser.{@link Parser#setErrorHandler setErrorHandler}(new {@link BailErrorStrategy}());
#
#
#
# If it does not get a syntax error, then we're done. If it does get a syntax
# error, we need to retry with the combined SLL/LL strategy.
#
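# A minimal Python sketch of that two-stage strategy (MyLexer, MyParser,
# and startRule are hypothetical generated names; the runtime pieces shown
# do exist in this package):
#
# from antlr4 import CommonTokenStream, InputStream
# from antlr4.atn.PredictionMode import PredictionMode
# from antlr4.error.ErrorStrategy import BailErrorStrategy, DefaultErrorStrategy
# from antlr4.error.Errors import ParseCancellationException
#
# def two_stage_parse(text):
#     tokens = CommonTokenStream(MyLexer(InputStream(text)))
#     parser = MyParser(tokens)
#     parser._interp.predictionMode = PredictionMode.SLL
#     parser._errHandler = BailErrorStrategy()   # bail out on first error
#     try:
#         return parser.startRule()              # fast SLL-only pass
#     except ParseCancellationException:
#         tokens.seek(0)                         # rewind the token stream
#         parser.reset()
#         parser._errHandler = DefaultErrorStrategy()
#         parser._interp.predictionMode = PredictionMode.LL
#         return parser.startRule()              # full LL retry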
#
# The reason this works is as follows. If there are no SLL conflicts, then the
# grammar is SLL (at least for that input set). If there is an SLL conflict,
# the full LL analysis must yield a set of viable alternatives which is a
# subset of the alternatives reported by SLL. If the LL set is a singleton,
# then the grammar is LL but not SLL. If the LL set is the same size as the SLL
# set, the decision is SLL. If the LL set has size > 1, then that decision
# is truly ambiguous on the current input. If the LL set is smaller, then the
# SLL conflict resolution might choose an alternative that the full LL would
# rule out as a possibility based upon better context information. If that's
# the case, then the SLL parse will definitely get an error because the full LL
# analysis says it's not viable. If SLL conflict resolution chooses an
# alternative within the LL set, then both SLL and LL would choose the same
# alternative because they both choose the minimum of multiple conflicting
# alternatives.
#
#
# Let's say we have a set of SLL conflicting alternatives {@code {1, 2, 3}} and
# a smaller LL set called s. If s is {@code {2, 3}}, then SLL
# parsing will get an error because SLL will pursue alternative 1. If
# s is {@code {1, 2}} or {@code {1, 3}} then both SLL and LL will
# choose the same alternative because alternative one is the minimum of either
# set. If s is {@code {2}} or {@code {3}} then SLL will get a syntax
# error. If s is {@code {1}} then SLL will succeed.
#
#
# Of course, if the input is invalid, then we will get an error for sure in
# both SLL and LL parsing. Erroneous input will therefore require 2 passes over
# the input.
#
import sys
from antlr4 import DFA
from antlr4.PredictionContext import PredictionContextCache, PredictionContext, SingletonPredictionContext, \
PredictionContextFromRuleContext
from antlr4.BufferedTokenStream import TokenStream
from antlr4.Parser import Parser
from antlr4.ParserRuleContext import ParserRuleContext
from antlr4.RuleContext import RuleContext
from antlr4.Token import Token
from antlr4.Utils import str_list
from antlr4.atn.ATN import ATN
from antlr4.atn.ATNConfig import ATNConfig
from antlr4.atn.ATNConfigSet import ATNConfigSet
from antlr4.atn.ATNSimulator import ATNSimulator
from antlr4.atn.ATNState import StarLoopEntryState, DecisionState, RuleStopState, ATNState
from antlr4.atn.PredictionMode import PredictionMode
from antlr4.atn.SemanticContext import SemanticContext, AND, andContext, orContext
from antlr4.atn.Transition import Transition, RuleTransition, ActionTransition, PrecedencePredicateTransition, \
PredicateTransition, AtomTransition, SetTransition, NotSetTransition
from antlr4.dfa.DFAState import DFAState, PredPrediction
from antlr4.error.Errors import NoViableAltException
class ParserATNSimulator(ATNSimulator):
__slots__ = (
'parser', 'decisionToDFA', 'predictionMode', '_input', '_startIndex',
'_outerContext', '_dfa', 'mergeCache'
)
debug = False
debug_list_atn_decisions = False
dfa_debug = False
retry_debug = False
def __init__(self, parser:Parser, atn:ATN, decisionToDFA:list, sharedContextCache:PredictionContextCache):
super().__init__(atn, sharedContextCache)
self.parser = parser
self.decisionToDFA = decisionToDFA
# SLL, LL, or LL + exact ambig detection?
self.predictionMode = PredictionMode.LL
# LAME globals to avoid parameters!!!!! I need these down deep in predTransition
self._input = None
self._startIndex = 0
self._outerContext = None
self._dfa = None
# Each prediction operation uses a cache for merge of prediction contexts.
# Don't keep around as it wastes huge amounts of memory. DoubleKeyMap
# isn't synchronized but we're ok since two threads shouldn't reuse same
# parser/atnsim object because it can only handle one input at a time.
# This maps graphs a and b to merged result c. (a,b)→c. We can avoid
# the merge if we ever see a and b again. Note that (b,a)→c should
# also be examined during cache lookup.
#
self.mergeCache = None
def reset(self):
pass
def adaptivePredict(self, input:TokenStream, decision:int, outerContext:ParserRuleContext):
if ParserATNSimulator.debug or ParserATNSimulator.debug_list_atn_decisions:
print("adaptivePredict decision " + str(decision) +
" exec LA(1)==" + self.getLookaheadName(input) +
" line " + str(input.LT(1).line) + ":" +
str(input.LT(1).column))
self._input = input
self._startIndex = input.index
self._outerContext = outerContext
dfa = self.decisionToDFA[decision]
self._dfa = dfa
m = input.mark()
index = input.index
# Now we are certain to have a specific decision's DFA
# But, do we still need an initial state?
try:
if dfa.precedenceDfa:
# the start state for a precedence DFA depends on the current
# parser precedence, and is provided by a DFA method.
s0 = dfa.getPrecedenceStartState(self.parser.getPrecedence())
else:
# the start state for a "regular" DFA is just s0
s0 = dfa.s0
if s0 is None:
if outerContext is None:
outerContext = ParserRuleContext.EMPTY
if ParserATNSimulator.debug or ParserATNSimulator.debug_list_atn_decisions:
print("predictATN decision " + str(dfa.decision) +
" exec LA(1)==" + self.getLookaheadName(input) +
", outerContext=" + outerContext.toString(self.parser.literalNames, None))
fullCtx = False
s0_closure = self.computeStartState(dfa.atnStartState, ParserRuleContext.EMPTY, fullCtx)
if dfa.precedenceDfa:
# If this is a precedence DFA, we use applyPrecedenceFilter
# to convert the computed start state to a precedence start
# state. We then use DFA.setPrecedenceStartState to set the
# appropriate start state for the precedence level rather
# than simply setting DFA.s0.
#
dfa.s0.configs = s0_closure # not used for prediction but useful to know start configs anyway
s0_closure = self.applyPrecedenceFilter(s0_closure)
s0 = self.addDFAState(dfa, DFAState(configs=s0_closure))
dfa.setPrecedenceStartState(self.parser.getPrecedence(), s0)
else:
s0 = self.addDFAState(dfa, DFAState(configs=s0_closure))
dfa.s0 = s0
alt = self.execATN(dfa, s0, input, index, outerContext)
if ParserATNSimulator.debug:
print("DFA after predictATN: " + dfa.toString(self.parser.literalNames))
return alt
finally:
self._dfa = None
self.mergeCache = None # wack cache after each prediction
input.seek(index)
input.release(m)
# Performs ATN simulation to compute a predicted alternative based
# upon the remaining input, but also updates the DFA cache to avoid
# having to traverse the ATN again for the same input sequence.
# There are some key conditions we're looking for after computing a new
# set of ATN configs (proposed DFA state):
# - if the set is empty, there is no viable alternative for the current symbol
# - does the state uniquely predict an alternative?
# - does the state have a conflict that would prevent us from
#   putting it on the work list?
# We also have some key operations to do:
# - add an edge from the previous DFA state to the potentially new DFA
#   state, D, upon the current symbol but only if adding to the work list,
#   which means in all cases except no viable alternative (and possibly
#   non-greedy decisions?)
# - collecting predicates and adding semantic context to DFA accept states
# - adding rule context to context-sensitive DFA accept states
# - consuming an input symbol
# - reporting a conflict
# - reporting an ambiguity
# - reporting a context sensitivity
# - reporting insufficient predicates
# We cover these cases:
# - dead end
# - single alt
# - single alt + preds
# - conflict
# - conflict + preds
#
def execATN(self, dfa:DFA, s0:DFAState, input:TokenStream, startIndex:int, outerContext:ParserRuleContext ):
if ParserATNSimulator.debug or ParserATNSimulator.debug_list_atn_decisions:
print("execATN decision " + str(dfa.decision) +
" exec LA(1)==" + self.getLookaheadName(input) +
" line " + str(input.LT(1).line) + ":" + str(input.LT(1).column))
previousD = s0
if ParserATNSimulator.debug:
print("s0 = " + str(s0))
t = input.LA(1)
while True: # while more work
D = self.getExistingTargetState(previousD, t)
if D is None:
D = self.computeTargetState(dfa, previousD, t)
if D is self.ERROR:
# if any configs in previous dipped into outer context, that
# means that input up to t actually finished entry rule
# at least for SLL decision. Full LL doesn't dip into outer
# so don't need special case.
# We will get an error no matter what so delay until after
# decision; better error message. Also, no reachable target
# ATN states in SLL implies LL will also get nowhere.
# If conflict in states that dip out, choose min since we
# will get error no matter what.
e = self.noViableAlt(input, outerContext, previousD.configs, startIndex)
input.seek(startIndex)
alt = self.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previousD.configs, outerContext)
if alt!=ATN.INVALID_ALT_NUMBER:
return alt
raise e
if D.requiresFullContext and self.predictionMode != PredictionMode.SLL:
# IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error)
conflictingAlts = D.configs.conflictingAlts
if D.predicates is not None:
if ParserATNSimulator.debug:
print("DFA state has preds in DFA sim LL failover")
conflictIndex = input.index
if conflictIndex != startIndex:
input.seek(startIndex)
conflictingAlts = self.evalSemanticContext(D.predicates, outerContext, True)
if len(conflictingAlts)==1:
if ParserATNSimulator.debug:
print("Full LL avoided")
return min(conflictingAlts)
if conflictIndex != startIndex:
# restore the index so reporting the fallback to full
# context occurs with the index at the correct spot
input.seek(conflictIndex)
if ParserATNSimulator.dfa_debug:
print("ctx sensitive state " + str(outerContext) +" in " + str(D))
fullCtx = True
s0_closure = self.computeStartState(dfa.atnStartState, outerContext, fullCtx)
self.reportAttemptingFullContext(dfa, conflictingAlts, D.configs, startIndex, input.index)
alt = self.execATNWithFullContext(dfa, D, s0_closure, input, startIndex, outerContext)
return alt
if D.isAcceptState:
if D.predicates is None:
return D.prediction
stopIndex = input.index
input.seek(startIndex)
alts = self.evalSemanticContext(D.predicates, outerContext, True)
if len(alts)==0:
raise self.noViableAlt(input, outerContext, D.configs, startIndex)
elif len(alts)==1:
return min(alts)
else:
# report ambiguity after predicate evaluation to make sure the correct
# set of ambig alts is reported.
self.reportAmbiguity(dfa, D, startIndex, stopIndex, False, alts, D.configs)
return min(alts)
previousD = D
if t != Token.EOF:
input.consume()
t = input.LA(1)
#
# Get an existing target state for an edge in the DFA. If the target state
# for the edge has not yet been computed or is otherwise not available,
# this method returns {@code null}.
#
# @param previousD The current DFA state
# @param t The next input symbol
# @return The existing target DFA state for the given input symbol
# {@code t}, or {@code null} if the target state for this edge is not
# already cached
#
def getExistingTargetState(self, previousD:DFAState, t:int):
edges = previousD.edges
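# note: edges is indexed by symbol+1 so that Token.EOF (-1) maps to
# slot 0; see addDFAEdge, which sizes the array as maxTokenType + 2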
if edges is None or t + 1 < 0 or t + 1 >= len(edges):
return None
else:
return edges[t + 1]
#
# Compute a target state for an edge in the DFA, and attempt to add the
# computed state and corresponding edge to the DFA.
#
# @param dfa The DFA
# @param previousD The current DFA state
# @param t The next input symbol
#
# @return The computed target DFA state for the given input symbol
# {@code t}. If {@code t} does not lead to a valid DFA state, this method
# returns {@link #ERROR}.
#
def computeTargetState(self, dfa:DFA, previousD:DFAState, t:int):
reach = self.computeReachSet(previousD.configs, t, False)
if reach is None:
self.addDFAEdge(dfa, previousD, t, self.ERROR)
return self.ERROR
# create new target state; we'll add to DFA after it's complete
D = DFAState(configs=reach)
predictedAlt = self.getUniqueAlt(reach)
if ParserATNSimulator.debug:
altSubSets = PredictionMode.getConflictingAltSubsets(reach)
print("SLL altSubSets=" + str(altSubSets) + ", configs=" + str(reach) +
", predict=" + str(predictedAlt) + ", allSubsetsConflict=" +
str(PredictionMode.allSubsetsConflict(altSubSets)) + ", conflictingAlts=" +
str(self.getConflictingAlts(reach)))
if predictedAlt!=ATN.INVALID_ALT_NUMBER:
# NO CONFLICT, UNIQUELY PREDICTED ALT
D.isAcceptState = True
D.configs.uniqueAlt = predictedAlt
D.prediction = predictedAlt
elif PredictionMode.hasSLLConflictTerminatingPrediction(self.predictionMode, reach):
# MORE THAN ONE VIABLE ALTERNATIVE
D.configs.conflictingAlts = self.getConflictingAlts(reach)
D.requiresFullContext = True
# in SLL-only mode, we will stop at this state and return the minimum alt
D.isAcceptState = True
D.prediction = min(D.configs.conflictingAlts)
if D.isAcceptState and D.configs.hasSemanticContext:
self.predicateDFAState(D, self.atn.getDecisionState(dfa.decision))
if D.predicates is not None:
D.prediction = ATN.INVALID_ALT_NUMBER
# all adds to dfa are done after we've created full D state
D = self.addDFAEdge(dfa, previousD, t, D)
return D
def predicateDFAState(self, dfaState:DFAState, decisionState:DecisionState):
# We need to test all predicates, even in DFA states that
# uniquely predict an alternative.
nalts = len(decisionState.transitions)
# Update DFA so reach becomes accept state with (predicate,alt)
# pairs if preds found for conflicting alts
altsToCollectPredsFrom = self.getConflictingAltsOrUniqueAlt(dfaState.configs)
altToPred = self.getPredsForAmbigAlts(altsToCollectPredsFrom, dfaState.configs, nalts)
if altToPred is not None:
dfaState.predicates = self.getPredicatePredictions(altsToCollectPredsFrom, altToPred)
dfaState.prediction = ATN.INVALID_ALT_NUMBER # make sure we use preds
else:
# There are preds in configs but they might go away
# when OR'd together like {p}? || NONE == NONE. If neither
# alt has preds, resolve to min alt
dfaState.prediction = min(altsToCollectPredsFrom)
# comes back with reach.uniqueAlt set to a valid alt
def execATNWithFullContext(self, dfa:DFA, D:DFAState, # how far we got before failing over
s0:ATNConfigSet,
input:TokenStream,
startIndex:int,
outerContext:ParserRuleContext):
if ParserATNSimulator.debug or ParserATNSimulator.debug_list_atn_decisions:
print("execATNWithFullContext", str(s0))
fullCtx = True
foundExactAmbig = False
reach = None
previous = s0
input.seek(startIndex)
t = input.LA(1)
predictedAlt = -1
while (True): # while more work
reach = self.computeReachSet(previous, t, fullCtx)
if reach is None:
# if any configs in previous dipped into outer context, that
# means that input up to t actually finished entry rule
# at least for LL decision. Full LL doesn't dip into outer
# so don't need special case.
# We will get an error no matter what so delay until after
# decision; better error message. Also, no reachable target
# ATN states in SLL implies LL will also get nowhere.
# If conflict in states that dip out, choose min since we
# will get error no matter what.
e = self.noViableAlt(input, outerContext, previous, startIndex)
input.seek(startIndex)
alt = self.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previous, outerContext)
if alt!=ATN.INVALID_ALT_NUMBER:
return alt
else:
raise e
altSubSets = PredictionMode.getConflictingAltSubsets(reach)
if ParserATNSimulator.debug:
print("LL altSubSets=" + str(altSubSets) + ", predict=" +
str(PredictionMode.getUniqueAlt(altSubSets)) + ", resolvesToJustOneViableAlt=" +
str(PredictionMode.resolvesToJustOneViableAlt(altSubSets)))
reach.uniqueAlt = self.getUniqueAlt(reach)
# unique prediction?
if reach.uniqueAlt!=ATN.INVALID_ALT_NUMBER:
predictedAlt = reach.uniqueAlt
break
elif self.predictionMode is not PredictionMode.LL_EXACT_AMBIG_DETECTION:
predictedAlt = PredictionMode.resolvesToJustOneViableAlt(altSubSets)
if predictedAlt != ATN.INVALID_ALT_NUMBER:
break
else:
# In exact ambiguity mode, we never try to terminate early.
# We just keep scarfing until we know what the conflict is.
if PredictionMode.allSubsetsConflict(altSubSets) and PredictionMode.allSubsetsEqual(altSubSets):
foundExactAmbig = True
predictedAlt = PredictionMode.getSingleViableAlt(altSubSets)
break
# else there are multiple non-conflicting subsets or
# we're not sure what the ambiguity is yet.
# So, keep going.
previous = reach
if t != Token.EOF:
input.consume()
t = input.LA(1)
# If the configuration set uniquely predicts an alternative,
# without conflict, then we know that it's a full LL decision
# not SLL.
if reach.uniqueAlt != ATN.INVALID_ALT_NUMBER :
self.reportContextSensitivity(dfa, predictedAlt, reach, startIndex, input.index)
return predictedAlt
# We do not check predicates here because we have checked them
# on-the-fly when doing full context prediction.
#
# In non-exact ambiguity detection mode, we might actually be able to
# detect an exact ambiguity, but I'm not going to spend the cycles
# needed to check. We only emit ambiguity warnings in exact ambiguity
# mode.
#
# For example, we might know that we have conflicting configurations.
# But, that does not mean that there is no way forward without a
# conflict. It's possible to have nonconflicting alt subsets as in:
# altSubSets=[{1, 2}, {1, 2}, {1}, {1, 2}]
# from
#
# [(17,1,[5 $]), (13,1,[5 10 $]), (21,1,[5 10 $]), (11,1,[$]),
# (13,2,[5 10 $]), (21,2,[5 10 $]), (11,2,[$])]
#
# In this case, (17,1,[5 $]) indicates there is some next sequence that
# would resolve this without conflict to alternative 1. Any other viable
# next sequence, however, is associated with a conflict. We stop
# looking for input because no amount of further lookahead will alter
# the fact that we should predict alternative 1. We just can't say for
# sure that there is an ambiguity without looking further.
self.reportAmbiguity(dfa, D, startIndex, input.index, foundExactAmbig, None, reach)
return predictedAlt
def computeReachSet(self, closure:ATNConfigSet, t:int, fullCtx:bool):
if ParserATNSimulator.debug:
print("in computeReachSet, starting closure: " + str(closure))
if self.mergeCache is None:
self.mergeCache = dict()
intermediate = ATNConfigSet(fullCtx)
# Configurations already in a rule stop state indicate reaching the end
# of the decision rule (local context) or end of the start rule (full
# context). Once reached, these configurations are never updated by a
# closure operation, so they are handled separately for the performance
# advantage of having a smaller intermediate set when calling closure.
#
# For full-context reach operations, separate handling is required to
# ensure that the alternative matching the longest overall sequence is
# chosen when multiple such configurations can match the input.
skippedStopStates = None
# First figure out where we can reach on input t
for c in closure:
if ParserATNSimulator.debug:
print("testing " + self.getTokenName(t) + " at " + str(c))
if isinstance(c.state, RuleStopState):
if fullCtx or t == Token.EOF:
if skippedStopStates is None:
skippedStopStates = list()
skippedStopStates.append(c)
continue
for trans in c.state.transitions:
target = self.getReachableTarget(trans, t)
if target is not None:
intermediate.add(ATNConfig(state=target, config=c), self.mergeCache)
# Now figure out where the reach operation can take us...
reach = None
# This block optimizes the reach operation for intermediate sets which
# trivially indicate a termination state for the overall
# adaptivePredict operation.
#
# The conditions assume that intermediate
# contains all configurations relevant to the reach set, but this
# condition is not true when one or more configurations have been
# withheld in skippedStopStates, or when the current symbol is EOF.
#
if skippedStopStates is None and t!=Token.EOF:
if len(intermediate)==1:
# Don't pursue the closure if there is just one state.
# It can only have one alternative; just add to result
reach = intermediate
elif self.getUniqueAlt(intermediate)!=ATN.INVALID_ALT_NUMBER:
# Also don't pursue the closure if there is a unique alternative
# among the configurations.
reach = intermediate
# If the reach set could not be trivially determined, perform a closure
# operation on the intermediate set to compute its initial value.
#
if reach is None:
reach = ATNConfigSet(fullCtx)
closureBusy = set()
treatEofAsEpsilon = t == Token.EOF
for c in intermediate:
self.closure(c, reach, closureBusy, False, fullCtx, treatEofAsEpsilon)
if t == Token.EOF:
# After consuming EOF no additional input is possible, so we are
# only interested in configurations which reached the end of the
# decision rule (local context) or end of the start rule (full
# context). Update reach to contain only these configurations. This
# handles both explicit EOF transitions in the grammar and implicit
# EOF transitions following the end of the decision or start rule.
#
# When reach==intermediate, no closure operation was performed. In
# this case, removeAllConfigsNotInRuleStopState needs to check for
# reachable rule stop states as well as configurations already in
# a rule stop state.
#
# This is handled before the configurations in skippedStopStates,
# because any configurations potentially added from that list are
# already guaranteed to meet this condition whether or not it's
# required.
#
reach = self.removeAllConfigsNotInRuleStopState(reach, reach is intermediate)
# If skippedStopStates is not null, then it contains at least one
# configuration. For full-context reach operations, these
# configurations reached the end of the start rule, in which case we
# only add them back to reach if no configuration during the current
# closure operation reached such a state. This ensures adaptivePredict
# chooses an alternative matching the longest overall sequence when
# multiple alternatives are viable.
#
if skippedStopStates is not None and ( (not fullCtx) or (not PredictionMode.hasConfigInRuleStopState(reach))):
for c in skippedStopStates:
reach.add(c, self.mergeCache)
if len(reach)==0:
return None
else:
return reach
#
# Return a configuration set containing only the configurations from
# {@code configs} which are in a {@link RuleStopState}. If all
# configurations in {@code configs} are already in a rule stop state, this
# method simply returns {@code configs}.
#
# When {@code lookToEndOfRule} is true, this method uses
# {@link ATN#nextTokens} for each configuration in {@code configs} which is
# not already in a rule stop state to see if a rule stop state is reachable
# from the configuration via epsilon-only transitions.
#
# @param configs the configuration set to update
# @param lookToEndOfRule when true, this method checks for rule stop states
# reachable by epsilon-only transitions from each configuration in
# {@code configs}.
#
# @return {@code configs} if all configurations in {@code configs} are in a
# rule stop state, otherwise return a new configuration set containing only
# the configurations from {@code configs} which are in a rule stop state
#
def removeAllConfigsNotInRuleStopState(self, configs:ATNConfigSet, lookToEndOfRule:bool):
if PredictionMode.allConfigsInRuleStopStates(configs):
return configs
result = ATNConfigSet(configs.fullCtx)
for config in configs:
if isinstance(config.state, RuleStopState):
result.add(config, self.mergeCache)
continue
if lookToEndOfRule and config.state.epsilonOnlyTransitions:
nextTokens = self.atn.nextTokens(config.state)
if Token.EPSILON in nextTokens:
endOfRuleState = self.atn.ruleToStopState[config.state.ruleIndex]
result.add(ATNConfig(state=endOfRuleState, config=config), self.mergeCache)
return result
def computeStartState(self, p:ATNState, ctx:RuleContext, fullCtx:bool):
# always at least the implicit call to start rule
initialContext = PredictionContextFromRuleContext(self.atn, ctx)
configs = ATNConfigSet(fullCtx)
for i in range(0, len(p.transitions)):
target = p.transitions[i].target
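# alternatives are numbered from 1, so transition index i maps to alt i+1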
c = ATNConfig(target, i+1, initialContext)
closureBusy = set()
self.closure(c, configs, closureBusy, True, fullCtx, False)
return configs
#
# This method transforms the start state computed by
# {@link #computeStartState} to the special start state used by a
# precedence DFA for a particular precedence value. The transformation
# process applies the following changes to the start state's configuration
# set.
#
#
# - Evaluate the precedence predicates for each configuration using
# {@link SemanticContext#evalPrecedence}.
# - Remove all configurations which predict an alternative greater than
# 1, for which another configuration that predicts alternative 1 is in the
# same ATN state with the same prediction context. This transformation is
# valid for the following reasons:
#
# - The closure block cannot contain any epsilon transitions which bypass
# the body of the closure, so all states reachable via alternative 1 are
# part of the precedence alternatives of the transformed left-recursive
# rule.
# - The "primary" portion of a left recursive rule cannot contain an
# epsilon transition, so the only way an alternative other than 1 can exist
# in a state that is also reachable via alternative 1 is by nesting calls
# to the left-recursive rule, with the outer calls not being at the
# preferred precedence level.
#
#
# The prediction context must be considered by this filter to address
# situations like the following.
#
#
#
# grammar TA;
# prog: statement* EOF;
# statement: letterA | statement letterA 'b' ;
# letterA: 'a';
#
#
#
# In the above grammar, the ATN state immediately before the token
# reference {@code 'a'} in {@code letterA} is reachable from the left edge
# of both the primary and closure blocks of the left-recursive rule
# {@code statement}. The prediction context associated with each of these
# configurations distinguishes between them, and prevents the alternative
# which stepped out to {@code prog} (and then back in to {@code statement})
# from being eliminated by the filter.
#
#
# @param configs The configuration set computed by
# {@link #computeStartState} as the start state for the DFA.
# @return The transformed configuration set representing the start state
# for a precedence DFA at a particular precedence level (determined by
# calling {@link Parser#getPrecedence}).
#
def applyPrecedenceFilter(self, configs:ATNConfigSet):
statesFromAlt1 = dict()
configSet = ATNConfigSet(configs.fullCtx)
for config in configs:
# handle alt 1 first
if config.alt != 1:
continue
updatedContext = config.semanticContext.evalPrecedence(self.parser, self._outerContext)
if updatedContext is None:
# the configuration was eliminated
continue
statesFromAlt1[config.state.stateNumber] = config.context
if updatedContext is not config.semanticContext:
configSet.add(ATNConfig(config=config, semantic=updatedContext), self.mergeCache)
else:
configSet.add(config, self.mergeCache)
for config in configs:
if config.alt == 1:
# already handled
continue
# In the future, this elimination step could be updated to also
# filter the prediction context for alternatives predicting alt>1
# (basically a graph subtraction algorithm).
#
if not config.precedenceFilterSuppressed:
context = statesFromAlt1.get(config.state.stateNumber, None)
if context==config.context:
# eliminated
continue
configSet.add(config, self.mergeCache)
return configSet
def getReachableTarget(self, trans:Transition, ttype:int):
if trans.matches(ttype, 0, self.atn.maxTokenType):
return trans.target
else:
return None
def getPredsForAmbigAlts(self, ambigAlts:set, configs:ATNConfigSet, nalts:int):
# REACH=[1|1|[]|0:0, 1|2|[]|0:1]
# altToPred starts as an array of all null contexts. The entry at index i
# corresponds to alternative i. altToPred[i] may have one of three values:
# 1. null: no ATNConfig c is found such that c.alt==i
# 2. SemanticContext.NONE: At least one ATNConfig c exists such that
# c.alt==i and c.semanticContext==SemanticContext.NONE. In other words,
# alt i has at least one unpredicated config.
# 3. Non-NONE Semantic Context: There exists at least one ATNConfig c such
# that c.alt==i, and for all such c, c.semanticContext!=SemanticContext.NONE.
#
# From this, it is clear that NONE||anything==NONE.
#
altToPred = [None] * (nalts + 1)
for c in configs:
if c.alt in ambigAlts:
altToPred[c.alt] = orContext(altToPred[c.alt], c.semanticContext)
nPredAlts = 0
for i in range(1, nalts+1):
if altToPred[i] is None:
altToPred[i] = SemanticContext.NONE
elif altToPred[i] is not SemanticContext.NONE:
nPredAlts += 1
# nonambig alts are null in altToPred
if nPredAlts==0:
altToPred = None
if ParserATNSimulator.debug:
print("getPredsForAmbigAlts result " + str_list(altToPred))
return altToPred
def getPredicatePredictions(self, ambigAlts:set, altToPred:list):
pairs = []
containsPredicate = False
for i in range(1, len(altToPred)):
pred = altToPred[i]
# unpredicated is indicated by SemanticContext.NONE
if ambigAlts is not None and i in ambigAlts:
pairs.append(PredPrediction(pred, i))
if pred is not SemanticContext.NONE:
containsPredicate = True
if not containsPredicate:
return None
return pairs
#
# This method is used to improve the localization of error messages by
# choosing an alternative rather than throwing a
# {@link NoViableAltException} in particular prediction scenarios where the
# {@link #ERROR} state was reached during ATN simulation.
#
#
# The default implementation of this method uses the following
# algorithm to identify an ATN configuration which successfully parsed the
# decision entry rule. Choosing such an alternative ensures that the
# {@link ParserRuleContext} returned by the calling rule will be complete
# and valid, and the syntax error will be reported later at a more
# localized location.
#
#
# - If a syntactically valid path or paths reach the end of the decision rule and
# they are semantically valid if predicated, return the min associated alt.
# - Otherwise, if a semantically invalid but syntactically valid path or
# paths exist, return the minimum associated alt.
#
# - Otherwise, return {@link ATN#INVALID_ALT_NUMBER}.
#
#
#
# In some scenarios, the algorithm described above could predict an
# alternative which will result in a {@link FailedPredicateException} in
# the parser. Specifically, this could occur if the only configuration
# capable of successfully parsing to the end of the decision rule is
# blocked by a semantic predicate. By choosing this alternative within
# {@link #adaptivePredict} instead of throwing a
# {@link NoViableAltException}, the resulting
# {@link FailedPredicateException} in the parser will identify the specific
# predicate which is preventing the parser from successfully parsing the
# decision rule, which helps developers identify and correct logic errors
# in semantic predicates.
#
#
# @param configs The ATN configurations which were valid immediately before
# the {@link #ERROR} state was reached
# @param outerContext The \gamma_0 initial parser context from the paper,
# i.e. the parser stack at the instant before prediction commences.
#
# @return The value to return from {@link #adaptivePredict}, or
# {@link ATN#INVALID_ALT_NUMBER} if a suitable alternative was not
# identified and {@link #adaptivePredict} should report an error instead.
#
def getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(self, configs:ATNConfigSet, outerContext:ParserRuleContext):
semValidConfigs, semInvalidConfigs = self.splitAccordingToSemanticValidity(configs, outerContext)
alt = self.getAltThatFinishedDecisionEntryRule(semValidConfigs)
if alt!=ATN.INVALID_ALT_NUMBER: # semantically/syntactically viable path exists
return alt
# Is there a syntactically valid path with a failed pred?
if len(semInvalidConfigs)>0:
alt = self.getAltThatFinishedDecisionEntryRule(semInvalidConfigs)
if alt!=ATN.INVALID_ALT_NUMBER: # syntactically viable path exists
return alt
return ATN.INVALID_ALT_NUMBER
def getAltThatFinishedDecisionEntryRule(self, configs:ATNConfigSet):
alts = set()
for c in configs:
if c.reachesIntoOuterContext>0 or (isinstance(c.state, RuleStopState) and c.context.hasEmptyPath() ):
alts.add(c.alt)
if len(alts)==0:
return ATN.INVALID_ALT_NUMBER
else:
return min(alts)
# Walk the list of configurations and split them according to
# those that have preds evaluating to true/false. If no pred, assume
# true pred and include in succeeded set. Returns Pair of sets.
#
# Create a new set so as not to alter the incoming parameter.
#
# Assumption: the input stream has been restored to the starting point
# prediction, which is where predicates need to evaluate.
#
def splitAccordingToSemanticValidity(self, configs:ATNConfigSet, outerContext:ParserRuleContext):
succeeded = ATNConfigSet(configs.fullCtx)
failed = ATNConfigSet(configs.fullCtx)
for c in configs:
if c.semanticContext is not SemanticContext.NONE:
predicateEvaluationResult = c.semanticContext.eval(self.parser, outerContext)
if predicateEvaluationResult:
succeeded.add(c)
else:
failed.add(c)
else:
succeeded.add(c)
return (succeeded,failed)
# Look through a list of predicate/alt pairs, returning alts for the
# pairs that win. A {@code NONE} predicate indicates an alt containing an
# unpredicated config which behaves as "always true." If !complete
# then we stop at the first predicate that evaluates to true. This
# includes pairs with null predicates.
#
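# For example (an illustration): with pairs [(p1,1), (NONE,2)] and
# complete=False, if p1 evaluates to true we return {1}; if p1 is false,
# the unpredicated NONE pair wins and we return {2}.
#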
def evalSemanticContext(self, predPredictions:list, outerContext:ParserRuleContext, complete:bool):
predictions = set()
for pair in predPredictions:
if pair.pred is SemanticContext.NONE:
predictions.add(pair.alt)
if not complete:
break
continue
predicateEvaluationResult = pair.pred.eval(self.parser, outerContext)
if ParserATNSimulator.debug or ParserATNSimulator.dfa_debug:
print("eval pred " + str(pair) + "=" + str(predicateEvaluationResult))
if predicateEvaluationResult:
if ParserATNSimulator.debug or ParserATNSimulator.dfa_debug:
print("PREDICT " + str(pair.alt))
predictions.add(pair.alt)
if not complete:
break
return predictions
# TODO: If we are doing predicates, there is no point in pursuing
# closure operations if we reach a DFA state that uniquely predicts
# an alternative. We will not be caching that DFA state and it is a
# waste to pursue the closure. Might have to advance when we do
# ambig detection though :(
#
def closure(self, config:ATNConfig, configs:ATNConfigSet, closureBusy:set, collectPredicates:bool, fullCtx:bool, treatEofAsEpsilon:bool):
initialDepth = 0
self.closureCheckingStopState(config, configs, closureBusy, collectPredicates,
fullCtx, initialDepth, treatEofAsEpsilon)
def closureCheckingStopState(self, config:ATNConfig, configs:ATNConfigSet, closureBusy:set, collectPredicates:bool, fullCtx:bool, depth:int, treatEofAsEpsilon:bool):
if ParserATNSimulator.debug:
print("closure(" + str(config) + ")")
if isinstance(config.state, RuleStopState):
# We hit rule end. If we have context info, use it
# run thru all possible stack tops in ctx
if not config.context.isEmpty():
for i in range(0, len(config.context)):
state = config.context.getReturnState(i)
if state is PredictionContext.EMPTY_RETURN_STATE:
if fullCtx:
configs.add(ATNConfig(state=config.state, context=PredictionContext.EMPTY, config=config), self.mergeCache)
continue
else:
# we have no context info, just chase follow links (if greedy)
if ParserATNSimulator.debug:
print("FALLING off rule " + self.getRuleName(config.state.ruleIndex))
self.closure_(config, configs, closureBusy, collectPredicates,
fullCtx, depth, treatEofAsEpsilon)
continue
returnState = self.atn.states[state]
newContext = config.context.getParent(i) # "pop" return state
c = ATNConfig(state=returnState, alt=config.alt, context=newContext, semantic=config.semanticContext)
# While we have context to pop back from, we may have
# gotten that context AFTER having fallen off a rule.
# Make sure we track that we are now out of context.
c.reachesIntoOuterContext = config.reachesIntoOuterContext
self.closureCheckingStopState(c, configs, closureBusy, collectPredicates, fullCtx, depth - 1, treatEofAsEpsilon)
return
elif fullCtx:
# reached end of start rule
configs.add(config, self.mergeCache)
return
else:
# else if we have no context info, just chase follow links (if greedy)
if ParserATNSimulator.debug:
print("FALLING off rule " + self.getRuleName(config.state.ruleIndex))
self.closure_(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEofAsEpsilon)
# Do the actual work of walking epsilon edges
def closure_(self, config:ATNConfig, configs:ATNConfigSet, closureBusy:set, collectPredicates:bool, fullCtx:bool, depth:int, treatEofAsEpsilon:bool):
p = config.state
# optimization
if not p.epsilonOnlyTransitions:
configs.add(config, self.mergeCache)
# make sure to not return here, because EOF transitions can act as
# both epsilon transitions and non-epsilon transitions.
first = True
for t in p.transitions:
if first:
first = False
if self.canDropLoopEntryEdgeInLeftRecursiveRule(config):
continue
continueCollecting = collectPredicates and not isinstance(t, ActionTransition)
c = self.getEpsilonTarget(config, t, continueCollecting, depth == 0, fullCtx, treatEofAsEpsilon)
if c is not None:
newDepth = depth
if isinstance( config.state, RuleStopState):
# target fell off end of rule; mark resulting c as having dipped into outer context
# We can't get here if incoming config was rule stop and we had context
# track how far we dip into outer context. Might
# come in handy and we avoid evaluating context dependent
# preds if this is > 0.
if self._dfa is not None and self._dfa.precedenceDfa:
if t.outermostPrecedenceReturn == self._dfa.atnStartState.ruleIndex:
c.precedenceFilterSuppressed = True
c.reachesIntoOuterContext += 1
if c in closureBusy:
# avoid infinite recursion for right-recursive rules
continue
closureBusy.add(c)
configs.dipsIntoOuterContext = True # TODO: can remove? only care when we add to set per middle of this method
newDepth -= 1
if ParserATNSimulator.debug:
print("dips into outer ctx: " + str(c))
else:
if not t.isEpsilon:
if c in closureBusy:
# avoid infinite recursion for EOF* and EOF+
continue
closureBusy.add(c)
if isinstance(t, RuleTransition):
# latch when newDepth goes negative - once we step out of the entry context we can't return
if newDepth >= 0:
newDepth += 1
self.closureCheckingStopState(c, configs, closureBusy, continueCollecting, fullCtx, newDepth, treatEofAsEpsilon)
# Implements first-edge (loop entry) elimination as an optimization
# during closure operations. See antlr/antlr4#1398.
#
# The optimization is to avoid adding the loop entry config when
# the exit path can only lead back to the same
# StarLoopEntryState after popping context at the rule end state
# (traversing only epsilon edges, so we're still in closure, in
# this same rule).
#
# We need to detect any state that can reach loop entry on
# epsilon w/o exiting rule. We don't have to look at FOLLOW
# links, just ensure that all stack tops for config refer to key
# states in LR rule.
#
# To verify we are in the right situation we must first check
# closure is at a StarLoopEntryState generated during LR removal.
# Then we check that each stack top of context is a return state
# from one of these cases:
#
# 1. 'not' expr, '(' type ')' expr. The return state points at loop entry state
# 2. expr op expr. The return state is the block end of internal block of (...)*
# 3. 'between' expr 'and' expr. The return state of 2nd expr reference.
# That state points at block end of internal block of (...)*.
# 4. expr '?' expr ':' expr. The return state points at block end,
# which points at loop entry state.
#
# If any is true for each stack top, then closure does not add a
# config to the current config set for edge[0], the loop entry branch.
#
# Conditions fail if any context for the current config is:
#
# a. empty (we'd fall out of expr to do a global FOLLOW which could
# even be to some weird spot in expr) or,
# b. lies outside of expr or,
# c. lies within expr but at a state not the BlockEndState
# generated during LR removal
#
# Do we need to evaluate predicates ever in closure for this case?
#
# No. Predicates, including precedence predicates, are only
# evaluated when computing a DFA start state. I.e., only before
# the lookahead (but not parser) consumes a token.
#
# There are no epsilon edges allowed in LR rule alt blocks or in
# the "primary" part (ID here). If closure is in
# StarLoopEntryState any lookahead operation will have consumed a
# token as there are no epsilon-paths that lead to
# StarLoopEntryState. We do not have to evaluate predicates
# therefore if we are in the generated StarLoopEntryState of a LR
# rule. Note that when making a prediction starting at that
# decision point, decision d=2, compute-start-state performs
# closure starting at edges[0], edges[1] emanating from
# StarLoopEntryState. That means it is not performing closure on
# StarLoopEntryState during compute-start-state.
#
# How do we know this always gives same prediction answer?
#
# Without predicates, loop entry and exit paths are ambiguous
# upon remaining input +b (in, say, a+b). Either paths lead to
# valid parses. Closure can lead to consuming + immediately or by
# falling out of this call to expr back into expr and loop back
# again to StarLoopEntryState to match +b. In this special case,
# we choose the more efficient path, which is to take the bypass
# path.
#
# The lookahead language has not changed because closure chooses
# one path over the other. Both paths lead to consuming the same
# remaining input during a lookahead operation. If the next token
# is an operator, lookahead will enter the choice block with
# operators. If it is not, lookahead will exit expr. Same as if
# closure had chosen to enter the choice block immediately.
#
# Closure is examining one config (some loopentrystate, some alt,
# context) which means it is considering exactly one alt. Closure
# always copies the same alt to any derived configs.
#
# How do we know this optimization doesn't mess up precedence in
# our parse trees?
#
# Looking through expr from left edge of stat only has to confirm
# that an input, say, a+b+c; begins with any valid interpretation
# of an expression. The precedence actually doesn't matter when
# making a decision in stat seeing through expr. It is only when
# parsing rule expr that we must use the precedence to get the
# right interpretation and, hence, parse tree.
#
# @since 4.6
#
def canDropLoopEntryEdgeInLeftRecursiveRule(self, config):
# return False
p = config.state
# First check to see if we are in StarLoopEntryState generated during
# left-recursion elimination. For efficiency, also check if
# the context has an empty stack case. If so, it would mean
# global FOLLOW so we can't perform optimization
# Are we the special loop entry/exit state? or SLL wildcard
if p.stateType != ATNState.STAR_LOOP_ENTRY \
or not p.isPrecedenceDecision \
or config.context.isEmpty() \
or config.context.hasEmptyPath():
return False
# Require all return states to return back to the same rule
# that p is in.
numCtxs = len(config.context)
for i in range(0, numCtxs): # for each stack context
returnState = self.atn.states[config.context.getReturnState(i)]
if returnState.ruleIndex != p.ruleIndex:
return False
decisionStartState = p.transitions[0].target
blockEndStateNum = decisionStartState.endState.stateNumber
blockEndState = self.atn.states[blockEndStateNum]
# Verify that the top of each stack context leads to loop entry/exit
# state through epsilon edges and w/o leaving rule.
for i in range(0, numCtxs): # for each stack context
returnStateNumber = config.context.getReturnState(i)
returnState = self.atn.states[returnStateNumber]
# all states must have single outgoing epsilon edge
if len(returnState.transitions) != 1 or not returnState.transitions[0].isEpsilon:
return False
# Look for the prefix op case like 'not expr' or '(' type ')' expr
returnStateTarget = returnState.transitions[0].target
if returnState.stateType == ATNState.BLOCK_END and returnStateTarget is p:
continue
# Look for 'expr op expr' or case where expr's return state is block end
# of (...)* internal block; the block end points to loop back
# which points to p but we don't need to check that
if returnState is blockEndState:
continue
# Look for ternary expr ? expr : expr. The return state points at block end,
# which points at loop entry state
if returnStateTarget is blockEndState:
continue
# Look for complex prefix 'between expr and expr' case where 2nd expr's
# return state points at block end state of (...)* internal block
if returnStateTarget.stateType == ATNState.BLOCK_END \
and len(returnStateTarget.transitions) == 1 \
and returnStateTarget.transitions[0].isEpsilon \
and returnStateTarget.transitions[0].target is p:
continue
# anything else ain't conforming
return False
return True
def getRuleName(self, index:int):
if self.parser is not None and index>=0:
return self.parser.ruleNames[index]
else:
return ""
epsilonTargetMethods = dict()
epsilonTargetMethods[Transition.RULE] = lambda sim, config, t, collectPredicates, inContext, fullCtx, treatEofAsEpsilon: \
sim.ruleTransition(config, t)
epsilonTargetMethods[Transition.PRECEDENCE] = lambda sim, config, t, collectPredicates, inContext, fullCtx, treatEofAsEpsilon: \
sim.precedenceTransition(config, t, collectPredicates, inContext, fullCtx)
epsilonTargetMethods[Transition.PREDICATE] = lambda sim, config, t, collectPredicates, inContext, fullCtx, treatEofAsEpsilon: \
sim.predTransition(config, t, collectPredicates, inContext, fullCtx)
epsilonTargetMethods[Transition.ACTION] = lambda sim, config, t, collectPredicates, inContext, fullCtx, treatEofAsEpsilon: \
sim.actionTransition(config, t)
epsilonTargetMethods[Transition.EPSILON] = lambda sim, config, t, collectPredicates, inContext, fullCtx, treatEofAsEpsilon: \
ATNConfig(state=t.target, config=config)
epsilonTargetMethods[Transition.ATOM] = lambda sim, config, t, collectPredicates, inContext, fullCtx, treatEofAsEpsilon: \
ATNConfig(state=t.target, config=config) if treatEofAsEpsilon and t.matches(Token.EOF, 0, 1) else None
epsilonTargetMethods[Transition.RANGE] = lambda sim, config, t, collectPredicates, inContext, fullCtx, treatEofAsEpsilon: \
ATNConfig(state=t.target, config=config) if treatEofAsEpsilon and t.matches(Token.EOF, 0, 1) else None
epsilonTargetMethods[Transition.SET] = lambda sim, config, t, collectPredicates, inContext, fullCtx, treatEofAsEpsilon: \
ATNConfig(state=t.target, config=config) if treatEofAsEpsilon and t.matches(Token.EOF, 0, 1) else None
def getEpsilonTarget(self, config:ATNConfig, t:Transition, collectPredicates:bool, inContext:bool, fullCtx:bool, treatEofAsEpsilon:bool):
m = self.epsilonTargetMethods.get(t.serializationType, None)
if m is None:
return None
else:
return m(self, config, t, collectPredicates, inContext, fullCtx, treatEofAsEpsilon)
def actionTransition(self, config:ATNConfig, t:ActionTransition):
if ParserATNSimulator.debug:
print("ACTION edge " + str(t.ruleIndex) + ":" + str(t.actionIndex))
return ATNConfig(state=t.target, config=config)
def precedenceTransition(self, config:ATNConfig, pt:PrecedencePredicateTransition, collectPredicates:bool, inContext:bool, fullCtx:bool):
if ParserATNSimulator.debug:
print("PRED (collectPredicates=" + str(collectPredicates) + ") " +
str(pt.precedence) + ">=_p, ctx dependent=true")
if self.parser is not None:
print("context surrounding pred is " + str(self.parser.getRuleInvocationStack()))
c = None
if collectPredicates and inContext:
if fullCtx:
# In full context mode, we can evaluate predicates on-the-fly
# during closure, which dramatically reduces the size of
# the config sets. It also obviates the need to test predicates
# later during conflict resolution.
currentPosition = self._input.index
self._input.seek(self._startIndex)
predSucceeds = pt.getPredicate().eval(self.parser, self._outerContext)
self._input.seek(currentPosition)
if predSucceeds:
c = ATNConfig(state=pt.target, config=config) # no pred context
else:
newSemCtx = andContext(config.semanticContext, pt.getPredicate())
c = ATNConfig(state=pt.target, semantic=newSemCtx, config=config)
else:
c = ATNConfig(state=pt.target, config=config)
if ParserATNSimulator.debug:
print("config from pred transition=" + str(c))
return c
def predTransition(self, config:ATNConfig, pt:PredicateTransition, collectPredicates:bool, inContext:bool, fullCtx:bool):
if ParserATNSimulator.debug:
print("PRED (collectPredicates=" + str(collectPredicates) + ") " + str(pt.ruleIndex) +
":" + str(pt.predIndex) + ", ctx dependent=" + str(pt.isCtxDependent))
if self.parser is not None:
print("context surrounding pred is " + str(self.parser.getRuleInvocationStack()))
c = None
if collectPredicates and (not pt.isCtxDependent or (pt.isCtxDependent and inContext)):
if fullCtx:
# In full context mode, we can evaluate predicates on-the-fly
# during closure, which dramatically reduces the size of
# the config sets. It also obviates the need to test predicates
# later during conflict resolution.
currentPosition = self._input.index
self._input.seek(self._startIndex)
predSucceeds = pt.getPredicate().eval(self.parser, self._outerContext)
self._input.seek(currentPosition)
if predSucceeds:
c = ATNConfig(state=pt.target, config=config) # no pred context
else:
newSemCtx = andContext(config.semanticContext, pt.getPredicate())
c = ATNConfig(state=pt.target, semantic=newSemCtx, config=config)
else:
c = ATNConfig(state=pt.target, config=config)
if ParserATNSimulator.debug:
print("config from pred transition=" + str(c))
return c
def ruleTransition(self, config:ATNConfig, t:RuleTransition):
if ParserATNSimulator.debug:
print("CALL rule " + self.getRuleName(t.target.ruleIndex) + ", ctx=" + str(config.context))
returnState = t.followState
newContext = SingletonPredictionContext.create(config.context, returnState.stateNumber)
return ATNConfig(state=t.target, context=newContext, config=config )
def getConflictingAlts(self, configs:ATNConfigSet):
altsets = PredictionMode.getConflictingAltSubsets(configs)
return PredictionMode.getAlts(altsets)
# Sam pointed out a problem with the previous definition, v3, of
# ambiguous states. If we have another state associated with conflicting
# alternatives, we should keep going. For example, the following grammar
#
# s : (ID | ID ID?) ';' ;
#
# When the ATN simulation reaches the state before ';', it has a DFA
# state that looks like: [12|1|[], 6|2|[], 12|2|[]]. Naturally
# 12|1|[] and 12|2|[] conflict, but we cannot stop processing this node
# because alternative two has another way to continue, via [6|2|[]].
# The key is that we have a single state that has configs associated
# with only a single alternative, 2, and crucially the state transitions
# among the configurations are all non-epsilon transitions. That means
# we don't consider any conflicts that include alternative 2. So, we
# ignore the conflict between alts 1 and 2. We ignore a set of
# conflicting alts when there is an intersection with an alternative
# associated with a single alt state in the state→config-list map.
#
# It's also the case that we might have two conflicting configurations but
# also a 3rd nonconflicting configuration for a different alternative:
# [1|1|[], 1|2|[], 8|3|[]]. This can come about from grammar:
#
# a : A | A | A B ;
#
# After matching input A, we reach the stop state for rule A, state 1.
# State 8 is the state right before B. Clearly alternatives 1 and 2
# conflict and no amount of further lookahead will separate the two.
# However, alternative 3 will be able to continue and so we do not
# stop working on this state. In the previous example, we're concerned
# with states associated with the conflicting alternatives. Here alt
# 3 is not associated with the conflicting configs, but since we can continue
# looking for input reasonably, I don't declare the state done. We
# ignore a set of conflicting alts when we have an alternative
# that we still need to pursue.
#
def getConflictingAltsOrUniqueAlt(self, configs:ATNConfigSet):
conflictingAlts = None
if configs.uniqueAlt!= ATN.INVALID_ALT_NUMBER:
conflictingAlts = set()
conflictingAlts.add(configs.uniqueAlt)
else:
conflictingAlts = configs.conflictingAlts
return conflictingAlts
def getTokenName(self, t:int):
if t==Token.EOF:
return "EOF"
if self.parser is not None and \
self.parser.literalNames is not None and \
t < len(self.parser.literalNames):
return self.parser.literalNames[t] + "<" + str(t) + ">"
else:
return str(t)
def getLookaheadName(self, input:TokenStream):
return self.getTokenName(input.LA(1))
# Used for debugging in adaptivePredict around execATN but I cut
# it out for clarity now that alg. works well. We can leave this
# "dead" code for a bit.
#
def dumpDeadEndConfigs(self, nvae:NoViableAltException):
print("dead end configs: ")
for c in nvae.getDeadEndConfigs():
trans = "no edges"
if len(c.state.transitions)>0:
t = c.state.transitions[0]
if isinstance(t, AtomTransition):
trans = "Atom "+ self.getTokenName(t.label)
elif isinstance(t, SetTransition):
neg = isinstance(t, NotSetTransition)
trans = ("~" if neg else "")+"Set "+ str(t.set)
print(c.toString(self.parser, True) + ":" + trans, file=sys.stderr)
def noViableAlt(self, input:TokenStream, outerContext:ParserRuleContext, configs:ATNConfigSet, startIndex:int):
return NoViableAltException(self.parser, input, input.get(startIndex), input.LT(1), configs, outerContext)
def getUniqueAlt(self, configs:ATNConfigSet):
alt = ATN.INVALID_ALT_NUMBER
for c in configs:
if alt == ATN.INVALID_ALT_NUMBER:
alt = c.alt # found first alt
elif c.alt!=alt:
return ATN.INVALID_ALT_NUMBER
return alt
#
# Add an edge to the DFA, if possible. This method calls
# {@link #addDFAState} to ensure the {@code to} state is present in the
# DFA. If {@code from} is {@code null}, or if {@code t} is outside the
# range of edges that can be represented in the DFA tables, this method
# returns without adding the edge to the DFA.
#
# If {@code to} is {@code null}, this method returns {@code null}.
# Otherwise, this method returns the {@link DFAState} returned by calling
# {@link #addDFAState} for the {@code to} state.
#
# @param dfa The DFA
# @param from The source state for the edge
# @param t The input symbol
# @param to The target state for the edge
#
# @return If {@code to} is {@code null}, this method returns {@code null};
# otherwise this method returns the result of calling {@link #addDFAState}
# on {@code to}
#
def addDFAEdge(self, dfa:DFA, from_:DFAState, t:int, to:DFAState):
if ParserATNSimulator.debug:
print("EDGE " + str(from_) + " -> " + str(to) + " upon " + self.getTokenName(t))
if to is None:
return None
to = self.addDFAState(dfa, to) # used existing if possible not incoming
if from_ is None or t < -1 or t > self.atn.maxTokenType:
return to
if from_.edges is None:
from_.edges = [None] * (self.atn.maxTokenType + 2)
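# shift the symbol by one so Token.EOF (-1) lands in slot 0; the array
# above has maxTokenType + 2 slots to cover symbols -1..maxTokenType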
from_.edges[t+1] = to # connect
if ParserATNSimulator.debug:
names = None if self.parser is None else self.parser.literalNames
print("DFA=\n" + dfa.toString(names))
return to
#
# Add state {@code D} to the DFA if it is not already present, and return
# the actual instance stored in the DFA. If a state equivalent to {@code D}
# is already in the DFA, the existing state is returned. Otherwise this
# method returns {@code D} after adding it to the DFA.
#
# If {@code D} is {@link #ERROR}, this method returns {@link #ERROR} and
# does not change the DFA.
#
# @param dfa The dfa
# @param D The DFA state to add
# @return The state stored in the DFA. This will be either the existing
# state if {@code D} is already in the DFA, or {@code D} itself if the
# state was not already present.
#
def addDFAState(self, dfa:DFA, D:DFAState):
if D is self.ERROR:
return D
existing = dfa.states.get(D, None)
if existing is not None:
return existing
D.stateNumber = len(dfa.states)
if not D.configs.readonly:
D.configs.optimizeConfigs(self)
D.configs.setReadonly(True)
dfa.states[D] = D
if ParserATNSimulator.debug:
print("adding new DFA state: " + str(D))
return D
def reportAttemptingFullContext(self, dfa:DFA, conflictingAlts:set, configs:ATNConfigSet, startIndex:int, stopIndex:int):
if ParserATNSimulator.debug or ParserATNSimulator.retry_debug:
print("reportAttemptingFullContext decision=" + str(dfa.decision) + ":" + str(configs) +
", input=" + self.parser.getTokenStream().getText(startIndex, stopIndex))
if self.parser is not None:
self.parser.getErrorListenerDispatch().reportAttemptingFullContext(self.parser, dfa, startIndex, stopIndex, conflictingAlts, configs)
def reportContextSensitivity(self, dfa:DFA, prediction:int, configs:ATNConfigSet, startIndex:int, stopIndex:int):
if ParserATNSimulator.debug or ParserATNSimulator.retry_debug:
print("reportContextSensitivity decision=" + str(dfa.decision) + ":" + str(configs) +
", input=" + self.parser.getTokenStream().getText(startIndex, stopIndex))
if self.parser is not None:
self.parser.getErrorListenerDispatch().reportContextSensitivity(self.parser, dfa, startIndex, stopIndex, prediction, configs)
# If context-sensitive parsing, we know it's an ambiguity, not a conflict.
def reportAmbiguity(self, dfa:DFA, D:DFAState, startIndex:int, stopIndex:int,
exact:bool, ambigAlts:set, configs:ATNConfigSet ):
if ParserATNSimulator.debug or ParserATNSimulator.retry_debug:
# ParserATNPathFinder finder = new ParserATNPathFinder(parser, atn);
# int i = 1;
# for (Transition t : dfa.atnStartState.transitions) {
# print("ALT "+i+"=");
# print(startIndex+".."+stopIndex+", len(input)="+parser.getInputStream().size());
# TraceTree path = finder.trace(t.target, parser.getContext(), (TokenStream)parser.getInputStream(),
# startIndex, stopIndex);
# if ( path!=null ) {
# print("path = "+path.toStringTree());
# for (TraceTree leaf : path.leaves) {
# List states = path.getPathToNode(leaf);
# print("states="+states);
# }
# }
# i++;
# }
print("reportAmbiguity " + str(ambigAlts) + ":" + str(configs) +
", input=" + self.parser.getTokenStream().getText(startIndex, stopIndex))
if self.parser is not None:
self.parser.getErrorListenerDispatch().reportAmbiguity(self.parser, dfa, startIndex, stopIndex, exact, ambigAlts, configs)
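# Example (a minimal sketch, not part of the runtime): the report* methods and
# DFA mutators above only print their traces when the class-level flags are
# set, so the cheapest way to watch a decision being built is:
#
# ParserATNSimulator.debug = True        # EDGE/DFA traces in addDFAEdge/addDFAState
# ParserATNSimulator.retry_debug = True  # traces from the report* callbacks above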
# File: antlr4-python3-runtime-4.9.1/src/antlr4/atn/PredictionMode.py
#
# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
#
#
# This enumeration defines the prediction modes available in ANTLR 4 along with
# utility methods for analyzing configuration sets for conflicts and/or
# ambiguities.
from enum import Enum
from antlr4.atn.ATN import ATN
from antlr4.atn.ATNConfig import ATNConfig
from antlr4.atn.ATNConfigSet import ATNConfigSet
from antlr4.atn.ATNState import RuleStopState
from antlr4.atn.SemanticContext import SemanticContext
PredictionMode = None
class PredictionMode(Enum):
#
# The SLL(*) prediction mode. This prediction mode ignores the current
# parser context when making predictions. This is the fastest prediction
# mode, and provides correct results for many grammars. This prediction
# mode is more powerful than the prediction mode provided by ANTLR 3, but
# may result in syntax errors for grammar and input combinations which are
# not SLL.
#
#
# When using this prediction mode, the parser will either return a correct
# parse tree (i.e. the same parse tree that would be returned with the
# {@link #LL} prediction mode), or it will report a syntax error. If a
# syntax error is encountered when using the {@link #SLL} prediction mode,
# it may be due to either an actual syntax error in the input or indicate
# that the particular combination of grammar and input requires the more
# powerful {@link #LL} prediction abilities to complete successfully.
#
#
# This prediction mode does not provide any guarantees for prediction
# behavior for syntactically-incorrect inputs.
#
SLL = 0
#
# The LL(*) prediction mode. This prediction mode allows the current parser
# context to be used for resolving SLL conflicts that occur during
# prediction. This is the fastest prediction mode that guarantees correct
# parse results for all combinations of grammars with syntactically correct
# inputs.
#
#
# When using this prediction mode, the parser will make correct decisions
# for all syntactically-correct grammar and input combinations. However, in
# cases where the grammar is truly ambiguous this prediction mode might not
# report a precise answer for exactly which alternatives are
# ambiguous.
#
#
# This prediction mode does not provide any guarantees for prediction
# behavior for syntactically-incorrect inputs.
#
LL = 1
#
# The LL(*) prediction mode with exact ambiguity detection. In addition to
# the correctness guarantees provided by the {@link #LL} prediction mode,
# this prediction mode instructs the prediction algorithm to determine the
# complete and exact set of ambiguous alternatives for every ambiguous
# decision encountered while parsing.
#
#
# This prediction mode may be used for diagnosing ambiguities during
# grammar development. Due to the performance overhead of calculating sets
# of ambiguous alternatives, this prediction mode should be avoided when
# the exact results are not necessary.
#
#
# This prediction mode does not provide any guarantees for prediction
# behavior for syntactically-incorrect inputs.
#
LL_EXACT_AMBIG_DETECTION = 2
#
# Computes the SLL prediction termination condition.
#
#
# This method computes the SLL prediction termination condition for both of
# the following cases.
#
#
# - The usual SLL+LL fallback upon SLL conflict
# - Pure SLL without LL fallback
#
#
# COMBINED SLL+LL PARSING
#
# When LL-fallback is enabled upon SLL conflict, correct predictions are
# ensured regardless of how the termination condition is computed by this
# method. Due to the substantially higher cost of LL prediction, the
# prediction should only fall back to LL when the additional lookahead
# cannot lead to a unique SLL prediction.
#
# Assuming combined SLL+LL parsing, an SLL configuration set with only
# conflicting subsets should fall back to full LL, even if the
# configuration sets don't resolve to the same alternative (e.g.
# {@code {1,2}} and {@code {3,4}}). If there is at least one non-conflicting
# configuration, SLL could continue with the hopes that more lookahead will
# resolve via one of those non-conflicting configurations.
#
# Here's the prediction termination rule, then: SLL (for SLL+LL parsing)
# stops when it sees only conflicting configuration subsets. In contrast,
# full LL keeps going when there is uncertainty.
#
# HEURISTIC
#
# As a heuristic, we stop prediction when we see any conflicting subset
# unless we see a state that only has one alternative associated with it.
# The single-alt-state thing lets prediction continue upon rules like
# (otherwise, it would admit defeat too soon):
#
# {@code [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ';' ;}
#
# (Configurations are written {@code state|alt|stack context}.)
#
# When the ATN simulation reaches the state before {@code ';'}, it has a
# DFA state that looks like: {@code [12|1|[], 6|2|[], 12|2|[]]}. Naturally
# {@code 12|1|[]} and {@code 12|2|[]} conflict, but we cannot stop
# processing this node because alternative two has another way to continue,
# via {@code [6|2|[]]}.
#
# It also lets us continue for this rule:
#
# {@code [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B ;}
#
# After matching input A, we reach the stop state for rule A, state 1.
# State 8 is the state right before B. Clearly alternatives 1 and 2
# conflict and no amount of further lookahead will separate the two.
# However, alternative 3 will be able to continue and so we do not stop
# working on this state. In the previous example, we're concerned with
# states associated with the conflicting alternatives. Here alt 3 is not
# associated with the conflicting configs, but since we can reasonably
# continue consuming input, we don't declare the state done.
#
# PURE SLL PARSING
#
# To handle pure SLL parsing, all we have to do is make sure that we
# combine stack contexts for configurations that differ only by semantic
# predicate. From there, we can do the usual SLL termination heuristic.
#
# PREDICATES IN SLL+LL PARSING
#
# SLL decisions don't evaluate predicates until after they reach DFA stop
# states because they need to create the DFA cache that works in all
# semantic situations. In contrast, full LL evaluates predicates collected
# during start state computation so it can ignore predicates thereafter.
# This means that SLL termination detection can totally ignore semantic
# predicates.
#
# Implementation-wise, {@link ATNConfigSet} combines stack contexts but not
# semantic predicate contexts so we might see two configurations like the
# following.
#
# {@code (s, 1, x, {}), (s, 1, x', {p})}
#
# Before testing these configurations against others, we have to merge
# {@code x} and {@code x'} (without modifying the existing configurations).
# For example, we test {@code (x+x')==x''} when looking for conflicts in
# the following configurations.
#
# {@code (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x'', {})}
#
# If the configuration set has predicates (as indicated by
# {@link ATNConfigSet#hasSemanticContext}), this algorithm makes a copy of
# the configurations to strip out all of the predicates so that a standard
# {@link ATNConfigSet} will merge everything ignoring predicates.
#
@classmethod
def hasSLLConflictTerminatingPrediction(cls, mode:PredictionMode, configs:ATNConfigSet):
# Configs in rule stop states indicate reaching the end of the decision
# rule (local context) or end of start rule (full context). If all
# configs meet this condition, then none of the configurations is able
# to match additional input so we terminate prediction.
#
if cls.allConfigsInRuleStopStates(configs):
return True
# pure SLL mode parsing
if mode == PredictionMode.SLL:
# Don't bother with combining configs from different semantic
# contexts if we can fail over to full LL; costs more time
# since we'll often fail over anyway.
if configs.hasSemanticContext:
# dup configs, tossing out semantic predicates
dup = ATNConfigSet()
for c in configs:
c = ATNConfig(config=c, semantic=SemanticContext.NONE)
dup.add(c)
configs = dup
# now we have combined contexts for configs with dissimilar preds
# pure SLL or combined SLL+LL mode parsing
altsets = cls.getConflictingAltSubsets(configs)
return cls.hasConflictingAltSet(altsets) and not cls.hasStateAssociatedWithOneAlt(configs)
# Checks if any configuration in {@code configs} is in a
# {@link RuleStopState}. Configurations meeting this condition have reached
# the end of the decision rule (local context) or end of start rule (full
# context).
#
# @param configs the configuration set to test
# @return {@code true} if any configuration in {@code configs} is in a
# {@link RuleStopState}, otherwise {@code false}
@classmethod
def hasConfigInRuleStopState(cls, configs:ATNConfigSet):
return any(isinstance(cfg.state, RuleStopState) for cfg in configs)
# Checks if all configurations in {@code configs} are in a
# {@link RuleStopState}. Configurations meeting this condition have reached
# the end of the decision rule (local context) or end of start rule (full
# context).
#
# @param configs the configuration set to test
# @return {@code true} if all configurations in {@code configs} are in a
# {@link RuleStopState}, otherwise {@code false}
@classmethod
def allConfigsInRuleStopStates(cls, configs:ATNConfigSet):
return all(isinstance(cfg.state, RuleStopState) for cfg in configs)
#
# Full LL prediction termination.
#
# Can we stop looking ahead during ATN simulation or is there some
# uncertainty as to which alternative we will ultimately pick, after
# consuming more input? Even if there are partial conflicts, we might know
# that everything is going to resolve to the same minimum alternative. That
# means we can stop since no more lookahead will change that fact. On the
# other hand, there might be multiple conflicts that resolve to different
# minimums. That means we need more look ahead to decide which of those
# alternatives we should predict.
#
# The basic idea is to split the set of configurations {@code C}, into
# conflicting subsets {@code (s, _, ctx, _)} and singleton subsets with
# non-conflicting configurations. Two configurations conflict if they have
# identical {@link ATNConfig#state} and {@link ATNConfig#context} values
# but different {@link ATNConfig#alt} value, e.g. {@code (s, i, ctx, _)}
# and {@code (s, j, ctx, _)} for {@code i!=j}.
#
# Reduce these configuration subsets to the set of possible alternatives.
# You can compute the alternative subsets in one pass as follows:
#
# {@code A_s,ctx = {i | (s, i, ctx, _)}} for each configuration in
# {@code C} holding {@code s} and {@code ctx} fixed.
#
# Or in pseudo-code, for each configuration {@code c} in {@code C}:
#
#
# map[c] U= c.{@link ATNConfig#alt alt} # map hash/equals uses s and x, not
# alt and not pred
#
#
# The values in {@code map} are the set of {@code A_s,ctx} sets.
#
# If {@code |A_s,ctx|=1} then there is no conflict associated with
# {@code s} and {@code ctx}.
#
# Reduce the subsets to singletons by choosing a minimum of each subset. If
# the union of these alternative subsets is a singleton, then no amount of
# more lookahead will help us. We will always pick that alternative. If,
# however, there is more than one alternative, then we are uncertain which
# alternative to predict and must continue looking for resolution. We may
# or may not discover an ambiguity in the future, even if there are no
# conflicting subsets this round.
#
# The biggest sin is to terminate early because it means we've made a
# decision but were uncertain as to the eventual outcome. We haven't used
# enough lookahead. On the other hand, announcing a conflict too late is no
# big deal; you will still have the conflict. It's just inefficient. It
# might even look all the way to the end of the file.
#
# No special consideration for semantic predicates is required because
# predicates are evaluated on-the-fly for full LL prediction, ensuring that
# no configuration contains a semantic context during the termination
# check.
#
# CONFLICTING CONFIGS
#
# Two configurations {@code (s, i, x)} and {@code (s, j, x')}, conflict
# when {@code i!=j} but {@code x=x'}. Because we merge all
# {@code (s, i, _)} configurations together, that means that there are at
# most {@code n} configurations associated with state {@code s} for
# {@code n} possible alternatives in the decision. The merged stacks
# complicate the comparison of configuration contexts {@code x} and
# {@code x'}. Sam checks to see if one is a subset of the other by calling
# merge and checking to see if the merged result is either {@code x} or
# {@code x'}. If the {@code x} associated with lowest alternative {@code i}
# is the superset, then {@code i} is the only possible prediction since the
# others resolve to {@code min(i)} as well. However, if {@code x} is
# associated with {@code j>i} then at least one stack configuration for
# {@code j} is not in conflict with alternative {@code i}. The algorithm
# should keep going, looking for more lookahead due to the uncertainty.
#
# For simplicity, I'm doing an equality check between {@code x} and
# {@code x'} that lets the algorithm continue to consume lookahead longer
# than necessary. The reason I like the equality is of course the
# simplicity but also because that is the test you need to detect the
# alternatives that are actually in conflict.
#
# CONTINUE/STOP RULE
#
# Continue if union of resolved alternative sets from non-conflicting and
# conflicting alternative subsets has more than one alternative. We are
# uncertain about which alternative to predict.
#
# The complete set of alternatives, {@code [i for (_,i,_)]}, tells us which
# alternatives are still in the running for the amount of input we've
# consumed at this point. The conflicting sets let us strip away
# configurations that won't lead to more states because we resolve
# conflicts to the configuration with a minimum alternate for the
# conflicting set.
#
# CASES
#
#
#
# - no conflicts and more than 1 alternative in set => continue
#
# - {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s, 3, z)},
# {@code (s', 1, y)}, {@code (s', 2, y)} yields non-conflicting set
# {@code {3}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} =
# {@code {1,3}} => continue
#
#
# - {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)},
# {@code (s', 2, y)}, {@code (s'', 1, z)} yields non-conflicting set
# {@code {1}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} =
# {@code {1}} => stop and predict 1
#
# - {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)},
# {@code (s', 2, y)} yields conflicting, reduced sets {@code {1}} U
# {@code {1}} = {@code {1}} => stop and predict 1, can announce
# ambiguity {@code {1,2}}
#
# - {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 2, y)},
# {@code (s', 3, y)} yields conflicting, reduced sets {@code {1}} U
# {@code {2}} = {@code {1,2}} => continue
#
# - {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 3, y)},
# {@code (s', 4, y)} yields conflicting, reduced sets {@code {1}} U
# {@code {3}} = {@code {1,3}} => continue
#
#
#
# EXACT AMBIGUITY DETECTION
#
# If all states report the same conflicting set of alternatives, then we
# know we have the exact ambiguity set.
#
# |A_i|>1 and
# A_i = A_j for all i, j.
#
# In other words, we continue examining lookahead until all {@code A_i}
# have more than one alternative and all {@code A_i} are the same. If
# {@code A={{1,2}, {1,3}}}, then regular LL prediction would terminate
# because the resolved set is {@code {1}}. To determine what the real
# ambiguity is, we have to know whether the ambiguity is between one and
# two or one and three so we keep going. We can only stop prediction when
# we need exact ambiguity detection when the sets look like
# {@code A={{1,2}}} or {@code {{1,2},{1,2}}}, etc...
#
@classmethod
def resolvesToJustOneViableAlt(cls, altsets:list):
return cls.getSingleViableAlt(altsets)
#
# Determines if every alternative subset in {@code altsets} contains more
# than one alternative.
#
# @param altsets a collection of alternative subsets
# @return {@code true} if every {@link BitSet} in {@code altsets} has
# {@link BitSet#cardinality cardinality} > 1, otherwise {@code false}
#
@classmethod
def allSubsetsConflict(cls, altsets:list):
return not cls.hasNonConflictingAltSet(altsets)
#
# Determines if any single alternative subset in {@code altsets} contains
# exactly one alternative.
#
# @param altsets a collection of alternative subsets
# @return {@code true} if {@code altsets} contains a {@link BitSet} with
# {@link BitSet#cardinality cardinality} 1, otherwise {@code false}
#
@classmethod
def hasNonConflictingAltSet(cls, altsets:list):
return any(len(alts) == 1 for alts in altsets)
#
# Determines if any single alternative subset in {@code altsets} contains
# more than one alternative.
#
# @param altsets a collection of alternative subsets
# @return {@code true} if {@code altsets} contains a {@link BitSet} with
# {@link BitSet#cardinality cardinality} > 1, otherwise {@code false}
#
@classmethod
def hasConflictingAltSet(cls, altsets:list):
return any(len(alts) > 1 for alts in altsets)
#
# Determines if every alternative subset in {@code altsets} is equivalent.
#
# @param altsets a collection of alternative subsets
# @return {@code true} if every member of {@code altsets} is equal to the
# others, otherwise {@code false}
#
@classmethod
def allSubsetsEqual(cls, altsets:list):
if not altsets:
return True
first = next(iter(altsets))
return all(alts == first for alts in altsets)
#
# Returns the unique alternative predicted by all alternative subsets in
# {@code altsets}. If no such alternative exists, this method returns
# {@link ATN#INVALID_ALT_NUMBER}.
#
# @param altsets a collection of alternative subsets
#
@classmethod
def getUniqueAlt(cls, altsets:list):
alts = cls.getAlts(altsets) # 'alts', not 'all', to avoid shadowing the builtin
if len(alts)==1:
return next(iter(alts))
return ATN.INVALID_ALT_NUMBER
# Gets the complete set of represented alternatives for a collection of
# alternative subsets. This method returns the union of each {@link BitSet}
# in {@code altsets}.
#
# @param altsets a collection of alternative subsets
# @return the set of represented alternatives in {@code altsets}
#
@classmethod
def getAlts(cls, altsets:list):
return set.union(*altsets) # note: assumes altsets is non-empty
#
# This function gets the conflicting alt subsets from a configuration set.
# For each configuration {@code c} in {@code configs}:
#
#
# map[c] U= c.{@link ATNConfig#alt alt} # map hash/equals uses s and x, not
# alt and not pred
#
#
@classmethod
def getConflictingAltSubsets(cls, configs:ATNConfigSet):
configToAlts = dict()
for c in configs:
h = hash((c.state.stateNumber, c.context))
alts = configToAlts.get(h, None)
if alts is None:
alts = set()
configToAlts[h] = alts
alts.add(c.alt)
return configToAlts.values()
#
# Get a map from state to alt subset from a configuration set. For each
# configuration {@code c} in {@code configs}:
#
#
# map[c.{@link ATNConfig#state state}] U= c.{@link ATNConfig#alt alt}
#
#
@classmethod
def getStateToAltMap(cls, configs:ATNConfigSet):
m = dict()
for c in configs:
alts = m.get(c.state, None)
if alts is None:
alts = set()
m[c.state] = alts
alts.add(c.alt)
return m
@classmethod
def hasStateAssociatedWithOneAlt(cls, configs:ATNConfigSet):
return any(len(alts) == 1 for alts in cls.getStateToAltMap(configs).values())
@classmethod
def getSingleViableAlt(cls, altsets:list):
viableAlts = set()
for alts in altsets:
minAlt = min(alts)
viableAlts.add(minAlt)
if len(viableAlts)>1 : # more than 1 viable alt
return ATN.INVALID_ALT_NUMBER
return min(viableAlts)
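# Example (a hedged sketch, not part of the runtime): the two-stage parse that
# the SLL documentation above alludes to. `MyLexer`, `MyParser` and
# `startRule` are hypothetical generated names; the rest is runtime API.
#
# from antlr4 import InputStream, CommonTokenStream
# from antlr4.error.ErrorStrategy import BailErrorStrategy
# from antlr4.error.Errors import ParseCancellationException
#
# def two_stage_parse(text):
#     tokens = CommonTokenStream(MyLexer(InputStream(text)))
#     parser = MyParser(tokens)
#     parser._interp.predictionMode = PredictionMode.SLL
#     parser._errHandler = BailErrorStrategy()  # bail out instead of recovering
#     try:
#         return parser.startRule()
#     except ParseCancellationException:
#         tokens.seek(0)                        # rewind, then retry with full LL
#         parser.reset()
#         parser._interp.predictionMode = PredictionMode.LL
#         return parser.startRule()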
# File: antlr4-python3-runtime-4.9.1/src/antlr4/atn/SemanticContext.py
#
# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
#
# A tree structure used to record the semantic context in which
# an ATN configuration is valid. It's either a single predicate,
# a conjunction {@code p1&&p2}, or a sum of products {@code p1||p2}.
#
# I have scoped the {@link AND}, {@link OR}, and {@link Predicate} subclasses of
# {@link SemanticContext} within the scope of this outer class.
#
from antlr4.Recognizer import Recognizer
from antlr4.RuleContext import RuleContext
from io import StringIO
class SemanticContext(object):
#
# The default {@link SemanticContext}, which is semantically equivalent to
# a predicate of the form {@code {true}?}.
#
NONE = None
#
# For context independent predicates, we evaluate them without a local
# context (i.e., null context). That way, we can evaluate them without
# having to create proper rule-specific context during prediction (as
# opposed to the parser, which creates them naturally). In a practical
# sense, this avoids a cast exception from RuleContext to myruleContext.
#
# For context dependent predicates, we must pass in a local context so that
# references such as $arg evaluate properly as _localctx.arg. We only
# capture context dependent predicates in the context in which we begin
# prediction, so we pass in the outer context here in case of context
# dependent predicate evaluation.
#
def eval(self, parser:Recognizer , outerContext:RuleContext ):
pass
#
# Evaluate the precedence predicates for the context and reduce the result.
#
# @param parser The parser instance.
# @param outerContext The current parser context object.
# @return The simplified semantic context after precedence predicates are
# evaluated, which will be one of the following values.
#
# - {@link #NONE}: if the predicate simplifies to {@code true} after
# precedence predicates are evaluated.
# - {@code null}: if the predicate simplifies to {@code false} after
# precedence predicates are evaluated.
# - {@code this}: if the semantic context is not changed as a result of
# precedence predicate evaluation.
# - A non-{@code null} {@link SemanticContext}: the new simplified
# semantic context after precedence predicates are evaluated.
#
#
def evalPrecedence(self, parser:Recognizer, outerContext:RuleContext):
return self
# need forward declaration
AND = None
def andContext(a:SemanticContext, b:SemanticContext):
if a is None or a is SemanticContext.NONE:
return b
if b is None or b is SemanticContext.NONE:
return a
result = AND(a, b)
if len(result.opnds) == 1:
return result.opnds[0]
else:
return result
# need forward declaration
OR = None
def orContext(a:SemanticContext, b:SemanticContext):
if a is None:
return b
if b is None:
return a
if a is SemanticContext.NONE or b is SemanticContext.NONE:
return SemanticContext.NONE
result = OR(a, b)
if len(result.opnds) == 1:
return result.opnds[0]
else:
return result
def filterPrecedencePredicates(collection:set):
return [context for context in collection if isinstance(context, PrecedencePredicate)]
class Predicate(SemanticContext):
__slots__ = ('ruleIndex', 'predIndex', 'isCtxDependent')
def __init__(self, ruleIndex:int=-1, predIndex:int=-1, isCtxDependent:bool=False):
self.ruleIndex = ruleIndex
self.predIndex = predIndex
self.isCtxDependent = isCtxDependent # e.g., $i ref in pred
def eval(self, parser:Recognizer , outerContext:RuleContext ):
localctx = outerContext if self.isCtxDependent else None
return parser.sempred(localctx, self.ruleIndex, self.predIndex)
def __hash__(self):
return hash((self.ruleIndex, self.predIndex, self.isCtxDependent))
def __eq__(self, other):
if self is other:
return True
elif not isinstance(other, Predicate):
return False
return self.ruleIndex == other.ruleIndex and \
self.predIndex == other.predIndex and \
self.isCtxDependent == other.isCtxDependent
def __str__(self):
return "{" + str(self.ruleIndex) + ":" + str(self.predIndex) + "}?"
class PrecedencePredicate(SemanticContext):
def __init__(self, precedence:int=0):
self.precedence = precedence
def eval(self, parser:Recognizer , outerContext:RuleContext ):
return parser.precpred(outerContext, self.precedence)
def evalPrecedence(self, parser:Recognizer, outerContext:RuleContext):
if parser.precpred(outerContext, self.precedence):
return SemanticContext.NONE
else:
return None
def __lt__(self, other):
return self.precedence < other.precedence
def __hash__(self):
return 31 # constant hash: all precedence predicates share one bucket; __eq__ distinguishes them
def __eq__(self, other):
if self is other:
return True
elif not isinstance(other, PrecedencePredicate):
return False
else:
return self.precedence == other.precedence
# A semantic context which is true whenever none of the contained contexts
# is false.
del AND
class AND(SemanticContext):
__slots__ = 'opnds'
def __init__(self, a:SemanticContext, b:SemanticContext):
operands = set()
if isinstance( a, AND ):
operands.update(a.opnds)
else:
operands.add(a)
if isinstance( b, AND ):
operands.update(b.opnds)
else:
operands.add(b)
precedencePredicates = filterPrecedencePredicates(operands)
if len(precedencePredicates)>0:
# interested in the transition with the lowest precedence:
# under AND, the lowest precedence predicate implies all the others,
# so keep only that one and drop the rest
reduced = min(precedencePredicates)
operands.difference_update(precedencePredicates)
operands.add(reduced)
self.opnds = list(operands)
def __eq__(self, other):
if self is other:
return True
elif not isinstance(other, AND):
return False
else:
return self.opnds == other.opnds
def __hash__(self):
h = 0
for o in self.opnds:
h = hash((h, o))
return hash((h, "AND"))
#
# {@inheritDoc}
#
#
# The evaluation of predicates by this context is short-circuiting, but
# unordered.
#
def eval(self, parser:Recognizer, outerContext:RuleContext):
return all(opnd.eval(parser, outerContext) for opnd in self.opnds)
def evalPrecedence(self, parser:Recognizer, outerContext:RuleContext):
differs = False
operands = []
for context in self.opnds:
evaluated = context.evalPrecedence(parser, outerContext)
differs |= evaluated is not context
if evaluated is None:
# The AND context is false if any element is false
return None
elif evaluated is not SemanticContext.NONE:
# Reduce the result by skipping true elements
operands.append(evaluated)
if not differs:
return self
if len(operands)==0:
# all elements were true, so the AND context is true
return SemanticContext.NONE
result = None
for o in operands:
result = o if result is None else andContext(result, o)
return result
def __str__(self):
with StringIO() as buf:
first = True
for o in self.opnds:
if not first:
buf.write("&&")
buf.write(str(o))
first = False
return buf.getvalue()
#
# A semantic context which is true whenever at least one of the contained
# contexts is true.
del OR
class OR (SemanticContext):
__slots__ = 'opnds'
def __init__(self, a:SemanticContext, b:SemanticContext):
operands = set()
if isinstance( a, OR ):
operands.update(a.opnds)
else:
operands.add(a)
if isinstance( b, OR ):
operands.update(b.opnds)
else:
operands.add(b)
precedencePredicates = filterPrecedencePredicates(operands)
if len(precedencePredicates)>0:
# interested in the transition with the highest precedence:
# under OR, the highest precedence predicate is implied by all the others,
# so keep only that one and drop the rest
reduced = max(precedencePredicates)
operands.difference_update(precedencePredicates)
operands.add(reduced)
self.opnds = list(operands)
def __eq__(self, other):
if self is other:
return True
elif not isinstance(other, OR):
return False
else:
return self.opnds == other.opnds
def __hash__(self):
h = 0
for o in self.opnds:
h = hash((h, o))
return hash((h, "OR"))
#
# The evaluation of predicates by this context is short-circuiting, but
# unordered.
#
def eval(self, parser:Recognizer, outerContext:RuleContext):
return any(opnd.eval(parser, outerContext) for opnd in self.opnds)
def evalPrecedence(self, parser:Recognizer, outerContext:RuleContext):
differs = False
operands = []
for context in self.opnds:
evaluated = context.evalPrecedence(parser, outerContext)
differs |= evaluated is not context
if evaluated is SemanticContext.NONE:
# The OR context is true if any element is true
return SemanticContext.NONE
elif evaluated is not None:
# Reduce the result by skipping false elements
operands.append(evaluated)
if not differs:
return self
if len(operands)==0:
# all elements were false, so the OR context is false
return None
result = None
for o in operands:
result = o if result is None else orContext(result, o)
return result
def __str__(self):
with StringIO() as buf:
first = True
for o in self.opnds:
if not first:
buf.write("||")
buf.write(str(o))
first = False
return buf.getvalue()
SemanticContext.NONE = Predicate()
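# A small sanity sketch (not part of the runtime) of the combinators above:
# NONE acts as "true", so it is the identity for AND and absorbing for OR.
if __name__ == '__main__':
    p = Predicate(0, 0, False)
    q = Predicate(0, 1, False)
    assert andContext(SemanticContext.NONE, p) is p                    # true && p == p
    assert orContext(SemanticContext.NONE, p) is SemanticContext.NONE  # true || p == true
    both = andContext(p, q)
    assert isinstance(both, AND) and len(both.opnds) == 2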
# File: antlr4-python3-runtime-4.9.1/src/antlr4/atn/Transition.py
#
# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
#
# An ATN transition between any two ATN states. Subclasses define
# atom, set, epsilon, action, predicate, rule transitions.
#
# This is a one way link. It emanates from a state (usually via a list of
# transitions) and has a target state.
#
# Since we never have to change the ATN transitions once we construct it,
# we can fix these transitions as specific classes. The DFA transitions
# on the other hand need to update the labels as it adds transitions to
# the states. We'll use the term Edge for the DFA to distinguish them from
# ATN transitions.
#
from antlr4.IntervalSet import IntervalSet
from antlr4.Token import Token
# need forward declarations
from antlr4.atn.SemanticContext import Predicate, PrecedencePredicate
ATNState = None
RuleStartState = None
class Transition (object):
__slots__ = ('target','isEpsilon','label')
# constants for serialization
EPSILON = 1
RANGE = 2
RULE = 3
PREDICATE = 4 # e.g., {isType(input.LT(1))}?
ATOM = 5
ACTION = 6
SET = 7 # ~(A|B) or ~atom, wildcard, which convert to next 2
NOT_SET = 8
WILDCARD = 9
PRECEDENCE = 10
serializationNames = [
"INVALID",
"EPSILON",
"RANGE",
"RULE",
"PREDICATE",
"ATOM",
"ACTION",
"SET",
"NOT_SET",
"WILDCARD",
"PRECEDENCE"
]
serializationTypes = dict()
def __init__(self, target:ATNState):
# The target of this transition.
if target is None:
raise Exception("target cannot be null.")
self.target = target
# Are we epsilon, action, sempred?
self.isEpsilon = False
self.label = None
# TODO: make all transitions sets? no, should remove set edges
class AtomTransition(Transition):
__slots__ = ('label_', 'serializationType')
def __init__(self, target:ATNState, label:int):
super().__init__(target)
self.label_ = label # The token type or character value; or, signifies special label.
self.label = self.makeLabel()
self.serializationType = self.ATOM
def makeLabel(self):
s = IntervalSet()
s.addOne(self.label_)
return s
def matches( self, symbol:int, minVocabSymbol:int, maxVocabSymbol:int):
return self.label_ == symbol
def __str__(self):
return str(self.label_)
class RuleTransition(Transition):
__slots__ = ('ruleIndex', 'precedence', 'followState', 'serializationType')
def __init__(self, ruleStart:RuleStartState, ruleIndex:int, precedence:int, followState:ATNState):
super().__init__(ruleStart)
self.ruleIndex = ruleIndex # ptr to the rule definition object for this rule ref
self.precedence = precedence
self.followState = followState # what node to begin computations following ref to rule
self.serializationType = self.RULE
self.isEpsilon = True
def matches( self, symbol:int, minVocabSymbol:int, maxVocabSymbol:int):
return False
class EpsilonTransition(Transition):
__slots__ = ('serializationType', 'outermostPrecedenceReturn')
def __init__(self, target, outermostPrecedenceReturn=-1):
super().__init__(target)
self.serializationType = self.EPSILON
self.isEpsilon = True
self.outermostPrecedenceReturn = outermostPrecedenceReturn
def matches( self, symbol:int, minVocabSymbol:int, maxVocabSymbol:int):
return False
def __str__(self):
return "epsilon"
class RangeTransition(Transition):
__slots__ = ('serializationType', 'start', 'stop')
def __init__(self, target:ATNState, start:int, stop:int):
super().__init__(target)
self.serializationType = self.RANGE
self.start = start
self.stop = stop
self.label = self.makeLabel()
def makeLabel(self):
s = IntervalSet()
s.addRange(range(self.start, self.stop + 1))
return s
def matches( self, symbol:int, minVocabSymbol:int, maxVocabSymbol:int):
return symbol >= self.start and symbol <= self.stop
def __str__(self):
return "'" + chr(self.start) + "'..'" + chr(self.stop) + "'"
class AbstractPredicateTransition(Transition):
def __init__(self, target:ATNState):
super().__init__(target)
class PredicateTransition(AbstractPredicateTransition):
__slots__ = ('serializationType', 'ruleIndex', 'predIndex', 'isCtxDependent')
def __init__(self, target:ATNState, ruleIndex:int, predIndex:int, isCtxDependent:bool):
super().__init__(target)
self.serializationType = self.PREDICATE
self.ruleIndex = ruleIndex
self.predIndex = predIndex
self.isCtxDependent = isCtxDependent # e.g., $i ref in pred
self.isEpsilon = True
def matches( self, symbol:int, minVocabSymbol:int, maxVocabSymbol:int):
return False
def getPredicate(self):
return Predicate(self.ruleIndex, self.predIndex, self.isCtxDependent)
def __str__(self):
return "pred_" + str(self.ruleIndex) + ":" + str(self.predIndex)
class ActionTransition(Transition):
__slots__ = ('serializationType', 'ruleIndex', 'actionIndex', 'isCtxDependent')
def __init__(self, target:ATNState, ruleIndex:int, actionIndex:int=-1, isCtxDependent:bool=False):
super().__init__(target)
self.serializationType = self.ACTION
self.ruleIndex = ruleIndex
self.actionIndex = actionIndex
self.isCtxDependent = isCtxDependent # e.g., $i ref in pred
self.isEpsilon = True
def matches( self, symbol:int, minVocabSymbol:int, maxVocabSymbol:int):
return False
def __str__(self):
return "action_"+self.ruleIndex+":"+self.actionIndex
# A transition containing a set of values.
class SetTransition(Transition):
__slots__ = 'serializationType'
def __init__(self, target:ATNState, set:IntervalSet):
super().__init__(target)
self.serializationType = self.SET
if set is not None:
self.label = set
else:
self.label = IntervalSet()
self.label.addRange(range(Token.INVALID_TYPE, Token.INVALID_TYPE + 1))
def matches( self, symbol:int, minVocabSymbol:int, maxVocabSymbol:int):
return symbol in self.label
def __str__(self):
return str(self.label)
class NotSetTransition(SetTransition):
def __init__(self, target:ATNState, set:IntervalSet):
super().__init__(target, set)
self.serializationType = self.NOT_SET
def matches( self, symbol:int, minVocabSymbol:int, maxVocabSymbol:int):
return symbol >= minVocabSymbol \
and symbol <= maxVocabSymbol \
and not super().matches(symbol, minVocabSymbol, maxVocabSymbol) # super(type(self), self) would recurse forever in a subclass
def __str__(self):
return '~' + super().__str__()
class WildcardTransition(Transition):
__slots__ = 'serializationType'
def __init__(self, target:ATNState):
super().__init__(target)
self.serializationType = self.WILDCARD
def matches( self, symbol:int, minVocabSymbol:int, maxVocabSymbol:int):
return symbol >= minVocabSymbol and symbol <= maxVocabSymbol
def __str__(self):
return "."
class PrecedencePredicateTransition(AbstractPredicateTransition):
__slots__ = ('serializationType', 'precedence')
def __init__(self, target:ATNState, precedence:int):
super().__init__(target)
self.serializationType = self.PRECEDENCE
self.precedence = precedence
self.isEpsilon = True
def matches( self, symbol:int, minVocabSymbol:int, maxVocabSymbol:int):
return False
def getPredicate(self):
return PrecedencePredicate(self.precedence)
def __str__(self):
return str(self.precedence) + " >= _p" # precedence is an int and must be stringified
Transition.serializationTypes = {
EpsilonTransition: Transition.EPSILON,
RangeTransition: Transition.RANGE,
RuleTransition: Transition.RULE,
PredicateTransition: Transition.PREDICATE,
AtomTransition: Transition.ATOM,
ActionTransition: Transition.ACTION,
SetTransition: Transition.SET,
NotSetTransition: Transition.NOT_SET,
WildcardTransition: Transition.WILDCARD,
PrecedencePredicateTransition: Transition.PRECEDENCE
}
del ATNState
del RuleStartState
from antlr4.atn.ATNState import *
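# A small sketch (not part of the runtime) exercising matches() above. It
# assumes BasicState (pulled in by the star import from antlr4.atn.ATNState)
# as a convenient concrete target state.
if __name__ == '__main__':
    target = BasicState()
    atom = AtomTransition(target, 5)
    assert atom.matches(5, 1, 100) and not atom.matches(6, 1, 100)
    label = IntervalSet()
    label.addOne(5)
    nset = NotSetTransition(target, label)
    assert nset.matches(6, 1, 100)      # inside the vocabulary, outside the set
    assert not nset.matches(5, 1, 100)  # inside the set, so the negation fails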
# File: antlr4-python3-runtime-4.9.1/src/antlr4/atn/__init__.py
__author__ = 'ericvergnaud'
# File: antlr4-python3-runtime-4.9.1/src/antlr4/dfa/DFA.py
#
# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
from antlr4.atn.ATNState import StarLoopEntryState
from antlr4.atn.ATNConfigSet import ATNConfigSet
from antlr4.atn.ATNState import DecisionState
from antlr4.dfa.DFAState import DFAState
from antlr4.error.Errors import IllegalStateException
class DFA(object):
__slots__ = ('atnStartState', 'decision', '_states', 's0', 'precedenceDfa')
def __init__(self, atnStartState:DecisionState, decision:int=0):
# From which ATN state did we create this DFA?
self.atnStartState = atnStartState
self.decision = decision
# A set of all DFA states. Use {@link Map} so we can get old state back
# ({@link Set} only allows you to see if it's there).
self._states = dict()
self.s0 = None
# {@code true} if this DFA is for a precedence decision; otherwise,
# {@code false}. This is the backing field for {@link #isPrecedenceDfa},
# {@link #setPrecedenceDfa}.
self.precedenceDfa = False
if isinstance(atnStartState, StarLoopEntryState):
if atnStartState.isPrecedenceDecision:
self.precedenceDfa = True
precedenceState = DFAState(configs=ATNConfigSet())
precedenceState.edges = []
precedenceState.isAcceptState = False
precedenceState.requiresFullContext = False
self.s0 = precedenceState
# Get the start state for a specific precedence value.
#
# @param precedence The current precedence.
# @return The start state corresponding to the specified precedence, or
# {@code null} if no start state exists for the specified precedence.
#
# @throws IllegalStateException if this is not a precedence DFA.
# @see #isPrecedenceDfa()
def getPrecedenceStartState(self, precedence:int):
if not self.precedenceDfa:
raise IllegalStateException("Only precedence DFAs may contain a precedence start state.")
# s0.edges is never null for a precedence DFA
if precedence < 0 or precedence >= len(self.s0.edges):
return None
return self.s0.edges[precedence]
# Set the start state for a specific precedence value.
#
# @param precedence The current precedence.
# @param startState The start state corresponding to the specified
# precedence.
#
# @throws IllegalStateException if this is not a precedence DFA.
# @see #isPrecedenceDfa()
#
def setPrecedenceStartState(self, precedence:int, startState:DFAState):
if not self.precedenceDfa:
raise IllegalStateException("Only precedence DFAs may contain a precedence start state.")
if precedence < 0:
return
# synchronization on s0 here is ok. when the DFA is turned into a
# precedence DFA, s0 will be initialized once and not updated again
# s0.edges is never null for a precedence DFA
if precedence >= len(self.s0.edges):
ext = [None] * (precedence + 1 - len(self.s0.edges))
self.s0.edges.extend(ext)
self.s0.edges[precedence] = startState
#
# Sets whether this is a precedence DFA. If the specified value differs
# from the current DFA configuration, the following actions are taken;
# otherwise no changes are made to the current DFA.
#
#
# - The {@link #states} map is cleared
# - If {@code precedenceDfa} is {@code false}, the initial state
# {@link #s0} is set to {@code null}; otherwise, it is initialized to a new
# {@link DFAState} with an empty outgoing {@link DFAState#edges} array to
# store the start states for individual precedence values.
# - The {@link #precedenceDfa} field is updated
#
#
# @param precedenceDfa {@code true} if this is a precedence DFA; otherwise,
# {@code false}
def setPrecedenceDfa(self, precedenceDfa:bool):
if self.precedenceDfa != precedenceDfa:
self._states = dict()
if precedenceDfa:
precedenceState = DFAState(configs=ATNConfigSet())
precedenceState.edges = []
precedenceState.isAcceptState = False
precedenceState.requiresFullContext = False
self.s0 = precedenceState
else:
self.s0 = None
self.precedenceDfa = precedenceDfa
@property
def states(self):
return self._states
# Return a list of all states in this DFA, ordered by state number.
def sortedStates(self):
return sorted(self._states.keys(), key=lambda state: state.stateNumber)
def __str__(self):
return self.toString(None)
def toString(self, literalNames:list=None, symbolicNames:list=None):
if self.s0 is None:
return ""
from antlr4.dfa.DFASerializer import DFASerializer
serializer = DFASerializer(self,literalNames,symbolicNames)
return str(serializer)
def toLexerString(self):
if self.s0 is None:
return ""
from antlr4.dfa.DFASerializer import LexerDFASerializer
serializer = LexerDFASerializer(self)
return str(serializer)
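# Example (a hedged sketch, not part of the runtime): after a parse, every
# decision's DFA can be inspected through the parser's simulator. `parser`
# stands for a hypothetical generated parser instance.
#
# for dfa in parser._interp.decisionToDFA:
#     if len(dfa.states) > 0:
#         print("decision", dfa.decision)
#         print(dfa.toString(parser.literalNames))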
# File: antlr4-python3-runtime-4.9.1/src/antlr4/dfa/DFASerializer.py
#
# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
#/
# A DFA walker that knows how to dump them to serialized strings.#/
from io import StringIO
from antlr4 import DFA
from antlr4.Utils import str_list
from antlr4.dfa.DFAState import DFAState
class DFASerializer(object):
__slots__ = ('dfa', 'literalNames', 'symbolicNames')
def __init__(self, dfa:DFA, literalNames:list=None, symbolicNames:list=None):
self.dfa = dfa
self.literalNames = literalNames
self.symbolicNames = symbolicNames
def __str__(self):
if self.dfa.s0 is None:
return None
with StringIO() as buf:
for s in self.dfa.sortedStates():
n = 0
if s.edges is not None:
n = len(s.edges)
for i in range(0, n):
t = s.edges[i]
if t is not None and t.stateNumber != 0x7FFFFFFF:
buf.write(self.getStateString(s))
label = self.getEdgeLabel(i)
buf.write("-")
buf.write(label)
buf.write("->")
buf.write(self.getStateString(t))
buf.write('\n')
output = buf.getvalue()
if len(output)==0:
return None
else:
return output
def getEdgeLabel(self, i:int):
if i==0:
return "EOF"
if self.literalNames is not None and i<=len(self.literalNames):
return self.literalNames[i-1]
elif self.symbolicNames is not None and i<=len(self.symbolicNames):
return self.symbolicNames[i-1]
else:
return str(i-1)
def getStateString(self, s:DFAState):
n = s.stateNumber
baseStateStr = ( ":" if s.isAcceptState else "") + "s" + str(n) + ( "^" if s.requiresFullContext else "")
if s.isAcceptState:
if s.predicates is not None:
return baseStateStr + "=>" + str_list(s.predicates)
else:
return baseStateStr + "=>" + str(s.prediction)
else:
return baseStateStr
class LexerDFASerializer(DFASerializer):
def __init__(self, dfa:DFA):
super().__init__(dfa, None)
def getEdgeLabel(self, i:int):
return "'" + chr(i) + "'"
# File: antlr4-python3-runtime-4.9.1/src/antlr4/dfa/DFAState.py
#
# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
#/
# Map a predicate to a predicted alternative.#/
from io import StringIO
from antlr4.atn.ATNConfigSet import ATNConfigSet
from antlr4.atn.SemanticContext import SemanticContext
class PredPrediction(object):
__slots__ = ('alt', 'pred')
def __init__(self, pred:SemanticContext, alt:int):
self.alt = alt
self.pred = pred
def __str__(self):
return "(" + str(self.pred) + ", " + str(self.alt) + ")"
# A DFA state represents a set of possible ATN configurations.
# As Aho, Sethi, Ullman p. 117 says "The DFA uses its state
# to keep track of all possible states the ATN can be in after
# reading each input symbol. That is to say, after reading
# input a1a2..an, the DFA is in a state that represents the
# subset T of the states of the ATN that are reachable from the
# ATN's start state along some path labeled a1a2..an."
# In conventional NFA→DFA conversion, therefore, the subset T
# would be a bitset representing the set of states the
# ATN could be in. We need to track the alt predicted by each
# state as well, however. More importantly, we need to maintain
# a stack of states, tracking the closure operations as they
# jump from rule to rule, emulating rule invocations (method calls).
# I have to add a stack to simulate the proper lookahead sequences for
# the underlying LL grammar from which the ATN was derived.
#
# I use a set of ATNConfig objects not simple states. An ATNConfig
# is both a state (ala normal conversion) and a RuleContext describing
# the chain of rules (if any) followed to arrive at that state.
#
# A DFA state may have multiple references to a particular state,
# but with different ATN contexts (with same or different alts)
# meaning that state was reached via a different set of rule invocations.
#/
class DFAState(object):
__slots__ = (
'stateNumber', 'configs', 'edges', 'isAcceptState', 'prediction',
'lexerActionExecutor', 'requiresFullContext', 'predicates'
)
def __init__(self, stateNumber:int=-1, configs:ATNConfigSet=None):
self.stateNumber = stateNumber
# avoid the shared-mutable-default pitfall: build a fresh set per state
self.configs = ATNConfigSet() if configs is None else configs
# {@code edges[symbol]} points to target of symbol. Shift up by 1 so (-1)
# {@link Token#EOF} maps to {@code edges[0]}.
self.edges = None
self.isAcceptState = False
# if accept state, what ttype do we match or alt do we predict?
# This is set to {@link ATN#INVALID_ALT_NUMBER} when {@link #predicates}{@code !=null} or
# {@link #requiresFullContext}.
self.prediction = 0
self.lexerActionExecutor = None
# Indicates that this state was created during SLL prediction that
# discovered a conflict between the configurations in the state. Future
# {@link ParserATNSimulator#execATN} invocations immediately jump to doing
# full context prediction if this field is true.
self.requiresFullContext = False
# During SLL parsing, this is a list of predicates associated with the
# ATN configurations of the DFA state. When we have predicates,
# {@link #requiresFullContext} is {@code false} since full context prediction evaluates predicates
# on-the-fly. If this is not null, then {@link #prediction} is
# {@link ATN#INVALID_ALT_NUMBER}.
#
# We only use these for non-{@link #requiresFullContext} but conflicting states. That
# means we know from the context (it's $ or we don't dip into outer
# context) that it's an ambiguity not a conflict.
#
# This list is computed by {@link ParserATNSimulator#predicateDFAState}.
self.predicates = None
# Get the set of all alts mentioned by all ATN configurations in this
# DFA state.
def getAltSet(self):
if self.configs is not None:
return set(cfg.alt for cfg in self.configs) or None
return None
def __hash__(self):
return hash(self.configs)
# Two {@link DFAState} instances are equal if their ATN configuration sets
# are the same. This method is used to see if a state already exists.
#
# Because the number of alternatives and number of ATN configurations are
# finite, there is a finite number of DFA states that can be processed.
# This is necessary to show that the algorithm terminates.
#
# Cannot test the DFA state numbers here because in
# {@link ParserATNSimulator#addDFAState} we need to know if any other state
# exists that has this exact set of ATN configurations. The
# {@link #stateNumber} is irrelevant.
def __eq__(self, other):
# compare set of ATN configurations in this set with other
if self is other:
return True
elif not isinstance(other, DFAState):
return False
else:
return self.configs==other.configs
def __str__(self):
with StringIO() as buf:
buf.write(str(self.stateNumber))
buf.write(":")
buf.write(str(self.configs))
if self.isAcceptState:
buf.write("=>")
if self.predicates is not None:
buf.write(str(self.predicates))
else:
buf.write(str(self.prediction))
return buf.getvalue()
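# A small sketch (not part of the runtime) of the equality contract above:
# two states with equal configuration sets are the same DFA state, whatever
# their state numbers are, which is exactly what the dfa.states map relies on.
if __name__ == '__main__':
    a = DFAState(stateNumber=1, configs=ATNConfigSet())
    b = DFAState(stateNumber=2, configs=ATNConfigSet())
    assert a == b            # equality looks only at the configs
    assert len({a, b}) == 1  # so the two collapse to one map entry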
# File: antlr4-python3-runtime-4.9.1/src/antlr4/dfa/__init__.py
__author__ = 'ericvergnaud'
# File: antlr4-python3-runtime-4.9.1/src/antlr4/error/DiagnosticErrorListener.py
#
# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
#
#
# This implementation of {@link ANTLRErrorListener} can be used to identify
# certain potential correctness and performance problems in grammars. "Reports"
# are made by calling {@link Parser#notifyErrorListeners} with the appropriate
# message.
#
#
# - Ambiguities: These are cases where more than one path through the
# grammar can match the input.
# - Weak context sensitivity: These are cases where full-context
# prediction resolved an SLL conflict to a unique alternative which equaled the
# minimum alternative of the SLL conflict.
# - Strong (forced) context sensitivity: These are cases where the
# full-context prediction resolved an SLL conflict to a unique alternative,
# and the minimum alternative of the SLL conflict was found to not be
# a truly viable alternative. Two-stage parsing cannot be used for inputs where
# this situation occurs.
#
from io import StringIO
from antlr4 import Parser, DFA
from antlr4.atn.ATNConfigSet import ATNConfigSet
from antlr4.error.ErrorListener import ErrorListener
class DiagnosticErrorListener(ErrorListener):
def __init__(self, exactOnly:bool=True):
# whether all ambiguities or only exact ambiguities are reported.
self.exactOnly = exactOnly
def reportAmbiguity(self, recognizer:Parser, dfa:DFA, startIndex:int,
stopIndex:int, exact:bool, ambigAlts:set, configs:ATNConfigSet):
if self.exactOnly and not exact:
return
with StringIO() as buf:
buf.write("reportAmbiguity d=")
buf.write(self.getDecisionDescription(recognizer, dfa))
buf.write(": ambigAlts=")
buf.write(str(self.getConflictingAlts(ambigAlts, configs)))
buf.write(", input='")
buf.write(recognizer.getTokenStream().getText(startIndex, stopIndex))
buf.write("'")
recognizer.notifyErrorListeners(buf.getvalue())
def reportAttemptingFullContext(self, recognizer:Parser, dfa:DFA, startIndex:int,
stopIndex:int, conflictingAlts:set, configs:ATNConfigSet):
with StringIO() as buf:
buf.write("reportAttemptingFullContext d=")
buf.write(self.getDecisionDescription(recognizer, dfa))
buf.write(", input='")
buf.write(recognizer.getTokenStream().getText(startIndex, stopIndex))
buf.write("'")
recognizer.notifyErrorListeners(buf.getvalue())
def reportContextSensitivity(self, recognizer:Parser, dfa:DFA, startIndex:int,
stopIndex:int, prediction:int, configs:ATNConfigSet):
with StringIO() as buf:
buf.write("reportContextSensitivity d=")
buf.write(self.getDecisionDescription(recognizer, dfa))
buf.write(", input='")
buf.write(recognizer.getTokenStream().getText(startIndex, stopIndex))
buf.write("'")
recognizer.notifyErrorListeners(buf.getvalue())
def getDecisionDescription(self, recognizer:Parser, dfa:DFA):
decision = dfa.decision
ruleIndex = dfa.atnStartState.ruleIndex
ruleNames = recognizer.ruleNames
if ruleIndex < 0 or ruleIndex >= len(ruleNames):
return str(decision)
ruleName = ruleNames[ruleIndex]
if ruleName is None or len(ruleName)==0:
return str(decision)
return str(decision) + " (" + ruleName + ")"
#
# Computes the set of conflicting or ambiguous alternatives from a
# configuration set, if that information was not already provided by the
# parser.
#
# @param reportedAlts The set of conflicting or ambiguous alternatives, as
# reported by the parser.
# @param configs The conflicting or ambiguous configuration set.
# @return Returns {@code reportedAlts} if it is not {@code null}, otherwise
# returns the set of alternatives represented in {@code configs}.
#
def getConflictingAlts(self, reportedAlts:set, configs:ATNConfigSet):
if reportedAlts is not None:
return reportedAlts
result = set()
for config in configs:
result.add(config.alt)
return result
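# Example (a hedged sketch, not part of the runtime): wiring the listener up.
# `MyParser`/`tokens`/`startRule` are hypothetical generated names; note that
# exact ambiguity reports require the LL_EXACT_AMBIG_DETECTION mode.
#
# from antlr4.atn.PredictionMode import PredictionMode
#
# parser = MyParser(tokens)
# parser.removeErrorListeners()
# parser.addErrorListener(DiagnosticErrorListener())
# parser._interp.predictionMode = PredictionMode.LL_EXACT_AMBIG_DETECTION
# tree = parser.startRule()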
# File: antlr4-python3-runtime-4.9.1/src/antlr4/error/ErrorListener.py
#
# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
# Provides an empty default implementation of {@link ANTLRErrorListener}. The
# default implementation of each method does nothing, but can be overridden as
# necessary.
import sys
class ErrorListener(object):
def syntaxError(self, recognizer, offendingSymbol, line, column, msg, e):
pass
def reportAmbiguity(self, recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs):
pass
def reportAttemptingFullContext(self, recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs):
pass
def reportContextSensitivity(self, recognizer, dfa, startIndex, stopIndex, prediction, configs):
pass
class ConsoleErrorListener(ErrorListener):
#
# Provides a default instance of {@link ConsoleErrorListener}.
#
INSTANCE = None
#
# {@inheritDoc}
#
#
# This implementation prints messages to {@link System#err} containing the
# values of {@code line}, {@code charPositionInLine}, and {@code msg} using
# the following format.
#
#
# line line:charPositionInLine msg
#
#
def syntaxError(self, recognizer, offendingSymbol, line, column, msg, e):
print("line " + str(line) + ":" + str(column) + " " + msg, file=sys.stderr)
ConsoleErrorListener.INSTANCE = ConsoleErrorListener()
class ProxyErrorListener(ErrorListener):
def __init__(self, delegates):
super().__init__()
if delegates is None:
raise ReferenceError("delegates")
self.delegates = delegates
def syntaxError(self, recognizer, offendingSymbol, line, column, msg, e):
for delegate in self.delegates:
delegate.syntaxError(recognizer, offendingSymbol, line, column, msg, e)
def reportAmbiguity(self, recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs):
for delegate in self.delegates:
delegate.reportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs)
def reportAttemptingFullContext(self, recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs):
for delegate in self.delegates:
delegate.reportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs)
def reportContextSensitivity(self, recognizer, dfa, startIndex, stopIndex, prediction, configs):
for delegate in self.delegates:
delegate.reportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs)
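#
# Sketch of typical listener wiring (user code; my_listener is any ErrorListener
# subclass you supply). The recognizer dispatches every callback through a
# ProxyErrorListener over all registered listeners, so registration is just:
#
#   parser.removeErrorListeners()            # drop ConsoleErrorListener.INSTANCE
#   parser.addErrorListener(my_listener)
#   parser.addErrorListener(DiagnosticErrorListener())
#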
antlr4-python3-runtime-4.9.1/src/antlr4/error/ErrorStrategy.py
#
# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
#
import sys
from antlr4.IntervalSet import IntervalSet
from antlr4.Token import Token
from antlr4.atn.ATNState import ATNState
from antlr4.error.Errors import RecognitionException, NoViableAltException, InputMismatchException, \
FailedPredicateException, ParseCancellationException
# need forward declaration
Parser = None
class ErrorStrategy(object):
def reset(self, recognizer:Parser):
pass
def recoverInline(self, recognizer:Parser):
pass
def recover(self, recognizer:Parser, e:RecognitionException):
pass
def sync(self, recognizer:Parser):
pass
def inErrorRecoveryMode(self, recognizer:Parser):
pass
def reportError(self, recognizer:Parser, e:RecognitionException):
pass
# This is the default implementation of {@link ANTLRErrorStrategy} used for
# error reporting and recovery in ANTLR parsers.
#
class DefaultErrorStrategy(ErrorStrategy):
def __init__(self):
super().__init__()
# Indicates whether the error strategy is currently "recovering from an
# error". This is used to suppress reporting multiple error messages while
# attempting to recover from a detected syntax error.
#
# @see #inErrorRecoveryMode
#
self.errorRecoveryMode = False
# The index into the input stream where the last error occurred.
# This is used to prevent infinite loops where an error is found
# but no token is consumed during recovery...another error is found,
# ad nauseam. This is a failsafe mechanism to guarantee that at least
# one token/tree node is consumed for two errors.
#
self.lastErrorIndex = -1
self.lastErrorStates = None
# The default implementation simply calls {@link #endErrorCondition} to
# ensure that the handler is not in error recovery mode.
def reset(self, recognizer:Parser):
self.endErrorCondition(recognizer)
#
# This method is called to enter error recovery mode when a recognition
# exception is reported.
#
# @param recognizer the parser instance
#
def beginErrorCondition(self, recognizer:Parser):
self.errorRecoveryMode = True
def inErrorRecoveryMode(self, recognizer:Parser):
return self.errorRecoveryMode
#
# This method is called to leave error recovery mode after recovering from
# a recognition exception.
#
# @param recognizer
#
def endErrorCondition(self, recognizer:Parser):
self.errorRecoveryMode = False
self.lastErrorStates = None
self.lastErrorIndex = -1
#
# {@inheritDoc}
#
# The default implementation simply calls {@link #endErrorCondition}.
#
def reportMatch(self, recognizer:Parser):
self.endErrorCondition(recognizer)
#
# {@inheritDoc}
#
# The default implementation returns immediately if the handler is already
# in error recovery mode. Otherwise, it calls {@link #beginErrorCondition}
# and dispatches the reporting task based on the runtime type of {@code e}
# according to the following table.
#
#
# - {@link NoViableAltException}: Dispatches the call to
# {@link #reportNoViableAlternative}
# - {@link InputMismatchException}: Dispatches the call to
# {@link #reportInputMismatch}
# - {@link FailedPredicateException}: Dispatches the call to
# {@link #reportFailedPredicate}
# - All other types: calls {@link Parser#notifyErrorListeners} to report
# the exception
#
#
def reportError(self, recognizer:Parser, e:RecognitionException):
# if we've already reported an error and have not matched a token
# yet successfully, don't report any errors.
if self.inErrorRecoveryMode(recognizer):
return # don't report spurious errors
self.beginErrorCondition(recognizer)
if isinstance( e, NoViableAltException ):
self.reportNoViableAlternative(recognizer, e)
elif isinstance( e, InputMismatchException ):
self.reportInputMismatch(recognizer, e)
elif isinstance( e, FailedPredicateException ):
self.reportFailedPredicate(recognizer, e)
else:
print("unknown recognition error type: " + type(e).__name__)
recognizer.notifyErrorListeners(e.message, e.offendingToken, e)
#
# {@inheritDoc}
#
# The default implementation resynchronizes the parser by consuming tokens
# until we find one in the resynchronization set--loosely the set of tokens
# that can follow the current rule.
#
def recover(self, recognizer:Parser, e:RecognitionException):
if self.lastErrorIndex==recognizer.getInputStream().index \
and self.lastErrorStates is not None \
and recognizer.state in self.lastErrorStates:
# uh oh, another error at same token index and previously-visited
# state in ATN; must be a case where LT(1) is in the recovery
# token set so nothing got consumed. Consume a single token
# at least to prevent an infinite loop; this is a failsafe.
recognizer.consume()
self.lastErrorIndex = recognizer._input.index
if self.lastErrorStates is None:
self.lastErrorStates = []
self.lastErrorStates.append(recognizer.state)
followSet = self.getErrorRecoverySet(recognizer)
self.consumeUntil(recognizer, followSet)
# The default implementation of {@link ANTLRErrorStrategy#sync} makes sure
# that the current lookahead symbol is consistent with what we were expecting
# at this point in the ATN. You can call this anytime but ANTLR only
# generates code to check before subrules/loops and each iteration.
#
# Implements Jim Idle's magic sync mechanism in closures and optional
# subrules. E.g.,
#
#
# a : sync ( stuff sync )* ;
# sync : {consume to what can follow sync} ;
#
#
# At the start of a sub rule upon error, {@link #sync} performs single
# token deletion, if possible. If it can't do that, it bails on the current
# rule and uses the default error recovery, which consumes until the
# resynchronization set of the current rule.
#
# If the sub rule is optional ({@code (...)?}, {@code (...)*}, or block
# with an empty alternative), then the expected set includes what follows
# the subrule.
#
# During loop iteration, it consumes until it sees a token that can start a
# sub rule or what follows loop. Yes, that is pretty aggressive. We opt to
# stay in the loop as long as possible.
#
# ORIGINS
#
# Previous versions of ANTLR did a poor job of their recovery within loops.
# A single mismatched token or missing token would force the parser to bail
# out of all of the rules surrounding the loop. So, for rule
#
#
# classDef : 'class' ID '{' member* '}'
#
#
# input with an extra token between members would force the parser to
# consume until it found the next class definition rather than the next
# member definition of the current class.
#
# This functionality cost a little bit of effort because the parser has to
# compare the token set at the start of the loop and at each iteration. If for
# some reason speed is suffering for you, you can turn off this
# functionality by simply overriding this method with an empty body.
#
def sync(self, recognizer:Parser):
# If already recovering, don't try to sync
if self.inErrorRecoveryMode(recognizer):
return
s = recognizer._interp.atn.states[recognizer.state]
la = recognizer.getTokenStream().LA(1)
# try cheaper subset first; might get lucky. seems to shave a wee bit off
nextTokens = recognizer.atn.nextTokens(s)
if Token.EPSILON in nextTokens or la in nextTokens:
return
if s.stateType in [ATNState.BLOCK_START, ATNState.STAR_BLOCK_START,
ATNState.PLUS_BLOCK_START, ATNState.STAR_LOOP_ENTRY]:
# report error and recover if possible
if self.singleTokenDeletion(recognizer) is not None:
return
else:
raise InputMismatchException(recognizer)
elif s.stateType in [ATNState.PLUS_LOOP_BACK, ATNState.STAR_LOOP_BACK]:
self.reportUnwantedToken(recognizer)
expecting = recognizer.getExpectedTokens()
whatFollowsLoopIterationOrRule = expecting.addSet(self.getErrorRecoverySet(recognizer))
self.consumeUntil(recognizer, whatFollowsLoopIterationOrRule)
else:
# do nothing if we can't identify the exact kind of ATN state
pass
# This is called by {@link #reportError} when the exception is a
# {@link NoViableAltException}.
#
# @see #reportError
#
# @param recognizer the parser instance
# @param e the recognition exception
#
def reportNoViableAlternative(self, recognizer:Parser, e:NoViableAltException):
tokens = recognizer.getTokenStream()
if tokens is not None:
if e.startToken.type==Token.EOF:
input = ""
else:
input = tokens.getText(e.startToken, e.offendingToken)
else:
input = ""
msg = "no viable alternative at input " + self.escapeWSAndQuote(input)
recognizer.notifyErrorListeners(msg, e.offendingToken, e)
#
# This is called by {@link #reportError} when the exception is an
# {@link InputMismatchException}.
#
# @see #reportError
#
# @param recognizer the parser instance
# @param e the recognition exception
#
def reportInputMismatch(self, recognizer:Parser, e:InputMismatchException):
msg = "mismatched input " + self.getTokenErrorDisplay(e.offendingToken) \
+ " expecting " + e.getExpectedTokens().toString(recognizer.literalNames, recognizer.symbolicNames)
recognizer.notifyErrorListeners(msg, e.offendingToken, e)
#
# This is called by {@link #reportError} when the exception is a
# {@link FailedPredicateException}.
#
# @see #reportError
#
# @param recognizer the parser instance
# @param e the recognition exception
#
def reportFailedPredicate(self, recognizer, e):
ruleName = recognizer.ruleNames[recognizer._ctx.getRuleIndex()]
msg = "rule " + ruleName + " " + e.message
recognizer.notifyErrorListeners(msg, e.offendingToken, e)
# This method is called to report a syntax error which requires the removal
# of a token from the input stream. At the time this method is called, the
# erroneous symbol is the current {@code LT(1)} symbol and has not yet been
# removed from the input stream. When this method returns,
# {@code recognizer} is in error recovery mode.
#
# This method is called when {@link #singleTokenDeletion} identifies
# single-token deletion as a viable recovery strategy for a mismatched
# input error.
#
# The default implementation simply returns if the handler is already in
# error recovery mode. Otherwise, it calls {@link #beginErrorCondition} to
# enter error recovery mode, followed by calling
# {@link Parser#notifyErrorListeners}.
#
# @param recognizer the parser instance
#
def reportUnwantedToken(self, recognizer:Parser):
if self.inErrorRecoveryMode(recognizer):
return
self.beginErrorCondition(recognizer)
t = recognizer.getCurrentToken()
tokenName = self.getTokenErrorDisplay(t)
expecting = self.getExpectedTokens(recognizer)
msg = "extraneous input " + tokenName + " expecting " \
+ expecting.toString(recognizer.literalNames, recognizer.symbolicNames)
recognizer.notifyErrorListeners(msg, t, None)
# This method is called to report a syntax error which requires the
# insertion of a missing token into the input stream. At the time this
# method is called, the missing token has not yet been inserted. When this
# method returns, {@code recognizer} is in error recovery mode.
#
# This method is called when {@link #singleTokenInsertion} identifies
# single-token insertion as a viable recovery strategy for a mismatched
# input error.
#
# The default implementation simply returns if the handler is already in
# error recovery mode. Otherwise, it calls {@link #beginErrorCondition} to
# enter error recovery mode, followed by calling
# {@link Parser#notifyErrorListeners}.
#
# @param recognizer the parser instance
#
def reportMissingToken(self, recognizer:Parser):
if self.inErrorRecoveryMode(recognizer):
return
self.beginErrorCondition(recognizer)
t = recognizer.getCurrentToken()
expecting = self.getExpectedTokens(recognizer)
msg = "missing " + expecting.toString(recognizer.literalNames, recognizer.symbolicNames) \
+ " at " + self.getTokenErrorDisplay(t)
recognizer.notifyErrorListeners(msg, t, None)
# The default implementation attempts to recover from the mismatched input
# by using single token insertion and deletion as described below. If the
# recovery attempt fails, this method throws an
# {@link InputMismatchException}.
#
# EXTRA TOKEN (single token deletion)
#
# {@code LA(1)} is not what we are looking for. If {@code LA(2)} has the
# right token, however, then assume {@code LA(1)} is some extra spurious
# token and delete it. Then consume and return the next token (which was
# the {@code LA(2)} token) as the successful result of the match operation.
#
# This recovery strategy is implemented by {@link #singleTokenDeletion}.
#
# MISSING TOKEN (single token insertion)
#
# If current token (at {@code LA(1)}) is consistent with what could come
# after the expected {@code LA(1)} token, then assume the token is missing
# and use the parser's {@link TokenFactory} to create it on the fly. The
# "insertion" is performed by returning the created token as the successful
# result of the match operation.
#
# This recovery strategy is implemented by {@link #singleTokenInsertion}.
#
# EXAMPLE
#
# For example, input {@code i=(3;} is clearly missing the {@code ')'}. When
# the parser returns from the nested call to {@code expr}, it will have
# call chain:
#
#
# stat → expr → atom
#
#
# and it will be trying to match the {@code ')'} at this point in the
# derivation:
#
#
# => ID '=' '(' INT ')' ('+' atom)* ';'
# ^
#
#
# The attempt to match {@code ')'} will fail when it sees {@code ';'} and
# call {@link #recoverInline}. To recover, it sees that {@code LA(1)==';'}
# is in the set of tokens that can follow the {@code ')'} token reference
# in rule {@code atom}. It can assume that you forgot the {@code ')'}.
#
def recoverInline(self, recognizer:Parser):
# SINGLE TOKEN DELETION
matchedSymbol = self.singleTokenDeletion(recognizer)
if matchedSymbol is not None:
# we have deleted the extra token.
# now, move past ttype token as if all were ok
recognizer.consume()
return matchedSymbol
# SINGLE TOKEN INSERTION
if self.singleTokenInsertion(recognizer):
return self.getMissingSymbol(recognizer)
# even that didn't work; must throw the exception
raise InputMismatchException(recognizer)
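# Worked sketch of the {@code i=(3;} example above, at the point where the
# parser needs ')' but sees ';' (names as in this class):
#
#   self.singleTokenDeletion(recognizer)      # None: LA(2) is not ')' either
#   self.singleTokenInsertion(recognizer)     # True: ';' can follow the ')'
#   self.getMissingSymbol(recognizer)         # conjured ')'; reported roughly
#                                             # as: missing ')' at ';'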
#
# This method implements the single-token insertion inline error recovery
# strategy. It is called by {@link #recoverInline} if the single-token
# deletion strategy fails to recover from the mismatched input. If this
# method returns {@code true}, {@code recognizer} will be in error recovery
# mode.
#
# This method determines whether or not single-token insertion is viable by
# checking if the {@code LA(1)} input symbol could be successfully matched
# if it were instead the {@code LA(2)} symbol. If this method returns
# {@code true}, the caller is responsible for creating and inserting a
# token with the correct type to produce this behavior.
#
# @param recognizer the parser instance
# @return {@code true} if single-token insertion is a viable recovery
# strategy for the current mismatched input, otherwise {@code false}
#
def singleTokenInsertion(self, recognizer:Parser):
currentSymbolType = recognizer.getTokenStream().LA(1)
# if current token is consistent with what could come after current
# ATN state, then we know we're missing a token; error recovery
# is free to conjure up and insert the missing token
atn = recognizer._interp.atn
currentState = atn.states[recognizer.state]
next = currentState.transitions[0].target
expectingAtLL2 = atn.nextTokens(next, recognizer._ctx)
if currentSymbolType in expectingAtLL2:
self.reportMissingToken(recognizer)
return True
else:
return False
# This method implements the single-token deletion inline error recovery
# strategy. It is called by {@link #recoverInline} to attempt to recover
# from mismatched input. If this method returns null, the parser and error
# handler state will not have changed. If this method returns non-null,
# {@code recognizer} will not be in error recovery mode since the
# returned token was a successful match.
#
# If the single-token deletion is successful, this method calls
# {@link #reportUnwantedToken} to report the error, followed by
# {@link Parser#consume} to actually "delete" the extraneous token. Then,
# before returning {@link #reportMatch} is called to signal a successful
# match.
#
# @param recognizer the parser instance
# @return the successfully matched {@link Token} instance if single-token
# deletion successfully recovers from the mismatched input, otherwise
# {@code null}
#
def singleTokenDeletion(self, recognizer:Parser):
nextTokenType = recognizer.getTokenStream().LA(2)
expecting = self.getExpectedTokens(recognizer)
if nextTokenType in expecting:
self.reportUnwantedToken(recognizer)
# print("recoverFromMismatchedToken deleting " \
# + str(recognizer.getTokenStream().LT(1)) \
# + " since " + str(recognizer.getTokenStream().LT(2)) \
# + " is what we want", file=sys.stderr)
recognizer.consume() # simply delete extra token
# we want to return the token we're actually matching
matchedSymbol = recognizer.getCurrentToken()
self.reportMatch(recognizer) # we know current token is correct
return matchedSymbol
else:
return None
# Conjure up a missing token during error recovery.
#
# The recognizer attempts to recover from single missing
# symbols. But, actions might refer to that missing symbol.
# For example, x=ID {f($x);}. The action clearly assumes
# that there has been an identifier matched previously and that
# $x points at that token. If that token is missing, but
# the next token in the stream is what we want we assume that
# this token is missing and we keep going. Because we
# have to return some token to replace the missing token,
# we have to conjure one up. This method gives the user control
# over the tokens returned for missing tokens. Mostly,
# you will want to create something special for identifier
# tokens. For literals such as '{' and ',', the default
# action in the parser or tree parser works. It simply creates
# a CommonToken of the appropriate type. The text will be the token.
# If you change what tokens must be created by the lexer,
# override this method to create the appropriate tokens.
#
def getMissingSymbol(self, recognizer:Parser):
currentSymbol = recognizer.getCurrentToken()
expecting = self.getExpectedTokens(recognizer)
expectedTokenType = expecting[0] # get any element
if expectedTokenType==Token.EOF:
tokenText = ""
else:
name = None
if expectedTokenType < len(recognizer.literalNames):
name = recognizer.literalNames[expectedTokenType]
if name is None and expectedTokenType < len(recognizer.symbolicNames):
name = recognizer.symbolicNames[expectedTokenType]
tokenText = ""
current = currentSymbol
lookback = recognizer.getTokenStream().LT(-1)
if current.type==Token.EOF and lookback is not None:
current = lookback
return recognizer.getTokenFactory().create(current.source,
expectedTokenType, tokenText, Token.DEFAULT_CHANNEL,
-1, -1, current.line, current.column)
def getExpectedTokens(self, recognizer:Parser):
return recognizer.getExpectedTokens()
# How should a token be displayed in an error message? The default
# is to display just the text, but during development you might
# want to have a lot of information spit out. Override in that case
# to use t.toString() (which, for CommonToken, dumps everything about
# the token). This is better than forcing you to override a method in
# your token objects because you don't have to go modify your lexer
# so that it creates a new Java type.
#
def getTokenErrorDisplay(self, t:Token):
if t is None:
return ""
s = t.text
if s is None:
if t.type==Token.EOF:
s = ""
else:
s = "<" + str(t.type) + ">"
return self.escapeWSAndQuote(s)
def escapeWSAndQuote(self, s:str):
s = s.replace("\n","\\n")
s = s.replace("\r","\\r")
s = s.replace("\t","\\t")
return "'" + s + "'"
# Compute the error recovery set for the current rule. During
# rule invocation, the parser pushes the set of tokens that can
# follow that rule reference on the stack; this amounts to
# computing FIRST of what follows the rule reference in the
# enclosing rule. See LinearApproximator.FIRST().
# This local follow set only includes tokens
# from within the rule; i.e., the FIRST computation done by
# ANTLR stops at the end of a rule.
#
# EXAMPLE
#
# When you find a "no viable alt exception", the input is not
# consistent with any of the alternatives for rule r. The best
# thing to do is to consume tokens until you see something that
# can legally follow a call to r *or* any rule that called r.
# You don't want the exact set of viable next tokens because the
# input might just be missing a token--you might consume the
# rest of the input looking for one of the missing tokens.
#
# Consider grammar:
#
# a : '[' b ']'
# | '(' b ')'
# ;
# b : c '^' INT ;
# c : ID
# | INT
# ;
#
# At each rule invocation, the set of tokens that could follow
# that rule is pushed on a stack. Here are the various
# context-sensitive follow sets:
#
# FOLLOW(b1_in_a) = FIRST(']') = ']'
# FOLLOW(b2_in_a) = FIRST(')') = ')'
# FOLLOW(c_in_b) = FIRST('^') = '^'
#
# Upon erroneous input "[]", the call chain is
#
# a -> b -> c
#
# and, hence, the follow context stack is:
#
# depth   follow set   start of rule execution
#   0     <EOF>        a (from main())
#   1     ']'          b
#   2     '^'          c
#
# Notice that ')' is not included, because b would have to have
# been called from a different context in rule a for ')' to be
# included.
#
# For error recovery, we cannot consider FOLLOW(c)
# (context-sensitive or otherwise). We need the combined set of
# all context-sensitive FOLLOW sets--the set of all tokens that
# could follow any reference in the call chain. We need to
# resync to one of those tokens. Note that FOLLOW(c)='^' and if
# we resync'd to that token, we'd consume until EOF. We need to
# sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}.
# In this case, for input "[]", LA(1) is ']' and in the set, so we would
# not consume anything. After printing an error, rule c would
# return normally. Rule b would not find the required '^' though.
# At this point, it gets a mismatched token error and throws an
# exception (since LA(1) is not in the viable following token
# set). The rule exception handler tries to recover, but finds
# the same recovery set and doesn't consume anything. Rule b
# exits normally returning to rule a. Now it finds the ']' (and
# with the successful match exits errorRecovery mode).
#
# So, you can see that the parser walks up the call chain looking
# for the token that was a member of the recovery set.
#
# Errors are not generated in errorRecovery mode.
#
# ANTLR's error recovery mechanism is based upon original ideas:
#
# "Algorithms + Data Structures = Programs" by Niklaus Wirth
#
# and
#
# "A note on error recovery in recursive descent parsers":
# http://portal.acm.org/citation.cfm?id=947902.947905
#
# Later, Josef Grosch had some good ideas:
#
# "Efficient and Comfortable Error Recovery in Recursive Descent
# Parsers":
# ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip
#
# Like Grosch I implement context-sensitive FOLLOW sets that are combined
# at run-time upon error to avoid overhead during parsing.
#
def getErrorRecoverySet(self, recognizer:Parser):
atn = recognizer._interp.atn
ctx = recognizer._ctx
recoverSet = IntervalSet()
while ctx is not None and ctx.invokingState>=0:
# compute what follows who invoked us
invokingState = atn.states[ctx.invokingState]
rt = invokingState.transitions[0]
follow = atn.nextTokens(rt.followState)
recoverSet.addSet(follow)
ctx = ctx.parentCtx
recoverSet.removeOne(Token.EPSILON)
return recoverSet
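# Tiny illustrative sketch (plain Python, not runtime code) of the union
# computed above for the example grammar, input "[]", call chain a -> b -> c:
#
#   follow_by_depth = [{"']'"}, {"'^'"}]          # pushed for b and for c
#   recovery_set = set().union(*follow_by_depth)  # {"']'", "'^'"}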
# Consume tokens until one matches the given token set.#
def consumeUntil(self, recognizer:Parser, set_:set):
ttype = recognizer.getTokenStream().LA(1)
while ttype != Token.EOF and not ttype in set_:
recognizer.consume()
ttype = recognizer.getTokenStream().LA(1)
#
# This implementation of {@link ANTLRErrorStrategy} responds to syntax errors
# by immediately canceling the parse operation with a
# {@link ParseCancellationException}. The implementation ensures that the
# {@link ParserRuleContext#exception} field is set for all parse tree nodes
# that were not completed prior to encountering the error.
#
#
# This error strategy is useful in the following scenarios.
#
#
# - Two-stage parsing: This error strategy allows the first
# stage of two-stage parsing to immediately terminate if an error is
# encountered, and immediately fall back to the second stage. In addition to
# avoiding wasted work by attempting to recover from errors here, the empty
# implementation of {@link BailErrorStrategy#sync} improves the performance of
# the first stage.
# - Silent validation: When syntax errors are not being
# reported or logged, and the parse result is simply ignored if errors occur,
# the {@link BailErrorStrategy} avoids wasting work on recovering from errors
# when the result will be ignored either way.
#
#
#
# {@code myparser.setErrorHandler(new BailErrorStrategy());}
#
# @see Parser#setErrorHandler(ANTLRErrorStrategy)
#
class BailErrorStrategy(DefaultErrorStrategy):
# Instead of recovering from exception {@code e}, re-throw it wrapped
# in a {@link ParseCancellationException} so it is not caught by the
# rule function catches. Use {@link Exception#getCause()} to get the
# original {@link RecognitionException}.
#
def recover(self, recognizer:Parser, e:RecognitionException):
context = recognizer._ctx
while context is not None:
context.exception = e
context = context.parentCtx
raise ParseCancellationException(e)
# Make sure we don't attempt to recover inline; if the parser
# successfully recovers, it won't throw an exception.
#
def recoverInline(self, recognizer:Parser):
self.recover(recognizer, InputMismatchException(recognizer))
# Make sure we don't attempt to recover from problems in subrules.#
def sync(self, recognizer:Parser):
pass
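# Python sketch of the two-stage parse described above (startRule is a
# hypothetical entry rule; tokens is the parser's CommonTokenStream):
#
#   from antlr4.atn.PredictionMode import PredictionMode
#   parser._errHandler = BailErrorStrategy()
#   parser._interp.predictionMode = PredictionMode.SLL
#   try:
#       tree = parser.startRule()                 # stage 1: fast, bails on error
#   except ParseCancellationException:
#       tokens.seek(0)                            # rewind input
#       parser.reset()
#       parser._errHandler = DefaultErrorStrategy()
#       parser._interp.predictionMode = PredictionMode.LL
#       tree = parser.startRule()                 # stage 2: full precision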
del Parser
antlr4-python3-runtime-4.9.1/src/antlr4/error/Errors.py
# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
#
# need forward declaration
Token = None
Lexer = None
Parser = None
TokenStream = None
ATNConfigSet = None
ParserRulecontext = None
PredicateTransition = None
BufferedTokenStream = None
class UnsupportedOperationException(Exception):
def __init__(self, msg:str):
super().__init__(msg)
class IllegalStateException(Exception):
def __init__(self, msg:str):
super().__init__(msg)
class CancellationException(IllegalStateException):
def __init__(self, msg:str):
super().__init__(msg)
# The root of the ANTLR exception hierarchy. In general, ANTLR tracks just
# 3 kinds of errors: prediction errors, failed predicate errors, and
# mismatched input errors. In each case, the parser knows where it is
# in the input, where it is in the ATN, the rule invocation stack,
# and what kind of problem occurred.
from antlr4.InputStream import InputStream
from antlr4.ParserRuleContext import ParserRuleContext
from antlr4.Recognizer import Recognizer
class RecognitionException(Exception):
def __init__(self, message:str=None, recognizer:Recognizer=None, input:InputStream=None, ctx:ParserRulecontext=None):
super().__init__(message)
self.message = message
self.recognizer = recognizer
self.input = input
self.ctx = ctx
# The current {@link Token} when an error occurred. Since not all streams
# support accessing symbols by index, we have to track the {@link Token}
# instance itself.
self.offendingToken = None
# Get the ATN state number the parser was in at the time the error
# occurred. For {@link NoViableAltException} and
# {@link LexerNoViableAltException} exceptions, this is the
# {@link DecisionState} number. For others, it is the state whose outgoing
# edge we couldn't match.
self.offendingState = -1
if recognizer is not None:
self.offendingState = recognizer.state
# If the state number is not known, this method returns -1.
#
# Gets the set of input symbols which could potentially follow the
# previously matched symbol at the time this exception was thrown.
#
# If the set of expected tokens is not known and could not be computed,
# this method returns {@code null}.
#
# @return The set of token types that could potentially follow the current
# state in the ATN, or {@code null} if the information is not available.
#/
def getExpectedTokens(self):
if self.recognizer is not None:
return self.recognizer.atn.getExpectedTokens(self.offendingState, self.ctx)
else:
return None
class LexerNoViableAltException(RecognitionException):
def __init__(self, lexer:Lexer, input:InputStream, startIndex:int, deadEndConfigs:ATNConfigSet):
super().__init__(message=None, recognizer=lexer, input=input, ctx=None)
self.startIndex = startIndex
self.deadEndConfigs = deadEndConfigs
def __str__(self):
symbol = ""
if self.startIndex >= 0 and self.startIndex < self.input.size:
symbol = self.input.getText(self.startIndex, self.startIndex)
# TODO symbol = Utils.escapeWhitespace(symbol, false);
return "LexerNoViableAltException('" + symbol + "')"
# Indicates that the parser could not decide which of two or more paths
# to take based upon the remaining input. It tracks the starting token
# of the offending input and also knows where the parser was
# in the various paths when the error occurred. Reported by reportNoViableAlternative().
#
class NoViableAltException(RecognitionException):
def __init__(self, recognizer:Parser, input:TokenStream=None, startToken:Token=None,
offendingToken:Token=None, deadEndConfigs:ATNConfigSet=None, ctx:ParserRuleContext=None):
if ctx is None:
ctx = recognizer._ctx
if offendingToken is None:
offendingToken = recognizer.getCurrentToken()
if startToken is None:
startToken = recognizer.getCurrentToken()
if input is None:
input = recognizer.getInputStream()
super().__init__(recognizer=recognizer, input=input, ctx=ctx)
# Which configurations did we try at input.index() that couldn't match input.LT(1)?#
self.deadEndConfigs = deadEndConfigs
# The token object at the start index; the input stream might
# not be buffering tokens so get a reference to it. (At the
# time the error occurred, of course the stream needs to keep a
# buffer all of the tokens but later we might not have access to those.)
self.startToken = startToken
self.offendingToken = offendingToken
# This signifies any kind of mismatched input exceptions such as
# when the current input does not match the expected token.
#
class InputMismatchException(RecognitionException):
def __init__(self, recognizer:Parser):
super().__init__(recognizer=recognizer, input=recognizer.getInputStream(), ctx=recognizer._ctx)
self.offendingToken = recognizer.getCurrentToken()
# A semantic predicate failed during validation. Validation of predicates
# occurs when normally parsing the alternative just like matching a token.
# Disambiguating predicate evaluation occurs when we test a predicate during
# prediction.
class FailedPredicateException(RecognitionException):
def __init__(self, recognizer:Parser, predicate:str=None, message:str=None):
super().__init__(message=self.formatMessage(predicate,message), recognizer=recognizer,
input=recognizer.getInputStream(), ctx=recognizer._ctx)
s = recognizer._interp.atn.states[recognizer.state]
trans = s.transitions[0]
from antlr4.atn.Transition import PredicateTransition
if isinstance(trans, PredicateTransition):
self.ruleIndex = trans.ruleIndex
self.predicateIndex = trans.predIndex
else:
self.ruleIndex = 0
self.predicateIndex = 0
self.predicate = predicate
self.offendingToken = recognizer.getCurrentToken()
def formatMessage(self, predicate:str, message:str):
if message is not None:
return message
else:
return "failed predicate: {" + predicate + "}?"
class ParseCancellationException(CancellationException):
pass
del Token
del Lexer
del Parser
del TokenStream
del ATNConfigSet
del ParserRulecontext
del PredicateTransition
del BufferedTokenStream
antlr4-python3-runtime-4.9.1/src/antlr4/error/__init__.py
__author__ = 'ericvergnaud'
antlr4-python3-runtime-4.9.1/src/antlr4/tree/Chunk.py
#
# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
#
class Chunk(object):
pass
class TagChunk(Chunk):
__slots__ = ('tag', 'label')
def __init__(self, tag:str, label:str=None):
self.tag = tag
self.label = label
def __str__(self):
if self.label is None:
return self.tag
else:
return self.label + ":" + self.tag
class TextChunk(Chunk):
__slots__ = 'text'
def __init__(self, text:str):
self.text = text
def __str__(self):
return "'" + self.text + "'"
antlr4-python3-runtime-4.9.1/src/antlr4/tree/ParseTreeMatch.py
#
# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
#
#
# Represents the result of matching a {@link ParseTree} against a tree pattern.
#
from io import StringIO
from antlr4.tree.ParseTreePattern import ParseTreePattern
from antlr4.tree.Tree import ParseTree
class ParseTreeMatch(object):
__slots__ = ('tree', 'pattern', 'labels', 'mismatchedNode')
#
# Constructs a new instance of {@link ParseTreeMatch} from the specified
# parse tree and pattern.
#
# @param tree The parse tree to match against the pattern.
# @param pattern The parse tree pattern.
# @param labels A mapping from label names to collections of
# {@link ParseTree} objects located by the tree pattern matching process.
# @param mismatchedNode The first node which failed to match the tree
# pattern during the matching process.
#
# @exception IllegalArgumentException if {@code tree} is {@code null}
# @exception IllegalArgumentException if {@code pattern} is {@code null}
# @exception IllegalArgumentException if {@code labels} is {@code null}
#
def __init__(self, tree:ParseTree, pattern:ParseTreePattern, labels:dict, mismatchedNode:ParseTree):
if tree is None:
raise Exception("tree cannot be null")
if pattern is None:
raise Exception("pattern cannot be null")
if labels is None:
raise Exception("labels cannot be null")
self.tree = tree
self.pattern = pattern
self.labels = labels
self.mismatchedNode = mismatchedNode
#
# Get the last node associated with a specific {@code label}.
#
# For example, for pattern {@code <id:ID>}, {@code get("id")} returns the
# node matched for that {@code ID}. If more than one node
# matched the specified label, only the last is returned. If there is
# no node associated with the label, this returns {@code null}.
#
# Pattern tags like {@code <ID>} and {@code <expr>} without labels are
# considered to be labeled with {@code ID} and {@code expr}, respectively.
#
# @param label The label to check.
#
# @return The last {@link ParseTree} to match a tag with the specified
# label, or {@code null} if no parse tree matched a tag with the label.
#
def get(self, label:str):
parseTrees = self.labels.get(label, None)
if parseTrees is None or len(parseTrees)==0:
return None
else:
return parseTrees[len(parseTrees)-1]
#
# Return all nodes matching a rule or token tag with the specified label.
#
# If the {@code label} is the name of a parser rule or token in the
# grammar, the resulting list will contain both the parse trees matching
# rule or tags explicitly labeled with the label and the complete set of
# parse trees matching the labeled and unlabeled tags in the pattern for
# the parser rule or token. For example, if {@code label} is {@code "foo"},
# the result will contain all of the following.
#
#
# - Parse tree nodes matching tags of the form {@code <foo:anyRuleName>} and
# {@code <foo:AnyTokenName>}.
# - Parse tree nodes matching tags of the form {@code <anyLabel:foo>}.
# - Parse tree nodes matching tags of the form {@code <foo>}.
#
#
# @param label The label.
#
# @return A collection of all {@link ParseTree} nodes matching tags with
# the specified {@code label}. If no nodes matched the label, an empty list
# is returned.
#
def getAll(self, label:str):
nodes = self.labels.get(label, None)
if nodes is None:
return list()
else:
return nodes
#
# Gets a value indicating whether the match operation succeeded.
#
# @return {@code true} if the match operation succeeded; otherwise,
# {@code false}.
#
def succeeded(self):
return self.mismatchedNode is None
#
# {@inheritDoc}
#
def __str__(self):
with StringIO() as buf:
buf.write("Match ")
buf.write("succeeded" if self.succeeded() else "failed")
buf.write("; found ")
buf.write(str(len(self.labels)))
buf.write(" labels")
return buf.getvalue()
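# Usage sketch (pattern and tree come from user code):
#
#   m = pattern.match(tree)        # pattern: a compiled ParseTreePattern
#   if m.succeeded():
#       last_id = m.get("ID")      # last node matched by an <ID> tag
#       all_ids = m.getAll("ID")   # every node matched by <ID> tags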
antlr4-python3-runtime-4.9.1/src/antlr4/tree/ParseTreePattern.py
#
# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
#
#
# A pattern like {@code <ID> = <expr>;} converted to a {@link ParseTree} by
# {@link ParseTreePatternMatcher#compile(String, int)}.
#
from antlr4.tree.ParseTreePatternMatcher import ParseTreePatternMatcher
from antlr4.tree.Tree import ParseTree
from antlr4.xpath.XPath import XPath
class ParseTreePattern(object):
__slots__ = ('matcher', 'patternRuleIndex', 'pattern', 'patternTree')
# Construct a new instance of the {@link ParseTreePattern} class.
#
# @param matcher The {@link ParseTreePatternMatcher} which created this
# tree pattern.
# @param pattern The tree pattern in concrete syntax form.
# @param patternRuleIndex The parser rule which serves as the root of the
# tree pattern.
# @param patternTree The tree pattern in {@link ParseTree} form.
#
def __init__(self, matcher:ParseTreePatternMatcher, pattern:str, patternRuleIndex:int , patternTree:ParseTree):
self.matcher = matcher
self.patternRuleIndex = patternRuleIndex
self.pattern = pattern
self.patternTree = patternTree
#
# Match a specific parse tree against this tree pattern.
#
# @param tree The parse tree to match against this tree pattern.
# @return A {@link ParseTreeMatch} object describing the result of the
# match operation. The {@link ParseTreeMatch#succeeded()} method can be
# used to determine whether or not the match was successful.
#
def match(self, tree:ParseTree):
return self.matcher.match(tree, self)
#
# Determine whether or not a parse tree matches this tree pattern.
#
# @param tree The parse tree to match against this tree pattern.
# @return {@code true} if {@code tree} is a match for the current tree
# pattern; otherwise, {@code false}.
#
def matches(self, tree:ParseTree):
return self.matcher.match(tree, self).succeeded()
# Find all nodes using XPath and then try to match those subtrees against
# this tree pattern.
#
# @param tree The {@link ParseTree} to match against this pattern.
# @param xpath An expression matching the nodes
#
# @return A collection of {@link ParseTreeMatch} objects describing the
# successful matches. Unsuccessful matches are omitted from the result,
# regardless of the reason for the failure.
#
def findAll(self, tree:ParseTree, xpath:str):
subtrees = XPath.findAll(tree, xpath, self.matcher.parser)
matches = list()
for t in subtrees:
match = self.match(t)
if match.succeeded():
matches.append(match)
return matches
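# Usage sketch (assumes a grammar with a "statement" rule; MyParser.RULE_statement
# is the generated rule index constant):
#
#   matcher = ParseTreePatternMatcher(lexer, parser)
#   pat = matcher.compileTreePattern("<ID> = <expr>;", MyParser.RULE_statement)
#   for m in pat.findAll(tree, "//statement"):   # XPath selects candidate subtrees
#       print(m.get("ID").getText())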
antlr4-python3-runtime-4.9.1/src/antlr4/tree/ParseTreePatternMatcher.py
#
# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
#
#
# A tree pattern matching mechanism for ANTLR {@link ParseTree}s.
#
# Patterns are strings of source input text with special tags representing
# token or rule references such as:
#
# {@code <ID> = <expr>;}
#
# Given a pattern start rule such as {@code statement}, this object constructs
# a {@link ParseTree} with placeholders for the {@code ID} and {@code expr}
# subtree. Then the {@link #match} routines can compare an actual
# {@link ParseTree} from a parse with this pattern. Tag {@code <ID>} matches
# any {@code ID} token and tag {@code <expr>} references the result of the
# {@code expr} rule (generally an instance of {@code ExprContext}).
#
# Pattern {@code x = 0;} is a similar pattern that matches the same pattern
# except that it requires the identifier to be {@code x} and the expression to
# be {@code 0}.
#
# The {@link #matches} routines return {@code true} or {@code false} based
# upon a match for the tree rooted at the parameter sent in. The
# {@link #match} routines return a {@link ParseTreeMatch} object that
# contains the parse tree, the parse tree pattern, and a map from tag name to
# matched nodes (more below). A subtree that fails to match, returns with
# {@link ParseTreeMatch#mismatchedNode} set to the first tree node that did not
# match.
#
# For efficiency, you can compile a tree pattern in string form to a
# {@link ParseTreePattern} object.
#
# See {@code TestParseTreeMatcher} for lots of examples.
# {@link ParseTreePattern} has two static helper methods:
# {@link ParseTreePattern#findAll} and {@link ParseTreePattern#match} that
# are easy to use but not super efficient because they create new
# {@link ParseTreePatternMatcher} objects each time and have to compile the
# pattern in string form before using it.
#
# The lexer and parser that you pass into the {@link ParseTreePatternMatcher}
# constructor are used to parse the pattern in string form. The lexer converts
# the {@code <ID> = <expr>;} into a sequence of four tokens (assuming lexer
# throws out whitespace or puts it on a hidden channel). Be aware that the
# input stream is reset for the lexer (but not the parser; a
# {@link ParserInterpreter} is created to parse the input.). Any user-defined
# fields you have put into the lexer might get changed when this mechanism asks
# it to scan the pattern string.
#
# Normally a parser does not accept token {@code <expr>} as a valid
# {@code expr} but, from the parser passed in, we create a special version of
# the underlying grammar representation (an {@link ATN}) that allows imaginary
# tokens representing rules ({@code <expr>}) to match entire rules. We call
# these bypass alternatives.
#
# Delimiters are {@code <} and {@code >}, with {@code \} as the escape string
# by default, but you can set them to whatever you want using
# {@link #setDelimiters}. You must escape both start and stop strings
# {@code \<} and {@code \>}.
#
from antlr4.CommonTokenStream import CommonTokenStream
from antlr4.InputStream import InputStream
from antlr4.ParserRuleContext import ParserRuleContext
from antlr4.Lexer import Lexer
from antlr4.ListTokenSource import ListTokenSource
from antlr4.Token import Token
from antlr4.error.ErrorStrategy import BailErrorStrategy
from antlr4.error.Errors import RecognitionException, ParseCancellationException
from antlr4.tree.Chunk import TagChunk, TextChunk
from antlr4.tree.RuleTagToken import RuleTagToken
from antlr4.tree.TokenTagToken import TokenTagToken
from antlr4.tree.Tree import ParseTree, TerminalNode, RuleNode
# need forward declaration
Parser = None
ParseTreePattern = None
class CannotInvokeStartRule(Exception):
def __init__(self, e:Exception):
super().__init__(e)
class StartRuleDoesNotConsumeFullPattern(Exception):
pass
class ParseTreePatternMatcher(object):
__slots__ = ('lexer', 'parser', 'start', 'stop', 'escape')
# Constructs a {@link ParseTreePatternMatcher} or from a {@link Lexer} and
# {@link Parser} object. The lexer input stream is altered for tokenizing
# the tree patterns. The parser is used as a convenient mechanism to get
# the grammar name, plus token, rule names.
def __init__(self, lexer:Lexer, parser:Parser):
self.lexer = lexer
self.parser = parser
self.start = "<"
self.stop = ">"
self.escape = "\\" # e.g., \< and \> must escape BOTH!
# Set the delimiters used for marking rule and token tags within concrete
# syntax used by the tree pattern parser.
#
# @param start The start delimiter.
# @param stop The stop delimiter.
# @param escapeLeft The escape sequence to use for escaping a start or stop delimiter.
#
# @exception IllegalArgumentException if {@code start} is {@code null} or empty.
# @exception IllegalArgumentException if {@code stop} is {@code null} or empty.
#
def setDelimiters(self, start:str, stop:str, escapeLeft:str):
if start is None or len(start)==0:
raise Exception("start cannot be null or empty")
if stop is None or len(stop)==0:
raise Exception("stop cannot be null or empty")
self.start = start
self.stop = stop
self.escape = escapeLeft
# Does {@code pattern} matched as rule {@code patternRuleIndex} match {@code tree}?#
def matchesRuleIndex(self, tree:ParseTree, pattern:str, patternRuleIndex:int):
p = self.compileTreePattern(pattern, patternRuleIndex)
return self.matches(tree, p)
# Does {@code pattern} matched as rule patternRuleIndex match tree? Pass in a
# compiled pattern instead of a string representation of a tree pattern.
#
def matchesPattern(self, tree:ParseTree, pattern:ParseTreePattern):
mismatchedNode = self.matchImpl(tree, pattern.patternTree, dict())
return mismatchedNode is None
#
# Compare {@code pattern} matched as rule {@code patternRuleIndex} against
# {@code tree} and return a {@link ParseTreeMatch} object that contains the
# matched elements, or the node at which the match failed.
#
def matchRuleIndex(self, tree:ParseTree, pattern:str, patternRuleIndex:int):
p = self.compileTreePattern(pattern, patternRuleIndex)
return self.matchPattern(tree, p)
#
# Compare {@code pattern} matched against {@code tree} and return a
# {@link ParseTreeMatch} object that contains the matched elements, or the
# node at which the match failed. Pass in a compiled pattern instead of a
# string representation of a tree pattern.
#
def matchPattern(self, tree:ParseTree, pattern:ParseTreePattern):
labels = dict()
mismatchedNode = self.matchImpl(tree, pattern.patternTree, labels)
from antlr4.tree.ParseTreeMatch import ParseTreeMatch
return ParseTreeMatch(tree, pattern, labels, mismatchedNode)
#
# For repeated use of a tree pattern, compile it to a
# {@link ParseTreePattern} using this method.
#
def compileTreePattern(self, pattern:str, patternRuleIndex:int):
tokenList = self.tokenize(pattern)
tokenSrc = ListTokenSource(tokenList)
tokens = CommonTokenStream(tokenSrc)
from antlr4.ParserInterpreter import ParserInterpreter
parserInterp = ParserInterpreter(self.parser.grammarFileName, self.parser.tokenNames,
self.parser.ruleNames, self.parser.getATNWithBypassAlts(),tokens)
tree = None
try:
parserInterp.setErrorHandler(BailErrorStrategy())
tree = parserInterp.parse(patternRuleIndex)
except ParseCancellationException as e:
raise e.cause
except RecognitionException as e:
raise e
except Exception as e:
raise CannotInvokeStartRule(e)
# Make sure tree pattern compilation checks for a complete parse
if tokens.LA(1)!=Token.EOF:
raise StartRuleDoesNotConsumeFullPattern()
from antlr4.tree.ParseTreePattern import ParseTreePattern
return ParseTreePattern(self, pattern, patternRuleIndex, tree)
#
# Recursively walk {@code tree} against {@code patternTree}, filling
# {@code match.}{@link ParseTreeMatch#labels labels}.
#
# @return the first node encountered in {@code tree} which does not match
# a corresponding node in {@code patternTree}, or {@code null} if the match
# was successful. The specific node returned depends on the matching
# algorithm used by the implementation, and may be overridden.
#
def matchImpl(self, tree:ParseTree, patternTree:ParseTree, labels:dict):
if tree is None:
raise Exception("tree cannot be null")
if patternTree is None:
raise Exception("patternTree cannot be null")
# x and <ID>, x and y, or x and x; or could be mismatched types
if isinstance(tree, TerminalNode) and isinstance(patternTree, TerminalNode ):
mismatchedNode = None
# both are tokens and they have same type
if tree.symbol.type == patternTree.symbol.type:
if isinstance( patternTree.symbol, TokenTagToken ): # x and <ID>
tokenTagToken = patternTree.symbol
# track label->list-of-nodes for both token name and label (if any)
self.map(labels, tokenTagToken.tokenName, tree)
if tokenTagToken.label is not None:
self.map(labels, tokenTagToken.label, tree)
elif tree.getText()==patternTree.getText():
# x and x
pass
else:
# x and y
if mismatchedNode is None:
mismatchedNode = tree
else:
if mismatchedNode is None:
mismatchedNode = tree
return mismatchedNode
if isinstance(tree, ParserRuleContext) and isinstance(patternTree, ParserRuleContext):
mismatchedNode = None
# (expr ...) and <expr>
ruleTagToken = self.getRuleTagToken(patternTree)
if ruleTagToken is not None:
m = None
if tree.ruleContext.ruleIndex == patternTree.ruleContext.ruleIndex:
# track label->list-of-nodes for both rule name and label (if any)
self.map(labels, ruleTagToken.ruleName, tree)
if ruleTagToken.label is not None:
self.map(labels, ruleTagToken.label, tree)
else:
if mismatchedNode is None:
mismatchedNode = tree
return mismatchedNode
# (expr ...) and (expr ...)
if tree.getChildCount()!=patternTree.getChildCount():
if mismatchedNode is None:
mismatchedNode = tree
return mismatchedNode
n = tree.getChildCount()
for i in range(0, n):
childMatch = self.matchImpl(tree.getChild(i), patternTree.getChild(i), labels)
if childMatch is not None:
return childMatch
return mismatchedNode
# if nodes aren't both tokens or both rule nodes, can't match
return tree
def map(self, labels, label, tree):
v = labels.get(label, None)
if v is None:
v = list()
labels[label] = v
v.append(tree)
# Is {@code t} a {@code (expr <expr>)} subtree?#
def getRuleTagToken(self, tree:ParseTree):
if isinstance( tree, RuleNode ):
if tree.getChildCount()==1 and isinstance(tree.getChild(0), TerminalNode ):
c = tree.getChild(0)
if isinstance( c.symbol, RuleTagToken ):
return c.symbol
return None
def tokenize(self, pattern:str):
# split pattern into chunks: sea (raw input) and islands (<ID>, <expr>)
chunks = self.split(pattern)
# create token stream from text and tags
tokens = list()
for chunk in chunks:
if isinstance( chunk, TagChunk ):
# add special rule token or conjure up new token from name
if chunk.tag[0].isupper():
ttype = self.parser.getTokenType(chunk.tag)
if ttype==Token.INVALID_TYPE:
raise Exception("Unknown token " + str(chunk.tag) + " in pattern: " + pattern)
tokens.append(TokenTagToken(chunk.tag, ttype, chunk.label))
elif chunk.tag[0].islower():
ruleIndex = self.parser.getRuleIndex(chunk.tag)
if ruleIndex==-1:
raise Exception("Unknown rule " + str(chunk.tag) + " in pattern: " + pattern)
ruleImaginaryTokenType = self.parser.getATNWithBypassAlts().ruleToTokenType[ruleIndex]
tokens.append(RuleTagToken(chunk.tag, ruleImaginaryTokenType, chunk.label))
else:
raise Exception("invalid tag: " + str(chunk.tag) + " in pattern: " + pattern)
else:
self.lexer.setInputStream(InputStream(chunk.text))
t = self.lexer.nextToken()
while t.type!=Token.EOF:
tokens.append(t)
t = self.lexer.nextToken()
return tokens
# Split {@code <ID> = <expr>;} into 4 chunks for tokenizing by {@link #tokenize}.#
def split(self, pattern:str):
p = 0
n = len(pattern)
chunks = list()
# find all start and stop indexes first, then collect
starts = list()
stops = list()
while p < n :
if p == pattern.find(self.escape + self.start, p):
p += len(self.escape) + len(self.start)
elif p == pattern.find(self.escape + self.stop, p):
p += len(self.escape) + len(self.stop)
elif p == pattern.find(self.start, p):
starts.append(p)
p += len(self.start)
elif p == pattern.find(self.stop, p):
stops.append(p)
p += len(self.stop)
else:
p += 1
nt = len(starts)
if nt > len(stops):
raise Exception("unterminated tag in pattern: " + pattern)
if nt < len(stops):
raise Exception("missing start tag in pattern: " + pattern)
for i in range(0, nt):
if starts[i] >= stops[i]:
raise Exception("tag delimiters out of order in pattern: " + pattern)
# collect into chunks now
if nt==0:
chunks.append(TextChunk(pattern))
if nt>0 and starts[0]>0: # copy text up to first tag into chunks
text = pattern[0:starts[0]]
chunks.append(TextChunk(text))
for i in range(0, nt):
# copy inside of <tag>
tag = pattern[starts[i] + len(self.start) : stops[i]]
ruleOrToken = tag
label = None
colon = tag.find(':')
if colon >= 0:
label = tag[0:colon]
ruleOrToken = tag[colon+1 : len(tag)]
chunks.append(TagChunk(ruleOrToken, label)) # TagChunk signature is (tag, label)
if i+1 < len(starts):
# copy from end of <tag> to start of next <tag>
text = pattern[stops[i] + len(self.stop) : starts[i + 1]]
chunks.append(TextChunk(text))
if nt > 0 :
afterLastTag = stops[nt - 1] + len(self.stop)
if afterLastTag < n : # copy text from end of last tag to end
text = pattern[afterLastTag : n]
chunks.append(TextChunk(text))
# strip out the escape sequences from text chunks but not tags
for i in range(0, len(chunks)):
c = chunks[i]
if isinstance( c, TextChunk ):
unescaped = c.text.replace(self.escape, "")
if len(unescaped) < len(c.text):
chunks[i] = TextChunk(unescaped)
return chunks
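# End-to-end sketch (lexer/parser are instances of hypothetical generated
# classes; "statement" is an assumed rule name):
#
#   matcher = ParseTreePatternMatcher(lexer, parser)
#   pat = matcher.compileTreePattern("<ID> = <expr>;", parser.getRuleIndex("statement"))
#   if matcher.matchesPattern(tree, pat):
#       m = matcher.matchPattern(tree, pat)
#       print(m.get("expr").getText())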
antlr4-python3-runtime-4.9.1/src/antlr4/tree/RuleTagToken.py
#
# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
#
#
# A {@link Token} object representing an entire subtree matched by a parser
# rule; e.g., {@code <expr>}. These tokens are created for {@link TagChunk}
# chunks where the tag corresponds to a parser rule.
#
from antlr4.Token import Token
class RuleTagToken(Token):
__slots__ = ('label', 'ruleName')
#
# Constructs a new instance of {@link RuleTagToken} with the specified rule
# name, bypass token type, and label.
#
# @param ruleName The name of the parser rule this rule tag matches.
# @param bypassTokenType The bypass token type assigned to the parser rule.
# @param label The label associated with the rule tag, or {@code null} if
# the rule tag is unlabeled.
#
# @exception IllegalArgumentException if {@code ruleName} is {@code null}
# or empty.
def __init__(self, ruleName:str, bypassTokenType:int, label:str=None):
if ruleName is None or len(ruleName)==0:
raise Exception("ruleName cannot be null or empty.")
self.source = None
self.type = bypassTokenType # token type of the token
self.channel = Token.DEFAULT_CHANNEL # The parser ignores everything not on DEFAULT_CHANNEL
self.start = -1 # optional; return -1 if not implemented.
self.stop = -1 # optional; return -1 if not implemented.
self.tokenIndex = -1 # from 0..n-1 of the token object in the input stream
self.line = 0 # line=1..n of the 1st character
self.column = -1 # beginning of the line at which it occurs, 0..n-1
self.label = label
self.ruleName = ruleName
self._text = self.getText() # text of the token; ruleName must be set first
def getText(self):
if self.label is None:
return "<" + self.ruleName + ">"
else:
return "<" + self.label + ":" + self.ruleName + ">"
antlr4-python3-runtime-4.9.1/src/antlr4/tree/TokenTagToken.py
#
# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
#
#
# A {@link Token} object representing a token of a particular type; e.g.,
# {@code <ID>}. These tokens are created for {@link TagChunk} chunks where the
# tag corresponds to a lexer rule or token type.
#
from antlr4.Token import CommonToken
class TokenTagToken(CommonToken):
__slots__ = ('tokenName', 'label')
# Constructs a new instance of {@link TokenTagToken} with the specified
# token name, type, and label.
#
# @param tokenName The token name.
# @param type The token type.
# @param label The label associated with the token tag, or {@code null} if
# the token tag is unlabeled.
#
def __init__(self, tokenName:str, type:int, label:str=None):
super().__init__(type=type)
self.tokenName = tokenName
self.label = label
self._text = self.getText()
#
# {@inheritDoc}
#
# The implementation for {@link TokenTagToken} returns the token tag
# formatted with {@code <} and {@code >} delimiters.
#
def getText(self):
if self.label is None:
return "<" + self.tokenName + ">"
else:
return "<" + self.label + ":" + self.tokenName + ">"
# The implementation for {@link TokenTagToken} returns a string of the form
# {@code tokenName:type}.
#
def __str__(self):
return self.tokenName + ":" + str(self.type)
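# Minimal sketch (illustrative only): getText() renders the tag in angle
# brackets while str() uses the tokenName:type form. Token type 7 is an
# arbitrary placeholder.
if __name__ == '__main__':
    tag = TokenTagToken("ID", 7, "x")
    print(tag.getText())  # -> <x:ID>
    print(str(tag))       # -> ID:7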
antlr4-python3-runtime-4.9.1/src/antlr4/tree/Tree.py
# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
#/
# The basic notion of a tree has a parent, a payload, and a list of children.
# It is the most abstract interface for all the trees used by ANTLR.
#/
from antlr4.Token import Token
INVALID_INTERVAL = (-1, -2)
class Tree(object):
pass
class SyntaxTree(Tree):
pass
class ParseTree(SyntaxTree):
pass
class RuleNode(ParseTree):
pass
class TerminalNode(ParseTree):
pass
class ErrorNode(TerminalNode):
pass
class ParseTreeVisitor(object):
def visit(self, tree):
return tree.accept(self)
def visitChildren(self, node):
result = self.defaultResult()
n = node.getChildCount()
for i in range(n):
if not self.shouldVisitNextChild(node, result):
return result
c = node.getChild(i)
childResult = c.accept(self)
result = self.aggregateResult(result, childResult)
return result
def visitTerminal(self, node):
return self.defaultResult()
def visitErrorNode(self, node):
return self.defaultResult()
def defaultResult(self):
return None
def aggregateResult(self, aggregate, nextResult):
return nextResult
def shouldVisitNextChild(self, node, currentResult):
return True
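# A minimal sketch (illustrative only) of the visitor hooks above: counting
# terminal nodes by overriding defaultResult/aggregateResult/visitTerminal.
class _TerminalCounter(ParseTreeVisitor):
    def defaultResult(self):
        return 0
    def aggregateResult(self, aggregate, nextResult):
        return aggregate + nextResult
    def visitTerminal(self, node):
        return 1
# usage, given a parse tree produced elsewhere: _TerminalCounter().visit(tree)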
ParserRuleContext = None
class ParseTreeListener(object):
def visitTerminal(self, node:TerminalNode):
pass
def visitErrorNode(self, node:ErrorNode):
pass
def enterEveryRule(self, ctx:ParserRuleContext):
pass
def exitEveryRule(self, ctx:ParserRuleContext):
pass
del ParserRuleContext
class TerminalNodeImpl(TerminalNode):
__slots__ = ('parentCtx', 'symbol')
def __init__(self, symbol:Token):
self.parentCtx = None
self.symbol = symbol
def __setattr__(self, key, value):
super().__setattr__(key, value)
def getChild(self, i:int):
return None
def getSymbol(self):
return self.symbol
def getParent(self):
return self.parentCtx
def getPayload(self):
return self.symbol
def getSourceInterval(self):
if self.symbol is None:
return INVALID_INTERVAL
tokenIndex = self.symbol.tokenIndex
return (tokenIndex, tokenIndex)
def getChildCount(self):
return 0
def accept(self, visitor:ParseTreeVisitor):
return visitor.visitTerminal(self)
def getText(self):
return self.symbol.text
def __str__(self):
if self.symbol.type == Token.EOF:
return ""
else:
return self.symbol.text
# Represents a token that was consumed during resynchronization
# rather than during a valid match operation. For example,
# we will create this kind of a node during single token insertion
# and deletion as well as during "consume until error recovery set"
# upon no viable alternative exceptions.
class ErrorNodeImpl(TerminalNodeImpl,ErrorNode):
def __init__(self, token:Token):
super().__init__(token)
def accept(self, visitor:ParseTreeVisitor):
return visitor.visitErrorNode(self)
class ParseTreeWalker(object):
DEFAULT = None
def walk(self, listener:ParseTreeListener, t:ParseTree):
"""
Performs a walk on the given parse tree starting at the root and going down recursively
with depth-first search. On each node, {@link ParseTreeWalker#enterRule} is called before
recursively walking down into child nodes, then
{@link ParseTreeWalker#exitRule} is called after the recursive call to wind up.
@param listener The listener used by the walker to process grammar rules
@param t The parse tree to be walked on
"""
if isinstance(t, ErrorNode):
listener.visitErrorNode(t)
return
elif isinstance(t, TerminalNode):
listener.visitTerminal(t)
return
self.enterRule(listener, t)
for child in t.getChildren():
self.walk(listener, child)
self.exitRule(listener, t)
#
# The discovery of a rule node involves sending two events: the generic
# {@link ParseTreeListener#enterEveryRule} and a
# {@link RuleContext}-specific event. First we trigger the generic and then
# the rule-specific one. We do them in reverse order upon finishing the node.
#
def enterRule(self, listener:ParseTreeListener, r:RuleNode):
"""
Enters a grammar rule by first triggering the generic event {@link ParseTreeListener#enterEveryRule}
then by triggering the event specific to the given parse tree node
@param listener The listener responding to the trigger events
@param r The grammar rule containing the rule context
"""
ctx = r.getRuleContext()
listener.enterEveryRule(ctx)
ctx.enterRule(listener)
def exitRule(self, listener:ParseTreeListener, r:RuleNode):
"""
Exits a grammar rule by first triggering the event specific to the given parse tree node
then by triggering the generic event {@link ParseTreeListener#exitEveryRule}
@param listener The listener responding to the trigger events
@param r The grammar rule containing the rule context
"""
ctx = r.getRuleContext()
ctx.exitRule(listener)
listener.exitEveryRule(ctx)
ParseTreeWalker.DEFAULT = ParseTreeWalker()
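# A minimal sketch (illustrative only): the shared DEFAULT walker driving a
# listener over a parse tree produced elsewhere.
class _RuleTracer(ParseTreeListener):
    def enterEveryRule(self, ctx):
        print("enter, invoking state", ctx.invokingState)
    def exitEveryRule(self, ctx):
        print("exit, invoking state", ctx.invokingState)
# usage: ParseTreeWalker.DEFAULT.walk(_RuleTracer(), tree)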
antlr4-python3-runtime-4.9.1/src/antlr4/tree/Trees.py
#
# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
#
# A set of utility routines useful for all kinds of ANTLR trees.#
from io import StringIO
from antlr4.Token import Token
from antlr4.Utils import escapeWhitespace
from antlr4.tree.Tree import RuleNode, ErrorNode, TerminalNode, Tree, ParseTree
# need forward declaration
Parser = None
class Trees(object):
# Print out a whole tree in LISP form. {@link #getNodeText} is used on the
# node payloads to get the text for the nodes. Detect
# parse trees and extract data appropriately.
@classmethod
def toStringTree(cls, t:Tree, ruleNames:list=None, recog:Parser=None):
if recog is not None:
ruleNames = recog.ruleNames
s = escapeWhitespace(cls.getNodeText(t, ruleNames), False)
if t.getChildCount()==0:
return s
with StringIO() as buf:
buf.write("(")
buf.write(s)
buf.write(' ')
for i in range(0, t.getChildCount()):
if i > 0:
buf.write(' ')
buf.write(cls.toStringTree(t.getChild(i), ruleNames))
buf.write(")")
return buf.getvalue()
@classmethod
def getNodeText(cls, t:Tree, ruleNames:list=None, recog:Parser=None):
if recog is not None:
ruleNames = recog.ruleNames
if ruleNames is not None:
if isinstance(t, RuleNode):
if t.getAltNumber()!=0: # should use ATN.INVALID_ALT_NUMBER but won't compile
return ruleNames[t.getRuleIndex()]+":"+str(t.getAltNumber())
return ruleNames[t.getRuleIndex()]
elif isinstance( t, ErrorNode):
return str(t)
elif isinstance(t, TerminalNode):
if t.symbol is not None:
return t.symbol.text
# no recog for rule names
payload = t.getPayload()
if isinstance(payload, Token ):
return payload.text
return str(t.getPayload())
# Return ordered list of all children of this node
@classmethod
def getChildren(cls, t:Tree):
return [ t.getChild(i) for i in range(0, t.getChildCount()) ]
# Return a list of all ancestors of this node. The first node of
# list is the root and the last is the parent of this node.
#
@classmethod
def getAncestors(cls, t:Tree):
ancestors = []
t = t.getParent()
while t is not None:
ancestors.insert(0, t) # insert at start
t = t.getParent()
return ancestors
@classmethod
def findAllTokenNodes(cls, t:ParseTree, ttype:int):
return cls.findAllNodes(t, ttype, True)
@classmethod
def findAllRuleNodes(cls, t:ParseTree, ruleIndex:int):
return cls.findAllNodes(t, ruleIndex, False)
@classmethod
def findAllNodes(cls, t:ParseTree, index:int, findTokens:bool):
nodes = []
cls._findAllNodes(t, index, findTokens, nodes)
return nodes
@classmethod
def _findAllNodes(cls, t:ParseTree, index:int, findTokens:bool, nodes:list):
from antlr4.ParserRuleContext import ParserRuleContext
# check this node (the root) first
if findTokens and isinstance(t, TerminalNode):
if t.symbol.type==index:
nodes.append(t)
elif not findTokens and isinstance(t, ParserRuleContext):
if t.ruleIndex == index:
nodes.append(t)
# check children
for i in range(0, t.getChildCount()):
cls._findAllNodes(t.getChild(i), index, findTokens, nodes)
@classmethod
def descendants(cls, t:ParseTree):
nodes = [t]
for i in range(0, t.getChildCount()):
nodes.extend(cls.descendants(t.getChild(i)))
return nodes
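# A minimal sketch (illustrative only) combining the helpers above: dump a
# tree in LISP form and collect the text of all terminals of one token type.
# `tree`, `parser` and `ttype` are assumed to come from generated code.
def _dump_and_collect(tree, parser, ttype):
    print(Trees.toStringTree(tree, recog=parser))  # e.g. (expr a + b)
    return [n.getText() for n in Trees.findAllTokenNodes(tree, ttype)]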
antlr4-python3-runtime-4.9.1/src/antlr4/tree/__init__.py
antlr4-python3-runtime-4.9.1/src/antlr4/xpath/
antlr4-python3-runtime-4.9.1/src/antlr4/xpath/XPath.py
#
# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
#
#
# Represent a subset of XPath XML path syntax for use in identifying nodes in
# parse trees.
#
#
# Split path into words and separators {@code /} and {@code //} via ANTLR
# itself then walk path elements from left to right. At each separator-word
# pair, find set of nodes. Next stage uses those as work list.
#
#
# The basic interface is
# {@link XPath#findAll ParseTree.findAll}{@code (tree, pathString, parser)}.
# But that is just shorthand for:
#
#
# {@link XPath} p = new {@link XPath#XPath XPath}(parser, pathString);
# return p.{@link #evaluate evaluate}(tree);
#
#
#
# See {@code org.antlr.v4.test.TestXPath} for descriptions. In short, this
# allows operators:
#
#
# - /  - root
# - // - anywhere
# - !  - invert; this must appear directly after root or anywhere
#        operator
#
# and path elements:
#
# - ID       - token name
# - 'string' - any string literal token from the grammar
# - expr     - rule name
# - *        - wildcard matching any node
#
# Whitespace is not allowed.
#
from antlr4 import CommonTokenStream, DFA, PredictionContextCache, Lexer, LexerATNSimulator, ParserRuleContext, TerminalNode
from antlr4.InputStream import InputStream
from antlr4.Parser import Parser
from antlr4.RuleContext import RuleContext
from antlr4.Token import Token
from antlr4.atn.ATNDeserializer import ATNDeserializer
from antlr4.error.ErrorListener import ErrorListener
from antlr4.error.Errors import LexerNoViableAltException
from antlr4.tree.Tree import ParseTree
from antlr4.tree.Trees import Trees
from io import StringIO
def serializedATN():
with StringIO() as buf:
buf.write("\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\2\n")
buf.write("\64\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t")
buf.write("\7\4\b\t\b\4\t\t\t\3\2\3\2\3\2\3\3\3\3\3\4\3\4\3\5\3\5")
buf.write("\3\6\3\6\7\6\37\n\6\f\6\16\6\"\13\6\3\6\3\6\3\7\3\7\5")
buf.write("\7(\n\7\3\b\3\b\3\t\3\t\7\t.\n\t\f\t\16\t\61\13\t\3\t")
buf.write("\3\t\3/\2\n\3\5\5\6\7\7\t\b\13\t\r\2\17\2\21\n\3\2\4\7")
buf.write("\2\62;aa\u00b9\u00b9\u0302\u0371\u2041\u2042\17\2C\\c")
buf.write("|\u00c2\u00d8\u00da\u00f8\u00fa\u0301\u0372\u037f\u0381")
buf.write("\u2001\u200e\u200f\u2072\u2191\u2c02\u2ff1\u3003\ud801")
buf.write("\uf902\ufdd1\ufdf2\uffff\64\2\3\3\2\2\2\2\5\3\2\2\2\2")
buf.write("\7\3\2\2\2\2\t\3\2\2\2\2\13\3\2\2\2\2\21\3\2\2\2\3\23")
buf.write("\3\2\2\2\5\26\3\2\2\2\7\30\3\2\2\2\t\32\3\2\2\2\13\34")
buf.write("\3\2\2\2\r\'\3\2\2\2\17)\3\2\2\2\21+\3\2\2\2\23\24\7\61")
buf.write("\2\2\24\25\7\61\2\2\25\4\3\2\2\2\26\27\7\61\2\2\27\6\3")
buf.write("\2\2\2\30\31\7,\2\2\31\b\3\2\2\2\32\33\7#\2\2\33\n\3\2")
buf.write("\2\2\34 \5\17\b\2\35\37\5\r\7\2\36\35\3\2\2\2\37\"\3\2")
buf.write("\2\2 \36\3\2\2\2 !\3\2\2\2!#\3\2\2\2\" \3\2\2\2#$\b\6")
buf.write("\2\2$\f\3\2\2\2%(\5\17\b\2&(\t\2\2\2\'%\3\2\2\2\'&\3\2")
buf.write("\2\2(\16\3\2\2\2)*\t\3\2\2*\20\3\2\2\2+/\7)\2\2,.\13\2")
buf.write("\2\2-,\3\2\2\2.\61\3\2\2\2/\60\3\2\2\2/-\3\2\2\2\60\62")
buf.write("\3\2\2\2\61/\3\2\2\2\62\63\7)\2\2\63\22\3\2\2\2\6\2 \'")
buf.write("/\3\3\6\2")
return buf.getvalue()
class XPathLexer(Lexer):
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
TOKEN_REF = 1
RULE_REF = 2
ANYWHERE = 3
ROOT = 4
WILDCARD = 5
BANG = 6
ID = 7
STRING = 8
modeNames = [ "DEFAULT_MODE" ]
literalNames = [ "",
"'//'", "'/'", "'*'", "'!'" ]
symbolicNames = [ "",
"TOKEN_REF", "RULE_REF", "ANYWHERE", "ROOT", "WILDCARD", "BANG",
"ID", "STRING" ]
ruleNames = [ "ANYWHERE", "ROOT", "WILDCARD", "BANG", "ID", "NameChar",
"NameStartChar", "STRING" ]
grammarFileName = "XPathLexer.g4"
def __init__(self, input=None):
super().__init__(input)
self.checkVersion("4.9.1")
self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
self._actions = None
self._predicates = None
def action(self, localctx:RuleContext, ruleIndex:int, actionIndex:int):
if self._actions is None:
actions = dict()
actions[4] = self.ID_action
self._actions = actions
_action = self._actions.get(ruleIndex, None)
if _action is not None:
_action(localctx, actionIndex)
else:
raise Exception("No registered action for: %d" % ruleIndex)
def ID_action(self, localctx:RuleContext , actionIndex:int):
if actionIndex == 0:
char = self.text[0]
if char.isupper():
self.type = XPathLexer.TOKEN_REF
else:
self.type = XPathLexer.RULE_REF
class XPath(object):
WILDCARD = "*" # word not operator/separator
NOT = "!" # word for invert operator
def __init__(self, parser:Parser, path:str):
self.parser = parser
self.path = path
self.elements = self.split(path)
def split(self, path:str):
input = InputStream(path)
lexer = XPathLexer(input)
def recover(self, e):
raise e
lexer.recover = recover
lexer.removeErrorListeners()
lexer.addErrorListener(ErrorListener()) # XPathErrorListener does no more
tokenStream = CommonTokenStream(lexer)
try:
tokenStream.fill()
except LexerNoViableAltException as e:
pos = lexer.column
msg = "Invalid tokens or characters at index %d in path '%s'" % (pos, path)
raise Exception(msg, e)
tokens = iter(tokenStream.tokens)
elements = list()
for el in tokens:
invert = False
anywhere = False
# Check for path separators, if none assume root
if el.type in [XPathLexer.ROOT, XPathLexer.ANYWHERE]:
anywhere = el.type == XPathLexer.ANYWHERE
next_el = next(tokens, None)
if not next_el:
raise Exception('Missing element after %s' % el.getText())
else:
el = next_el
# Check for bangs
if el.type == XPathLexer.BANG:
invert = True
next_el = next(tokens, None)
if not next_el:
raise Exception('Missing element after %s' % el.getText())
else:
el = next_el
# Add searched element
if el.type in [XPathLexer.TOKEN_REF, XPathLexer.RULE_REF, XPathLexer.WILDCARD, XPathLexer.STRING]:
element = self.getXPathElement(el, anywhere)
element.invert = invert
elements.append(element)
elif el.type==Token.EOF:
break
else:
raise Exception("Unknown path element %s" % lexer.symbolicNames[el.type])
return elements
#
# Convert word like {@code *} or {@code ID} or {@code expr} to a path
# element. {@code anywhere} is {@code true} if {@code //} precedes the
# word.
#
def getXPathElement(self, wordToken:Token, anywhere:bool):
if wordToken.type==Token.EOF:
raise Exception("Missing path element at end of path")
word = wordToken.text
if wordToken.type==XPathLexer.WILDCARD :
return XPathWildcardAnywhereElement() if anywhere else XPathWildcardElement()
elif wordToken.type in [XPathLexer.TOKEN_REF, XPathLexer.STRING]:
tsource = self.parser.getTokenStream().tokenSource
ttype = Token.INVALID_TYPE
if wordToken.type == XPathLexer.TOKEN_REF:
if word in tsource.ruleNames:
ttype = tsource.ruleNames.index(word) + 1
else:
if word in tsource.literalNames:
ttype = tsource.literalNames.index(word)
if ttype == Token.INVALID_TYPE:
raise Exception("%s at index %d isn't a valid token name" % (word, wordToken.tokenIndex))
return XPathTokenAnywhereElement(word, ttype) if anywhere else XPathTokenElement(word, ttype)
else:
ruleIndex = self.parser.ruleNames.index(word) if word in self.parser.ruleNames else -1
if ruleIndex == -1:
raise Exception("%s at index %d isn't a valid rule name" % (word, wordToken.tokenIndex))
return XPathRuleAnywhereElement(word, ruleIndex) if anywhere else XPathRuleElement(word, ruleIndex)
@staticmethod
def findAll(tree:ParseTree, xpath:str, parser:Parser):
p = XPath(parser, xpath)
return p.evaluate(tree)
#
# Return a list of all nodes starting at {@code t} as root that satisfy the
# path. The root {@code /} is relative to the node passed to
# {@link #evaluate}.
#
def evaluate(self, t:ParseTree):
dummyRoot = ParserRuleContext()
dummyRoot.children = [t] # don't set t's parent.
work = [dummyRoot]
for element in self.elements:
work_next = list()
for node in work:
if not isinstance(node, TerminalNode) and node.children:
# only try to match next element if it has children
# e.g., //func/*/stat might have a token node for which
# we can't go looking for stat nodes.
matching = element.evaluate(node)
# See issue antlr#370 - Prevents XPath from returning the
# same node multiple times
matching = filter(lambda m: m not in work_next, matching)
work_next.extend(matching)
work = work_next
return work
class XPathElement(object):
def __init__(self, nodeName:str):
self.nodeName = nodeName
self.invert = False
def __str__(self):
return type(self).__name__ + "[" + ("!" if self.invert else "") + self.nodeName + "]"
#
# Either {@code ID} at start of path or {@code ...//ID} in middle of path.
#
class XPathRuleAnywhereElement(XPathElement):
def __init__(self, ruleName:str, ruleIndex:int):
super().__init__(ruleName)
self.ruleIndex = ruleIndex
def evaluate(self, t:ParseTree):
# return all ParserRuleContext descendants of t that match ruleIndex (or do not match if inverted)
return filter(lambda c: isinstance(c, ParserRuleContext) and (self.invert ^ (c.getRuleIndex() == self.ruleIndex)), Trees.descendants(t))
class XPathRuleElement(XPathElement):
def __init__(self, ruleName:str, ruleIndex:int):
super().__init__(ruleName)
self.ruleIndex = ruleIndex
def evaluate(self, t:ParseTree):
# return all ParserRuleContext children of t that match ruleIndex (or do not match if inverted)
return filter(lambda c: isinstance(c, ParserRuleContext) and (self.invert ^ (c.getRuleIndex() == self.ruleIndex)), Trees.getChildren(t))
class XPathTokenAnywhereElement(XPathElement):
def __init__(self, ruleName:str, tokenType:int):
super().__init__(ruleName)
self.tokenType = tokenType
def evaluate(self, t:ParseTree):
# return all TerminalNode descendants of t that match tokenType (or do not match if inverted)
return filter(lambda c: isinstance(c, TerminalNode) and (self.invert ^ (c.symbol.type == self.tokenType)), Trees.descendants(t))
class XPathTokenElement(XPathElement):
def __init__(self, ruleName:str, tokenType:int):
super().__init__(ruleName)
self.tokenType = tokenType
def evaluate(self, t:ParseTree):
# return all TerminalNode children of t that match tokenType (or do not match if inverted)
return filter(lambda c: isinstance(c, TerminalNode) and (self.invert ^ (c.symbol.type == self.tokenType)), Trees.getChildren(t))
class XPathWildcardAnywhereElement(XPathElement):
def __init__(self):
super().__init__(XPath.WILDCARD)
def evaluate(self, t:ParseTree):
if self.invert:
return list() # !* is weird but valid (empty)
else:
return Trees.descendants(t)
class XPathWildcardElement(XPathElement):
def __init__(self):
super().__init__(XPath.WILDCARD)
def evaluate(self, t:ParseTree):
if self.invert:
return list() # !* is weird but valid (empty)
else:
return Trees.getChildren(t)
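# A minimal sketch (illustrative only). The runnable part tokenizes a path
# with XPathLexer, showing ID tokens reclassified as TOKEN_REF (uppercase)
# or RULE_REF (lowercase) by ID_action; the commented part shows findAll,
# where `tree` and `parser` are assumed to come from generated code.
if __name__ == '__main__':
    _lexer = XPathLexer(InputStream("//expr/ID"))
    _t = _lexer.nextToken()
    while _t.type != Token.EOF:
        print(XPathLexer.symbolicNames[_t.type], repr(_t.text))
        _t = _lexer.nextToken()
    # for node in XPath.findAll(tree, "//expr", parser):
    #     print(node.getText())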
antlr4-python3-runtime-4.9.1/src/antlr4/xpath/__init__.py
__author__ = 'ericvergnaud'
antlr4-python3-runtime-4.9.1/src/antlr4_python3_runtime.egg-info/
antlr4-python3-runtime-4.9.1/src/antlr4_python3_runtime.egg-info/PKG-INFO
Metadata-Version: 1.0
Name: antlr4-python3-runtime
Version: 4.9.1
Summary: ANTLR 4.9.1 runtime for Python 3.7
Home-page: http://www.antlr.org
Author: Eric Vergnaud, Terence Parr, Sam Harwell
Author-email: eric.vergnaud@wanadoo.fr
License: BSD
Description: UNKNOWN
Platform: UNKNOWN
antlr4-python3-runtime-4.9.1/src/antlr4_python3_runtime.egg-info/SOURCES.txt
MANIFEST.in
README.txt
RELEASE-4.5.txt
setup.py
bin/pygrun
src/antlr4/BufferedTokenStream.py
src/antlr4/CommonTokenFactory.py
src/antlr4/CommonTokenStream.py
src/antlr4/FileStream.py
src/antlr4/InputStream.py
src/antlr4/IntervalSet.py
src/antlr4/LL1Analyzer.py
src/antlr4/Lexer.py
src/antlr4/ListTokenSource.py
src/antlr4/Parser.py
src/antlr4/ParserInterpreter.py
src/antlr4/ParserRuleContext.py
src/antlr4/PredictionContext.py
src/antlr4/Recognizer.py
src/antlr4/RuleContext.py
src/antlr4/StdinStream.py
src/antlr4/Token.py
src/antlr4/TokenStreamRewriter.py
src/antlr4/Utils.py
src/antlr4/__init__.py
src/antlr4/atn/ATN.py
src/antlr4/atn/ATNConfig.py
src/antlr4/atn/ATNConfigSet.py
src/antlr4/atn/ATNDeserializationOptions.py
src/antlr4/atn/ATNDeserializer.py
src/antlr4/atn/ATNSimulator.py
src/antlr4/atn/ATNState.py
src/antlr4/atn/ATNType.py
src/antlr4/atn/LexerATNSimulator.py
src/antlr4/atn/LexerAction.py
src/antlr4/atn/LexerActionExecutor.py
src/antlr4/atn/ParserATNSimulator.py
src/antlr4/atn/PredictionMode.py
src/antlr4/atn/SemanticContext.py
src/antlr4/atn/Transition.py
src/antlr4/atn/__init__.py
src/antlr4/dfa/DFA.py
src/antlr4/dfa/DFASerializer.py
src/antlr4/dfa/DFAState.py
src/antlr4/dfa/__init__.py
src/antlr4/error/DiagnosticErrorListener.py
src/antlr4/error/ErrorListener.py
src/antlr4/error/ErrorStrategy.py
src/antlr4/error/Errors.py
src/antlr4/error/__init__.py
src/antlr4/tree/Chunk.py
src/antlr4/tree/ParseTreeMatch.py
src/antlr4/tree/ParseTreePattern.py
src/antlr4/tree/ParseTreePatternMatcher.py
src/antlr4/tree/RuleTagToken.py
src/antlr4/tree/TokenTagToken.py
src/antlr4/tree/Tree.py
src/antlr4/tree/Trees.py
src/antlr4/tree/__init__.py
src/antlr4/xpath/XPath.py
src/antlr4/xpath/__init__.py
src/antlr4_python3_runtime.egg-info/PKG-INFO
src/antlr4_python3_runtime.egg-info/SOURCES.txt
src/antlr4_python3_runtime.egg-info/dependency_links.txt
src/antlr4_python3_runtime.egg-info/requires.txt
src/antlr4_python3_runtime.egg-info/top_level.txt
test/TestFileStream.py
test/TestInputStream.py
test/TestIntervalSet.py
test/TestRecognizer.py
test/TestTokenStreamRewriter.py
test/__init__.py
test/c.c
test/ctest.py
test/run.py
test/xpathtest.py
test/expr/ExprLexer.py
test/expr/ExprParser.py
test/mocks/TestLexer.py
test/mocks/__init__.py
test/parser/__init__.py
test/parser/clexer.py
test/parser/cparser.py
antlr4-python3-runtime-4.9.1/src/antlr4_python3_runtime.egg-info/dependency_links.txt
antlr4-python3-runtime-4.9.1/src/antlr4_python3_runtime.egg-info/requires.txt
[:python_version < "3.5"]
typing
antlr4-python3-runtime-4.9.1/src/antlr4_python3_runtime.egg-info/top_level.txt
antlr4
antlr4-python3-runtime-4.9.1/test/
antlr4-python3-runtime-4.9.1/test/TestFileStream.py
import unittest
from antlr4.FileStream import FileStream
class TestFileStream(unittest.TestCase):
def testStream(self):
stream = FileStream(__file__)
self.assertTrue(stream.size > 0)
antlr4-python3-runtime-4.9.1/test/TestInputStream.py
import unittest
from antlr4.Token import Token
from antlr4.InputStream import InputStream
class TestInputStream(unittest.TestCase):
def testStream(self):
stream = InputStream("abcde")
self.assertEqual(0, stream.index)
self.assertEqual(5, stream.size)
self.assertEqual(ord("a"), stream.LA(1))
stream.consume()
self.assertEqual(1, stream.index)
stream.seek(5)
self.assertEqual(Token.EOF, stream.LA(1))
self.assertEqual("bcd", stream.getText(1, 3))
stream.reset()
self.assertEqual(0, stream.index)
antlr4-python3-runtime-4.9.1/test/TestIntervalSet.py
import unittest
from antlr4.IntervalSet import IntervalSet
class TestIntervalSet(unittest.TestCase):
def testEmpty(self):
s = IntervalSet()
self.assertIsNone(s.intervals)
self.assertFalse(30 in s)
def testOne(self):
s = IntervalSet()
s.addOne(30)
self.assertTrue(30 in s)
self.assertFalse(29 in s)
self.assertFalse(31 in s)
def testTwo(self):
s = IntervalSet()
s.addOne(30)
s.addOne(40)
self.assertTrue(30 in s)
self.assertTrue(40 in s)
self.assertFalse(35 in s)
def testRange(self):
s = IntervalSet()
s.addRange(range(30,41))
self.assertTrue(30 in s)
self.assertTrue(40 in s)
self.assertTrue(35 in s)
def testDistinct1(self):
s = IntervalSet()
s.addRange(range(30,32))
s.addRange(range(40,42))
self.assertEqual(2, len(s.intervals))
self.assertTrue(30 in s)
self.assertTrue(40 in s)
self.assertFalse(35 in s)
def testDistinct2(self):
s = IntervalSet()
s.addRange(range(40,42))
s.addRange(range(30,32))
self.assertEqual(2, len(s.intervals))
self.assertTrue(30 in s)
self.assertTrue(40 in s)
self.assertFalse(35 in s)
def testContiguous1(self):
s = IntervalSet()
s.addRange(range(30,36))
s.addRange(range(36,41))
self.assertEqual(1, len(s.intervals))
self.assertTrue(30 in s)
self.assertTrue(40 in s)
self.assertTrue(35 in s)
def testContiguous2(self):
s = IntervalSet()
s.addRange(range(36,41))
s.addRange(range(30,36))
self.assertEqual(1, len(s.intervals))
self.assertTrue(30 in s)
self.assertTrue(40 in s)
def testOverlapping1(self):
s = IntervalSet()
s.addRange(range(30,40))
s.addRange(range(35,45))
self.assertEqual(1, len(s.intervals))
self.assertTrue(30 in s)
self.assertTrue(44 in s)
def testOverlapping2(self):
s = IntervalSet()
s.addRange(range(35,45))
s.addRange(range(30,40))
self.assertEqual(1, len(s.intervals))
self.assertTrue(30 in s)
self.assertTrue(44 in s)
def testOverlapping3(self):
s = IntervalSet()
s.addRange(range(30,32))
s.addRange(range(40,42))
s.addRange(range(50,52))
s.addRange(range(20,61))
self.assertEqual(1, len(s.intervals))
self.assertTrue(20 in s)
self.assertTrue(60 in s)
def testComplement(self):
s = IntervalSet()
s.addRange(range(10,21))
c = s.complement(1,100)
self.assertTrue(1 in c)
self.assertTrue(100 in c)
self.assertTrue(10 not in c)
self.assertTrue(20 not in c)
antlr4-python3-runtime-4.9.1/test/TestRecognizer.py
import unittest
from antlr4.Recognizer import Recognizer
class TestRecognizer(unittest.TestCase):
def testVersion(self):
major, minor = Recognizer().extractVersion("1.2")
self.assertEqual("1", major)
self.assertEqual("2", minor)
major, minor = Recognizer().extractVersion("1.2.3")
self.assertEqual("1", major)
self.assertEqual("2", minor)
major, minor = Recognizer().extractVersion("1.2-snapshot")
self.assertEqual("1", major)
self.assertEqual("2", minor)
antlr4-python3-runtime-4.9.1/test/TestTokenStreamRewriter.py
# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
import unittest
from mocks.TestLexer import TestLexer, TestLexer2
from antlr4.TokenStreamRewriter import TokenStreamRewriter
from antlr4.InputStream import InputStream
from antlr4.CommonTokenStream import CommonTokenStream
class TestTokenStreamRewriter(unittest.TestCase):
def testInsertBeforeIndexZero(self):
input = InputStream('abc')
lexer = TestLexer(input)
stream = CommonTokenStream(lexer=lexer)
stream.fill()
rewriter = TokenStreamRewriter(tokens=stream)
rewriter.insertBeforeIndex(0, '0')
self.assertEqual(rewriter.getDefaultText(), '0abc')
def testInsertAfterLastIndex(self):
input = InputStream('abc')
lexer = TestLexer(input)
stream = CommonTokenStream(lexer=lexer)
stream.fill()
rewriter = TokenStreamRewriter(tokens=stream)
rewriter.insertAfter(10, 'x')
self.assertEqual(rewriter.getDefaultText(), 'abcx')
def test2InsertBeforeAfterMiddleIndex(self):
input = InputStream('abc')
lexer = TestLexer(input)
stream = CommonTokenStream(lexer=lexer)
stream.fill()
rewriter = TokenStreamRewriter(tokens=stream)
rewriter.insertBeforeIndex(1, 'x')
rewriter.insertAfter(1, 'x')
self.assertEqual(rewriter.getDefaultText(), 'axbxc')
def testReplaceIndex(self):
input = InputStream('abc')
lexer = TestLexer(input)
stream = CommonTokenStream(lexer=lexer)
stream.fill()
rewriter = TokenStreamRewriter(tokens=stream)
rewriter.replaceIndex(0, 'x')
self.assertEqual(rewriter.getDefaultText(), 'xbc')
def testReplaceLastIndex(self):
input = InputStream('abc')
lexer = TestLexer(input)
stream = CommonTokenStream(lexer=lexer)
stream.fill()
rewriter = TokenStreamRewriter(tokens=stream)
rewriter.replaceIndex(2, 'x')
self.assertEqual(rewriter.getDefaultText(), 'abx')
def testReplaceMiddleIndex(self):
input = InputStream('abc')
lexer = TestLexer(input)
stream = CommonTokenStream(lexer=lexer)
stream.fill()
rewriter = TokenStreamRewriter(tokens=stream)
rewriter.replaceIndex(1, 'x')
self.assertEqual(rewriter.getDefaultText(), 'axc')
def testToStringStartStop(self):
input = InputStream('x = 3 * 0;')
lexer = TestLexer2(input)
stream = CommonTokenStream(lexer=lexer)
stream.fill()
rewriter = TokenStreamRewriter(tokens=stream)
rewriter.replaceRange(4, 8, '0')
self.assertEqual(rewriter.getDefaultText(), 'x = 0;')
self.assertEqual(rewriter.getText('default', 0, 9), 'x = 0;')
self.assertEqual(rewriter.getText('default', 4, 8), '0')
def testToStringStartStop2(self):
input = InputStream('x = 3 * 0 + 2 * 0;')
lexer = TestLexer2(input)
stream = CommonTokenStream(lexer=lexer)
stream.fill()
rewriter = TokenStreamRewriter(tokens=stream)
self.assertEqual('x = 3 * 0 + 2 * 0;', rewriter.getDefaultText())
# replace 3 * 0 with 0
rewriter.replaceRange(4, 8, '0')
self.assertEqual('x = 0 + 2 * 0;', rewriter.getDefaultText())
self.assertEqual('x = 0 + 2 * 0;', rewriter.getText('default', 0, 17))
self.assertEqual('0', rewriter.getText('default', 4, 8))
self.assertEqual('x = 0', rewriter.getText('default', 0, 8))
self.assertEqual('2 * 0', rewriter.getText('default', 12, 16))
rewriter.insertAfter(17, "// comment")
self.assertEqual('2 * 0;// comment', rewriter.getText('default', 12, 18))
self.assertEqual('x = 0', rewriter.getText('default', 0, 8))
def test2ReplaceMiddleIndex(self):
input = InputStream('abc')
lexer = TestLexer(input)
stream = CommonTokenStream(lexer=lexer)
stream.fill()
rewriter = TokenStreamRewriter(tokens=stream)
rewriter.replaceIndex(1, 'x')
rewriter.replaceIndex(1, 'y')
self.assertEqual('ayc', rewriter.getDefaultText())
def test2ReplaceMiddleIndex1InsertBefore(self):
input = InputStream('abc')
lexer = TestLexer(input)
stream = CommonTokenStream(lexer=lexer)
stream.fill()
rewriter = TokenStreamRewriter(tokens=stream)
rewriter.insertBeforeIndex(0, "_")
rewriter.replaceIndex(1, 'x')
rewriter.replaceIndex(1, 'y')
self.assertEqual('_ayc', rewriter.getDefaultText())
def test2InsertMiddleIndex(self):
input = InputStream('abc')
lexer = TestLexer(input)
stream = CommonTokenStream(lexer=lexer)
stream.fill()
rewriter = TokenStreamRewriter(tokens=stream)
rewriter.insertBeforeIndex(1, 'x')
rewriter.insertBeforeIndex(1, 'y')
self.assertEqual('ayxbc', rewriter.getDefaultText())
def testReplaceThenDeleteMiddleIndex(self):
input = InputStream('abc')
lexer = TestLexer(input)
stream = CommonTokenStream(lexer=lexer)
stream.fill()
rewriter = TokenStreamRewriter(tokens=stream)
rewriter.replaceRange(0, 2, 'x')
rewriter.insertBeforeIndex(1, '0')
with self.assertRaises(ValueError) as ctx:
rewriter.getDefaultText()
self.assertEqual(
'insert op ,1:1]:"0"> within boundaries of previous ,1:0]..[@2,2:2=\'c\',<3>,1:2]:"x">',
str(ctx.exception)
)
def testInsertThenReplaceSameIndex(self):
input = InputStream('abc')
lexer = TestLexer(input)
stream = CommonTokenStream(lexer=lexer)
stream.fill()
rewriter = TokenStreamRewriter(tokens=stream)
rewriter.insertBeforeIndex(0, '0')
rewriter.replaceIndex(0, 'x')
self.assertEqual('0xbc', rewriter.getDefaultText())
def test2InsertThenReplaceIndex0(self):
input = InputStream('abc')
lexer = TestLexer(input)
stream = CommonTokenStream(lexer=lexer)
stream.fill()
rewriter = TokenStreamRewriter(tokens=stream)
rewriter.insertBeforeIndex(0, 'x')
rewriter.insertBeforeIndex(0, 'y')
rewriter.replaceIndex(0, 'z')
self.assertEqual('yxzbc', rewriter.getDefaultText())
def testReplaceThenInsertBeforeLastIndex(self):
input = InputStream('abc')
lexer = TestLexer(input)
stream = CommonTokenStream(lexer=lexer)
stream.fill()
rewriter = TokenStreamRewriter(tokens=stream)
rewriter.replaceIndex(2, 'x')
rewriter.insertBeforeIndex(2, 'y')
self.assertEqual('abyx', rewriter.getDefaultText())
def testReplaceThenInsertAfterLastIndex(self):
input = InputStream('abc')
lexer = TestLexer(input)
stream = CommonTokenStream(lexer=lexer)
stream.fill()
rewriter = TokenStreamRewriter(tokens=stream)
rewriter.replaceIndex(2, 'x')
rewriter.insertAfter(2, 'y')
self.assertEqual('abxy', rewriter.getDefaultText())
def testReplaceRangeThenInsertAtLeftEdge(self):
input = InputStream('abcccba')
lexer = TestLexer(input)
stream = CommonTokenStream(lexer=lexer)
stream.fill()
rewriter = TokenStreamRewriter(tokens=stream)
rewriter.replaceRange(2, 4, 'x')
rewriter.insertBeforeIndex(2, 'y')
self.assertEqual('abyxba', rewriter.getDefaultText())
def testReplaceRangeThenInsertAtRightEdge(self):
input = InputStream('abcccba')
lexer = TestLexer(input)
stream = CommonTokenStream(lexer=lexer)
stream.fill()
rewriter = TokenStreamRewriter(tokens=stream)
rewriter.replaceRange(2, 4, 'x')
rewriter.insertBeforeIndex(4, 'y')
with self.assertRaises(ValueError) as ctx:
rewriter.getDefaultText()
msg = str(ctx.exception)
self.assertEqual(
"insert op ,1:4]:\"y\"> within boundaries of previous ,1:2]..[@4,4:4='c',<3>,1:4]:\"x\">",
msg
)
def testReplaceRangeThenInsertAfterRightEdge(self):
input = InputStream('abcccba')
lexer = TestLexer(input)
stream = CommonTokenStream(lexer=lexer)
stream.fill()
rewriter = TokenStreamRewriter(tokens=stream)
rewriter.replaceRange(2, 4, 'x')
rewriter.insertAfter(4, 'y')
self.assertEqual('abxyba', rewriter.getDefaultText())
def testReplaceAll(self):
input = InputStream('abcccba')
lexer = TestLexer(input)
stream = CommonTokenStream(lexer=lexer)
stream.fill()
rewriter = TokenStreamRewriter(tokens=stream)
rewriter.replaceRange(0, 6, 'x')
self.assertEqual('x', rewriter.getDefaultText())
def testReplaceSubsetThenFetch(self):
input = InputStream('abcccba')
lexer = TestLexer(input)
stream = CommonTokenStream(lexer=lexer)
stream.fill()
rewriter = TokenStreamRewriter(tokens=stream)
rewriter.replaceRange(2, 4, 'xyz')
self.assertEqual('abxyzba', rewriter.getDefaultText())
def testReplaceThenReplaceSuperset(self):
input = InputStream('abcccba')
lexer = TestLexer(input)
stream = CommonTokenStream(lexer=lexer)
stream.fill()
rewriter = TokenStreamRewriter(tokens=stream)
rewriter.replaceRange(2, 4, 'xyz')
rewriter.replaceRange(3, 5, 'foo')
with self.assertRaises(ValueError) as ctx:
rewriter.getDefaultText()
msg = str(ctx.exception)
self.assertEqual(
"""replace op boundaries of ,1:3]..[@5,5:5='b',<2>,1:5]:"foo"> overlap with previous ,1:2]..[@4,4:4='c',<3>,1:4]:"xyz">""",
msg
)
def testReplaceThenReplaceLowerIndexedSuperset(self):
input = InputStream('abcccba')
lexer = TestLexer(input)
stream = CommonTokenStream(lexer=lexer)
stream.fill()
rewriter = TokenStreamRewriter(tokens=stream)
rewriter.replaceRange(2, 4, 'xyz')
rewriter.replaceRange(1, 3, 'foo')
with self.assertRaises(ValueError) as ctx:
rewriter.getDefaultText()
msg = str(ctx.exception)
self.assertEqual(
"""replace op boundaries of ,1:1]..[@3,3:3='c',<3>,1:3]:"foo"> overlap with previous ,1:2]..[@4,4:4='c',<3>,1:4]:"xyz">""",
msg
)
def testReplaceSingleMiddleThenOverlappingSuperset(self):
input = InputStream('abcba')
lexer = TestLexer(input)
stream = CommonTokenStream(lexer=lexer)
stream.fill()
rewriter = TokenStreamRewriter(tokens=stream)
rewriter.replaceIndex(2, 'xyz')
rewriter.replaceRange(0, 3, 'foo')
self.assertEqual('fooa', rewriter.getDefaultText())
def testCombineInserts(self):
input = InputStream('abc')
lexer = TestLexer(input)
stream = CommonTokenStream(lexer=lexer)
stream.fill()
rewriter = TokenStreamRewriter(tokens=stream)
rewriter.insertBeforeIndex(0, 'x')
rewriter.insertBeforeIndex(0, 'y')
self.assertEqual('yxabc', rewriter.getDefaultText())
def testCombine3Inserts(self):
input = InputStream('abc')
lexer = TestLexer(input)
stream = CommonTokenStream(lexer=lexer)
stream.fill()
rewriter = TokenStreamRewriter(tokens=stream)
rewriter.insertBeforeIndex(1, 'x')
rewriter.insertBeforeIndex(0, 'y')
rewriter.insertBeforeIndex(1, 'z')
self.assertEqual('yazxbc', rewriter.getDefaultText())
def testCombineInsertOnLeftWithReplace(self):
input = InputStream('abc')
lexer = TestLexer(input)
stream = CommonTokenStream(lexer=lexer)
stream.fill()
rewriter = TokenStreamRewriter(tokens=stream)
rewriter.replaceRange(0, 2, 'foo')
rewriter.insertBeforeIndex(0, 'z')
self.assertEqual('zfoo', rewriter.getDefaultText())
def testCombineInsertOnLeftWithDelete(self):
input = InputStream('abc')
lexer = TestLexer(input)
stream = CommonTokenStream(lexer=lexer)
stream.fill()
rewriter = TokenStreamRewriter(tokens=stream)
rewriter.delete('default', 0, 2)
rewriter.insertBeforeIndex(0, 'z')
self.assertEqual('z', rewriter.getDefaultText())
def testDisjointInserts(self):
input = InputStream('abc')
lexer = TestLexer(input)
stream = CommonTokenStream(lexer=lexer)
stream.fill()
rewriter = TokenStreamRewriter(tokens=stream)
rewriter.insertBeforeIndex(1, 'x')
rewriter.insertBeforeIndex(2, 'y')
rewriter.insertBeforeIndex(0, 'z')
self.assertEqual('zaxbyc', rewriter.getDefaultText())
def testOverlappingReplace(self):
input = InputStream('abc')
lexer = TestLexer(input)
stream = CommonTokenStream(lexer=lexer)
stream.fill()
rewriter = TokenStreamRewriter(tokens=stream)
rewriter.replaceRange(1, 2, 'foo')
rewriter.replaceRange(0, 3, 'bar')
self.assertEqual('bar', rewriter.getDefaultText())
def testOverlappingReplace2(self):
input = InputStream('abc')
lexer = TestLexer(input)
stream = CommonTokenStream(lexer=lexer)
stream.fill()
rewriter = TokenStreamRewriter(tokens=stream)
rewriter.replaceRange(0, 3, 'bar')
rewriter.replaceRange(1, 2, 'foo')
with self.assertRaises(ValueError) as ctx:
rewriter.getDefaultText()
self.assertEqual(
"""replace op boundaries of ,1:1]..[@2,2:2='c',<3>,1:2]:"foo"> overlap with previous ,1:0]..[@3,3:2='',<-1>,1:3]:"bar">""",
str(ctx.exception)
)
def testOverlappingReplace3(self):
input = InputStream('abcc')
lexer = TestLexer(input)
stream = CommonTokenStream(lexer=lexer)
stream.fill()
rewriter = TokenStreamRewriter(tokens=stream)
rewriter.replaceRange(1, 2, 'foo')
rewriter.replaceRange(0, 2, 'bar')
self.assertEqual('barc', rewriter.getDefaultText())
def testOverlappingReplace4(self):
input = InputStream('abcc')
lexer = TestLexer(input)
stream = CommonTokenStream(lexer=lexer)
stream.fill()
rewriter = TokenStreamRewriter(tokens=stream)
rewriter.replaceRange(1, 2, 'foo')
rewriter.replaceRange(1, 3, 'bar')
self.assertEqual('abar', rewriter.getDefaultText())
def testDropIdenticalReplace(self):
input = InputStream('abcc')
lexer = TestLexer(input)
stream = CommonTokenStream(lexer=lexer)
stream.fill()
rewriter = TokenStreamRewriter(tokens=stream)
rewriter.replaceRange(1, 2, 'foo')
rewriter.replaceRange(1, 2, 'foo')
self.assertEqual('afooc', rewriter.getDefaultText())
def testDropPrevCoveredInsert(self):
input = InputStream('abc')
lexer = TestLexer(input)
stream = CommonTokenStream(lexer=lexer)
stream.fill()
rewriter = TokenStreamRewriter(tokens=stream)
rewriter.insertBeforeIndex(1, 'foo')
rewriter.replaceRange(1, 2, 'foo')
self.assertEqual('afoofoo', rewriter.getDefaultText())
def testLeaveAloneDisjointInsert(self):
input = InputStream('abcc')
lexer = TestLexer(input)
stream = CommonTokenStream(lexer=lexer)
stream.fill()
rewriter = TokenStreamRewriter(tokens=stream)
rewriter.insertBeforeIndex(1, 'x')
rewriter.replaceRange(2, 3, 'foo')
self.assertEqual('axbfoo', rewriter.getDefaultText())
def testLeaveAloneDisjointInsert2(self):
input = InputStream('abcc')
lexer = TestLexer(input)
stream = CommonTokenStream(lexer=lexer)
stream.fill()
rewriter = TokenStreamRewriter(tokens=stream)
rewriter.replaceRange(2, 3, 'foo')
rewriter.insertBeforeIndex(1, 'x')
self.assertEqual('axbfoo', rewriter.getDefaultText())
def testInsertBeforeTokenThenDeleteThatToken(self):
input = InputStream('abc')
lexer = TestLexer(input)
stream = CommonTokenStream(lexer=lexer)
stream.fill()
rewriter = TokenStreamRewriter(tokens=stream)
rewriter.insertBeforeIndex(1, 'foo')
rewriter.replaceRange(1, 2, 'foo')
self.assertEqual('afoofoo', rewriter.getDefaultText())
def testPreservesOrderOfContiguousInserts(self):
"""
Test for fix for: https://github.com/antlr/antlr4/issues/550
"""
input = InputStream('aa')
lexer = TestLexer(input)
stream = CommonTokenStream(lexer=lexer)
stream.fill()
rewriter = TokenStreamRewriter(tokens=stream)
rewriter.insertBeforeIndex(0, '<b>')
rewriter.insertAfter(0, '</b>')
rewriter.insertBeforeIndex(1, '<b>')
rewriter.insertAfter(1, '</b>')
self.assertEqual('<b>a</b><b>a</b>', rewriter.getDefaultText())
if __name__ == '__main__':
unittest.main()
antlr4-python3-runtime-4.9.1/test/__init__.py
__author__ = 'ericvergnaud'
antlr4-python3-runtime-4.9.1/test/c.c
void main()
{
int a=0;
if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
a++;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
a++;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
a++;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
a++;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
a++;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
a++;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
a++;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
a++;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
else if( 3 > 4){
;
}
}
antlr4-python3-runtime-4.9.1/test/ctest.py
#
# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
#
import sys
sys.setrecursionlimit(4000)
import antlr4
from parser.cparser import CParser
from parser.clexer import CLexer
from datetime import datetime
import cProfile
class ErrorListener(antlr4.error.ErrorListener.ErrorListener):
def __init__(self):
super(ErrorListener, self).__init__()
self.errored_out = False
def syntaxError(self, recognizer, offendingSymbol, line, column, msg, e):
self.errored_out = True
def sub():
# Parse the input file
input_stream = antlr4.FileStream("c.c")
lexer = CLexer(input_stream)
token_stream = antlr4.CommonTokenStream(lexer)
parser = CParser(token_stream)
errors = ErrorListener()
parser.addErrorListener(errors)
tree = parser.compilationUnit()
def main():
before = datetime.now()
sub()
after = datetime.now()
print(str(after-before))
# before = after
# sub()
# after = datetime.now()
# print(str(after-before))
if __name__ == '__main__':
cProfile.run("main()", sort='tottime') ././@PaxHeader 0000000 0000000 0000000 00000000034 00000000000 011452 x ustar 00 0000000 0000000 28 mtime=1609880862.5517519
antlr4-python3-runtime-4.9.1/test/expr/ 0000755 0000766 0000000 00000000000 00000000000 020221 5 ustar 00parrt wheel 0000000 0000000 ././@PaxHeader 0000000 0000000 0000000 00000000026 00000000000 011453 x ustar 00 0000000 0000000 22 mtime=1609697479.0
antlr4-python3-runtime-4.9.1/test/expr/ExprLexer.py 0000644 0000766 0000000 00000007505 00000000000 022520 0 ustar 00parrt wheel 0000000 0000000 # Generated from expr/Expr.g4 by ANTLR 4.7.2
from antlr4 import *
from io import StringIO
from typing import TextIO
import sys
def serializedATN():
with StringIO() as buf:
buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\2\23")
buf.write("^\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7")
buf.write("\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r\4\16")
buf.write("\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\3\2\3\2")
buf.write("\3\2\3\2\3\3\3\3\3\4\3\4\3\5\3\5\3\6\3\6\3\7\3\7\3\b\3")
buf.write("\b\3\t\3\t\3\n\3\n\3\13\3\13\3\f\3\f\3\r\3\r\3\16\3\16")
buf.write("\3\16\3\16\3\16\3\16\3\16\3\17\6\17H\n\17\r\17\16\17I")
buf.write("\3\20\6\20M\n\20\r\20\16\20N\3\21\5\21R\n\21\3\21\3\21")
buf.write("\3\21\3\21\3\22\6\22Y\n\22\r\22\16\22Z\3\22\3\22\2\2\23")
buf.write("\3\3\5\4\7\5\t\6\13\7\r\b\17\t\21\n\23\13\25\f\27\r\31")
buf.write("\16\33\17\35\20\37\21!\22#\23\3\2\5\4\2C\\c|\3\2\62;\4")
buf.write("\2\13\13\"\"\2a\2\3\3\2\2\2\2\5\3\2\2\2\2\7\3\2\2\2\2")
buf.write("\t\3\2\2\2\2\13\3\2\2\2\2\r\3\2\2\2\2\17\3\2\2\2\2\21")
buf.write("\3\2\2\2\2\23\3\2\2\2\2\25\3\2\2\2\2\27\3\2\2\2\2\31\3")
buf.write("\2\2\2\2\33\3\2\2\2\2\35\3\2\2\2\2\37\3\2\2\2\2!\3\2\2")
buf.write("\2\2#\3\2\2\2\3%\3\2\2\2\5)\3\2\2\2\7+\3\2\2\2\t-\3\2")
buf.write("\2\2\13/\3\2\2\2\r\61\3\2\2\2\17\63\3\2\2\2\21\65\3\2")
buf.write("\2\2\23\67\3\2\2\2\259\3\2\2\2\27;\3\2\2\2\31=\3\2\2\2")
buf.write("\33?\3\2\2\2\35G\3\2\2\2\37L\3\2\2\2!Q\3\2\2\2#X\3\2\2")
buf.write("\2%&\7f\2\2&\'\7g\2\2\'(\7h\2\2(\4\3\2\2\2)*\7*\2\2*\6")
buf.write("\3\2\2\2+,\7.\2\2,\b\3\2\2\2-.\7+\2\2.\n\3\2\2\2/\60\7")
buf.write("}\2\2\60\f\3\2\2\2\61\62\7\177\2\2\62\16\3\2\2\2\63\64")
buf.write("\7=\2\2\64\20\3\2\2\2\65\66\7?\2\2\66\22\3\2\2\2\678\7")
buf.write(",\2\28\24\3\2\2\29:\7\61\2\2:\26\3\2\2\2;<\7-\2\2<\30")
buf.write("\3\2\2\2=>\7/\2\2>\32\3\2\2\2?@\7t\2\2@A\7g\2\2AB\7v\2")
buf.write("\2BC\7w\2\2CD\7t\2\2DE\7p\2\2E\34\3\2\2\2FH\t\2\2\2GF")
buf.write("\3\2\2\2HI\3\2\2\2IG\3\2\2\2IJ\3\2\2\2J\36\3\2\2\2KM\t")
buf.write("\3\2\2LK\3\2\2\2MN\3\2\2\2NL\3\2\2\2NO\3\2\2\2O \3\2\2")
buf.write("\2PR\7\17\2\2QP\3\2\2\2QR\3\2\2\2RS\3\2\2\2ST\7\f\2\2")
buf.write("TU\3\2\2\2UV\b\21\2\2V\"\3\2\2\2WY\t\4\2\2XW\3\2\2\2Y")
buf.write("Z\3\2\2\2ZX\3\2\2\2Z[\3\2\2\2[\\\3\2\2\2\\]\b\22\2\2]")
buf.write("$\3\2\2\2\7\2INQZ\3\b\2\2")
return buf.getvalue()
class ExprLexer(Lexer):
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
T__0 = 1
T__1 = 2
T__2 = 3
T__3 = 4
T__4 = 5
T__5 = 6
T__6 = 7
T__7 = 8
MUL = 9
DIV = 10
ADD = 11
SUB = 12
RETURN = 13
ID = 14
INT = 15
NEWLINE = 16
WS = 17
channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ]
modeNames = [ "DEFAULT_MODE" ]
literalNames = [ "",
"'def'", "'('", "','", "')'", "'{'", "'}'", "';'", "'='", "'*'",
"'/'", "'+'", "'-'", "'return'" ]
symbolicNames = [ "",
"MUL", "DIV", "ADD", "SUB", "RETURN", "ID", "INT", "NEWLINE",
"WS" ]
ruleNames = [ "T__0", "T__1", "T__2", "T__3", "T__4", "T__5", "T__6",
"T__7", "MUL", "DIV", "ADD", "SUB", "RETURN", "ID", "INT",
"NEWLINE", "WS" ]
grammarFileName = "Expr.g4"
def __init__(self, input=None, output:TextIO = sys.stdout):
super().__init__(input, output)
self.checkVersion("4.9")
self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
self._actions = None
self._predicates = None
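
# Usage sketch (illustrative, not part of the generated output): drive the
# lexer over a small input and print each token. The sample string is an
# assumption for demonstration; any text matching Expr.g4's lexer rules works.
if __name__ == '__main__':
    from antlr4 import InputStream, Token
    lexer = ExprLexer(InputStream("x = 1 + 2 ;\n"))
    token = lexer.nextToken()
    while token.type != Token.EOF:
        # token types are 1-based, so ruleNames[type - 1] names the lexer rule
        print(lexer.ruleNames[token.type - 1], repr(token.text))
        token = lexer.nextToken()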

antlr4-python3-runtime-4.9.1/test/expr/ExprParser.py
# Generated from expr/Expr.g4 by ANTLR 4.7.2
# encoding: utf-8
from antlr4 import *
from io import StringIO
from typing import TextIO
import sys
def serializedATN():
with StringIO() as buf:
buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\3\23")
buf.write("S\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7\4\b")
buf.write("\t\b\3\2\6\2\22\n\2\r\2\16\2\23\3\3\3\3\3\3\3\3\3\3\3")
buf.write("\3\7\3\34\n\3\f\3\16\3\37\13\3\3\3\3\3\3\3\3\4\3\4\6\4")
buf.write("&\n\4\r\4\16\4\'\3\4\3\4\3\5\3\5\3\6\3\6\3\6\3\6\3\6\3")
buf.write("\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\5\6;\n\6\3\7\3\7\3\7\3")
buf.write("\7\3\7\3\7\3\7\3\7\3\7\7\7F\n\7\f\7\16\7I\13\7\3\b\3\b")
buf.write("\3\b\3\b\3\b\3\b\5\bQ\n\b\3\b\2\3\f\t\2\4\6\b\n\f\16\2")
buf.write("\4\3\2\13\f\3\2\r\16\2U\2\21\3\2\2\2\4\25\3\2\2\2\6#\3")
buf.write("\2\2\2\b+\3\2\2\2\n:\3\2\2\2\f<\3\2\2\2\16P\3\2\2\2\20")
buf.write("\22\5\4\3\2\21\20\3\2\2\2\22\23\3\2\2\2\23\21\3\2\2\2")
buf.write("\23\24\3\2\2\2\24\3\3\2\2\2\25\26\7\3\2\2\26\27\7\20\2")
buf.write("\2\27\30\7\4\2\2\30\35\5\b\5\2\31\32\7\5\2\2\32\34\5\b")
buf.write("\5\2\33\31\3\2\2\2\34\37\3\2\2\2\35\33\3\2\2\2\35\36\3")
buf.write("\2\2\2\36 \3\2\2\2\37\35\3\2\2\2 !\7\6\2\2!\"\5\6\4\2")
buf.write("\"\5\3\2\2\2#%\7\7\2\2$&\5\n\6\2%$\3\2\2\2&\'\3\2\2\2")
buf.write("\'%\3\2\2\2\'(\3\2\2\2()\3\2\2\2)*\7\b\2\2*\7\3\2\2\2")
buf.write("+,\7\20\2\2,\t\3\2\2\2-.\5\f\7\2./\7\t\2\2/;\3\2\2\2\60")
buf.write("\61\7\20\2\2\61\62\7\n\2\2\62\63\5\f\7\2\63\64\7\t\2\2")
buf.write("\64;\3\2\2\2\65\66\7\17\2\2\66\67\5\f\7\2\678\7\t\2\2")
buf.write("8;\3\2\2\29;\7\t\2\2:-\3\2\2\2:\60\3\2\2\2:\65\3\2\2\2")
buf.write(":9\3\2\2\2;\13\3\2\2\2<=\b\7\1\2=>\5\16\b\2>G\3\2\2\2")
buf.write("?@\f\5\2\2@A\t\2\2\2AF\5\f\7\6BC\f\4\2\2CD\t\3\2\2DF\5")
buf.write("\f\7\5E?\3\2\2\2EB\3\2\2\2FI\3\2\2\2GE\3\2\2\2GH\3\2\2")
buf.write("\2H\r\3\2\2\2IG\3\2\2\2JQ\7\21\2\2KQ\7\20\2\2LM\7\4\2")
buf.write("\2MN\5\f\7\2NO\7\6\2\2OQ\3\2\2\2PJ\3\2\2\2PK\3\2\2\2P")
buf.write("L\3\2\2\2Q\17\3\2\2\2\t\23\35\':EGP")
return buf.getvalue()
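# As in the lexer, the string above is the parser's serialized ATN; it is
# deserialized once below and shared (together with the DFA cache) by every
# ExprParser instance.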
class ExprParser ( Parser ):
grammarFileName = "Expr.g4"
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
sharedContextCache = PredictionContextCache()
    literalNames = [ "<INVALID>", "'def'", "'('", "','", "')'", "'{'", "'}'",
"';'", "'='", "'*'", "'/'", "'+'", "'-'", "'return'" ]
    symbolicNames = [ "<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
                      "<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
                      "<INVALID>", "MUL", "DIV", "ADD", "SUB", "RETURN",
                      "ID", "INT", "NEWLINE", "WS" ]
RULE_prog = 0
RULE_func = 1
RULE_body = 2
RULE_arg = 3
RULE_stat = 4
RULE_expr = 5
RULE_primary = 6
ruleNames = [ "prog", "func", "body", "arg", "stat", "expr", "primary" ]
EOF = Token.EOF
T__0=1
T__1=2
T__2=3
T__3=4
T__4=5
T__5=6
T__6=7
T__7=8
MUL=9
DIV=10
ADD=11
SUB=12
RETURN=13
ID=14
INT=15
NEWLINE=16
WS=17
def __init__(self, input:TokenStream, output:TextIO = sys.stdout):
super().__init__(input, output)
self.checkVersion("4.9")
self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)
self._predicates = None
class ProgContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def func(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(ExprParser.FuncContext)
else:
return self.getTypedRuleContext(ExprParser.FuncContext,i)
def getRuleIndex(self):
return ExprParser.RULE_prog
def prog(self):
localctx = ExprParser.ProgContext(self, self._ctx, self.state)
self.enterRule(localctx, 0, self.RULE_prog)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 15
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 14
self.func()
self.state = 17
self._errHandler.sync(self)
_la = self._input.LA(1)
if not (_la==ExprParser.T__0):
break
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class FuncContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def ID(self):
return self.getToken(ExprParser.ID, 0)
def arg(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(ExprParser.ArgContext)
else:
return self.getTypedRuleContext(ExprParser.ArgContext,i)
def body(self):
return self.getTypedRuleContext(ExprParser.BodyContext,0)
def getRuleIndex(self):
return ExprParser.RULE_func
def func(self):
localctx = ExprParser.FuncContext(self, self._ctx, self.state)
self.enterRule(localctx, 2, self.RULE_func)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 19
self.match(ExprParser.T__0)
self.state = 20
self.match(ExprParser.ID)
self.state = 21
self.match(ExprParser.T__1)
self.state = 22
self.arg()
self.state = 27
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==ExprParser.T__2:
self.state = 23
self.match(ExprParser.T__2)
self.state = 24
self.arg()
self.state = 29
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 30
self.match(ExprParser.T__3)
self.state = 31
self.body()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class BodyContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def stat(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(ExprParser.StatContext)
else:
return self.getTypedRuleContext(ExprParser.StatContext,i)
def getRuleIndex(self):
return ExprParser.RULE_body
def body(self):
localctx = ExprParser.BodyContext(self, self._ctx, self.state)
self.enterRule(localctx, 4, self.RULE_body)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 33
self.match(ExprParser.T__4)
self.state = 35
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 34
self.stat()
self.state = 37
self._errHandler.sync(self)
_la = self._input.LA(1)
if not ((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ExprParser.T__1) | (1 << ExprParser.T__6) | (1 << ExprParser.RETURN) | (1 << ExprParser.ID) | (1 << ExprParser.INT))) != 0)):
break
self.state = 39
self.match(ExprParser.T__5)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ArgContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def ID(self):
return self.getToken(ExprParser.ID, 0)
def getRuleIndex(self):
return ExprParser.RULE_arg
def arg(self):
localctx = ExprParser.ArgContext(self, self._ctx, self.state)
self.enterRule(localctx, 6, self.RULE_arg)
try:
self.enterOuterAlt(localctx, 1)
self.state = 41
self.match(ExprParser.ID)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class StatContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return ExprParser.RULE_stat
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class RetContext(StatContext):
        def __init__(self, parser, ctx:ParserRuleContext): # actually an ExprParser.StatContext
super().__init__(parser)
self.copyFrom(ctx)
def RETURN(self):
return self.getToken(ExprParser.RETURN, 0)
def expr(self):
return self.getTypedRuleContext(ExprParser.ExprContext,0)
class BlankContext(StatContext):
        def __init__(self, parser, ctx:ParserRuleContext): # actually an ExprParser.StatContext
super().__init__(parser)
self.copyFrom(ctx)
class PrintExprContext(StatContext):
        def __init__(self, parser, ctx:ParserRuleContext): # actually an ExprParser.StatContext
super().__init__(parser)
self.copyFrom(ctx)
def expr(self):
return self.getTypedRuleContext(ExprParser.ExprContext,0)
class AssignContext(StatContext):
        def __init__(self, parser, ctx:ParserRuleContext): # actually an ExprParser.StatContext
super().__init__(parser)
self.copyFrom(ctx)
def ID(self):
return self.getToken(ExprParser.ID, 0)
def expr(self):
return self.getTypedRuleContext(ExprParser.ExprContext,0)
def stat(self):
localctx = ExprParser.StatContext(self, self._ctx, self.state)
self.enterRule(localctx, 8, self.RULE_stat)
try:
self.state = 56
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,3,self._ctx)
if la_ == 1:
localctx = ExprParser.PrintExprContext(self, localctx)
self.enterOuterAlt(localctx, 1)
self.state = 43
self.expr(0)
self.state = 44
self.match(ExprParser.T__6)
pass
elif la_ == 2:
localctx = ExprParser.AssignContext(self, localctx)
self.enterOuterAlt(localctx, 2)
self.state = 46
self.match(ExprParser.ID)
self.state = 47
self.match(ExprParser.T__7)
self.state = 48
self.expr(0)
self.state = 49
self.match(ExprParser.T__6)
pass
elif la_ == 3:
localctx = ExprParser.RetContext(self, localctx)
self.enterOuterAlt(localctx, 3)
self.state = 51
self.match(ExprParser.RETURN)
self.state = 52
self.expr(0)
self.state = 53
self.match(ExprParser.T__6)
pass
elif la_ == 4:
localctx = ExprParser.BlankContext(self, localctx)
self.enterOuterAlt(localctx, 4)
self.state = 55
self.match(ExprParser.T__6)
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ExprContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return ExprParser.RULE_expr
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class PrimContext(ExprContext):
        def __init__(self, parser, ctx:ParserRuleContext): # actually an ExprParser.ExprContext
super().__init__(parser)
self.copyFrom(ctx)
def primary(self):
return self.getTypedRuleContext(ExprParser.PrimaryContext,0)
class MulDivContext(ExprContext):
        def __init__(self, parser, ctx:ParserRuleContext): # actually an ExprParser.ExprContext
super().__init__(parser)
self.copyFrom(ctx)
def expr(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(ExprParser.ExprContext)
else:
return self.getTypedRuleContext(ExprParser.ExprContext,i)
def MUL(self):
return self.getToken(ExprParser.MUL, 0)
def DIV(self):
return self.getToken(ExprParser.DIV, 0)
class AddSubContext(ExprContext):
        def __init__(self, parser, ctx:ParserRuleContext): # actually an ExprParser.ExprContext
super().__init__(parser)
self.copyFrom(ctx)
def expr(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(ExprParser.ExprContext)
else:
return self.getTypedRuleContext(ExprParser.ExprContext,i)
def ADD(self):
return self.getToken(ExprParser.ADD, 0)
def SUB(self):
return self.getToken(ExprParser.SUB, 0)
def expr(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = ExprParser.ExprContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 10
self.enterRecursionRule(localctx, 10, self.RULE_expr, _p)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
localctx = ExprParser.PrimContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 59
self.primary()
self._ctx.stop = self._input.LT(-1)
self.state = 69
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,5,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
self.state = 67
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,4,self._ctx)
if la_ == 1:
localctx = ExprParser.MulDivContext(self, ExprParser.ExprContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expr)
self.state = 61
if not self.precpred(self._ctx, 3):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 3)")
self.state = 62
_la = self._input.LA(1)
if not(_la==ExprParser.MUL or _la==ExprParser.DIV):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 63
self.expr(4)
pass
elif la_ == 2:
localctx = ExprParser.AddSubContext(self, ExprParser.ExprContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expr)
self.state = 64
if not self.precpred(self._ctx, 2):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 2)")
self.state = 65
_la = self._input.LA(1)
if not(_la==ExprParser.ADD or _la==ExprParser.SUB):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 66
self.expr(3)
pass
self.state = 71
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,5,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class PrimaryContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return ExprParser.RULE_primary
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class ParensContext(PrimaryContext):
        def __init__(self, parser, ctx:ParserRuleContext): # actually an ExprParser.PrimaryContext
super().__init__(parser)
self.copyFrom(ctx)
def expr(self):
return self.getTypedRuleContext(ExprParser.ExprContext,0)
class IdContext(PrimaryContext):
        def __init__(self, parser, ctx:ParserRuleContext): # actually an ExprParser.PrimaryContext
super().__init__(parser)
self.copyFrom(ctx)
def ID(self):
return self.getToken(ExprParser.ID, 0)
class IntContext(PrimaryContext):
        def __init__(self, parser, ctx:ParserRuleContext): # actually an ExprParser.PrimaryContext
super().__init__(parser)
self.copyFrom(ctx)
def INT(self):
return self.getToken(ExprParser.INT, 0)
def primary(self):
localctx = ExprParser.PrimaryContext(self, self._ctx, self.state)
self.enterRule(localctx, 12, self.RULE_primary)
try:
self.state = 78
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [ExprParser.INT]:
localctx = ExprParser.IntContext(self, localctx)
self.enterOuterAlt(localctx, 1)
self.state = 72
self.match(ExprParser.INT)
pass
elif token in [ExprParser.ID]:
localctx = ExprParser.IdContext(self, localctx)
self.enterOuterAlt(localctx, 2)
self.state = 73
self.match(ExprParser.ID)
pass
elif token in [ExprParser.T__1]:
localctx = ExprParser.ParensContext(self, localctx)
self.enterOuterAlt(localctx, 3)
self.state = 74
self.match(ExprParser.T__1)
self.state = 75
self.expr(0)
self.state = 76
self.match(ExprParser.T__3)
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
def sempred(self, localctx:RuleContext, ruleIndex:int, predIndex:int):
        if self._predicates is None:
self._predicates = dict()
self._predicates[5] = self.expr_sempred
pred = self._predicates.get(ruleIndex, None)
if pred is None:
raise Exception("No predicate with index:" + str(ruleIndex))
else:
return pred(localctx, predIndex)
def expr_sempred(self, localctx:ExprContext, predIndex:int):
if predIndex == 0:
return self.precpred(self._ctx, 3)
if predIndex == 1:
return self.precpred(self._ctx, 2)
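
# Usage sketch (illustrative, not part of the generated output): wire the
# generated lexer and parser together and print the parse tree. The import
# assumes ExprLexer.py sits next to this module on the import path.
if __name__ == '__main__':
    from antlr4 import InputStream, CommonTokenStream
    from ExprLexer import ExprLexer
    source = "def f(x) { return x + 1 ; }\n"
    lexer = ExprLexer(InputStream(source))
    parser = ExprParser(CommonTokenStream(lexer))
    tree = parser.prog()  # prog is the start rule of Expr.g4
    print(tree.toStringTree(recog=parser))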

antlr4-python3-runtime-4.9.1/test/mocks/TestLexer.py
# Generated from /Users/lyga/Dropbox/code/python/antlr4-learn/test_grammar/T.g4 by ANTLR 4.5.3
# encoding: utf-8
from __future__ import print_function
from antlr4 import *
from io import StringIO
def serializedATN():
with StringIO() as buf:
buf.write(u"\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\2")
buf.write(u"\5\17\b\1\4\2\t\2\4\3\t\3\4\4\t\4\3\2\3\2\3\3\3\3\3\4")
buf.write(u"\3\4\2\2\5\3\3\5\4\7\5\3\2\2\16\2\3\3\2\2\2\2\5\3\2\2")
buf.write(u"\2\2\7\3\2\2\2\3\t\3\2\2\2\5\13\3\2\2\2\7\r\3\2\2\2\t")
buf.write(u"\n\7c\2\2\n\4\3\2\2\2\13\f\7d\2\2\f\6\3\2\2\2\r\16\7")
buf.write(u"e\2\2\16\b\3\2\2\2\3\2\2")
return buf.getvalue()
class TestLexer(Lexer):
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [DFA(ds, i) for i, ds in enumerate(atn.decisionToState)]
A = 1
B = 2
C = 3
modeNames = [u"DEFAULT_MODE"]
    literalNames = [u"<INVALID>",
u"'a'", u"'b'", u"'c'"]
    symbolicNames = [u"<INVALID>",
u"A", u"B", u"C"]
ruleNames = [u"A", u"B", u"C"]
grammarFileName = u"T.g4"
def __init__(self, input=None):
super(TestLexer, self).__init__(input)
self.checkVersion("4.9")
self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
self._actions = None
self._predicates = None
def serializedATN2():
with StringIO() as buf:
buf.write(u"\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\2")
buf.write(u"\t(\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t")
buf.write(u"\7\4\b\t\b\3\2\6\2\23\n\2\r\2\16\2\24\3\3\6\3\30\n\3")
buf.write(u"\r\3\16\3\31\3\4\3\4\3\5\3\5\3\6\3\6\3\7\3\7\3\b\6\b")
buf.write(u"%\n\b\r\b\16\b&\2\2\t\3\3\5\4\7\5\t\6\13\7\r\b\17\t\3")
buf.write(u"\2\2*\2\3\3\2\2\2\2\5\3\2\2\2\2\7\3\2\2\2\2\t\3\2\2\2")
buf.write(u"\2\13\3\2\2\2\2\r\3\2\2\2\2\17\3\2\2\2\3\22\3\2\2\2\5")
buf.write(u"\27\3\2\2\2\7\33\3\2\2\2\t\35\3\2\2\2\13\37\3\2\2\2\r")
buf.write(u"!\3\2\2\2\17$\3\2\2\2\21\23\4c|\2\22\21\3\2\2\2\23\24")
buf.write(u"\3\2\2\2\24\22\3\2\2\2\24\25\3\2\2\2\25\4\3\2\2\2\26")
buf.write(u"\30\4\62;\2\27\26\3\2\2\2\30\31\3\2\2\2\31\27\3\2\2\2")
buf.write(u"\31\32\3\2\2\2\32\6\3\2\2\2\33\34\7=\2\2\34\b\3\2\2\2")
buf.write(u"\35\36\7?\2\2\36\n\3\2\2\2\37 \7-\2\2 \f\3\2\2\2!\"\7")
buf.write(u",\2\2\"\16\3\2\2\2#%\7\"\2\2$#\3\2\2\2%&\3\2\2\2&$\3")
buf.write(u"\2\2\2&\'\3\2\2\2\'\20\3\2\2\2\6\2\24\31&\2")
return buf.getvalue()
class TestLexer2(Lexer):
atn = ATNDeserializer().deserialize(serializedATN2())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
ID = 1
INT = 2
SEMI = 3
ASSIGN = 4
PLUS = 5
MULT = 6
WS = 7
modeNames = [ u"DEFAULT_MODE" ]
    literalNames = [ u"<INVALID>",
u"';'", u"'='", u"'+'", u"'*'" ]
    symbolicNames = [ u"<INVALID>",
u"ID", u"INT", u"SEMI", u"ASSIGN", u"PLUS", u"MULT", u"WS" ]
ruleNames = [ u"ID", u"INT", u"SEMI", u"ASSIGN", u"PLUS", u"MULT", u"WS" ]
grammarFileName = u"T2.g4"
def __init__(self, input=None):
super(TestLexer2, self).__init__(input)
self.checkVersion("4.8")
self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
self._actions = None
self._predicates = None
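
# Usage sketch (illustrative, not part of the mock): the mocks are driven like
# any generated lexer; TestLexer tokenizes the single characters 'a', 'b', 'c'.
if __name__ == '__main__':
    from antlr4 import InputStream, Token
    lexer = TestLexer(InputStream("abc"))
    t = lexer.nextToken()
    while t.type != Token.EOF:
        print(lexer.symbolicNames[t.type], repr(t.text))
        t = lexer.nextToken()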

antlr4-python3-runtime-4.9.1/test/mocks/__init__.py

antlr4-python3-runtime-4.9.1/test/parser/__init__.py
__author__ = 'ericvergnaud'

antlr4-python3-runtime-4.9.1/test/parser/clexer.py
# Generated from C.bnf by ANTLR 4.5.1
from antlr4 import *
from io import StringIO
def serializedATN():
with StringIO() as buf:
buf.write("\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\2s")
buf.write("\u04e7\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7")
buf.write("\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r")
buf.write("\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23")
buf.write("\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30")
buf.write("\4\31\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36")
buf.write("\t\36\4\37\t\37\4 \t \4!\t!\4\"\t\"\4#\t#\4$\t$\4%\t%")
buf.write("\4&\t&\4\'\t\'\4(\t(\4)\t)\4*\t*\4+\t+\4,\t,\4-\t-\4.")
buf.write("\t.\4/\t/\4\60\t\60\4\61\t\61\4\62\t\62\4\63\t\63\4\64")
buf.write("\t\64\4\65\t\65\4\66\t\66\4\67\t\67\48\t8\49\t9\4:\t:")
buf.write("\4;\t;\4<\t<\4=\t=\4>\t>\4?\t?\4@\t@\4A\tA\4B\tB\4C\t")
buf.write("C\4D\tD\4E\tE\4F\tF\4G\tG\4H\tH\4I\tI\4J\tJ\4K\tK\4L\t")
buf.write("L\4M\tM\4N\tN\4O\tO\4P\tP\4Q\tQ\4R\tR\4S\tS\4T\tT\4U\t")
buf.write("U\4V\tV\4W\tW\4X\tX\4Y\tY\4Z\tZ\4[\t[\4\\\t\\\4]\t]\4")
buf.write("^\t^\4_\t_\4`\t`\4a\ta\4b\tb\4c\tc\4d\td\4e\te\4f\tf\4")
buf.write("g\tg\4h\th\4i\ti\4j\tj\4k\tk\4l\tl\4m\tm\4n\tn\4o\to\4")
buf.write("p\tp\4q\tq\4r\tr\4s\ts\4t\tt\4u\tu\4v\tv\4w\tw\4x\tx\4")
buf.write("y\ty\4z\tz\4{\t{\4|\t|\4}\t}\4~\t~\4\177\t\177\4\u0080")
buf.write("\t\u0080\4\u0081\t\u0081\4\u0082\t\u0082\4\u0083\t\u0083")
buf.write("\4\u0084\t\u0084\4\u0085\t\u0085\4\u0086\t\u0086\4\u0087")
buf.write("\t\u0087\4\u0088\t\u0088\4\u0089\t\u0089\4\u008a\t\u008a")
buf.write("\4\u008b\t\u008b\4\u008c\t\u008c\4\u008d\t\u008d\4\u008e")
buf.write("\t\u008e\4\u008f\t\u008f\4\u0090\t\u0090\4\u0091\t\u0091")
buf.write("\4\u0092\t\u0092\4\u0093\t\u0093\4\u0094\t\u0094\4\u0095")
buf.write("\t\u0095\4\u0096\t\u0096\4\u0097\t\u0097\4\u0098\t\u0098")
buf.write("\3\2\3\2\3\2\3\2\3\2\3\2\3\2\3\2\3\2\3\2\3\2\3\2\3\2\3")
buf.write("\2\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3")
buf.write("\3\3\3\3\3\3\3\3\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3")
buf.write("\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\5\3\5\3\5\3\5")
buf.write("\3\5\3\5\3\5\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\7\3\7\3")
buf.write("\7\3\7\3\7\3\7\3\7\3\7\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\b")
buf.write("\3\b\3\b\3\b\3\t\3\t\3\t\3\t\3\t\3\t\3\t\3\t\3\t\3\t\3")
buf.write("\t\3\n\3\n\3\n\3\n\3\n\3\n\3\n\3\n\3\n\3\n\3\13\3\13\3")
buf.write("\13\3\13\3\13\3\13\3\13\3\13\3\13\3\13\3\13\3\f\3\f\3")
buf.write("\f\3\f\3\f\3\f\3\r\3\r\3\r\3\r\3\r\3\r\3\r\3\r\3\r\3\r")
buf.write("\3\r\3\r\3\r\3\r\3\16\3\16\3\16\3\16\3\16\3\16\3\16\3")
buf.write("\16\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17")
buf.write("\3\17\3\17\3\17\3\20\3\20\3\20\3\20\3\20\3\21\3\21\3\21")
buf.write("\3\21\3\21\3\21\3\22\3\22\3\22\3\22\3\22\3\23\3\23\3\23")
buf.write("\3\23\3\23\3\24\3\24\3\24\3\24\3\24\3\24\3\25\3\25\3\25")
buf.write("\3\25\3\25\3\25\3\25\3\25\3\25\3\26\3\26\3\26\3\26\3\26")
buf.write("\3\26\3\26\3\26\3\27\3\27\3\27\3\30\3\30\3\30\3\30\3\30")
buf.write("\3\30\3\30\3\31\3\31\3\31\3\31\3\31\3\32\3\32\3\32\3\32")
buf.write("\3\32\3\33\3\33\3\33\3\33\3\33\3\33\3\33\3\34\3\34\3\34")
buf.write("\3\34\3\34\3\34\3\35\3\35\3\35\3\35\3\36\3\36\3\36\3\36")
buf.write("\3\36\3\37\3\37\3\37\3 \3 \3 \3 \3 \3 \3 \3!\3!\3!\3!")
buf.write("\3\"\3\"\3\"\3\"\3\"\3#\3#\3#\3#\3#\3#\3#\3#\3#\3$\3$")
buf.write("\3$\3$\3$\3$\3$\3$\3$\3%\3%\3%\3%\3%\3%\3%\3&\3&\3&\3")
buf.write("&\3&\3&\3\'\3\'\3\'\3\'\3\'\3\'\3\'\3(\3(\3(\3(\3(\3(")
buf.write("\3(\3)\3)\3)\3)\3)\3)\3)\3*\3*\3*\3*\3*\3*\3*\3+\3+\3")
buf.write("+\3+\3+\3+\3+\3,\3,\3,\3,\3,\3,\3,\3,\3-\3-\3-\3-\3-\3")
buf.write("-\3.\3.\3.\3.\3.\3.\3.\3.\3.\3/\3/\3/\3/\3/\3\60\3\60")
buf.write("\3\60\3\60\3\60\3\60\3\60\3\60\3\60\3\61\3\61\3\61\3\61")
buf.write("\3\61\3\61\3\62\3\62\3\62\3\62\3\62\3\62\3\62\3\62\3\62")
buf.write("\3\63\3\63\3\63\3\63\3\63\3\63\3\63\3\63\3\63\3\64\3\64")
buf.write("\3\64\3\64\3\64\3\64\3\64\3\64\3\65\3\65\3\65\3\65\3\65")
buf.write("\3\65\3\66\3\66\3\66\3\66\3\66\3\66\3\66\3\66\3\66\3\67")
buf.write("\3\67\3\67\3\67\3\67\3\67\3\67\3\67\3\67\38\38\38\38\3")
buf.write("8\38\38\38\38\38\38\39\39\39\39\39\39\39\39\39\39\3:\3")
buf.write(":\3:\3:\3:\3:\3:\3:\3:\3:\3:\3:\3:\3:\3:\3;\3;\3;\3;\3")
buf.write(";\3;\3;\3;\3;\3;\3;\3;\3;\3;\3<\3<\3=\3=\3>\3>\3?\3?\3")
buf.write("@\3@\3A\3A\3B\3B\3C\3C\3C\3D\3D\3E\3E\3E\3F\3F\3F\3G\3")
buf.write("G\3G\3H\3H\3I\3I\3I\3J\3J\3K\3K\3K\3L\3L\3M\3M\3N\3N\3")
buf.write("O\3O\3P\3P\3Q\3Q\3Q\3R\3R\3R\3S\3S\3T\3T\3U\3U\3V\3V\3")
buf.write("W\3W\3X\3X\3Y\3Y\3Z\3Z\3[\3[\3[\3\\\3\\\3\\\3]\3]\3]\3")
buf.write("^\3^\3^\3_\3_\3_\3`\3`\3`\3`\3a\3a\3a\3a\3b\3b\3b\3c\3")
buf.write("c\3c\3d\3d\3d\3e\3e\3e\3f\3f\3f\3g\3g\3g\3h\3h\3i\3i\3")
buf.write("i\3i\3j\3j\3j\7j\u0381\nj\fj\16j\u0384\13j\3k\3k\5k\u0388")
buf.write("\nk\3l\3l\3m\3m\3n\3n\3n\3n\3n\3n\3n\3n\3n\3n\5n\u0398")
buf.write("\nn\3o\3o\3o\3o\3o\3p\3p\3p\5p\u03a2\np\3q\3q\5q\u03a6")
buf.write("\nq\3q\3q\5q\u03aa\nq\3q\3q\5q\u03ae\nq\5q\u03b0\nq\3")
buf.write("r\3r\7r\u03b4\nr\fr\16r\u03b7\13r\3s\3s\7s\u03bb\ns\f")
buf.write("s\16s\u03be\13s\3t\3t\6t\u03c2\nt\rt\16t\u03c3\3u\3u\3")
buf.write("u\3v\3v\3w\3w\3x\3x\3y\3y\5y\u03d1\ny\3y\3y\3y\3y\3y\5")
buf.write("y\u03d8\ny\3y\3y\5y\u03dc\ny\5y\u03de\ny\3z\3z\3{\3{\3")
buf.write("|\3|\3|\3|\5|\u03e8\n|\3}\3}\5}\u03ec\n}\3~\3~\5~\u03f0")
buf.write("\n~\3~\5~\u03f3\n~\3~\3~\3~\5~\u03f8\n~\5~\u03fa\n~\3")
buf.write("\177\3\177\3\177\3\177\5\177\u0400\n\177\3\177\3\177\3")
buf.write("\177\3\177\5\177\u0406\n\177\5\177\u0408\n\177\3\u0080")
buf.write("\5\u0080\u040b\n\u0080\3\u0080\3\u0080\3\u0080\3\u0080")
buf.write("\3\u0080\5\u0080\u0412\n\u0080\3\u0081\3\u0081\5\u0081")
buf.write("\u0416\n\u0081\3\u0081\3\u0081\3\u0081\5\u0081\u041b\n")
buf.write("\u0081\3\u0081\5\u0081\u041e\n\u0081\3\u0082\3\u0082\3")
buf.write("\u0083\6\u0083\u0423\n\u0083\r\u0083\16\u0083\u0424\3")
buf.write("\u0084\5\u0084\u0428\n\u0084\3\u0084\3\u0084\3\u0084\3")
buf.write("\u0084\3\u0084\5\u0084\u042f\n\u0084\3\u0085\3\u0085\5")
buf.write("\u0085\u0433\n\u0085\3\u0085\3\u0085\3\u0085\5\u0085\u0438")
buf.write("\n\u0085\3\u0085\5\u0085\u043b\n\u0085\3\u0086\6\u0086")
buf.write("\u043e\n\u0086\r\u0086\16\u0086\u043f\3\u0087\3\u0087")
buf.write("\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write("\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write("\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write("\3\u0088\5\u0088\u045a\n\u0088\3\u0089\6\u0089\u045d\n")
buf.write("\u0089\r\u0089\16\u0089\u045e\3\u008a\3\u008a\5\u008a")
buf.write("\u0463\n\u008a\3\u008b\3\u008b\3\u008b\3\u008b\5\u008b")
buf.write("\u0469\n\u008b\3\u008c\3\u008c\3\u008c\3\u008d\3\u008d")
buf.write("\3\u008d\3\u008d\3\u008d\3\u008d\3\u008d\3\u008d\3\u008d")
buf.write("\3\u008d\3\u008d\5\u008d\u0479\n\u008d\3\u008e\3\u008e")
buf.write("\3\u008e\3\u008e\6\u008e\u047f\n\u008e\r\u008e\16\u008e")
buf.write("\u0480\3\u008f\5\u008f\u0484\n\u008f\3\u008f\3\u008f\5")
buf.write("\u008f\u0488\n\u008f\3\u008f\3\u008f\3\u0090\3\u0090\3")
buf.write("\u0090\5\u0090\u048f\n\u0090\3\u0091\6\u0091\u0492\n\u0091")
buf.write("\r\u0091\16\u0091\u0493\3\u0092\3\u0092\5\u0092\u0498")
buf.write("\n\u0092\3\u0093\3\u0093\5\u0093\u049c\n\u0093\3\u0093")
buf.write("\3\u0093\5\u0093\u04a0\n\u0093\3\u0093\3\u0093\7\u0093")
buf.write("\u04a4\n\u0093\f\u0093\16\u0093\u04a7\13\u0093\3\u0093")
buf.write("\3\u0093\3\u0094\3\u0094\5\u0094\u04ad\n\u0094\3\u0094")
buf.write("\3\u0094\3\u0094\3\u0094\3\u0094\3\u0094\3\u0094\3\u0094")
buf.write("\3\u0094\7\u0094\u04b8\n\u0094\f\u0094\16\u0094\u04bb")
buf.write("\13\u0094\3\u0094\3\u0094\3\u0095\6\u0095\u04c0\n\u0095")
buf.write("\r\u0095\16\u0095\u04c1\3\u0095\3\u0095\3\u0096\3\u0096")
buf.write("\5\u0096\u04c8\n\u0096\3\u0096\5\u0096\u04cb\n\u0096\3")
buf.write("\u0096\3\u0096\3\u0097\3\u0097\3\u0097\3\u0097\7\u0097")
buf.write("\u04d3\n\u0097\f\u0097\16\u0097\u04d6\13\u0097\3\u0097")
buf.write("\3\u0097\3\u0097\3\u0097\3\u0097\3\u0098\3\u0098\3\u0098")
buf.write("\3\u0098\7\u0098\u04e1\n\u0098\f\u0098\16\u0098\u04e4")
buf.write("\13\u0098\3\u0098\3\u0098\3\u04d4\2\u0099\3\3\5\4\7\5")
buf.write("\t\6\13\7\r\b\17\t\21\n\23\13\25\f\27\r\31\16\33\17\35")
buf.write("\20\37\21!\22#\23%\24\'\25)\26+\27-\30/\31\61\32\63\33")
buf.write("\65\34\67\359\36;\37= ?!A\"C#E$G%I&K\'M(O)Q*S+U,W-Y.[")
        buf.write("/]\60_\61a\62c\63e\64g\65i\66k\67m8o9q:s;u<w=y>{?}@\177")
buf.write("A\u0081B\u0083C\u0085D\u0087E\u0089F\u008bG\u008dH\u008f")
buf.write("I\u0091J\u0093K\u0095L\u0097M\u0099N\u009bO\u009dP\u009f")
buf.write("Q\u00a1R\u00a3S\u00a5T\u00a7U\u00a9V\u00abW\u00adX\u00af")
buf.write("Y\u00b1Z\u00b3[\u00b5\\\u00b7]\u00b9^\u00bb_\u00bd`\u00bf")
buf.write("a\u00c1b\u00c3c\u00c5d\u00c7e\u00c9f\u00cbg\u00cdh\u00cf")
buf.write("i\u00d1j\u00d3k\u00d5\2\u00d7\2\u00d9\2\u00db\2\u00dd")
buf.write("\2\u00dfl\u00e1\2\u00e3\2\u00e5\2\u00e7\2\u00e9\2\u00eb")
buf.write("\2\u00ed\2\u00ef\2\u00f1\2\u00f3\2\u00f5\2\u00f7\2\u00f9")
buf.write("\2\u00fb\2\u00fd\2\u00ff\2\u0101\2\u0103\2\u0105\2\u0107")
buf.write("\2\u0109\2\u010b\2\u010d\2\u010f\2\u0111\2\u0113\2\u0115")
buf.write("\2\u0117\2\u0119\2\u011b\2\u011dm\u011f\2\u0121\2\u0123")
buf.write("\2\u0125n\u0127o\u0129p\u012bq\u012dr\u012fs\3\2\22\5")
buf.write("\2C\\aac|\3\2\62;\4\2ZZzz\3\2\63;\3\2\629\5\2\62;CHch")
buf.write("\4\2WWww\4\2NNnn\4\2--//\6\2HHNNhhnn\6\2\f\f\17\17))^")
buf.write("^\f\2$$))AA^^cdhhppttvvxx\5\2NNWWww\6\2\f\f\17\17$$^^")
buf.write("\4\2\f\f\17\17\4\2\13\13\"\"\u0503\2\3\3\2\2\2\2\5\3\2")
buf.write("\2\2\2\7\3\2\2\2\2\t\3\2\2\2\2\13\3\2\2\2\2\r\3\2\2\2")
buf.write("\2\17\3\2\2\2\2\21\3\2\2\2\2\23\3\2\2\2\2\25\3\2\2\2\2")
buf.write("\27\3\2\2\2\2\31\3\2\2\2\2\33\3\2\2\2\2\35\3\2\2\2\2\37")
buf.write("\3\2\2\2\2!\3\2\2\2\2#\3\2\2\2\2%\3\2\2\2\2\'\3\2\2\2")
buf.write("\2)\3\2\2\2\2+\3\2\2\2\2-\3\2\2\2\2/\3\2\2\2\2\61\3\2")
buf.write("\2\2\2\63\3\2\2\2\2\65\3\2\2\2\2\67\3\2\2\2\29\3\2\2\2")
buf.write("\2;\3\2\2\2\2=\3\2\2\2\2?\3\2\2\2\2A\3\2\2\2\2C\3\2\2")
buf.write("\2\2E\3\2\2\2\2G\3\2\2\2\2I\3\2\2\2\2K\3\2\2\2\2M\3\2")
buf.write("\2\2\2O\3\2\2\2\2Q\3\2\2\2\2S\3\2\2\2\2U\3\2\2\2\2W\3")
buf.write("\2\2\2\2Y\3\2\2\2\2[\3\2\2\2\2]\3\2\2\2\2_\3\2\2\2\2a")
buf.write("\3\2\2\2\2c\3\2\2\2\2e\3\2\2\2\2g\3\2\2\2\2i\3\2\2\2\2")
buf.write("k\3\2\2\2\2m\3\2\2\2\2o\3\2\2\2\2q\3\2\2\2\2s\3\2\2\2")
buf.write("\2u\3\2\2\2\2w\3\2\2\2\2y\3\2\2\2\2{\3\2\2\2\2}\3\2\2")
buf.write("\2\2\177\3\2\2\2\2\u0081\3\2\2\2\2\u0083\3\2\2\2\2\u0085")
buf.write("\3\2\2\2\2\u0087\3\2\2\2\2\u0089\3\2\2\2\2\u008b\3\2\2")
buf.write("\2\2\u008d\3\2\2\2\2\u008f\3\2\2\2\2\u0091\3\2\2\2\2\u0093")
buf.write("\3\2\2\2\2\u0095\3\2\2\2\2\u0097\3\2\2\2\2\u0099\3\2\2")
buf.write("\2\2\u009b\3\2\2\2\2\u009d\3\2\2\2\2\u009f\3\2\2\2\2\u00a1")
buf.write("\3\2\2\2\2\u00a3\3\2\2\2\2\u00a5\3\2\2\2\2\u00a7\3\2\2")
buf.write("\2\2\u00a9\3\2\2\2\2\u00ab\3\2\2\2\2\u00ad\3\2\2\2\2\u00af")
buf.write("\3\2\2\2\2\u00b1\3\2\2\2\2\u00b3\3\2\2\2\2\u00b5\3\2\2")
buf.write("\2\2\u00b7\3\2\2\2\2\u00b9\3\2\2\2\2\u00bb\3\2\2\2\2\u00bd")
buf.write("\3\2\2\2\2\u00bf\3\2\2\2\2\u00c1\3\2\2\2\2\u00c3\3\2\2")
buf.write("\2\2\u00c5\3\2\2\2\2\u00c7\3\2\2\2\2\u00c9\3\2\2\2\2\u00cb")
buf.write("\3\2\2\2\2\u00cd\3\2\2\2\2\u00cf\3\2\2\2\2\u00d1\3\2\2")
buf.write("\2\2\u00d3\3\2\2\2\2\u00df\3\2\2\2\2\u011d\3\2\2\2\2\u0125")
buf.write("\3\2\2\2\2\u0127\3\2\2\2\2\u0129\3\2\2\2\2\u012b\3\2\2")
buf.write("\2\2\u012d\3\2\2\2\2\u012f\3\2\2\2\3\u0131\3\2\2\2\5\u013f")
buf.write("\3\2\2\2\7\u0150\3\2\2\2\t\u0163\3\2\2\2\13\u016a\3\2")
buf.write("\2\2\r\u0172\3\2\2\2\17\u017a\3\2\2\2\21\u0185\3\2\2\2")
buf.write("\23\u0190\3\2\2\2\25\u019a\3\2\2\2\27\u01a5\3\2\2\2\31")
buf.write("\u01ab\3\2\2\2\33\u01b9\3\2\2\2\35\u01c1\3\2\2\2\37\u01ce")
buf.write("\3\2\2\2!\u01d3\3\2\2\2#\u01d9\3\2\2\2%\u01de\3\2\2\2")
buf.write("\'\u01e3\3\2\2\2)\u01e9\3\2\2\2+\u01f2\3\2\2\2-\u01fa")
buf.write("\3\2\2\2/\u01fd\3\2\2\2\61\u0204\3\2\2\2\63\u0209\3\2")
buf.write("\2\2\65\u020e\3\2\2\2\67\u0215\3\2\2\29\u021b\3\2\2\2")
buf.write(";\u021f\3\2\2\2=\u0224\3\2\2\2?\u0227\3\2\2\2A\u022e\3")
buf.write("\2\2\2C\u0232\3\2\2\2E\u0237\3\2\2\2G\u0240\3\2\2\2I\u0249")
buf.write("\3\2\2\2K\u0250\3\2\2\2M\u0256\3\2\2\2O\u025d\3\2\2\2")
buf.write("Q\u0264\3\2\2\2S\u026b\3\2\2\2U\u0272\3\2\2\2W\u0279\3")
buf.write("\2\2\2Y\u0281\3\2\2\2[\u0287\3\2\2\2]\u0290\3\2\2\2_\u0295")
buf.write("\3\2\2\2a\u029e\3\2\2\2c\u02a4\3\2\2\2e\u02ad\3\2\2\2")
buf.write("g\u02b6\3\2\2\2i\u02be\3\2\2\2k\u02c4\3\2\2\2m\u02cd\3")
buf.write("\2\2\2o\u02d6\3\2\2\2q\u02e1\3\2\2\2s\u02eb\3\2\2\2u\u02fa")
buf.write("\3\2\2\2w\u0308\3\2\2\2y\u030a\3\2\2\2{\u030c\3\2\2\2")
buf.write("}\u030e\3\2\2\2\177\u0310\3\2\2\2\u0081\u0312\3\2\2\2")
buf.write("\u0083\u0314\3\2\2\2\u0085\u0316\3\2\2\2\u0087\u0319\3")
buf.write("\2\2\2\u0089\u031b\3\2\2\2\u008b\u031e\3\2\2\2\u008d\u0321")
buf.write("\3\2\2\2\u008f\u0324\3\2\2\2\u0091\u0326\3\2\2\2\u0093")
buf.write("\u0329\3\2\2\2\u0095\u032b\3\2\2\2\u0097\u032e\3\2\2\2")
buf.write("\u0099\u0330\3\2\2\2\u009b\u0332\3\2\2\2\u009d\u0334\3")
buf.write("\2\2\2\u009f\u0336\3\2\2\2\u00a1\u0338\3\2\2\2\u00a3\u033b")
buf.write("\3\2\2\2\u00a5\u033e\3\2\2\2\u00a7\u0340\3\2\2\2\u00a9")
buf.write("\u0342\3\2\2\2\u00ab\u0344\3\2\2\2\u00ad\u0346\3\2\2\2")
buf.write("\u00af\u0348\3\2\2\2\u00b1\u034a\3\2\2\2\u00b3\u034c\3")
buf.write("\2\2\2\u00b5\u034e\3\2\2\2\u00b7\u0351\3\2\2\2\u00b9\u0354")
buf.write("\3\2\2\2\u00bb\u0357\3\2\2\2\u00bd\u035a\3\2\2\2\u00bf")
buf.write("\u035d\3\2\2\2\u00c1\u0361\3\2\2\2\u00c3\u0365\3\2\2\2")
buf.write("\u00c5\u0368\3\2\2\2\u00c7\u036b\3\2\2\2\u00c9\u036e\3")
buf.write("\2\2\2\u00cb\u0371\3\2\2\2\u00cd\u0374\3\2\2\2\u00cf\u0377")
buf.write("\3\2\2\2\u00d1\u0379\3\2\2\2\u00d3\u037d\3\2\2\2\u00d5")
buf.write("\u0387\3\2\2\2\u00d7\u0389\3\2\2\2\u00d9\u038b\3\2\2\2")
buf.write("\u00db\u0397\3\2\2\2\u00dd\u0399\3\2\2\2\u00df\u03a1\3")
buf.write("\2\2\2\u00e1\u03af\3\2\2\2\u00e3\u03b1\3\2\2\2\u00e5\u03b8")
buf.write("\3\2\2\2\u00e7\u03bf\3\2\2\2\u00e9\u03c5\3\2\2\2\u00eb")
buf.write("\u03c8\3\2\2\2\u00ed\u03ca\3\2\2\2\u00ef\u03cc\3\2\2\2")
buf.write("\u00f1\u03dd\3\2\2\2\u00f3\u03df\3\2\2\2\u00f5\u03e1\3")
buf.write("\2\2\2\u00f7\u03e7\3\2\2\2\u00f9\u03eb\3\2\2\2\u00fb\u03f9")
buf.write("\3\2\2\2\u00fd\u0407\3\2\2\2\u00ff\u0411\3\2\2\2\u0101")
buf.write("\u041d\3\2\2\2\u0103\u041f\3\2\2\2\u0105\u0422\3\2\2\2")
buf.write("\u0107\u042e\3\2\2\2\u0109\u043a\3\2\2\2\u010b\u043d\3")
buf.write("\2\2\2\u010d\u0441\3\2\2\2\u010f\u0459\3\2\2\2\u0111\u045c")
buf.write("\3\2\2\2\u0113\u0462\3\2\2\2\u0115\u0468\3\2\2\2\u0117")
buf.write("\u046a\3\2\2\2\u0119\u0478\3\2\2\2\u011b\u047a\3\2\2\2")
buf.write("\u011d\u0483\3\2\2\2\u011f\u048e\3\2\2\2\u0121\u0491\3")
buf.write("\2\2\2\u0123\u0497\3\2\2\2\u0125\u0499\3\2\2\2\u0127\u04aa")
buf.write("\3\2\2\2\u0129\u04bf\3\2\2\2\u012b\u04ca\3\2\2\2\u012d")
buf.write("\u04ce\3\2\2\2\u012f\u04dc\3\2\2\2\u0131\u0132\7a\2\2")
buf.write("\u0132\u0133\7a\2\2\u0133\u0134\7g\2\2\u0134\u0135\7z")
buf.write("\2\2\u0135\u0136\7v\2\2\u0136\u0137\7g\2\2\u0137\u0138")
buf.write("\7p\2\2\u0138\u0139\7u\2\2\u0139\u013a\7k\2\2\u013a\u013b")
buf.write("\7q\2\2\u013b\u013c\7p\2\2\u013c\u013d\7a\2\2\u013d\u013e")
buf.write("\7a\2\2\u013e\4\3\2\2\2\u013f\u0140\7a\2\2\u0140\u0141")
buf.write("\7a\2\2\u0141\u0142\7d\2\2\u0142\u0143\7w\2\2\u0143\u0144")
buf.write("\7k\2\2\u0144\u0145\7n\2\2\u0145\u0146\7v\2\2\u0146\u0147")
buf.write("\7k\2\2\u0147\u0148\7p\2\2\u0148\u0149\7a\2\2\u0149\u014a")
buf.write("\7x\2\2\u014a\u014b\7c\2\2\u014b\u014c\7a\2\2\u014c\u014d")
buf.write("\7c\2\2\u014d\u014e\7t\2\2\u014e\u014f\7i\2\2\u014f\6")
buf.write("\3\2\2\2\u0150\u0151\7a\2\2\u0151\u0152\7a\2\2\u0152\u0153")
buf.write("\7d\2\2\u0153\u0154\7w\2\2\u0154\u0155\7k\2\2\u0155\u0156")
buf.write("\7n\2\2\u0156\u0157\7v\2\2\u0157\u0158\7k\2\2\u0158\u0159")
buf.write("\7p\2\2\u0159\u015a\7a\2\2\u015a\u015b\7q\2\2\u015b\u015c")
buf.write("\7h\2\2\u015c\u015d\7h\2\2\u015d\u015e\7u\2\2\u015e\u015f")
buf.write("\7g\2\2\u015f\u0160\7v\2\2\u0160\u0161\7q\2\2\u0161\u0162")
buf.write("\7h\2\2\u0162\b\3\2\2\2\u0163\u0164\7a\2\2\u0164\u0165")
buf.write("\7a\2\2\u0165\u0166\7o\2\2\u0166\u0167\7\63\2\2\u0167")
buf.write("\u0168\7\64\2\2\u0168\u0169\7:\2\2\u0169\n\3\2\2\2\u016a")
buf.write("\u016b\7a\2\2\u016b\u016c\7a\2\2\u016c\u016d\7o\2\2\u016d")
buf.write("\u016e\7\63\2\2\u016e\u016f\7\64\2\2\u016f\u0170\7:\2")
buf.write("\2\u0170\u0171\7f\2\2\u0171\f\3\2\2\2\u0172\u0173\7a\2")
buf.write("\2\u0173\u0174\7a\2\2\u0174\u0175\7o\2\2\u0175\u0176\7")
buf.write("\63\2\2\u0176\u0177\7\64\2\2\u0177\u0178\7:\2\2\u0178")
buf.write("\u0179\7k\2\2\u0179\16\3\2\2\2\u017a\u017b\7a\2\2\u017b")
buf.write("\u017c\7a\2\2\u017c\u017d\7v\2\2\u017d\u017e\7{\2\2\u017e")
buf.write("\u017f\7r\2\2\u017f\u0180\7g\2\2\u0180\u0181\7q\2\2\u0181")
buf.write("\u0182\7h\2\2\u0182\u0183\7a\2\2\u0183\u0184\7a\2\2\u0184")
buf.write("\20\3\2\2\2\u0185\u0186\7a\2\2\u0186\u0187\7a\2\2\u0187")
buf.write("\u0188\7k\2\2\u0188\u0189\7p\2\2\u0189\u018a\7n\2\2\u018a")
buf.write("\u018b\7k\2\2\u018b\u018c\7p\2\2\u018c\u018d\7g\2\2\u018d")
buf.write("\u018e\7a\2\2\u018e\u018f\7a\2\2\u018f\22\3\2\2\2\u0190")
buf.write("\u0191\7a\2\2\u0191\u0192\7a\2\2\u0192\u0193\7u\2\2\u0193")
buf.write("\u0194\7v\2\2\u0194\u0195\7f\2\2\u0195\u0196\7e\2\2\u0196")
buf.write("\u0197\7c\2\2\u0197\u0198\7n\2\2\u0198\u0199\7n\2\2\u0199")
buf.write("\24\3\2\2\2\u019a\u019b\7a\2\2\u019b\u019c\7a\2\2\u019c")
buf.write("\u019d\7f\2\2\u019d\u019e\7g\2\2\u019e\u019f\7e\2\2\u019f")
buf.write("\u01a0\7n\2\2\u01a0\u01a1\7u\2\2\u01a1\u01a2\7r\2\2\u01a2")
buf.write("\u01a3\7g\2\2\u01a3\u01a4\7e\2\2\u01a4\26\3\2\2\2\u01a5")
buf.write("\u01a6\7a\2\2\u01a6\u01a7\7a\2\2\u01a7\u01a8\7c\2\2\u01a8")
buf.write("\u01a9\7u\2\2\u01a9\u01aa\7o\2\2\u01aa\30\3\2\2\2\u01ab")
buf.write("\u01ac\7a\2\2\u01ac\u01ad\7a\2\2\u01ad\u01ae\7c\2\2\u01ae")
buf.write("\u01af\7v\2\2\u01af\u01b0\7v\2\2\u01b0\u01b1\7t\2\2\u01b1")
buf.write("\u01b2\7k\2\2\u01b2\u01b3\7d\2\2\u01b3\u01b4\7w\2\2\u01b4")
buf.write("\u01b5\7v\2\2\u01b5\u01b6\7g\2\2\u01b6\u01b7\7a\2\2\u01b7")
buf.write("\u01b8\7a\2\2\u01b8\32\3\2\2\2\u01b9\u01ba\7a\2\2\u01ba")
buf.write("\u01bb\7a\2\2\u01bb\u01bc\7c\2\2\u01bc\u01bd\7u\2\2\u01bd")
buf.write("\u01be\7o\2\2\u01be\u01bf\7a\2\2\u01bf\u01c0\7a\2\2\u01c0")
buf.write("\34\3\2\2\2\u01c1\u01c2\7a\2\2\u01c2\u01c3\7a\2\2\u01c3")
buf.write("\u01c4\7x\2\2\u01c4\u01c5\7q\2\2\u01c5\u01c6\7n\2\2\u01c6")
buf.write("\u01c7\7c\2\2\u01c7\u01c8\7v\2\2\u01c8\u01c9\7k\2\2\u01c9")
buf.write("\u01ca\7n\2\2\u01ca\u01cb\7g\2\2\u01cb\u01cc\7a\2\2\u01cc")
buf.write("\u01cd\7a\2\2\u01cd\36\3\2\2\2\u01ce\u01cf\7c\2\2\u01cf")
buf.write("\u01d0\7w\2\2\u01d0\u01d1\7v\2\2\u01d1\u01d2\7q\2\2\u01d2")
buf.write(" \3\2\2\2\u01d3\u01d4\7d\2\2\u01d4\u01d5\7t\2\2\u01d5")
buf.write("\u01d6\7g\2\2\u01d6\u01d7\7c\2\2\u01d7\u01d8\7m\2\2\u01d8")
buf.write("\"\3\2\2\2\u01d9\u01da\7e\2\2\u01da\u01db\7c\2\2\u01db")
buf.write("\u01dc\7u\2\2\u01dc\u01dd\7g\2\2\u01dd$\3\2\2\2\u01de")
buf.write("\u01df\7e\2\2\u01df\u01e0\7j\2\2\u01e0\u01e1\7c\2\2\u01e1")
buf.write("\u01e2\7t\2\2\u01e2&\3\2\2\2\u01e3\u01e4\7e\2\2\u01e4")
buf.write("\u01e5\7q\2\2\u01e5\u01e6\7p\2\2\u01e6\u01e7\7u\2\2\u01e7")
buf.write("\u01e8\7v\2\2\u01e8(\3\2\2\2\u01e9\u01ea\7e\2\2\u01ea")
buf.write("\u01eb\7q\2\2\u01eb\u01ec\7p\2\2\u01ec\u01ed\7v\2\2\u01ed")
buf.write("\u01ee\7k\2\2\u01ee\u01ef\7p\2\2\u01ef\u01f0\7w\2\2\u01f0")
buf.write("\u01f1\7g\2\2\u01f1*\3\2\2\2\u01f2\u01f3\7f\2\2\u01f3")
buf.write("\u01f4\7g\2\2\u01f4\u01f5\7h\2\2\u01f5\u01f6\7c\2\2\u01f6")
buf.write("\u01f7\7w\2\2\u01f7\u01f8\7n\2\2\u01f8\u01f9\7v\2\2\u01f9")
buf.write(",\3\2\2\2\u01fa\u01fb\7f\2\2\u01fb\u01fc\7q\2\2\u01fc")
buf.write(".\3\2\2\2\u01fd\u01fe\7f\2\2\u01fe\u01ff\7q\2\2\u01ff")
buf.write("\u0200\7w\2\2\u0200\u0201\7d\2\2\u0201\u0202\7n\2\2\u0202")
buf.write("\u0203\7g\2\2\u0203\60\3\2\2\2\u0204\u0205\7g\2\2\u0205")
buf.write("\u0206\7n\2\2\u0206\u0207\7u\2\2\u0207\u0208\7g\2\2\u0208")
buf.write("\62\3\2\2\2\u0209\u020a\7g\2\2\u020a\u020b\7p\2\2\u020b")
buf.write("\u020c\7w\2\2\u020c\u020d\7o\2\2\u020d\64\3\2\2\2\u020e")
buf.write("\u020f\7g\2\2\u020f\u0210\7z\2\2\u0210\u0211\7v\2\2\u0211")
buf.write("\u0212\7g\2\2\u0212\u0213\7t\2\2\u0213\u0214\7p\2\2\u0214")
buf.write("\66\3\2\2\2\u0215\u0216\7h\2\2\u0216\u0217\7n\2\2\u0217")
buf.write("\u0218\7q\2\2\u0218\u0219\7c\2\2\u0219\u021a\7v\2\2\u021a")
buf.write("8\3\2\2\2\u021b\u021c\7h\2\2\u021c\u021d\7q\2\2\u021d")
buf.write("\u021e\7t\2\2\u021e:\3\2\2\2\u021f\u0220\7i\2\2\u0220")
buf.write("\u0221\7q\2\2\u0221\u0222\7v\2\2\u0222\u0223\7q\2\2\u0223")
buf.write("<\3\2\2\2\u0224\u0225\7k\2\2\u0225\u0226\7h\2\2\u0226")
buf.write(">\3\2\2\2\u0227\u0228\7k\2\2\u0228\u0229\7p\2\2\u0229")
buf.write("\u022a\7n\2\2\u022a\u022b\7k\2\2\u022b\u022c\7p\2\2\u022c")
buf.write("\u022d\7g\2\2\u022d@\3\2\2\2\u022e\u022f\7k\2\2\u022f")
buf.write("\u0230\7p\2\2\u0230\u0231\7v\2\2\u0231B\3\2\2\2\u0232")
buf.write("\u0233\7n\2\2\u0233\u0234\7q\2\2\u0234\u0235\7p\2\2\u0235")
buf.write("\u0236\7i\2\2\u0236D\3\2\2\2\u0237\u0238\7t\2\2\u0238")
buf.write("\u0239\7g\2\2\u0239\u023a\7i\2\2\u023a\u023b\7k\2\2\u023b")
buf.write("\u023c\7u\2\2\u023c\u023d\7v\2\2\u023d\u023e\7g\2\2\u023e")
buf.write("\u023f\7t\2\2\u023fF\3\2\2\2\u0240\u0241\7t\2\2\u0241")
buf.write("\u0242\7g\2\2\u0242\u0243\7u\2\2\u0243\u0244\7v\2\2\u0244")
buf.write("\u0245\7t\2\2\u0245\u0246\7k\2\2\u0246\u0247\7e\2\2\u0247")
buf.write("\u0248\7v\2\2\u0248H\3\2\2\2\u0249\u024a\7t\2\2\u024a")
buf.write("\u024b\7g\2\2\u024b\u024c\7v\2\2\u024c\u024d\7w\2\2\u024d")
buf.write("\u024e\7t\2\2\u024e\u024f\7p\2\2\u024fJ\3\2\2\2\u0250")
buf.write("\u0251\7u\2\2\u0251\u0252\7j\2\2\u0252\u0253\7q\2\2\u0253")
buf.write("\u0254\7t\2\2\u0254\u0255\7v\2\2\u0255L\3\2\2\2\u0256")
buf.write("\u0257\7u\2\2\u0257\u0258\7k\2\2\u0258\u0259\7i\2\2\u0259")
buf.write("\u025a\7p\2\2\u025a\u025b\7g\2\2\u025b\u025c\7f\2\2\u025c")
buf.write("N\3\2\2\2\u025d\u025e\7u\2\2\u025e\u025f\7k\2\2\u025f")
buf.write("\u0260\7|\2\2\u0260\u0261\7g\2\2\u0261\u0262\7q\2\2\u0262")
buf.write("\u0263\7h\2\2\u0263P\3\2\2\2\u0264\u0265\7u\2\2\u0265")
buf.write("\u0266\7v\2\2\u0266\u0267\7c\2\2\u0267\u0268\7v\2\2\u0268")
buf.write("\u0269\7k\2\2\u0269\u026a\7e\2\2\u026aR\3\2\2\2\u026b")
buf.write("\u026c\7u\2\2\u026c\u026d\7v\2\2\u026d\u026e\7t\2\2\u026e")
buf.write("\u026f\7w\2\2\u026f\u0270\7e\2\2\u0270\u0271\7v\2\2\u0271")
buf.write("T\3\2\2\2\u0272\u0273\7u\2\2\u0273\u0274\7y\2\2\u0274")
buf.write("\u0275\7k\2\2\u0275\u0276\7v\2\2\u0276\u0277\7e\2\2\u0277")
buf.write("\u0278\7j\2\2\u0278V\3\2\2\2\u0279\u027a\7v\2\2\u027a")
buf.write("\u027b\7{\2\2\u027b\u027c\7r\2\2\u027c\u027d\7g\2\2\u027d")
buf.write("\u027e\7f\2\2\u027e\u027f\7g\2\2\u027f\u0280\7h\2\2\u0280")
buf.write("X\3\2\2\2\u0281\u0282\7w\2\2\u0282\u0283\7p\2\2\u0283")
buf.write("\u0284\7k\2\2\u0284\u0285\7q\2\2\u0285\u0286\7p\2\2\u0286")
buf.write("Z\3\2\2\2\u0287\u0288\7w\2\2\u0288\u0289\7p\2\2\u0289")
buf.write("\u028a\7u\2\2\u028a\u028b\7k\2\2\u028b\u028c\7i\2\2\u028c")
buf.write("\u028d\7p\2\2\u028d\u028e\7g\2\2\u028e\u028f\7f\2\2\u028f")
buf.write("\\\3\2\2\2\u0290\u0291\7x\2\2\u0291\u0292\7q\2\2\u0292")
buf.write("\u0293\7k\2\2\u0293\u0294\7f\2\2\u0294^\3\2\2\2\u0295")
buf.write("\u0296\7x\2\2\u0296\u0297\7q\2\2\u0297\u0298\7n\2\2\u0298")
buf.write("\u0299\7c\2\2\u0299\u029a\7v\2\2\u029a\u029b\7k\2\2\u029b")
buf.write("\u029c\7n\2\2\u029c\u029d\7g\2\2\u029d`\3\2\2\2\u029e")
buf.write("\u029f\7y\2\2\u029f\u02a0\7j\2\2\u02a0\u02a1\7k\2\2\u02a1")
buf.write("\u02a2\7n\2\2\u02a2\u02a3\7g\2\2\u02a3b\3\2\2\2\u02a4")
buf.write("\u02a5\7a\2\2\u02a5\u02a6\7C\2\2\u02a6\u02a7\7n\2\2\u02a7")
buf.write("\u02a8\7k\2\2\u02a8\u02a9\7i\2\2\u02a9\u02aa\7p\2\2\u02aa")
buf.write("\u02ab\7c\2\2\u02ab\u02ac\7u\2\2\u02acd\3\2\2\2\u02ad")
buf.write("\u02ae\7a\2\2\u02ae\u02af\7C\2\2\u02af\u02b0\7n\2\2\u02b0")
buf.write("\u02b1\7k\2\2\u02b1\u02b2\7i\2\2\u02b2\u02b3\7p\2\2\u02b3")
buf.write("\u02b4\7q\2\2\u02b4\u02b5\7h\2\2\u02b5f\3\2\2\2\u02b6")
buf.write("\u02b7\7a\2\2\u02b7\u02b8\7C\2\2\u02b8\u02b9\7v\2\2\u02b9")
buf.write("\u02ba\7q\2\2\u02ba\u02bb\7o\2\2\u02bb\u02bc\7k\2\2\u02bc")
buf.write("\u02bd\7e\2\2\u02bdh\3\2\2\2\u02be\u02bf\7a\2\2\u02bf")
buf.write("\u02c0\7D\2\2\u02c0\u02c1\7q\2\2\u02c1\u02c2\7q\2\2\u02c2")
buf.write("\u02c3\7n\2\2\u02c3j\3\2\2\2\u02c4\u02c5\7a\2\2\u02c5")
buf.write("\u02c6\7E\2\2\u02c6\u02c7\7q\2\2\u02c7\u02c8\7o\2\2\u02c8")
buf.write("\u02c9\7r\2\2\u02c9\u02ca\7n\2\2\u02ca\u02cb\7g\2\2\u02cb")
buf.write("\u02cc\7z\2\2\u02ccl\3\2\2\2\u02cd\u02ce\7a\2\2\u02ce")
buf.write("\u02cf\7I\2\2\u02cf\u02d0\7g\2\2\u02d0\u02d1\7p\2\2\u02d1")
buf.write("\u02d2\7g\2\2\u02d2\u02d3\7t\2\2\u02d3\u02d4\7k\2\2\u02d4")
buf.write("\u02d5\7e\2\2\u02d5n\3\2\2\2\u02d6\u02d7\7a\2\2\u02d7")
buf.write("\u02d8\7K\2\2\u02d8\u02d9\7o\2\2\u02d9\u02da\7c\2\2\u02da")
buf.write("\u02db\7i\2\2\u02db\u02dc\7k\2\2\u02dc\u02dd\7p\2\2\u02dd")
buf.write("\u02de\7c\2\2\u02de\u02df\7t\2\2\u02df\u02e0\7{\2\2\u02e0")
buf.write("p\3\2\2\2\u02e1\u02e2\7a\2\2\u02e2\u02e3\7P\2\2\u02e3")
buf.write("\u02e4\7q\2\2\u02e4\u02e5\7t\2\2\u02e5\u02e6\7g\2\2\u02e6")
buf.write("\u02e7\7v\2\2\u02e7\u02e8\7w\2\2\u02e8\u02e9\7t\2\2\u02e9")
buf.write("\u02ea\7p\2\2\u02ear\3\2\2\2\u02eb\u02ec\7a\2\2\u02ec")
buf.write("\u02ed\7U\2\2\u02ed\u02ee\7v\2\2\u02ee\u02ef\7c\2\2\u02ef")
buf.write("\u02f0\7v\2\2\u02f0\u02f1\7k\2\2\u02f1\u02f2\7e\2\2\u02f2")
buf.write("\u02f3\7a\2\2\u02f3\u02f4\7c\2\2\u02f4\u02f5\7u\2\2\u02f5")
buf.write("\u02f6\7u\2\2\u02f6\u02f7\7g\2\2\u02f7\u02f8\7t\2\2\u02f8")
buf.write("\u02f9\7v\2\2\u02f9t\3\2\2\2\u02fa\u02fb\7a\2\2\u02fb")
buf.write("\u02fc\7V\2\2\u02fc\u02fd\7j\2\2\u02fd\u02fe\7t\2\2\u02fe")
buf.write("\u02ff\7g\2\2\u02ff\u0300\7c\2\2\u0300\u0301\7f\2\2\u0301")
buf.write("\u0302\7a\2\2\u0302\u0303\7n\2\2\u0303\u0304\7q\2\2\u0304")
buf.write("\u0305\7e\2\2\u0305\u0306\7c\2\2\u0306\u0307\7n\2\2\u0307")
buf.write("v\3\2\2\2\u0308\u0309\7*\2\2\u0309x\3\2\2\2\u030a\u030b")
buf.write("\7+\2\2\u030bz\3\2\2\2\u030c\u030d\7]\2\2\u030d|\3\2\2")
buf.write("\2\u030e\u030f\7_\2\2\u030f~\3\2\2\2\u0310\u0311\7}\2")
buf.write("\2\u0311\u0080\3\2\2\2\u0312\u0313\7\177\2\2\u0313\u0082")
buf.write("\3\2\2\2\u0314\u0315\7>\2\2\u0315\u0084\3\2\2\2\u0316")
buf.write("\u0317\7>\2\2\u0317\u0318\7?\2\2\u0318\u0086\3\2\2\2\u0319")
buf.write("\u031a\7@\2\2\u031a\u0088\3\2\2\2\u031b\u031c\7@\2\2\u031c")
buf.write("\u031d\7?\2\2\u031d\u008a\3\2\2\2\u031e\u031f\7>\2\2\u031f")
buf.write("\u0320\7>\2\2\u0320\u008c\3\2\2\2\u0321\u0322\7@\2\2\u0322")
buf.write("\u0323\7@\2\2\u0323\u008e\3\2\2\2\u0324\u0325\7-\2\2\u0325")
buf.write("\u0090\3\2\2\2\u0326\u0327\7-\2\2\u0327\u0328\7-\2\2\u0328")
buf.write("\u0092\3\2\2\2\u0329\u032a\7/\2\2\u032a\u0094\3\2\2\2")
buf.write("\u032b\u032c\7/\2\2\u032c\u032d\7/\2\2\u032d\u0096\3\2")
buf.write("\2\2\u032e\u032f\7,\2\2\u032f\u0098\3\2\2\2\u0330\u0331")
buf.write("\7\61\2\2\u0331\u009a\3\2\2\2\u0332\u0333\7\'\2\2\u0333")
buf.write("\u009c\3\2\2\2\u0334\u0335\7(\2\2\u0335\u009e\3\2\2\2")
buf.write("\u0336\u0337\7~\2\2\u0337\u00a0\3\2\2\2\u0338\u0339\7")
buf.write("(\2\2\u0339\u033a\7(\2\2\u033a\u00a2\3\2\2\2\u033b\u033c")
buf.write("\7~\2\2\u033c\u033d\7~\2\2\u033d\u00a4\3\2\2\2\u033e\u033f")
buf.write("\7`\2\2\u033f\u00a6\3\2\2\2\u0340\u0341\7#\2\2\u0341\u00a8")
buf.write("\3\2\2\2\u0342\u0343\7\u0080\2\2\u0343\u00aa\3\2\2\2\u0344")
buf.write("\u0345\7A\2\2\u0345\u00ac\3\2\2\2\u0346\u0347\7<\2\2\u0347")
buf.write("\u00ae\3\2\2\2\u0348\u0349\7=\2\2\u0349\u00b0\3\2\2\2")
buf.write("\u034a\u034b\7.\2\2\u034b\u00b2\3\2\2\2\u034c\u034d\7")
buf.write("?\2\2\u034d\u00b4\3\2\2\2\u034e\u034f\7,\2\2\u034f\u0350")
buf.write("\7?\2\2\u0350\u00b6\3\2\2\2\u0351\u0352\7\61\2\2\u0352")
buf.write("\u0353\7?\2\2\u0353\u00b8\3\2\2\2\u0354\u0355\7\'\2\2")
buf.write("\u0355\u0356\7?\2\2\u0356\u00ba\3\2\2\2\u0357\u0358\7")
buf.write("-\2\2\u0358\u0359\7?\2\2\u0359\u00bc\3\2\2\2\u035a\u035b")
buf.write("\7/\2\2\u035b\u035c\7?\2\2\u035c\u00be\3\2\2\2\u035d\u035e")
buf.write("\7>\2\2\u035e\u035f\7>\2\2\u035f\u0360\7?\2\2\u0360\u00c0")
buf.write("\3\2\2\2\u0361\u0362\7@\2\2\u0362\u0363\7@\2\2\u0363\u0364")
buf.write("\7?\2\2\u0364\u00c2\3\2\2\2\u0365\u0366\7(\2\2\u0366\u0367")
buf.write("\7?\2\2\u0367\u00c4\3\2\2\2\u0368\u0369\7`\2\2\u0369\u036a")
buf.write("\7?\2\2\u036a\u00c6\3\2\2\2\u036b\u036c\7~\2\2\u036c\u036d")
buf.write("\7?\2\2\u036d\u00c8\3\2\2\2\u036e\u036f\7?\2\2\u036f\u0370")
buf.write("\7?\2\2\u0370\u00ca\3\2\2\2\u0371\u0372\7#\2\2\u0372\u0373")
buf.write("\7?\2\2\u0373\u00cc\3\2\2\2\u0374\u0375\7/\2\2\u0375\u0376")
buf.write("\7@\2\2\u0376\u00ce\3\2\2\2\u0377\u0378\7\60\2\2\u0378")
buf.write("\u00d0\3\2\2\2\u0379\u037a\7\60\2\2\u037a\u037b\7\60\2")
buf.write("\2\u037b\u037c\7\60\2\2\u037c\u00d2\3\2\2\2\u037d\u0382")
buf.write("\5\u00d5k\2\u037e\u0381\5\u00d5k\2\u037f\u0381\5\u00d9")
buf.write("m\2\u0380\u037e\3\2\2\2\u0380\u037f\3\2\2\2\u0381\u0384")
buf.write("\3\2\2\2\u0382\u0380\3\2\2\2\u0382\u0383\3\2\2\2\u0383")
buf.write("\u00d4\3\2\2\2\u0384\u0382\3\2\2\2\u0385\u0388\5\u00d7")
buf.write("l\2\u0386\u0388\5\u00dbn\2\u0387\u0385\3\2\2\2\u0387\u0386")
buf.write("\3\2\2\2\u0388\u00d6\3\2\2\2\u0389\u038a\t\2\2\2\u038a")
buf.write("\u00d8\3\2\2\2\u038b\u038c\t\3\2\2\u038c\u00da\3\2\2\2")
buf.write("\u038d\u038e\7^\2\2\u038e\u038f\7w\2\2\u038f\u0390\3\2")
buf.write("\2\2\u0390\u0398\5\u00ddo\2\u0391\u0392\7^\2\2\u0392\u0393")
buf.write("\7W\2\2\u0393\u0394\3\2\2\2\u0394\u0395\5\u00ddo\2\u0395")
buf.write("\u0396\5\u00ddo\2\u0396\u0398\3\2\2\2\u0397\u038d\3\2")
buf.write("\2\2\u0397\u0391\3\2\2\2\u0398\u00dc\3\2\2\2\u0399\u039a")
buf.write("\5\u00efx\2\u039a\u039b\5\u00efx\2\u039b\u039c\5\u00ef")
buf.write("x\2\u039c\u039d\5\u00efx\2\u039d\u00de\3\2\2\2\u039e\u03a2")
buf.write("\5\u00e1q\2\u039f\u03a2\5\u00f9}\2\u03a0\u03a2\5\u010f")
buf.write("\u0088\2\u03a1\u039e\3\2\2\2\u03a1\u039f\3\2\2\2\u03a1")
buf.write("\u03a0\3\2\2\2\u03a2\u00e0\3\2\2\2\u03a3\u03a5\5\u00e3")
buf.write("r\2\u03a4\u03a6\5\u00f1y\2\u03a5\u03a4\3\2\2\2\u03a5\u03a6")
buf.write("\3\2\2\2\u03a6\u03b0\3\2\2\2\u03a7\u03a9\5\u00e5s\2\u03a8")
buf.write("\u03aa\5\u00f1y\2\u03a9\u03a8\3\2\2\2\u03a9\u03aa\3\2")
buf.write("\2\2\u03aa\u03b0\3\2\2\2\u03ab\u03ad\5\u00e7t\2\u03ac")
buf.write("\u03ae\5\u00f1y\2\u03ad\u03ac\3\2\2\2\u03ad\u03ae\3\2")
buf.write("\2\2\u03ae\u03b0\3\2\2\2\u03af\u03a3\3\2\2\2\u03af\u03a7")
buf.write("\3\2\2\2\u03af\u03ab\3\2\2\2\u03b0\u00e2\3\2\2\2\u03b1")
buf.write("\u03b5\5\u00ebv\2\u03b2\u03b4\5\u00d9m\2\u03b3\u03b2\3")
buf.write("\2\2\2\u03b4\u03b7\3\2\2\2\u03b5\u03b3\3\2\2\2\u03b5\u03b6")
buf.write("\3\2\2\2\u03b6\u00e4\3\2\2\2\u03b7\u03b5\3\2\2\2\u03b8")
buf.write("\u03bc\7\62\2\2\u03b9\u03bb\5\u00edw\2\u03ba\u03b9\3\2")
buf.write("\2\2\u03bb\u03be\3\2\2\2\u03bc\u03ba\3\2\2\2\u03bc\u03bd")
buf.write("\3\2\2\2\u03bd\u00e6\3\2\2\2\u03be\u03bc\3\2\2\2\u03bf")
buf.write("\u03c1\5\u00e9u\2\u03c0\u03c2\5\u00efx\2\u03c1\u03c0\3")
buf.write("\2\2\2\u03c2\u03c3\3\2\2\2\u03c3\u03c1\3\2\2\2\u03c3\u03c4")
buf.write("\3\2\2\2\u03c4\u00e8\3\2\2\2\u03c5\u03c6\7\62\2\2\u03c6")
buf.write("\u03c7\t\4\2\2\u03c7\u00ea\3\2\2\2\u03c8\u03c9\t\5\2\2")
buf.write("\u03c9\u00ec\3\2\2\2\u03ca\u03cb\t\6\2\2\u03cb\u00ee\3")
buf.write("\2\2\2\u03cc\u03cd\t\7\2\2\u03cd\u00f0\3\2\2\2\u03ce\u03d0")
buf.write("\5\u00f3z\2\u03cf\u03d1\5\u00f5{\2\u03d0\u03cf\3\2\2\2")
buf.write("\u03d0\u03d1\3\2\2\2\u03d1\u03de\3\2\2\2\u03d2\u03d3\5")
buf.write("\u00f3z\2\u03d3\u03d4\5\u00f7|\2\u03d4\u03de\3\2\2\2\u03d5")
buf.write("\u03d7\5\u00f5{\2\u03d6\u03d8\5\u00f3z\2\u03d7\u03d6\3")
buf.write("\2\2\2\u03d7\u03d8\3\2\2\2\u03d8\u03de\3\2\2\2\u03d9\u03db")
buf.write("\5\u00f7|\2\u03da\u03dc\5\u00f3z\2\u03db\u03da\3\2\2\2")
buf.write("\u03db\u03dc\3\2\2\2\u03dc\u03de\3\2\2\2\u03dd\u03ce\3")
buf.write("\2\2\2\u03dd\u03d2\3\2\2\2\u03dd\u03d5\3\2\2\2\u03dd\u03d9")
buf.write("\3\2\2\2\u03de\u00f2\3\2\2\2\u03df\u03e0\t\b\2\2\u03e0")
buf.write("\u00f4\3\2\2\2\u03e1\u03e2\t\t\2\2\u03e2\u00f6\3\2\2\2")
buf.write("\u03e3\u03e4\7n\2\2\u03e4\u03e8\7n\2\2\u03e5\u03e6\7N")
buf.write("\2\2\u03e6\u03e8\7N\2\2\u03e7\u03e3\3\2\2\2\u03e7\u03e5")
buf.write("\3\2\2\2\u03e8\u00f8\3\2\2\2\u03e9\u03ec\5\u00fb~\2\u03ea")
buf.write("\u03ec\5\u00fd\177\2\u03eb\u03e9\3\2\2\2\u03eb\u03ea\3")
buf.write("\2\2\2\u03ec\u00fa\3\2\2\2\u03ed\u03ef\5\u00ff\u0080\2")
buf.write("\u03ee\u03f0\5\u0101\u0081\2\u03ef\u03ee\3\2\2\2\u03ef")
buf.write("\u03f0\3\2\2\2\u03f0\u03f2\3\2\2\2\u03f1\u03f3\5\u010d")
buf.write("\u0087\2\u03f2\u03f1\3\2\2\2\u03f2\u03f3\3\2\2\2\u03f3")
buf.write("\u03fa\3\2\2\2\u03f4\u03f5\5\u0105\u0083\2\u03f5\u03f7")
buf.write("\5\u0101\u0081\2\u03f6\u03f8\5\u010d\u0087\2\u03f7\u03f6")
buf.write("\3\2\2\2\u03f7\u03f8\3\2\2\2\u03f8\u03fa\3\2\2\2\u03f9")
buf.write("\u03ed\3\2\2\2\u03f9\u03f4\3\2\2\2\u03fa\u00fc\3\2\2\2")
buf.write("\u03fb\u03fc\5\u00e9u\2\u03fc\u03fd\5\u0107\u0084\2\u03fd")
buf.write("\u03ff\5\u0109\u0085\2\u03fe\u0400\5\u010d\u0087\2\u03ff")
buf.write("\u03fe\3\2\2\2\u03ff\u0400\3\2\2\2\u0400\u0408\3\2\2\2")
buf.write("\u0401\u0402\5\u00e9u\2\u0402\u0403\5\u010b\u0086\2\u0403")
buf.write("\u0405\5\u0109\u0085\2\u0404\u0406\5\u010d\u0087\2\u0405")
buf.write("\u0404\3\2\2\2\u0405\u0406\3\2\2\2\u0406\u0408\3\2\2\2")
buf.write("\u0407\u03fb\3\2\2\2\u0407\u0401\3\2\2\2\u0408\u00fe\3")
buf.write("\2\2\2\u0409\u040b\5\u0105\u0083\2\u040a\u0409\3\2\2\2")
buf.write("\u040a\u040b\3\2\2\2\u040b\u040c\3\2\2\2\u040c\u040d\7")
buf.write("\60\2\2\u040d\u0412\5\u0105\u0083\2\u040e\u040f\5\u0105")
buf.write("\u0083\2\u040f\u0410\7\60\2\2\u0410\u0412\3\2\2\2\u0411")
buf.write("\u040a\3\2\2\2\u0411\u040e\3\2\2\2\u0412\u0100\3\2\2\2")
buf.write("\u0413\u0415\7g\2\2\u0414\u0416\5\u0103\u0082\2\u0415")
buf.write("\u0414\3\2\2\2\u0415\u0416\3\2\2\2\u0416\u0417\3\2\2\2")
buf.write("\u0417\u041e\5\u0105\u0083\2\u0418\u041a\7G\2\2\u0419")
buf.write("\u041b\5\u0103\u0082\2\u041a\u0419\3\2\2\2\u041a\u041b")
buf.write("\3\2\2\2\u041b\u041c\3\2\2\2\u041c\u041e\5\u0105\u0083")
buf.write("\2\u041d\u0413\3\2\2\2\u041d\u0418\3\2\2\2\u041e\u0102")
buf.write("\3\2\2\2\u041f\u0420\t\n\2\2\u0420\u0104\3\2\2\2\u0421")
buf.write("\u0423\5\u00d9m\2\u0422\u0421\3\2\2\2\u0423\u0424\3\2")
buf.write("\2\2\u0424\u0422\3\2\2\2\u0424\u0425\3\2\2\2\u0425\u0106")
buf.write("\3\2\2\2\u0426\u0428\5\u010b\u0086\2\u0427\u0426\3\2\2")
buf.write("\2\u0427\u0428\3\2\2\2\u0428\u0429\3\2\2\2\u0429\u042a")
buf.write("\7\60\2\2\u042a\u042f\5\u010b\u0086\2\u042b\u042c\5\u010b")
buf.write("\u0086\2\u042c\u042d\7\60\2\2\u042d\u042f\3\2\2\2\u042e")
buf.write("\u0427\3\2\2\2\u042e\u042b\3\2\2\2\u042f\u0108\3\2\2\2")
buf.write("\u0430\u0432\7r\2\2\u0431\u0433\5\u0103\u0082\2\u0432")
buf.write("\u0431\3\2\2\2\u0432\u0433\3\2\2\2\u0433\u0434\3\2\2\2")
buf.write("\u0434\u043b\5\u0105\u0083\2\u0435\u0437\7R\2\2\u0436")
buf.write("\u0438\5\u0103\u0082\2\u0437\u0436\3\2\2\2\u0437\u0438")
buf.write("\3\2\2\2\u0438\u0439\3\2\2\2\u0439\u043b\5\u0105\u0083")
buf.write("\2\u043a\u0430\3\2\2\2\u043a\u0435\3\2\2\2\u043b\u010a")
buf.write("\3\2\2\2\u043c\u043e\5\u00efx\2\u043d\u043c\3\2\2\2\u043e")
buf.write("\u043f\3\2\2\2\u043f\u043d\3\2\2\2\u043f\u0440\3\2\2\2")
buf.write("\u0440\u010c\3\2\2\2\u0441\u0442\t\13\2\2\u0442\u010e")
buf.write("\3\2\2\2\u0443\u0444\7)\2\2\u0444\u0445\5\u0111\u0089")
buf.write("\2\u0445\u0446\7)\2\2\u0446\u045a\3\2\2\2\u0447\u0448")
buf.write("\7N\2\2\u0448\u0449\7)\2\2\u0449\u044a\3\2\2\2\u044a\u044b")
buf.write("\5\u0111\u0089\2\u044b\u044c\7)\2\2\u044c\u045a\3\2\2")
buf.write("\2\u044d\u044e\7w\2\2\u044e\u044f\7)\2\2\u044f\u0450\3")
buf.write("\2\2\2\u0450\u0451\5\u0111\u0089\2\u0451\u0452\7)\2\2")
buf.write("\u0452\u045a\3\2\2\2\u0453\u0454\7W\2\2\u0454\u0455\7")
buf.write(")\2\2\u0455\u0456\3\2\2\2\u0456\u0457\5\u0111\u0089\2")
buf.write("\u0457\u0458\7)\2\2\u0458\u045a\3\2\2\2\u0459\u0443\3")
buf.write("\2\2\2\u0459\u0447\3\2\2\2\u0459\u044d\3\2\2\2\u0459\u0453")
buf.write("\3\2\2\2\u045a\u0110\3\2\2\2\u045b\u045d\5\u0113\u008a")
buf.write("\2\u045c\u045b\3\2\2\2\u045d\u045e\3\2\2\2\u045e\u045c")
buf.write("\3\2\2\2\u045e\u045f\3\2\2\2\u045f\u0112\3\2\2\2\u0460")
buf.write("\u0463\n\f\2\2\u0461\u0463\5\u0115\u008b\2\u0462\u0460")
buf.write("\3\2\2\2\u0462\u0461\3\2\2\2\u0463\u0114\3\2\2\2\u0464")
buf.write("\u0469\5\u0117\u008c\2\u0465\u0469\5\u0119\u008d\2\u0466")
buf.write("\u0469\5\u011b\u008e\2\u0467\u0469\5\u00dbn\2\u0468\u0464")
buf.write("\3\2\2\2\u0468\u0465\3\2\2\2\u0468\u0466\3\2\2\2\u0468")
buf.write("\u0467\3\2\2\2\u0469\u0116\3\2\2\2\u046a\u046b\7^\2\2")
buf.write("\u046b\u046c\t\r\2\2\u046c\u0118\3\2\2\2\u046d\u046e\7")
buf.write("^\2\2\u046e\u0479\5\u00edw\2\u046f\u0470\7^\2\2\u0470")
buf.write("\u0471\5\u00edw\2\u0471\u0472\5\u00edw\2\u0472\u0479\3")
buf.write("\2\2\2\u0473\u0474\7^\2\2\u0474\u0475\5\u00edw\2\u0475")
buf.write("\u0476\5\u00edw\2\u0476\u0477\5\u00edw\2\u0477\u0479\3")
buf.write("\2\2\2\u0478\u046d\3\2\2\2\u0478\u046f\3\2\2\2\u0478\u0473")
buf.write("\3\2\2\2\u0479\u011a\3\2\2\2\u047a\u047b\7^\2\2\u047b")
buf.write("\u047c\7z\2\2\u047c\u047e\3\2\2\2\u047d\u047f\5\u00ef")
buf.write("x\2\u047e\u047d\3\2\2\2\u047f\u0480\3\2\2\2\u0480\u047e")
buf.write("\3\2\2\2\u0480\u0481\3\2\2\2\u0481\u011c\3\2\2\2\u0482")
buf.write("\u0484\5\u011f\u0090\2\u0483\u0482\3\2\2\2\u0483\u0484")
buf.write("\3\2\2\2\u0484\u0485\3\2\2\2\u0485\u0487\7$\2\2\u0486")
buf.write("\u0488\5\u0121\u0091\2\u0487\u0486\3\2\2\2\u0487\u0488")
buf.write("\3\2\2\2\u0488\u0489\3\2\2\2\u0489\u048a\7$\2\2\u048a")
buf.write("\u011e\3\2\2\2\u048b\u048c\7w\2\2\u048c\u048f\7:\2\2\u048d")
buf.write("\u048f\t\16\2\2\u048e\u048b\3\2\2\2\u048e\u048d\3\2\2")
buf.write("\2\u048f\u0120\3\2\2\2\u0490\u0492\5\u0123\u0092\2\u0491")
buf.write("\u0490\3\2\2\2\u0492\u0493\3\2\2\2\u0493\u0491\3\2\2\2")
buf.write("\u0493\u0494\3\2\2\2\u0494\u0122\3\2\2\2\u0495\u0498\n")
buf.write("\17\2\2\u0496\u0498\5\u0115\u008b\2\u0497\u0495\3\2\2")
buf.write("\2\u0497\u0496\3\2\2\2\u0498\u0124\3\2\2\2\u0499\u049b")
buf.write("\7%\2\2\u049a\u049c\5\u0129\u0095\2\u049b\u049a\3\2\2")
buf.write("\2\u049b\u049c\3\2\2\2\u049c\u049d\3\2\2\2\u049d\u049f")
buf.write("\5\u00e3r\2\u049e\u04a0\5\u0129\u0095\2\u049f\u049e\3")
buf.write("\2\2\2\u049f\u04a0\3\2\2\2\u04a0\u04a1\3\2\2\2\u04a1\u04a5")
buf.write("\5\u011d\u008f\2\u04a2\u04a4\n\20\2\2\u04a3\u04a2\3\2")
buf.write("\2\2\u04a4\u04a7\3\2\2\2\u04a5\u04a3\3\2\2\2\u04a5\u04a6")
buf.write("\3\2\2\2\u04a6\u04a8\3\2\2\2\u04a7\u04a5\3\2\2\2\u04a8")
buf.write("\u04a9\b\u0093\2\2\u04a9\u0126\3\2\2\2\u04aa\u04ac\7%")
buf.write("\2\2\u04ab\u04ad\5\u0129\u0095\2\u04ac\u04ab\3\2\2\2\u04ac")
buf.write("\u04ad\3\2\2\2\u04ad\u04ae\3\2\2\2\u04ae\u04af\7r\2\2")
buf.write("\u04af\u04b0\7t\2\2\u04b0\u04b1\7c\2\2\u04b1\u04b2\7i")
buf.write("\2\2\u04b2\u04b3\7o\2\2\u04b3\u04b4\7c\2\2\u04b4\u04b5")
buf.write("\3\2\2\2\u04b5\u04b9\5\u0129\u0095\2\u04b6\u04b8\n\20")
buf.write("\2\2\u04b7\u04b6\3\2\2\2\u04b8\u04bb\3\2\2\2\u04b9\u04b7")
buf.write("\3\2\2\2\u04b9\u04ba\3\2\2\2\u04ba\u04bc\3\2\2\2\u04bb")
buf.write("\u04b9\3\2\2\2\u04bc\u04bd\b\u0094\2\2\u04bd\u0128\3\2")
buf.write("\2\2\u04be\u04c0\t\21\2\2\u04bf\u04be\3\2\2\2\u04c0\u04c1")
buf.write("\3\2\2\2\u04c1\u04bf\3\2\2\2\u04c1\u04c2\3\2\2\2\u04c2")
buf.write("\u04c3\3\2\2\2\u04c3\u04c4\b\u0095\2\2\u04c4\u012a\3\2")
buf.write("\2\2\u04c5\u04c7\7\17\2\2\u04c6\u04c8\7\f\2\2\u04c7\u04c6")
buf.write("\3\2\2\2\u04c7\u04c8\3\2\2\2\u04c8\u04cb\3\2\2\2\u04c9")
buf.write("\u04cb\7\f\2\2\u04ca\u04c5\3\2\2\2\u04ca\u04c9\3\2\2\2")
buf.write("\u04cb\u04cc\3\2\2\2\u04cc\u04cd\b\u0096\2\2\u04cd\u012c")
buf.write("\3\2\2\2\u04ce\u04cf\7\61\2\2\u04cf\u04d0\7,\2\2\u04d0")
buf.write("\u04d4\3\2\2\2\u04d1\u04d3\13\2\2\2\u04d2\u04d1\3\2\2")
buf.write("\2\u04d3\u04d6\3\2\2\2\u04d4\u04d5\3\2\2\2\u04d4\u04d2")
buf.write("\3\2\2\2\u04d5\u04d7\3\2\2\2\u04d6\u04d4\3\2\2\2\u04d7")
buf.write("\u04d8\7,\2\2\u04d8\u04d9\7\61\2\2\u04d9\u04da\3\2\2\2")
buf.write("\u04da\u04db\b\u0097\2\2\u04db\u012e\3\2\2\2\u04dc\u04dd")
buf.write("\7\61\2\2\u04dd\u04de\7\61\2\2\u04de\u04e2\3\2\2\2\u04df")
buf.write("\u04e1\n\20\2\2\u04e0\u04df\3\2\2\2\u04e1\u04e4\3\2\2")
buf.write("\2\u04e2\u04e0\3\2\2\2\u04e2\u04e3\3\2\2\2\u04e3\u04e5")
buf.write("\3\2\2\2\u04e4\u04e2\3\2\2\2\u04e5\u04e6\b\u0098\2\2\u04e6")
buf.write("\u0130\3\2\2\2=\2\u0380\u0382\u0387\u0397\u03a1\u03a5")
buf.write("\u03a9\u03ad\u03af\u03b5\u03bc\u03c3\u03d0\u03d7\u03db")
buf.write("\u03dd\u03e7\u03eb\u03ef\u03f2\u03f7\u03f9\u03ff\u0405")
buf.write("\u0407\u040a\u0411\u0415\u041a\u041d\u0424\u0427\u042e")
buf.write("\u0432\u0437\u043a\u043f\u0459\u045e\u0462\u0468\u0478")
buf.write("\u0480\u0483\u0487\u048e\u0493\u0497\u049b\u049f\u04a5")
buf.write("\u04ac\u04b9\u04c1\u04c7\u04ca\u04d4\u04e2\3\b\2\2")
return buf.getvalue()
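# Note (descriptive comment, not part of the generated file): the buf.write
# calls above reproduce ANTLR's serialized ATN (augmented transition network)
# for this lexer as one long escaped string. The ATNDeserializer call just
# below rebuilds it into the state machine that LexerATNSimulator walks at
# tokenization time.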
class CLexer(Lexer):
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
T__0 = 1
T__1 = 2
T__2 = 3
T__3 = 4
T__4 = 5
T__5 = 6
T__6 = 7
T__7 = 8
T__8 = 9
T__9 = 10
T__10 = 11
T__11 = 12
T__12 = 13
T__13 = 14
Auto = 15
Break = 16
Case = 17
Char = 18
Const = 19
Continue = 20
Default = 21
Do = 22
Double = 23
Else = 24
Enum = 25
Extern = 26
Float = 27
For = 28
Goto = 29
If = 30
Inline = 31
Int = 32
Long = 33
Register = 34
Restrict = 35
Return = 36
Short = 37
Signed = 38
Sizeof = 39
Static = 40
Struct = 41
Switch = 42
Typedef = 43
Union = 44
Unsigned = 45
Void = 46
Volatile = 47
While = 48
Alignas = 49
Alignof = 50
Atomic = 51
Bool = 52
Complex = 53
Generic = 54
Imaginary = 55
Noreturn = 56
StaticAssert = 57
ThreadLocal = 58
LeftParen = 59
RightParen = 60
LeftBracket = 61
RightBracket = 62
LeftBrace = 63
RightBrace = 64
Less = 65
LessEqual = 66
Greater = 67
GreaterEqual = 68
LeftShift = 69
RightShift = 70
Plus = 71
PlusPlus = 72
Minus = 73
MinusMinus = 74
Star = 75
Div = 76
Mod = 77
And = 78
Or = 79
AndAnd = 80
OrOr = 81
Caret = 82
Not = 83
Tilde = 84
Question = 85
Colon = 86
Semi = 87
Comma = 88
Assign = 89
StarAssign = 90
DivAssign = 91
ModAssign = 92
PlusAssign = 93
MinusAssign = 94
LeftShiftAssign = 95
RightShiftAssign = 96
AndAssign = 97
XorAssign = 98
OrAssign = 99
Equal = 100
NotEqual = 101
Arrow = 102
Dot = 103
Ellipsis = 104
Identifier = 105
Constant = 106
StringLiteral = 107
LineDirective = 108
PragmaDirective = 109
Whitespace = 110
Newline = 111
BlockComment = 112
LineComment = 113
modeNames = [ "DEFAULT_MODE" ]
literalNames = [ "",
"'__extension__'", "'__builtin_va_arg'", "'__builtin_offsetof'",
"'__m128'", "'__m128d'", "'__m128i'", "'__typeof__'", "'__inline__'",
"'__stdcall'", "'__declspec'", "'__asm'", "'__attribute__'",
"'__asm__'", "'__volatile__'", "'auto'", "'break'", "'case'",
"'char'", "'const'", "'continue'", "'default'", "'do'", "'double'",
"'else'", "'enum'", "'extern'", "'float'", "'for'", "'goto'",
"'if'", "'inline'", "'int'", "'long'", "'register'", "'restrict'",
"'return'", "'short'", "'signed'", "'sizeof'", "'static'", "'struct'",
"'switch'", "'typedef'", "'union'", "'unsigned'", "'void'",
"'volatile'", "'while'", "'_Alignas'", "'_Alignof'", "'_Atomic'",
"'_Bool'", "'_Complex'", "'_Generic'", "'_Imaginary'", "'_Noreturn'",
"'_Static_assert'", "'_Thread_local'", "'('", "')'", "'['",
"']'", "'{'", "'}'", "'<'", "'<='", "'>'", "'>='", "'<<'", "'>>'",
"'+'", "'++'", "'-'", "'--'", "'*'", "'/'", "'%'", "'&'", "'|'",
"'&&'", "'||'", "'^'", "'!'", "'~'", "'?'", "':'", "';'", "','",
"'='", "'*='", "'/='", "'%='", "'+='", "'-='", "'<<='", "'>>='",
"'&='", "'^='", "'|='", "'=='", "'!='", "'->'", "'.'", "'...'" ]
symbolicNames = [ "",
"Auto", "Break", "Case", "Char", "Const", "Continue", "Default",
"Do", "Double", "Else", "Enum", "Extern", "Float", "For", "Goto",
"If", "Inline", "Int", "Long", "Register", "Restrict", "Return",
"Short", "Signed", "Sizeof", "Static", "Struct", "Switch", "Typedef",
"Union", "Unsigned", "Void", "Volatile", "While", "Alignas",
"Alignof", "Atomic", "Bool", "Complex", "Generic", "Imaginary",
"Noreturn", "StaticAssert", "ThreadLocal", "LeftParen", "RightParen",
"LeftBracket", "RightBracket", "LeftBrace", "RightBrace", "Less",
"LessEqual", "Greater", "GreaterEqual", "LeftShift", "RightShift",
"Plus", "PlusPlus", "Minus", "MinusMinus", "Star", "Div", "Mod",
"And", "Or", "AndAnd", "OrOr", "Caret", "Not", "Tilde", "Question",
"Colon", "Semi", "Comma", "Assign", "StarAssign", "DivAssign",
"ModAssign", "PlusAssign", "MinusAssign", "LeftShiftAssign",
"RightShiftAssign", "AndAssign", "XorAssign", "OrAssign", "Equal",
"NotEqual", "Arrow", "Dot", "Ellipsis", "Identifier", "Constant",
"StringLiteral", "LineDirective", "PragmaDirective", "Whitespace",
"Newline", "BlockComment", "LineComment" ]
ruleNames = [ "T__0", "T__1", "T__2", "T__3", "T__4", "T__5", "T__6",
"T__7", "T__8", "T__9", "T__10", "T__11", "T__12", "T__13",
"Auto", "Break", "Case", "Char", "Const", "Continue",
"Default", "Do", "Double", "Else", "Enum", "Extern", "Float",
"For", "Goto", "If", "Inline", "Int", "Long", "Register",
"Restrict", "Return", "Short", "Signed", "Sizeof", "Static",
"Struct", "Switch", "Typedef", "Union", "Unsigned", "Void",
"Volatile", "While", "Alignas", "Alignof", "Atomic", "Bool",
"Complex", "Generic", "Imaginary", "Noreturn", "StaticAssert",
"ThreadLocal", "LeftParen", "RightParen", "LeftBracket",
"RightBracket", "LeftBrace", "RightBrace", "Less", "LessEqual",
"Greater", "GreaterEqual", "LeftShift", "RightShift",
"Plus", "PlusPlus", "Minus", "MinusMinus", "Star", "Div",
"Mod", "And", "Or", "AndAnd", "OrOr", "Caret", "Not",
"Tilde", "Question", "Colon", "Semi", "Comma", "Assign",
"StarAssign", "DivAssign", "ModAssign", "PlusAssign",
"MinusAssign", "LeftShiftAssign", "RightShiftAssign",
"AndAssign", "XorAssign", "OrAssign", "Equal", "NotEqual",
"Arrow", "Dot", "Ellipsis", "Identifier", "IdentifierNondigit",
"Nondigit", "Digit", "UniversalCharacterName", "HexQuad",
"Constant", "IntegerConstant", "DecimalConstant", "OctalConstant",
"HexadecimalConstant", "HexadecimalPrefix", "NonzeroDigit",
"OctalDigit", "HexadecimalDigit", "IntegerSuffix", "UnsignedSuffix",
"LongSuffix", "LongLongSuffix", "FloatingConstant", "DecimalFloatingConstant",
"HexadecimalFloatingConstant", "FractionalConstant", "ExponentPart",
"Sign", "DigitSequence", "HexadecimalFractionalConstant",
"BinaryExponentPart", "HexadecimalDigitSequence", "FloatingSuffix",
"CharacterConstant", "CCharSequence", "CChar", "EscapeSequence",
"SimpleEscapeSequence", "OctalEscapeSequence", "HexadecimalEscapeSequence",
"StringLiteral", "EncodingPrefix", "SCharSequence", "SChar",
"LineDirective", "PragmaDirective", "Whitespace", "Newline",
"BlockComment", "LineComment" ]
grammarFileName = "C.bnf"
def __init__(self, input=None):
super().__init__(input)
self.checkVersion("4.9")
self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
self._actions = None
self._predicates = None
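# Usage sketch (illustrative, appended here for reference; not part of the
# generated file): driving CLexer through the runtime's InputStream and
# CommonTokenStream classes. The sample C source string is made up. Implicit
# tokens (types 1-14) print as "<INVALID>" since they have no symbolic name.
if __name__ == '__main__':
    from antlr4 import InputStream, CommonTokenStream
    lexer = CLexer(InputStream("int main(void) { return 0; }"))
    stream = CommonTokenStream(lexer)
    stream.fill()  # tokenize the whole input eagerly
    for tok in stream.tokens:
        # symbolicNames is indexed by token type; EOF has type -1
        name = CLexer.symbolicNames[tok.type] if tok.type > 0 else "EOF"
        print(name, repr(tok.text))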
# File: antlr4-python3-runtime-4.9.1/test/parser/cparser.py
# Generated from C.bnf by ANTLR 4.5.1
# encoding: utf-8
from antlr4 import *
from io import StringIO
def serializedATN():
with StringIO() as buf:
buf.write("\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\3s")
buf.write("\u04e9\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7")
buf.write("\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r\4\16")
buf.write("\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23\t\23")
buf.write("\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30\4\31")
buf.write("\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36\t\36")
buf.write("\4\37\t\37\4 \t \4!\t!\4\"\t\"\4#\t#\4$\t$\4%\t%\4&\t")
buf.write("&\4\'\t\'\4(\t(\4)\t)\4*\t*\4+\t+\4,\t,\4-\t-\4.\t.\4")
buf.write("/\t/\4\60\t\60\4\61\t\61\4\62\t\62\4\63\t\63\4\64\t\64")
buf.write("\4\65\t\65\4\66\t\66\4\67\t\67\48\t8\49\t9\4:\t:\4;\t")
buf.write(";\4<\t<\4=\t=\4>\t>\4?\t?\4@\t@\4A\tA\4B\tB\4C\tC\4D\t")
buf.write("D\4E\tE\4F\tF\4G\tG\4H\tH\4I\tI\4J\tJ\4K\tK\4L\tL\4M\t")
buf.write("M\4N\tN\4O\tO\4P\tP\4Q\tQ\4R\tR\4S\tS\4T\tT\4U\tU\3\2")
buf.write("\3\2\3\2\6\2\u00ae\n\2\r\2\16\2\u00af\3\2\3\2\3\2\3\2")
buf.write("\3\2\3\2\5\2\u00b8\n\2\3\2\3\2\3\2\3\2\3\2\3\2\3\2\3\2")
buf.write("\3\2\3\2\3\2\3\2\3\2\3\2\3\2\3\2\3\2\3\2\5\2\u00cc\n\2")
buf.write("\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\4\3\4\3\4\3\4\3\4\3\4\7")
buf.write("\4\u00db\n\4\f\4\16\4\u00de\13\4\3\5\3\5\3\5\3\5\3\5\3")
buf.write("\5\3\5\5\5\u00e7\n\5\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3")
buf.write("\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6")
buf.write("\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\5\6\u010b")
buf.write("\n\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\5\6\u0115\n\6\3\6")
buf.write("\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\7\6\u0122\n\6")
buf.write("\f\6\16\6\u0125\13\6\3\7\3\7\3\7\3\7\3\7\3\7\7\7\u012d")
buf.write("\n\7\f\7\16\7\u0130\13\7\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3")
buf.write("\b\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\b")
buf.write("\3\b\5\b\u0148\n\b\3\t\3\t\3\n\3\n\3\n\3\n\3\n\3\n\3\n")
buf.write("\3\n\3\n\3\n\3\n\3\n\5\n\u0158\n\n\3\13\3\13\3\13\3\13")
buf.write("\3\13\3\13\3\13\3\13\3\13\3\13\3\13\3\13\7\13\u0166\n")
buf.write("\13\f\13\16\13\u0169\13\13\3\f\3\f\3\f\3\f\3\f\3\f\3\f")
buf.write("\3\f\3\f\7\f\u0174\n\f\f\f\16\f\u0177\13\f\3\r\3\r\3\r")
buf.write("\3\r\3\r\3\r\3\r\3\r\3\r\7\r\u0182\n\r\f\r\16\r\u0185")
buf.write("\13\r\3\16\3\16\3\16\3\16\3\16\3\16\3\16\3\16\3\16\3\16")
buf.write("\3\16\3\16\3\16\3\16\3\16\7\16\u0196\n\16\f\16\16\16\u0199")
buf.write("\13\16\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17\7")
buf.write("\17\u01a4\n\17\f\17\16\17\u01a7\13\17\3\20\3\20\3\20\3")
buf.write("\20\3\20\3\20\7\20\u01af\n\20\f\20\16\20\u01b2\13\20\3")
buf.write("\21\3\21\3\21\3\21\3\21\3\21\7\21\u01ba\n\21\f\21\16\21")
buf.write("\u01bd\13\21\3\22\3\22\3\22\3\22\3\22\3\22\7\22\u01c5")
buf.write("\n\22\f\22\16\22\u01c8\13\22\3\23\3\23\3\23\3\23\3\23")
buf.write("\3\23\7\23\u01d0\n\23\f\23\16\23\u01d3\13\23\3\24\3\24")
buf.write("\3\24\3\24\3\24\3\24\7\24\u01db\n\24\f\24\16\24\u01de")
buf.write("\13\24\3\25\3\25\3\25\3\25\3\25\3\25\5\25\u01e6\n\25\3")
buf.write("\26\3\26\3\26\3\26\3\26\5\26\u01ed\n\26\3\27\3\27\3\30")
buf.write("\3\30\3\30\3\30\3\30\3\30\7\30\u01f7\n\30\f\30\16\30\u01fa")
buf.write("\13\30\3\31\3\31\3\32\3\32\5\32\u0200\n\32\3\32\3\32\3")
buf.write("\32\5\32\u0205\n\32\3\33\6\33\u0208\n\33\r\33\16\33\u0209")
buf.write("\3\34\6\34\u020d\n\34\r\34\16\34\u020e\3\35\3\35\3\35")
buf.write("\3\35\3\35\5\35\u0216\n\35\3\36\3\36\3\36\3\36\3\36\3")
buf.write("\36\7\36\u021e\n\36\f\36\16\36\u0221\13\36\3\37\3\37\3")
buf.write("\37\3\37\3\37\5\37\u0228\n\37\3 \3 \3!\3!\3!\3!\3!\3!")
buf.write("\3!\3!\3!\3!\3!\3!\3!\3!\5!\u023a\n!\3\"\3\"\5\"\u023e")
buf.write("\n\"\3\"\3\"\3\"\3\"\3\"\3\"\3\"\5\"\u0247\n\"\3#\3#\3")
buf.write("$\3$\3$\3$\3$\7$\u0250\n$\f$\16$\u0253\13$\3%\3%\5%\u0257")
buf.write("\n%\3%\3%\3%\5%\u025c\n%\3&\3&\5&\u0260\n&\3&\3&\5&\u0264")
buf.write("\n&\5&\u0266\n&\3\'\3\'\3\'\3\'\3\'\3\'\7\'\u026e\n\'")
buf.write("\f\'\16\'\u0271\13\'\3(\3(\5(\u0275\n(\3(\3(\5(\u0279")
buf.write("\n(\3)\3)\5)\u027d\n)\3)\3)\3)\3)\3)\3)\5)\u0285\n)\3")
buf.write(")\3)\3)\3)\3)\3)\3)\5)\u028e\n)\3*\3*\3*\3*\3*\3*\7*\u0296")
buf.write("\n*\f*\16*\u0299\13*\3+\3+\3+\3+\3+\5+\u02a0\n+\3,\3,")
buf.write("\3-\3-\3-\3-\3-\3.\3.\3/\3/\3/\3/\3/\3/\5/\u02b1\n/\3")
buf.write("\60\3\60\3\60\3\60\3\60\3\60\3\60\3\60\3\60\3\60\5\60")
buf.write("\u02bd\n\60\3\61\5\61\u02c0\n\61\3\61\3\61\7\61\u02c4")
buf.write("\n\61\f\61\16\61\u02c7\13\61\3\62\3\62\3\62\3\62\3\62")
buf.write("\3\62\5\62\u02cf\n\62\3\62\3\62\3\62\5\62\u02d4\n\62\3")
buf.write("\62\5\62\u02d7\n\62\3\62\3\62\3\62\3\62\3\62\5\62\u02de")
buf.write("\n\62\3\62\3\62\3\62\3\62\3\62\3\62\3\62\3\62\3\62\3\62")
buf.write("\3\62\3\62\3\62\5\62\u02ed\n\62\3\62\3\62\3\62\3\62\3")
buf.write("\62\3\62\3\62\3\62\3\62\3\62\5\62\u02f9\n\62\3\62\7\62")
buf.write("\u02fc\n\62\f\62\16\62\u02ff\13\62\3\63\3\63\3\63\6\63")
buf.write("\u0304\n\63\r\63\16\63\u0305\3\63\3\63\5\63\u030a\n\63")
buf.write("\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\65\3\65\3\65\7\65")
buf.write("\u0316\n\65\f\65\16\65\u0319\13\65\3\65\5\65\u031c\n\65")
buf.write("\3\66\3\66\3\66\5\66\u0321\n\66\3\66\5\66\u0324\n\66\3")
buf.write("\66\5\66\u0327\n\66\3\67\3\67\3\67\3\67\3\67\7\67\u032e")
buf.write("\n\67\f\67\16\67\u0331\13\67\38\38\58\u0335\n8\38\38\5")
buf.write("8\u0339\n8\38\38\38\58\u033e\n8\38\38\58\u0342\n8\38\5")
buf.write("8\u0345\n8\39\39\39\39\39\79\u034c\n9\f9\169\u034f\13")
buf.write("9\3:\3:\3:\3:\3:\5:\u0356\n:\3;\3;\3;\3;\3;\3;\7;\u035e")
buf.write("\n;\f;\16;\u0361\13;\3<\3<\3<\3<\3<\5<\u0368\n<\5<\u036a")
buf.write("\n<\3=\3=\3=\3=\3=\3=\7=\u0372\n=\f=\16=\u0375\13=\3>")
buf.write("\3>\5>\u0379\n>\3?\3?\5?\u037d\n?\3?\3?\7?\u0381\n?\f")
buf.write("?\16?\u0384\13?\5?\u0386\n?\3@\3@\3@\3@\3@\7@\u038d\n")
buf.write("@\f@\16@\u0390\13@\3@\3@\5@\u0394\n@\3@\5@\u0397\n@\3")
buf.write("@\3@\3@\3@\5@\u039d\n@\3@\3@\3@\3@\3@\3@\3@\3@\3@\3@\3")
buf.write("@\3@\3@\3@\5@\u03ad\n@\3@\3@\7@\u03b1\n@\f@\16@\u03b4")
buf.write("\13@\5@\u03b6\n@\3@\3@\3@\5@\u03bb\n@\3@\5@\u03be\n@\3")
buf.write("@\3@\3@\3@\3@\5@\u03c5\n@\3@\3@\3@\3@\3@\3@\3@\3@\3@\3")
buf.write("@\3@\3@\3@\3@\3@\3@\3@\5@\u03d8\n@\3@\3@\7@\u03dc\n@\f")
buf.write("@\16@\u03df\13@\7@\u03e1\n@\f@\16@\u03e4\13@\3A\3A\3B")
buf.write("\3B\3B\3B\3B\3B\3B\3B\3B\3B\5B\u03f2\nB\3C\3C\5C\u03f6")
buf.write("\nC\3C\3C\3C\3C\3C\5C\u03fd\nC\3C\7C\u0400\nC\fC\16C\u0403")
buf.write("\13C\3D\3D\3D\3E\3E\3E\3E\3E\7E\u040d\nE\fE\16E\u0410")
buf.write("\13E\3F\3F\3F\3F\3F\3F\5F\u0418\nF\3G\3G\3G\3G\3G\6G\u041f")
buf.write("\nG\rG\16G\u0420\3G\3G\3G\3H\3H\3H\3H\3H\3H\3H\3H\3H\3")
buf.write("H\3H\3H\7H\u0432\nH\fH\16H\u0435\13H\5H\u0437\nH\3H\3")
buf.write("H\3H\3H\7H\u043d\nH\fH\16H\u0440\13H\5H\u0442\nH\7H\u0444")
buf.write("\nH\fH\16H\u0447\13H\3H\3H\5H\u044b\nH\3I\3I\3I\3I\3I")
buf.write("\3I\3I\3I\3I\3I\3I\5I\u0458\nI\3J\3J\5J\u045c\nJ\3J\3")
buf.write("J\3K\3K\3K\3K\3K\7K\u0465\nK\fK\16K\u0468\13K\3L\3L\5")
buf.write("L\u046c\nL\3M\5M\u046f\nM\3M\3M\3N\3N\3N\3N\3N\3N\3N\5")
buf.write("N\u047a\nN\3N\3N\3N\3N\3N\3N\5N\u0482\nN\3O\3O\3O\3O\3")
buf.write("O\3O\3O\3O\3O\3O\3O\3O\3O\3O\3O\3O\3O\5O\u0495\nO\3O\3")
buf.write("O\5O\u0499\nO\3O\3O\5O\u049d\nO\3O\3O\3O\3O\3O\3O\5O\u04a5")
buf.write("\nO\3O\3O\5O\u04a9\nO\3O\3O\3O\5O\u04ae\nO\3P\3P\3P\3")
buf.write("P\3P\3P\3P\3P\3P\5P\u04b9\nP\3P\3P\3P\3P\3P\5P\u04c0\n")
buf.write("P\3Q\5Q\u04c3\nQ\3Q\3Q\3R\3R\3R\3R\3R\7R\u04cc\nR\fR\16")
buf.write("R\u04cf\13R\3S\3S\3S\5S\u04d4\nS\3T\5T\u04d7\nT\3T\3T")
buf.write("\5T\u04db\nT\3T\3T\3U\3U\3U\3U\3U\7U\u04e4\nU\fU\16U\u04e7")
buf.write("\13U\3U\2\36\6\n\f\24\26\30\32\34\36 \"$&.:FLRbptx~\u0084")
buf.write("\u0088\u0094\u00a2\u00a8V\2\4\6\b\n\f\16\20\22\24\26\30")
buf.write("\32\34\36 \"$&(*,.\60\62\64\668:<>@BDFHJLNPRTVXZ\\^`b")
buf.write("dfhjlnprtvxz|~\u0080\u0082\u0084\u0086\u0088\u008a\u008c")
buf.write("\u008e\u0090\u0092\u0094\u0096\u0098\u009a\u009c\u009e")
buf.write("\u00a0\u00a2\u00a4\u00a6\u00a8\2\16\7\2IIKKMMPPUV\3\2")
buf.write("[e\b\2\21\21\34\34$$**--<<\n\2\6\b\24\24\31\31\35\35\"")
buf.write("#\'(/\60\66\67\3\2\6\b\4\2++..\6\2\25\25%%\61\61\65\65")
buf.write("\5\2\n\13!!::\4\2=>ZZ\3\2=>\4\2\r\r\17\17\4\2\20\20\61")
buf.write("\61\u0559\2\u00cb\3\2\2\2\4\u00cd\3\2\2\2\6\u00d4\3\2")
buf.write("\2\2\b\u00e6\3\2\2\2\n\u010a\3\2\2\2\f\u0126\3\2\2\2\16")
buf.write("\u0147\3\2\2\2\20\u0149\3\2\2\2\22\u0157\3\2\2\2\24\u0159")
buf.write("\3\2\2\2\26\u016a\3\2\2\2\30\u0178\3\2\2\2\32\u0186\3")
buf.write("\2\2\2\34\u019a\3\2\2\2\36\u01a8\3\2\2\2 \u01b3\3\2\2")
buf.write("\2\"\u01be\3\2\2\2$\u01c9\3\2\2\2&\u01d4\3\2\2\2(\u01df")
buf.write("\3\2\2\2*\u01ec\3\2\2\2,\u01ee\3\2\2\2.\u01f0\3\2\2\2")
buf.write("\60\u01fb\3\2\2\2\62\u0204\3\2\2\2\64\u0207\3\2\2\2\66")
buf.write("\u020c\3\2\2\28\u0215\3\2\2\2:\u0217\3\2\2\2<\u0227\3")
buf.write("\2\2\2>\u0229\3\2\2\2@\u0239\3\2\2\2B\u0246\3\2\2\2D\u0248")
buf.write("\3\2\2\2F\u024a\3\2\2\2H\u025b\3\2\2\2J\u0265\3\2\2\2")
buf.write("L\u0267\3\2\2\2N\u0278\3\2\2\2P\u028d\3\2\2\2R\u028f\3")
buf.write("\2\2\2T\u029f\3\2\2\2V\u02a1\3\2\2\2X\u02a3\3\2\2\2Z\u02a8")
buf.write("\3\2\2\2\\\u02b0\3\2\2\2^\u02bc\3\2\2\2`\u02bf\3\2\2\2")
buf.write("b\u02ce\3\2\2\2d\u0309\3\2\2\2f\u030b\3\2\2\2h\u031b\3")
buf.write("\2\2\2j\u0326\3\2\2\2l\u032f\3\2\2\2n\u0344\3\2\2\2p\u0346")
buf.write("\3\2\2\2r\u0355\3\2\2\2t\u0357\3\2\2\2v\u0369\3\2\2\2")
buf.write("x\u036b\3\2\2\2z\u0376\3\2\2\2|\u0385\3\2\2\2~\u03b5\3")
buf.write("\2\2\2\u0080\u03e5\3\2\2\2\u0082\u03f1\3\2\2\2\u0084\u03f3")
buf.write("\3\2\2\2\u0086\u0404\3\2\2\2\u0088\u0407\3\2\2\2\u008a")
buf.write("\u0417\3\2\2\2\u008c\u0419\3\2\2\2\u008e\u044a\3\2\2\2")
buf.write("\u0090\u0457\3\2\2\2\u0092\u0459\3\2\2\2\u0094\u045f\3")
buf.write("\2\2\2\u0096\u046b\3\2\2\2\u0098\u046e\3\2\2\2\u009a\u0481")
buf.write("\3\2\2\2\u009c\u04ad\3\2\2\2\u009e\u04bf\3\2\2\2\u00a0")
buf.write("\u04c2\3\2\2\2\u00a2\u04c6\3\2\2\2\u00a4\u04d3\3\2\2\2")
buf.write("\u00a6\u04d6\3\2\2\2\u00a8\u04de\3\2\2\2\u00aa\u00cc\7")
buf.write("k\2\2\u00ab\u00cc\7l\2\2\u00ac\u00ae\7m\2\2\u00ad\u00ac")
buf.write("\3\2\2\2\u00ae\u00af\3\2\2\2\u00af\u00ad\3\2\2\2\u00af")
buf.write("\u00b0\3\2\2\2\u00b0\u00cc\3\2\2\2\u00b1\u00b2\7=\2\2")
buf.write("\u00b2\u00b3\5.\30\2\u00b3\u00b4\7>\2\2\u00b4\u00cc\3")
buf.write("\2\2\2\u00b5\u00cc\5\4\3\2\u00b6\u00b8\7\3\2\2\u00b7\u00b6")
buf.write("\3\2\2\2\u00b7\u00b8\3\2\2\2\u00b8\u00b9\3\2\2\2\u00b9")
buf.write("\u00ba\7=\2\2\u00ba\u00bb\5\u0092J\2\u00bb\u00bc\7>\2")
buf.write("\2\u00bc\u00cc\3\2\2\2\u00bd\u00be\7\4\2\2\u00be\u00bf")
buf.write("\7=\2\2\u00bf\u00c0\5\16\b\2\u00c0\u00c1\7Z\2\2\u00c1")
buf.write("\u00c2\5z>\2\u00c2\u00c3\7>\2\2\u00c3\u00cc\3\2\2\2\u00c4")
buf.write("\u00c5\7\5\2\2\u00c5\u00c6\7=\2\2\u00c6\u00c7\5z>\2\u00c7")
buf.write("\u00c8\7Z\2\2\u00c8\u00c9\5\16\b\2\u00c9\u00ca\7>\2\2")
buf.write("\u00ca\u00cc\3\2\2\2\u00cb\u00aa\3\2\2\2\u00cb\u00ab\3")
buf.write("\2\2\2\u00cb\u00ad\3\2\2\2\u00cb\u00b1\3\2\2\2\u00cb\u00b5")
buf.write("\3\2\2\2\u00cb\u00b7\3\2\2\2\u00cb\u00bd\3\2\2\2\u00cb")
buf.write("\u00c4\3\2\2\2\u00cc\3\3\2\2\2\u00cd\u00ce\78\2\2\u00ce")
buf.write("\u00cf\7=\2\2\u00cf\u00d0\5*\26\2\u00d0\u00d1\7Z\2\2\u00d1")
buf.write("\u00d2\5\6\4\2\u00d2\u00d3\7>\2\2\u00d3\5\3\2\2\2\u00d4")
buf.write("\u00d5\b\4\1\2\u00d5\u00d6\5\b\5\2\u00d6\u00dc\3\2\2\2")
buf.write("\u00d7\u00d8\f\3\2\2\u00d8\u00d9\7Z\2\2\u00d9\u00db\5")
buf.write("\b\5\2\u00da\u00d7\3\2\2\2\u00db\u00de\3\2\2\2\u00dc\u00da")
buf.write("\3\2\2\2\u00dc\u00dd\3\2\2\2\u00dd\7\3\2\2\2\u00de\u00dc")
buf.write("\3\2\2\2\u00df\u00e0\5z>\2\u00e0\u00e1\7X\2\2\u00e1\u00e2")
buf.write("\5*\26\2\u00e2\u00e7\3\2\2\2\u00e3\u00e4\7\27\2\2\u00e4")
buf.write("\u00e5\7X\2\2\u00e5\u00e7\5*\26\2\u00e6\u00df\3\2\2\2")
buf.write("\u00e6\u00e3\3\2\2\2\u00e7\t\3\2\2\2\u00e8\u00e9\b\6\1")
buf.write("\2\u00e9\u010b\5\2\2\2\u00ea\u00eb\7=\2\2\u00eb\u00ec")
buf.write("\5z>\2\u00ec\u00ed\7>\2\2\u00ed\u00ee\7A\2\2\u00ee\u00ef")
buf.write("\5\u0084C\2\u00ef\u00f0\7B\2\2\u00f0\u010b\3\2\2\2\u00f1")
buf.write("\u00f2\7=\2\2\u00f2\u00f3\5z>\2\u00f3\u00f4\7>\2\2\u00f4")
buf.write("\u00f5\7A\2\2\u00f5\u00f6\5\u0084C\2\u00f6\u00f7\7Z\2")
buf.write("\2\u00f7\u00f8\7B\2\2\u00f8\u010b\3\2\2\2\u00f9\u00fa")
buf.write("\7\3\2\2\u00fa\u00fb\7=\2\2\u00fb\u00fc\5z>\2\u00fc\u00fd")
buf.write("\7>\2\2\u00fd\u00fe\7A\2\2\u00fe\u00ff\5\u0084C\2\u00ff")
buf.write("\u0100\7B\2\2\u0100\u010b\3\2\2\2\u0101\u0102\7\3\2\2")
buf.write("\u0102\u0103\7=\2\2\u0103\u0104\5z>\2\u0104\u0105\7>\2")
buf.write("\2\u0105\u0106\7A\2\2\u0106\u0107\5\u0084C\2\u0107\u0108")
buf.write("\7Z\2\2\u0108\u0109\7B\2\2\u0109\u010b\3\2\2\2\u010a\u00e8")
buf.write("\3\2\2\2\u010a\u00ea\3\2\2\2\u010a\u00f1\3\2\2\2\u010a")
buf.write("\u00f9\3\2\2\2\u010a\u0101\3\2\2\2\u010b\u0123\3\2\2\2")
buf.write("\u010c\u010d\f\f\2\2\u010d\u010e\7?\2\2\u010e\u010f\5")
buf.write(".\30\2\u010f\u0110\7@\2\2\u0110\u0122\3\2\2\2\u0111\u0112")
buf.write("\f\13\2\2\u0112\u0114\7=\2\2\u0113\u0115\5\f\7\2\u0114")
buf.write("\u0113\3\2\2\2\u0114\u0115\3\2\2\2\u0115\u0116\3\2\2\2")
buf.write("\u0116\u0122\7>\2\2\u0117\u0118\f\n\2\2\u0118\u0119\7")
buf.write("i\2\2\u0119\u0122\7k\2\2\u011a\u011b\f\t\2\2\u011b\u011c")
buf.write("\7h\2\2\u011c\u0122\7k\2\2\u011d\u011e\f\b\2\2\u011e\u0122")
buf.write("\7J\2\2\u011f\u0120\f\7\2\2\u0120\u0122\7L\2\2\u0121\u010c")
buf.write("\3\2\2\2\u0121\u0111\3\2\2\2\u0121\u0117\3\2\2\2\u0121")
buf.write("\u011a\3\2\2\2\u0121\u011d\3\2\2\2\u0121\u011f\3\2\2\2")
buf.write("\u0122\u0125\3\2\2\2\u0123\u0121\3\2\2\2\u0123\u0124\3")
buf.write("\2\2\2\u0124\13\3\2\2\2\u0125\u0123\3\2\2\2\u0126\u0127")
buf.write("\b\7\1\2\u0127\u0128\5*\26\2\u0128\u012e\3\2\2\2\u0129")
buf.write("\u012a\f\3\2\2\u012a\u012b\7Z\2\2\u012b\u012d\5*\26\2")
buf.write("\u012c\u0129\3\2\2\2\u012d\u0130\3\2\2\2\u012e\u012c\3")
buf.write("\2\2\2\u012e\u012f\3\2\2\2\u012f\r\3\2\2\2\u0130\u012e")
buf.write("\3\2\2\2\u0131\u0148\5\n\6\2\u0132\u0133\7J\2\2\u0133")
buf.write("\u0148\5\16\b\2\u0134\u0135\7L\2\2\u0135\u0148\5\16\b")
buf.write("\2\u0136\u0137\5\20\t\2\u0137\u0138\5\22\n\2\u0138\u0148")
buf.write("\3\2\2\2\u0139\u013a\7)\2\2\u013a\u0148\5\16\b\2\u013b")
buf.write("\u013c\7)\2\2\u013c\u013d\7=\2\2\u013d\u013e\5z>\2\u013e")
buf.write("\u013f\7>\2\2\u013f\u0148\3\2\2\2\u0140\u0141\7\64\2\2")
buf.write("\u0141\u0142\7=\2\2\u0142\u0143\5z>\2\u0143\u0144\7>\2")
buf.write("\2\u0144\u0148\3\2\2\2\u0145\u0146\7R\2\2\u0146\u0148")
buf.write("\7k\2\2\u0147\u0131\3\2\2\2\u0147\u0132\3\2\2\2\u0147")
buf.write("\u0134\3\2\2\2\u0147\u0136\3\2\2\2\u0147\u0139\3\2\2\2")
buf.write("\u0147\u013b\3\2\2\2\u0147\u0140\3\2\2\2\u0147\u0145\3")
buf.write("\2\2\2\u0148\17\3\2\2\2\u0149\u014a\t\2\2\2\u014a\21\3")
buf.write("\2\2\2\u014b\u0158\5\16\b\2\u014c\u014d\7=\2\2\u014d\u014e")
buf.write("\5z>\2\u014e\u014f\7>\2\2\u014f\u0150\5\22\n\2\u0150\u0158")
buf.write("\3\2\2\2\u0151\u0152\7\3\2\2\u0152\u0153\7=\2\2\u0153")
buf.write("\u0154\5z>\2\u0154\u0155\7>\2\2\u0155\u0156\5\22\n\2\u0156")
buf.write("\u0158\3\2\2\2\u0157\u014b\3\2\2\2\u0157\u014c\3\2\2\2")
buf.write("\u0157\u0151\3\2\2\2\u0158\23\3\2\2\2\u0159\u015a\b\13")
buf.write("\1\2\u015a\u015b\5\22\n\2\u015b\u0167\3\2\2\2\u015c\u015d")
buf.write("\f\5\2\2\u015d\u015e\7M\2\2\u015e\u0166\5\22\n\2\u015f")
buf.write("\u0160\f\4\2\2\u0160\u0161\7N\2\2\u0161\u0166\5\22\n\2")
buf.write("\u0162\u0163\f\3\2\2\u0163\u0164\7O\2\2\u0164\u0166\5")
buf.write("\22\n\2\u0165\u015c\3\2\2\2\u0165\u015f\3\2\2\2\u0165")
buf.write("\u0162\3\2\2\2\u0166\u0169\3\2\2\2\u0167\u0165\3\2\2\2")
buf.write("\u0167\u0168\3\2\2\2\u0168\25\3\2\2\2\u0169\u0167\3\2")
buf.write("\2\2\u016a\u016b\b\f\1\2\u016b\u016c\5\24\13\2\u016c\u0175")
buf.write("\3\2\2\2\u016d\u016e\f\4\2\2\u016e\u016f\7I\2\2\u016f")
buf.write("\u0174\5\24\13\2\u0170\u0171\f\3\2\2\u0171\u0172\7K\2")
buf.write("\2\u0172\u0174\5\24\13\2\u0173\u016d\3\2\2\2\u0173\u0170")
buf.write("\3\2\2\2\u0174\u0177\3\2\2\2\u0175\u0173\3\2\2\2\u0175")
buf.write("\u0176\3\2\2\2\u0176\27\3\2\2\2\u0177\u0175\3\2\2\2\u0178")
buf.write("\u0179\b\r\1\2\u0179\u017a\5\26\f\2\u017a\u0183\3\2\2")
buf.write("\2\u017b\u017c\f\4\2\2\u017c\u017d\7G\2\2\u017d\u0182")
buf.write("\5\26\f\2\u017e\u017f\f\3\2\2\u017f\u0180\7H\2\2\u0180")
buf.write("\u0182\5\26\f\2\u0181\u017b\3\2\2\2\u0181\u017e\3\2\2")
buf.write("\2\u0182\u0185\3\2\2\2\u0183\u0181\3\2\2\2\u0183\u0184")
buf.write("\3\2\2\2\u0184\31\3\2\2\2\u0185\u0183\3\2\2\2\u0186\u0187")
buf.write("\b\16\1\2\u0187\u0188\5\30\r\2\u0188\u0197\3\2\2\2\u0189")
buf.write("\u018a\f\6\2\2\u018a\u018b\7C\2\2\u018b\u0196\5\30\r\2")
buf.write("\u018c\u018d\f\5\2\2\u018d\u018e\7E\2\2\u018e\u0196\5")
buf.write("\30\r\2\u018f\u0190\f\4\2\2\u0190\u0191\7D\2\2\u0191\u0196")
buf.write("\5\30\r\2\u0192\u0193\f\3\2\2\u0193\u0194\7F\2\2\u0194")
buf.write("\u0196\5\30\r\2\u0195\u0189\3\2\2\2\u0195\u018c\3\2\2")
buf.write("\2\u0195\u018f\3\2\2\2\u0195\u0192\3\2\2\2\u0196\u0199")
buf.write("\3\2\2\2\u0197\u0195\3\2\2\2\u0197\u0198\3\2\2\2\u0198")
buf.write("\33\3\2\2\2\u0199\u0197\3\2\2\2\u019a\u019b\b\17\1\2\u019b")
buf.write("\u019c\5\32\16\2\u019c\u01a5\3\2\2\2\u019d\u019e\f\4\2")
buf.write("\2\u019e\u019f\7f\2\2\u019f\u01a4\5\32\16\2\u01a0\u01a1")
buf.write("\f\3\2\2\u01a1\u01a2\7g\2\2\u01a2\u01a4\5\32\16\2\u01a3")
buf.write("\u019d\3\2\2\2\u01a3\u01a0\3\2\2\2\u01a4\u01a7\3\2\2\2")
buf.write("\u01a5\u01a3\3\2\2\2\u01a5\u01a6\3\2\2\2\u01a6\35\3\2")
buf.write("\2\2\u01a7\u01a5\3\2\2\2\u01a8\u01a9\b\20\1\2\u01a9\u01aa")
buf.write("\5\34\17\2\u01aa\u01b0\3\2\2\2\u01ab\u01ac\f\3\2\2\u01ac")
buf.write("\u01ad\7P\2\2\u01ad\u01af\5\34\17\2\u01ae\u01ab\3\2\2")
buf.write("\2\u01af\u01b2\3\2\2\2\u01b0\u01ae\3\2\2\2\u01b0\u01b1")
buf.write("\3\2\2\2\u01b1\37\3\2\2\2\u01b2\u01b0\3\2\2\2\u01b3\u01b4")
buf.write("\b\21\1\2\u01b4\u01b5\5\36\20\2\u01b5\u01bb\3\2\2\2\u01b6")
buf.write("\u01b7\f\3\2\2\u01b7\u01b8\7T\2\2\u01b8\u01ba\5\36\20")
buf.write("\2\u01b9\u01b6\3\2\2\2\u01ba\u01bd\3\2\2\2\u01bb\u01b9")
buf.write("\3\2\2\2\u01bb\u01bc\3\2\2\2\u01bc!\3\2\2\2\u01bd\u01bb")
buf.write("\3\2\2\2\u01be\u01bf\b\22\1\2\u01bf\u01c0\5 \21\2\u01c0")
buf.write("\u01c6\3\2\2\2\u01c1\u01c2\f\3\2\2\u01c2\u01c3\7Q\2\2")
buf.write("\u01c3\u01c5\5 \21\2\u01c4\u01c1\3\2\2\2\u01c5\u01c8\3")
buf.write("\2\2\2\u01c6\u01c4\3\2\2\2\u01c6\u01c7\3\2\2\2\u01c7#")
buf.write("\3\2\2\2\u01c8\u01c6\3\2\2\2\u01c9\u01ca\b\23\1\2\u01ca")
buf.write("\u01cb\5\"\22\2\u01cb\u01d1\3\2\2\2\u01cc\u01cd\f\3\2")
buf.write("\2\u01cd\u01ce\7R\2\2\u01ce\u01d0\5\"\22\2\u01cf\u01cc")
buf.write("\3\2\2\2\u01d0\u01d3\3\2\2\2\u01d1\u01cf\3\2\2\2\u01d1")
buf.write("\u01d2\3\2\2\2\u01d2%\3\2\2\2\u01d3\u01d1\3\2\2\2\u01d4")
buf.write("\u01d5\b\24\1\2\u01d5\u01d6\5$\23\2\u01d6\u01dc\3\2\2")
buf.write("\2\u01d7\u01d8\f\3\2\2\u01d8\u01d9\7S\2\2\u01d9\u01db")
buf.write("\5$\23\2\u01da\u01d7\3\2\2\2\u01db\u01de\3\2\2\2\u01dc")
buf.write("\u01da\3\2\2\2\u01dc\u01dd\3\2\2\2\u01dd\'\3\2\2\2\u01de")
buf.write("\u01dc\3\2\2\2\u01df\u01e5\5&\24\2\u01e0\u01e1\7W\2\2")
buf.write("\u01e1\u01e2\5.\30\2\u01e2\u01e3\7X\2\2\u01e3\u01e4\5")
buf.write("(\25\2\u01e4\u01e6\3\2\2\2\u01e5\u01e0\3\2\2\2\u01e5\u01e6")
buf.write("\3\2\2\2\u01e6)\3\2\2\2\u01e7\u01ed\5(\25\2\u01e8\u01e9")
buf.write("\5\16\b\2\u01e9\u01ea\5,\27\2\u01ea\u01eb\5*\26\2\u01eb")
buf.write("\u01ed\3\2\2\2\u01ec\u01e7\3\2\2\2\u01ec\u01e8\3\2\2\2")
buf.write("\u01ed+\3\2\2\2\u01ee\u01ef\t\3\2\2\u01ef-\3\2\2\2\u01f0")
buf.write("\u01f1\b\30\1\2\u01f1\u01f2\5*\26\2\u01f2\u01f8\3\2\2")
buf.write("\2\u01f3\u01f4\f\3\2\2\u01f4\u01f5\7Z\2\2\u01f5\u01f7")
buf.write("\5*\26\2\u01f6\u01f3\3\2\2\2\u01f7\u01fa\3\2\2\2\u01f8")
buf.write("\u01f6\3\2\2\2\u01f8\u01f9\3\2\2\2\u01f9/\3\2\2\2\u01fa")
buf.write("\u01f8\3\2\2\2\u01fb\u01fc\5(\25\2\u01fc\61\3\2\2\2\u01fd")
buf.write("\u01ff\5\64\33\2\u01fe\u0200\5:\36\2\u01ff\u01fe\3\2\2")
buf.write("\2\u01ff\u0200\3\2\2\2\u0200\u0201\3\2\2\2\u0201\u0202")
buf.write("\7Y\2\2\u0202\u0205\3\2\2\2\u0203\u0205\5\u008cG\2\u0204")
buf.write("\u01fd\3\2\2\2\u0204\u0203\3\2\2\2\u0205\63\3\2\2\2\u0206")
buf.write("\u0208\58\35\2\u0207\u0206\3\2\2\2\u0208\u0209\3\2\2\2")
buf.write("\u0209\u0207\3\2\2\2\u0209\u020a\3\2\2\2\u020a\65\3\2")
buf.write("\2\2\u020b\u020d\58\35\2\u020c\u020b\3\2\2\2\u020d\u020e")
buf.write("\3\2\2\2\u020e\u020c\3\2\2\2\u020e\u020f\3\2\2\2\u020f")
buf.write("\67\3\2\2\2\u0210\u0216\5> \2\u0211\u0216\5@!\2\u0212")
buf.write("\u0216\5Z.\2\u0213\u0216\5\\/\2\u0214\u0216\5^\60\2\u0215")
buf.write("\u0210\3\2\2\2\u0215\u0211\3\2\2\2\u0215\u0212\3\2\2\2")
buf.write("\u0215\u0213\3\2\2\2\u0215\u0214\3\2\2\2\u02169\3\2\2")
buf.write("\2\u0217\u0218\b\36\1\2\u0218\u0219\5<\37\2\u0219\u021f")
buf.write("\3\2\2\2\u021a\u021b\f\3\2\2\u021b\u021c\7Z\2\2\u021c")
buf.write("\u021e\5<\37\2\u021d\u021a\3\2\2\2\u021e\u0221\3\2\2\2")
buf.write("\u021f\u021d\3\2\2\2\u021f\u0220\3\2\2\2\u0220;\3\2\2")
buf.write("\2\u0221\u021f\3\2\2\2\u0222\u0228\5`\61\2\u0223\u0224")
buf.write("\5`\61\2\u0224\u0225\7[\2\2\u0225\u0226\5\u0082B\2\u0226")
buf.write("\u0228\3\2\2\2\u0227\u0222\3\2\2\2\u0227\u0223\3\2\2\2")
buf.write("\u0228=\3\2\2\2\u0229\u022a\t\4\2\2\u022a?\3\2\2\2\u022b")
buf.write("\u023a\t\5\2\2\u022c\u022d\7\3\2\2\u022d\u022e\7=\2\2")
buf.write("\u022e\u022f\t\6\2\2\u022f\u023a\7>\2\2\u0230\u023a\5")
buf.write("X-\2\u0231\u023a\5B\"\2\u0232\u023a\5P)\2\u0233\u023a")
buf.write("\5\u0080A\2\u0234\u0235\7\t\2\2\u0235\u0236\7=\2\2\u0236")
buf.write("\u0237\5\60\31\2\u0237\u0238\7>\2\2\u0238\u023a\3\2\2")
buf.write("\2\u0239\u022b\3\2\2\2\u0239\u022c\3\2\2\2\u0239\u0230")
buf.write("\3\2\2\2\u0239\u0231\3\2\2\2\u0239\u0232\3\2\2\2\u0239")
buf.write("\u0233\3\2\2\2\u0239\u0234\3\2\2\2\u023aA\3\2\2\2\u023b")
buf.write("\u023d\5D#\2\u023c\u023e\7k\2\2\u023d\u023c\3\2\2\2\u023d")
buf.write("\u023e\3\2\2\2\u023e\u023f\3\2\2\2\u023f\u0240\7A\2\2")
buf.write("\u0240\u0241\5F$\2\u0241\u0242\7B\2\2\u0242\u0247\3\2")
buf.write("\2\2\u0243\u0244\5D#\2\u0244\u0245\7k\2\2\u0245\u0247")
buf.write("\3\2\2\2\u0246\u023b\3\2\2\2\u0246\u0243\3\2\2\2\u0247")
buf.write("C\3\2\2\2\u0248\u0249\t\7\2\2\u0249E\3\2\2\2\u024a\u024b")
buf.write("\b$\1\2\u024b\u024c\5H%\2\u024c\u0251\3\2\2\2\u024d\u024e")
buf.write("\f\3\2\2\u024e\u0250\5H%\2\u024f\u024d\3\2\2\2\u0250\u0253")
buf.write("\3\2\2\2\u0251\u024f\3\2\2\2\u0251\u0252\3\2\2\2\u0252")
buf.write("G\3\2\2\2\u0253\u0251\3\2\2\2\u0254\u0256\5J&\2\u0255")
buf.write("\u0257\5L\'\2\u0256\u0255\3\2\2\2\u0256\u0257\3\2\2\2")
buf.write("\u0257\u0258\3\2\2\2\u0258\u0259\7Y\2\2\u0259\u025c\3")
buf.write("\2\2\2\u025a\u025c\5\u008cG\2\u025b\u0254\3\2\2\2\u025b")
buf.write("\u025a\3\2\2\2\u025cI\3\2\2\2\u025d\u025f\5@!\2\u025e")
buf.write("\u0260\5J&\2\u025f\u025e\3\2\2\2\u025f\u0260\3\2\2\2\u0260")
buf.write("\u0266\3\2\2\2\u0261\u0263\5Z.\2\u0262\u0264\5J&\2\u0263")
buf.write("\u0262\3\2\2\2\u0263\u0264\3\2\2\2\u0264\u0266\3\2\2\2")
buf.write("\u0265\u025d\3\2\2\2\u0265\u0261\3\2\2\2\u0266K\3\2\2")
buf.write("\2\u0267\u0268\b\'\1\2\u0268\u0269\5N(\2\u0269\u026f\3")
buf.write("\2\2\2\u026a\u026b\f\3\2\2\u026b\u026c\7Z\2\2\u026c\u026e")
buf.write("\5N(\2\u026d\u026a\3\2\2\2\u026e\u0271\3\2\2\2\u026f\u026d")
buf.write("\3\2\2\2\u026f\u0270\3\2\2\2\u0270M\3\2\2\2\u0271\u026f")
buf.write("\3\2\2\2\u0272\u0279\5`\61\2\u0273\u0275\5`\61\2\u0274")
buf.write("\u0273\3\2\2\2\u0274\u0275\3\2\2\2\u0275\u0276\3\2\2\2")
buf.write("\u0276\u0277\7X\2\2\u0277\u0279\5\60\31\2\u0278\u0272")
buf.write("\3\2\2\2\u0278\u0274\3\2\2\2\u0279O\3\2\2\2\u027a\u027c")
buf.write("\7\33\2\2\u027b\u027d\7k\2\2\u027c\u027b\3\2\2\2\u027c")
buf.write("\u027d\3\2\2\2\u027d\u027e\3\2\2\2\u027e\u027f\7A\2\2")
buf.write("\u027f\u0280\5R*\2\u0280\u0281\7B\2\2\u0281\u028e\3\2")
buf.write("\2\2\u0282\u0284\7\33\2\2\u0283\u0285\7k\2\2\u0284\u0283")
buf.write("\3\2\2\2\u0284\u0285\3\2\2\2\u0285\u0286\3\2\2\2\u0286")
buf.write("\u0287\7A\2\2\u0287\u0288\5R*\2\u0288\u0289\7Z\2\2\u0289")
buf.write("\u028a\7B\2\2\u028a\u028e\3\2\2\2\u028b\u028c\7\33\2\2")
buf.write("\u028c\u028e\7k\2\2\u028d\u027a\3\2\2\2\u028d\u0282\3")
buf.write("\2\2\2\u028d\u028b\3\2\2\2\u028eQ\3\2\2\2\u028f\u0290")
buf.write("\b*\1\2\u0290\u0291\5T+\2\u0291\u0297\3\2\2\2\u0292\u0293")
buf.write("\f\3\2\2\u0293\u0294\7Z\2\2\u0294\u0296\5T+\2\u0295\u0292")
buf.write("\3\2\2\2\u0296\u0299\3\2\2\2\u0297\u0295\3\2\2\2\u0297")
buf.write("\u0298\3\2\2\2\u0298S\3\2\2\2\u0299\u0297\3\2\2\2\u029a")
buf.write("\u02a0\5V,\2\u029b\u029c\5V,\2\u029c\u029d\7[\2\2\u029d")
buf.write("\u029e\5\60\31\2\u029e\u02a0\3\2\2\2\u029f\u029a\3\2\2")
buf.write("\2\u029f\u029b\3\2\2\2\u02a0U\3\2\2\2\u02a1\u02a2\7k\2")
buf.write("\2\u02a2W\3\2\2\2\u02a3\u02a4\7\65\2\2\u02a4\u02a5\7=")
buf.write("\2\2\u02a5\u02a6\5z>\2\u02a6\u02a7\7>\2\2\u02a7Y\3\2\2")
buf.write("\2\u02a8\u02a9\t\b\2\2\u02a9[\3\2\2\2\u02aa\u02b1\t\t")
buf.write("\2\2\u02ab\u02b1\5f\64\2\u02ac\u02ad\7\f\2\2\u02ad\u02ae")
buf.write("\7=\2\2\u02ae\u02af\7k\2\2\u02af\u02b1\7>\2\2\u02b0\u02aa")
buf.write("\3\2\2\2\u02b0\u02ab\3\2\2\2\u02b0\u02ac\3\2\2\2\u02b1")
buf.write("]\3\2\2\2\u02b2\u02b3\7\63\2\2\u02b3\u02b4\7=\2\2\u02b4")
buf.write("\u02b5\5z>\2\u02b5\u02b6\7>\2\2\u02b6\u02bd\3\2\2\2\u02b7")
buf.write("\u02b8\7\63\2\2\u02b8\u02b9\7=\2\2\u02b9\u02ba\5\60\31")
buf.write("\2\u02ba\u02bb\7>\2\2\u02bb\u02bd\3\2\2\2\u02bc\u02b2")
buf.write("\3\2\2\2\u02bc\u02b7\3\2\2\2\u02bd_\3\2\2\2\u02be\u02c0")
buf.write("\5n8\2\u02bf\u02be\3\2\2\2\u02bf\u02c0\3\2\2\2\u02c0\u02c1")
buf.write("\3\2\2\2\u02c1\u02c5\5b\62\2\u02c2\u02c4\5d\63\2\u02c3")
buf.write("\u02c2\3\2\2\2\u02c4\u02c7\3\2\2\2\u02c5\u02c3\3\2\2\2")
buf.write("\u02c5\u02c6\3\2\2\2\u02c6a\3\2\2\2\u02c7\u02c5\3\2\2")
buf.write("\2\u02c8\u02c9\b\62\1\2\u02c9\u02cf\7k\2\2\u02ca\u02cb")
buf.write("\7=\2\2\u02cb\u02cc\5`\61\2\u02cc\u02cd\7>\2\2\u02cd\u02cf")
buf.write("\3\2\2\2\u02ce\u02c8\3\2\2\2\u02ce\u02ca\3\2\2\2\u02cf")
buf.write("\u02fd\3\2\2\2\u02d0\u02d1\f\b\2\2\u02d1\u02d3\7?\2\2")
buf.write("\u02d2\u02d4\5p9\2\u02d3\u02d2\3\2\2\2\u02d3\u02d4\3\2")
buf.write("\2\2\u02d4\u02d6\3\2\2\2\u02d5\u02d7\5*\26\2\u02d6\u02d5")
buf.write("\3\2\2\2\u02d6\u02d7\3\2\2\2\u02d7\u02d8\3\2\2\2\u02d8")
buf.write("\u02fc\7@\2\2\u02d9\u02da\f\7\2\2\u02da\u02db\7?\2\2\u02db")
buf.write("\u02dd\7*\2\2\u02dc\u02de\5p9\2\u02dd\u02dc\3\2\2\2\u02dd")
buf.write("\u02de\3\2\2\2\u02de\u02df\3\2\2\2\u02df\u02e0\5*\26\2")
buf.write("\u02e0\u02e1\7@\2\2\u02e1\u02fc\3\2\2\2\u02e2\u02e3\f")
buf.write("\6\2\2\u02e3\u02e4\7?\2\2\u02e4\u02e5\5p9\2\u02e5\u02e6")
buf.write("\7*\2\2\u02e6\u02e7\5*\26\2\u02e7\u02e8\7@\2\2\u02e8\u02fc")
buf.write("\3\2\2\2\u02e9\u02ea\f\5\2\2\u02ea\u02ec\7?\2\2\u02eb")
buf.write("\u02ed\5p9\2\u02ec\u02eb\3\2\2\2\u02ec\u02ed\3\2\2\2\u02ed")
buf.write("\u02ee\3\2\2\2\u02ee\u02ef\7M\2\2\u02ef\u02fc\7@\2\2\u02f0")
buf.write("\u02f1\f\4\2\2\u02f1\u02f2\7=\2\2\u02f2\u02f3\5r:\2\u02f3")
buf.write("\u02f4\7>\2\2\u02f4\u02fc\3\2\2\2\u02f5\u02f6\f\3\2\2")
buf.write("\u02f6\u02f8\7=\2\2\u02f7\u02f9\5x=\2\u02f8\u02f7\3\2")
buf.write("\2\2\u02f8\u02f9\3\2\2\2\u02f9\u02fa\3\2\2\2\u02fa\u02fc")
buf.write("\7>\2\2\u02fb\u02d0\3\2\2\2\u02fb\u02d9\3\2\2\2\u02fb")
buf.write("\u02e2\3\2\2\2\u02fb\u02e9\3\2\2\2\u02fb\u02f0\3\2\2\2")
buf.write("\u02fb\u02f5\3\2\2\2\u02fc\u02ff\3\2\2\2\u02fd\u02fb\3")
buf.write("\2\2\2\u02fd\u02fe\3\2\2\2\u02fec\3\2\2\2\u02ff\u02fd")
buf.write("\3\2\2\2\u0300\u0301\7\r\2\2\u0301\u0303\7=\2\2\u0302")
buf.write("\u0304\7m\2\2\u0303\u0302\3\2\2\2\u0304\u0305\3\2\2\2")
buf.write("\u0305\u0303\3\2\2\2\u0305\u0306\3\2\2\2\u0306\u0307\3")
buf.write("\2\2\2\u0307\u030a\7>\2\2\u0308\u030a\5f\64\2\u0309\u0300")
buf.write("\3\2\2\2\u0309\u0308\3\2\2\2\u030ae\3\2\2\2\u030b\u030c")
buf.write("\7\16\2\2\u030c\u030d\7=\2\2\u030d\u030e\7=\2\2\u030e")
buf.write("\u030f\5h\65\2\u030f\u0310\7>\2\2\u0310\u0311\7>\2\2\u0311")
buf.write("g\3\2\2\2\u0312\u0317\5j\66\2\u0313\u0314\7Z\2\2\u0314")
buf.write("\u0316\5j\66\2\u0315\u0313\3\2\2\2\u0316\u0319\3\2\2\2")
buf.write("\u0317\u0315\3\2\2\2\u0317\u0318\3\2\2\2\u0318\u031c\3")
buf.write("\2\2\2\u0319\u0317\3\2\2\2\u031a\u031c\3\2\2\2\u031b\u0312")
buf.write("\3\2\2\2\u031b\u031a\3\2\2\2\u031ci\3\2\2\2\u031d\u0323")
buf.write("\n\n\2\2\u031e\u0320\7=\2\2\u031f\u0321\5\f\7\2\u0320")
buf.write("\u031f\3\2\2\2\u0320\u0321\3\2\2\2\u0321\u0322\3\2\2\2")
buf.write("\u0322\u0324\7>\2\2\u0323\u031e\3\2\2\2\u0323\u0324\3")
buf.write("\2\2\2\u0324\u0327\3\2\2\2\u0325\u0327\3\2\2\2\u0326\u031d")
buf.write("\3\2\2\2\u0326\u0325\3\2\2\2\u0327k\3\2\2\2\u0328\u032e")
buf.write("\n\13\2\2\u0329\u032a\7=\2\2\u032a\u032b\5l\67\2\u032b")
buf.write("\u032c\7>\2\2\u032c\u032e\3\2\2\2\u032d\u0328\3\2\2\2")
buf.write("\u032d\u0329\3\2\2\2\u032e\u0331\3\2\2\2\u032f\u032d\3")
buf.write("\2\2\2\u032f\u0330\3\2\2\2\u0330m\3\2\2\2\u0331\u032f")
buf.write("\3\2\2\2\u0332\u0334\7M\2\2\u0333\u0335\5p9\2\u0334\u0333")
buf.write("\3\2\2\2\u0334\u0335\3\2\2\2\u0335\u0345\3\2\2\2\u0336")
buf.write("\u0338\7M\2\2\u0337\u0339\5p9\2\u0338\u0337\3\2\2\2\u0338")
buf.write("\u0339\3\2\2\2\u0339\u033a\3\2\2\2\u033a\u0345\5n8\2\u033b")
buf.write("\u033d\7T\2\2\u033c\u033e\5p9\2\u033d\u033c\3\2\2\2\u033d")
buf.write("\u033e\3\2\2\2\u033e\u0345\3\2\2\2\u033f\u0341\7T\2\2")
buf.write("\u0340\u0342\5p9\2\u0341\u0340\3\2\2\2\u0341\u0342\3\2")
buf.write("\2\2\u0342\u0343\3\2\2\2\u0343\u0345\5n8\2\u0344\u0332")
buf.write("\3\2\2\2\u0344\u0336\3\2\2\2\u0344\u033b\3\2\2\2\u0344")
buf.write("\u033f\3\2\2\2\u0345o\3\2\2\2\u0346\u0347\b9\1\2\u0347")
buf.write("\u0348\5Z.\2\u0348\u034d\3\2\2\2\u0349\u034a\f\3\2\2\u034a")
buf.write("\u034c\5Z.\2\u034b\u0349\3\2\2\2\u034c\u034f\3\2\2\2\u034d")
buf.write("\u034b\3\2\2\2\u034d\u034e\3\2\2\2\u034eq\3\2\2\2\u034f")
buf.write("\u034d\3\2\2\2\u0350\u0356\5t;\2\u0351\u0352\5t;\2\u0352")
buf.write("\u0353\7Z\2\2\u0353\u0354\7j\2\2\u0354\u0356\3\2\2\2\u0355")
buf.write("\u0350\3\2\2\2\u0355\u0351\3\2\2\2\u0356s\3\2\2\2\u0357")
buf.write("\u0358\b;\1\2\u0358\u0359\5v<\2\u0359\u035f\3\2\2\2\u035a")
buf.write("\u035b\f\3\2\2\u035b\u035c\7Z\2\2\u035c\u035e\5v<\2\u035d")
buf.write("\u035a\3\2\2\2\u035e\u0361\3\2\2\2\u035f\u035d\3\2\2\2")
buf.write("\u035f\u0360\3\2\2\2\u0360u\3\2\2\2\u0361\u035f\3\2\2")
buf.write("\2\u0362\u0363\5\64\33\2\u0363\u0364\5`\61\2\u0364\u036a")
buf.write("\3\2\2\2\u0365\u0367\5\66\34\2\u0366\u0368\5|?\2\u0367")
buf.write("\u0366\3\2\2\2\u0367\u0368\3\2\2\2\u0368\u036a\3\2\2\2")
buf.write("\u0369\u0362\3\2\2\2\u0369\u0365\3\2\2\2\u036aw\3\2\2")
buf.write("\2\u036b\u036c\b=\1\2\u036c\u036d\7k\2\2\u036d\u0373\3")
buf.write("\2\2\2\u036e\u036f\f\3\2\2\u036f\u0370\7Z\2\2\u0370\u0372")
buf.write("\7k\2\2\u0371\u036e\3\2\2\2\u0372\u0375\3\2\2\2\u0373")
buf.write("\u0371\3\2\2\2\u0373\u0374\3\2\2\2\u0374y\3\2\2\2\u0375")
buf.write("\u0373\3\2\2\2\u0376\u0378\5J&\2\u0377\u0379\5|?\2\u0378")
buf.write("\u0377\3\2\2\2\u0378\u0379\3\2\2\2\u0379{\3\2\2\2\u037a")
buf.write("\u0386\5n8\2\u037b\u037d\5n8\2\u037c\u037b\3\2\2\2\u037c")
buf.write("\u037d\3\2\2\2\u037d\u037e\3\2\2\2\u037e\u0382\5~@\2\u037f")
buf.write("\u0381\5d\63\2\u0380\u037f\3\2\2\2\u0381\u0384\3\2\2\2")
buf.write("\u0382\u0380\3\2\2\2\u0382\u0383\3\2\2\2\u0383\u0386\3")
buf.write("\2\2\2\u0384\u0382\3\2\2\2\u0385\u037a\3\2\2\2\u0385\u037c")
buf.write("\3\2\2\2\u0386}\3\2\2\2\u0387\u0388\b@\1\2\u0388\u0389")
buf.write("\7=\2\2\u0389\u038a\5|?\2\u038a\u038e\7>\2\2\u038b\u038d")
buf.write("\5d\63\2\u038c\u038b\3\2\2\2\u038d\u0390\3\2\2\2\u038e")
buf.write("\u038c\3\2\2\2\u038e\u038f\3\2\2\2\u038f\u03b6\3\2\2\2")
buf.write("\u0390\u038e\3\2\2\2\u0391\u0393\7?\2\2\u0392\u0394\5")
buf.write("p9\2\u0393\u0392\3\2\2\2\u0393\u0394\3\2\2\2\u0394\u0396")
buf.write("\3\2\2\2\u0395\u0397\5*\26\2\u0396\u0395\3\2\2\2\u0396")
buf.write("\u0397\3\2\2\2\u0397\u0398\3\2\2\2\u0398\u03b6\7@\2\2")
buf.write("\u0399\u039a\7?\2\2\u039a\u039c\7*\2\2\u039b\u039d\5p")
buf.write("9\2\u039c\u039b\3\2\2\2\u039c\u039d\3\2\2\2\u039d\u039e")
buf.write("\3\2\2\2\u039e\u039f\5*\26\2\u039f\u03a0\7@\2\2\u03a0")
buf.write("\u03b6\3\2\2\2\u03a1\u03a2\7?\2\2\u03a2\u03a3\5p9\2\u03a3")
buf.write("\u03a4\7*\2\2\u03a4\u03a5\5*\26\2\u03a5\u03a6\7@\2\2\u03a6")
buf.write("\u03b6\3\2\2\2\u03a7\u03a8\7?\2\2\u03a8\u03a9\7M\2\2\u03a9")
buf.write("\u03b6\7@\2\2\u03aa\u03ac\7=\2\2\u03ab\u03ad\5r:\2\u03ac")
buf.write("\u03ab\3\2\2\2\u03ac\u03ad\3\2\2\2\u03ad\u03ae\3\2\2\2")
buf.write("\u03ae\u03b2\7>\2\2\u03af\u03b1\5d\63\2\u03b0\u03af\3")
buf.write("\2\2\2\u03b1\u03b4\3\2\2\2\u03b2\u03b0\3\2\2\2\u03b2\u03b3")
buf.write("\3\2\2\2\u03b3\u03b6\3\2\2\2\u03b4\u03b2\3\2\2\2\u03b5")
buf.write("\u0387\3\2\2\2\u03b5\u0391\3\2\2\2\u03b5\u0399\3\2\2\2")
buf.write("\u03b5\u03a1\3\2\2\2\u03b5\u03a7\3\2\2\2\u03b5\u03aa\3")
buf.write("\2\2\2\u03b6\u03e2\3\2\2\2\u03b7\u03b8\f\7\2\2\u03b8\u03ba")
buf.write("\7?\2\2\u03b9\u03bb\5p9\2\u03ba\u03b9\3\2\2\2\u03ba\u03bb")
buf.write("\3\2\2\2\u03bb\u03bd\3\2\2\2\u03bc\u03be\5*\26\2\u03bd")
buf.write("\u03bc\3\2\2\2\u03bd\u03be\3\2\2\2\u03be\u03bf\3\2\2\2")
buf.write("\u03bf\u03e1\7@\2\2\u03c0\u03c1\f\6\2\2\u03c1\u03c2\7")
buf.write("?\2\2\u03c2\u03c4\7*\2\2\u03c3\u03c5\5p9\2\u03c4\u03c3")
buf.write("\3\2\2\2\u03c4\u03c5\3\2\2\2\u03c5\u03c6\3\2\2\2\u03c6")
buf.write("\u03c7\5*\26\2\u03c7\u03c8\7@\2\2\u03c8\u03e1\3\2\2\2")
buf.write("\u03c9\u03ca\f\5\2\2\u03ca\u03cb\7?\2\2\u03cb\u03cc\5")
buf.write("p9\2\u03cc\u03cd\7*\2\2\u03cd\u03ce\5*\26\2\u03ce\u03cf")
buf.write("\7@\2\2\u03cf\u03e1\3\2\2\2\u03d0\u03d1\f\4\2\2\u03d1")
buf.write("\u03d2\7?\2\2\u03d2\u03d3\7M\2\2\u03d3\u03e1\7@\2\2\u03d4")
buf.write("\u03d5\f\3\2\2\u03d5\u03d7\7=\2\2\u03d6\u03d8\5r:\2\u03d7")
buf.write("\u03d6\3\2\2\2\u03d7\u03d8\3\2\2\2\u03d8\u03d9\3\2\2\2")
buf.write("\u03d9\u03dd\7>\2\2\u03da\u03dc\5d\63\2\u03db\u03da\3")
buf.write("\2\2\2\u03dc\u03df\3\2\2\2\u03dd\u03db\3\2\2\2\u03dd\u03de")
buf.write("\3\2\2\2\u03de\u03e1\3\2\2\2\u03df\u03dd\3\2\2\2\u03e0")
buf.write("\u03b7\3\2\2\2\u03e0\u03c0\3\2\2\2\u03e0\u03c9\3\2\2\2")
buf.write("\u03e0\u03d0\3\2\2\2\u03e0\u03d4\3\2\2\2\u03e1\u03e4\3")
buf.write("\2\2\2\u03e2\u03e0\3\2\2\2\u03e2\u03e3\3\2\2\2\u03e3\177")
buf.write("\3\2\2\2\u03e4\u03e2\3\2\2\2\u03e5\u03e6\7k\2\2\u03e6")
buf.write("\u0081\3\2\2\2\u03e7\u03f2\5*\26\2\u03e8\u03e9\7A\2\2")
buf.write("\u03e9\u03ea\5\u0084C\2\u03ea\u03eb\7B\2\2\u03eb\u03f2")
buf.write("\3\2\2\2\u03ec\u03ed\7A\2\2\u03ed\u03ee\5\u0084C\2\u03ee")
buf.write("\u03ef\7Z\2\2\u03ef\u03f0\7B\2\2\u03f0\u03f2\3\2\2\2\u03f1")
buf.write("\u03e7\3\2\2\2\u03f1\u03e8\3\2\2\2\u03f1\u03ec\3\2\2\2")
buf.write("\u03f2\u0083\3\2\2\2\u03f3\u03f5\bC\1\2\u03f4\u03f6\5")
buf.write("\u0086D\2\u03f5\u03f4\3\2\2\2\u03f5\u03f6\3\2\2\2\u03f6")
buf.write("\u03f7\3\2\2\2\u03f7\u03f8\5\u0082B\2\u03f8\u0401\3\2")
buf.write("\2\2\u03f9\u03fa\f\3\2\2\u03fa\u03fc\7Z\2\2\u03fb\u03fd")
buf.write("\5\u0086D\2\u03fc\u03fb\3\2\2\2\u03fc\u03fd\3\2\2\2\u03fd")
buf.write("\u03fe\3\2\2\2\u03fe\u0400\5\u0082B\2\u03ff\u03f9\3\2")
buf.write("\2\2\u0400\u0403\3\2\2\2\u0401\u03ff\3\2\2\2\u0401\u0402")
buf.write("\3\2\2\2\u0402\u0085\3\2\2\2\u0403\u0401\3\2\2\2\u0404")
buf.write("\u0405\5\u0088E\2\u0405\u0406\7[\2\2\u0406\u0087\3\2\2")
buf.write("\2\u0407\u0408\bE\1\2\u0408\u0409\5\u008aF\2\u0409\u040e")
buf.write("\3\2\2\2\u040a\u040b\f\3\2\2\u040b\u040d\5\u008aF\2\u040c")
buf.write("\u040a\3\2\2\2\u040d\u0410\3\2\2\2\u040e\u040c\3\2\2\2")
buf.write("\u040e\u040f\3\2\2\2\u040f\u0089\3\2\2\2\u0410\u040e\3")
buf.write("\2\2\2\u0411\u0412\7?\2\2\u0412\u0413\5\60\31\2\u0413")
buf.write("\u0414\7@\2\2\u0414\u0418\3\2\2\2\u0415\u0416\7i\2\2\u0416")
buf.write("\u0418\7k\2\2\u0417\u0411\3\2\2\2\u0417\u0415\3\2\2\2")
buf.write("\u0418\u008b\3\2\2\2\u0419\u041a\7;\2\2\u041a\u041b\7")
buf.write("=\2\2\u041b\u041c\5\60\31\2\u041c\u041e\7Z\2\2\u041d\u041f")
buf.write("\7m\2\2\u041e\u041d\3\2\2\2\u041f\u0420\3\2\2\2\u0420")
buf.write("\u041e\3\2\2\2\u0420\u0421\3\2\2\2\u0421\u0422\3\2\2\2")
buf.write("\u0422\u0423\7>\2\2\u0423\u0424\7Y\2\2\u0424\u008d\3\2")
buf.write("\2\2\u0425\u044b\5\u0090I\2\u0426\u044b\5\u0092J\2\u0427")
buf.write("\u044b\5\u0098M\2\u0428\u044b\5\u009aN\2\u0429\u044b\5")
buf.write("\u009cO\2\u042a\u044b\5\u009eP\2\u042b\u042c\t\f\2\2\u042c")
buf.write("\u042d\t\r\2\2\u042d\u0436\7=\2\2\u042e\u0433\5&\24\2")
buf.write("\u042f\u0430\7Z\2\2\u0430\u0432\5&\24\2\u0431\u042f\3")
buf.write("\2\2\2\u0432\u0435\3\2\2\2\u0433\u0431\3\2\2\2\u0433\u0434")
buf.write("\3\2\2\2\u0434\u0437\3\2\2\2\u0435\u0433\3\2\2\2\u0436")
buf.write("\u042e\3\2\2\2\u0436\u0437\3\2\2\2\u0437\u0445\3\2\2\2")
buf.write("\u0438\u0441\7X\2\2\u0439\u043e\5&\24\2\u043a\u043b\7")
buf.write("Z\2\2\u043b\u043d\5&\24\2\u043c\u043a\3\2\2\2\u043d\u0440")
buf.write("\3\2\2\2\u043e\u043c\3\2\2\2\u043e\u043f\3\2\2\2\u043f")
buf.write("\u0442\3\2\2\2\u0440\u043e\3\2\2\2\u0441\u0439\3\2\2\2")
buf.write("\u0441\u0442\3\2\2\2\u0442\u0444\3\2\2\2\u0443\u0438\3")
buf.write("\2\2\2\u0444\u0447\3\2\2\2\u0445\u0443\3\2\2\2\u0445\u0446")
buf.write("\3\2\2\2\u0446\u0448\3\2\2\2\u0447\u0445\3\2\2\2\u0448")
buf.write("\u0449\7>\2\2\u0449\u044b\7Y\2\2\u044a\u0425\3\2\2\2\u044a")
buf.write("\u0426\3\2\2\2\u044a\u0427\3\2\2\2\u044a\u0428\3\2\2\2")
buf.write("\u044a\u0429\3\2\2\2\u044a\u042a\3\2\2\2\u044a\u042b\3")
buf.write("\2\2\2\u044b\u008f\3\2\2\2\u044c\u044d\7k\2\2\u044d\u044e")
buf.write("\7X\2\2\u044e\u0458\5\u008eH\2\u044f\u0450\7\23\2\2\u0450")
buf.write("\u0451\5\60\31\2\u0451\u0452\7X\2\2\u0452\u0453\5\u008e")
buf.write("H\2\u0453\u0458\3\2\2\2\u0454\u0455\7\27\2\2\u0455\u0456")
buf.write("\7X\2\2\u0456\u0458\5\u008eH\2\u0457\u044c\3\2\2\2\u0457")
buf.write("\u044f\3\2\2\2\u0457\u0454\3\2\2\2\u0458\u0091\3\2\2\2")
buf.write("\u0459\u045b\7A\2\2\u045a\u045c\5\u0094K\2\u045b\u045a")
buf.write("\3\2\2\2\u045b\u045c\3\2\2\2\u045c\u045d\3\2\2\2\u045d")
buf.write("\u045e\7B\2\2\u045e\u0093\3\2\2\2\u045f\u0460\bK\1\2\u0460")
buf.write("\u0461\5\u0096L\2\u0461\u0466\3\2\2\2\u0462\u0463\f\3")
buf.write("\2\2\u0463\u0465\5\u0096L\2\u0464\u0462\3\2\2\2\u0465")
buf.write("\u0468\3\2\2\2\u0466\u0464\3\2\2\2\u0466\u0467\3\2\2\2")
buf.write("\u0467\u0095\3\2\2\2\u0468\u0466\3\2\2\2\u0469\u046c\5")
buf.write("\62\32\2\u046a\u046c\5\u008eH\2\u046b\u0469\3\2\2\2\u046b")
buf.write("\u046a\3\2\2\2\u046c\u0097\3\2\2\2\u046d\u046f\5.\30\2")
buf.write("\u046e\u046d\3\2\2\2\u046e\u046f\3\2\2\2\u046f\u0470\3")
buf.write("\2\2\2\u0470\u0471\7Y\2\2\u0471\u0099\3\2\2\2\u0472\u0473")
buf.write("\7 \2\2\u0473\u0474\7=\2\2\u0474\u0475\5.\30\2\u0475\u0476")
buf.write("\7>\2\2\u0476\u0479\5\u008eH\2\u0477\u0478\7\32\2\2\u0478")
buf.write("\u047a\5\u008eH\2\u0479\u0477\3\2\2\2\u0479\u047a\3\2")
buf.write("\2\2\u047a\u0482\3\2\2\2\u047b\u047c\7,\2\2\u047c\u047d")
buf.write("\7=\2\2\u047d\u047e\5.\30\2\u047e\u047f\7>\2\2\u047f\u0480")
buf.write("\5\u008eH\2\u0480\u0482\3\2\2\2\u0481\u0472\3\2\2\2\u0481")
buf.write("\u047b\3\2\2\2\u0482\u009b\3\2\2\2\u0483\u0484\7\62\2")
buf.write("\2\u0484\u0485\7=\2\2\u0485\u0486\5.\30\2\u0486\u0487")
buf.write("\7>\2\2\u0487\u0488\5\u008eH\2\u0488\u04ae\3\2\2\2\u0489")
buf.write("\u048a\7\30\2\2\u048a\u048b\5\u008eH\2\u048b\u048c\7\62")
buf.write("\2\2\u048c\u048d\7=\2\2\u048d\u048e\5.\30\2\u048e\u048f")
buf.write("\7>\2\2\u048f\u0490\7Y\2\2\u0490\u04ae\3\2\2\2\u0491\u0492")
buf.write("\7\36\2\2\u0492\u0494\7=\2\2\u0493\u0495\5.\30\2\u0494")
buf.write("\u0493\3\2\2\2\u0494\u0495\3\2\2\2\u0495\u0496\3\2\2\2")
buf.write("\u0496\u0498\7Y\2\2\u0497\u0499\5.\30\2\u0498\u0497\3")
buf.write("\2\2\2\u0498\u0499\3\2\2\2\u0499\u049a\3\2\2\2\u049a\u049c")
buf.write("\7Y\2\2\u049b\u049d\5.\30\2\u049c\u049b\3\2\2\2\u049c")
buf.write("\u049d\3\2\2\2\u049d\u049e\3\2\2\2\u049e\u049f\7>\2\2")
buf.write("\u049f\u04ae\5\u008eH\2\u04a0\u04a1\7\36\2\2\u04a1\u04a2")
buf.write("\7=\2\2\u04a2\u04a4\5\62\32\2\u04a3\u04a5\5.\30\2\u04a4")
buf.write("\u04a3\3\2\2\2\u04a4\u04a5\3\2\2\2\u04a5\u04a6\3\2\2\2")
buf.write("\u04a6\u04a8\7Y\2\2\u04a7\u04a9\5.\30\2\u04a8\u04a7\3")
buf.write("\2\2\2\u04a8\u04a9\3\2\2\2\u04a9\u04aa\3\2\2\2\u04aa\u04ab")
buf.write("\7>\2\2\u04ab\u04ac\5\u008eH\2\u04ac\u04ae\3\2\2\2\u04ad")
buf.write("\u0483\3\2\2\2\u04ad\u0489\3\2\2\2\u04ad\u0491\3\2\2\2")
buf.write("\u04ad\u04a0\3\2\2\2\u04ae\u009d\3\2\2\2\u04af\u04b0\7")
buf.write("\37\2\2\u04b0\u04b1\7k\2\2\u04b1\u04c0\7Y\2\2\u04b2\u04b3")
buf.write("\7\26\2\2\u04b3\u04c0\7Y\2\2\u04b4\u04b5\7\22\2\2\u04b5")
buf.write("\u04c0\7Y\2\2\u04b6\u04b8\7&\2\2\u04b7\u04b9\5.\30\2\u04b8")
buf.write("\u04b7\3\2\2\2\u04b8\u04b9\3\2\2\2\u04b9\u04ba\3\2\2\2")
buf.write("\u04ba\u04c0\7Y\2\2\u04bb\u04bc\7\37\2\2\u04bc\u04bd\5")
buf.write("\16\b\2\u04bd\u04be\7Y\2\2\u04be\u04c0\3\2\2\2\u04bf\u04af")
buf.write("\3\2\2\2\u04bf\u04b2\3\2\2\2\u04bf\u04b4\3\2\2\2\u04bf")
buf.write("\u04b6\3\2\2\2\u04bf\u04bb\3\2\2\2\u04c0\u009f\3\2\2\2")
buf.write("\u04c1\u04c3\5\u00a2R\2\u04c2\u04c1\3\2\2\2\u04c2\u04c3")
buf.write("\3\2\2\2\u04c3\u04c4\3\2\2\2\u04c4\u04c5\7\2\2\3\u04c5")
buf.write("\u00a1\3\2\2\2\u04c6\u04c7\bR\1\2\u04c7\u04c8\5\u00a4")
buf.write("S\2\u04c8\u04cd\3\2\2\2\u04c9\u04ca\f\3\2\2\u04ca\u04cc")
buf.write("\5\u00a4S\2\u04cb\u04c9\3\2\2\2\u04cc\u04cf\3\2\2\2\u04cd")
buf.write("\u04cb\3\2\2\2\u04cd\u04ce\3\2\2\2\u04ce\u00a3\3\2\2\2")
buf.write("\u04cf\u04cd\3\2\2\2\u04d0\u04d4\5\u00a6T\2\u04d1\u04d4")
buf.write("\5\62\32\2\u04d2\u04d4\7Y\2\2\u04d3\u04d0\3\2\2\2\u04d3")
buf.write("\u04d1\3\2\2\2\u04d3\u04d2\3\2\2\2\u04d4\u00a5\3\2\2\2")
buf.write("\u04d5\u04d7\5\64\33\2\u04d6\u04d5\3\2\2\2\u04d6\u04d7")
buf.write("\3\2\2\2\u04d7\u04d8\3\2\2\2\u04d8\u04da\5`\61\2\u04d9")
buf.write("\u04db\5\u00a8U\2\u04da\u04d9\3\2\2\2\u04da\u04db\3\2")
buf.write("\2\2\u04db\u04dc\3\2\2\2\u04dc\u04dd\5\u0092J\2\u04dd")
buf.write("\u00a7\3\2\2\2\u04de\u04df\bU\1\2\u04df\u04e0\5\62\32")
buf.write("\2\u04e0\u04e5\3\2\2\2\u04e1\u04e2\f\3\2\2\u04e2\u04e4")
buf.write("\5\62\32\2\u04e3\u04e1\3\2\2\2\u04e4\u04e7\3\2\2\2\u04e5")
buf.write("\u04e3\3\2\2\2\u04e5\u04e6\3\2\2\2\u04e6\u00a9\3\2\2\2")
buf.write("\u04e7\u04e5\3\2\2\2\u008c\u00af\u00b7\u00cb\u00dc\u00e6")
buf.write("\u010a\u0114\u0121\u0123\u012e\u0147\u0157\u0165\u0167")
buf.write("\u0173\u0175\u0181\u0183\u0195\u0197\u01a3\u01a5\u01b0")
buf.write("\u01bb\u01c6\u01d1\u01dc\u01e5\u01ec\u01f8\u01ff\u0204")
buf.write("\u0209\u020e\u0215\u021f\u0227\u0239\u023d\u0246\u0251")
buf.write("\u0256\u025b\u025f\u0263\u0265\u026f\u0274\u0278\u027c")
buf.write("\u0284\u028d\u0297\u029f\u02b0\u02bc\u02bf\u02c5\u02ce")
buf.write("\u02d3\u02d6\u02dd\u02ec\u02f8\u02fb\u02fd\u0305\u0309")
buf.write("\u0317\u031b\u0320\u0323\u0326\u032d\u032f\u0334\u0338")
buf.write("\u033d\u0341\u0344\u034d\u0355\u035f\u0367\u0369\u0373")
buf.write("\u0378\u037c\u0382\u0385\u038e\u0393\u0396\u039c\u03ac")
buf.write("\u03b2\u03b5\u03ba\u03bd\u03c4\u03d7\u03dd\u03e0\u03e2")
buf.write("\u03f1\u03f5\u03fc\u0401\u040e\u0417\u0420\u0433\u0436")
buf.write("\u043e\u0441\u0445\u044a\u0457\u045b\u0466\u046b\u046e")
buf.write("\u0479\u0481\u0494\u0498\u049c\u04a4\u04a8\u04ad\u04b8")
buf.write("\u04bf\u04c2\u04cd\u04d3\u04d6\u04da\u04e5")
return buf.getvalue()
class CParser ( Parser ):
grammarFileName = "C.bnf"
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
sharedContextCache = PredictionContextCache()
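# Descriptive note: decisionsToDFA gives each ATN decision point its own
# lazily built DFA, and sharedContextCache lets all CParser instances share
# prediction contexts, so repeated parses reuse earlier adaptive-prediction
# work.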
literalNames = [ "", "'__extension__'", "'__builtin_va_arg'",
"'__builtin_offsetof'", "'__m128'", "'__m128d'", "'__m128i'",
"'__typeof__'", "'__inline__'", "'__stdcall'", "'__declspec'",
"'__asm'", "'__attribute__'", "'__asm__'", "'__volatile__'",
"'auto'", "'break'", "'case'", "'char'", "'const'",
"'continue'", "'default'", "'do'", "'double'", "'else'",
"'enum'", "'extern'", "'float'", "'for'", "'goto'",
"'if'", "'inline'", "'int'", "'long'", "'register'",
"'restrict'", "'return'", "'short'", "'signed'", "'sizeof'",
"'static'", "'struct'", "'switch'", "'typedef'", "'union'",
"'unsigned'", "'void'", "'volatile'", "'while'", "'_Alignas'",
"'_Alignof'", "'_Atomic'", "'_Bool'", "'_Complex'",
"'_Generic'", "'_Imaginary'", "'_Noreturn'", "'_Static_assert'",
"'_Thread_local'", "'('", "')'", "'['", "']'", "'{'",
"'}'", "'<'", "'<='", "'>'", "'>='", "'<<'", "'>>'",
"'+'", "'++'", "'-'", "'--'", "'*'", "'/'", "'%'",
"'&'", "'|'", "'&&'", "'||'", "'^'", "'!'", "'~'",
"'?'", "':'", "';'", "','", "'='", "'*='", "'/='",
"'%='", "'+='", "'-='", "'<<='", "'>>='", "'&='", "'^='",
"'|='", "'=='", "'!='", "'->'", "'.'", "'...'" ]
symbolicNames = [ "", "", "", "",
"", "", "", "",
"", "", "", "",
"", "", "", "Auto", "Break",
"Case", "Char", "Const", "Continue", "Default", "Do",
"Double", "Else", "Enum", "Extern", "Float", "For",
"Goto", "If", "Inline", "Int", "Long", "Register",
"Restrict", "Return", "Short", "Signed", "Sizeof",
"Static", "Struct", "Switch", "Typedef", "Union",
"Unsigned", "Void", "Volatile", "While", "Alignas",
"Alignof", "Atomic", "Bool", "Complex", "Generic",
"Imaginary", "Noreturn", "StaticAssert", "ThreadLocal",
"LeftParen", "RightParen", "LeftBracket", "RightBracket",
"LeftBrace", "RightBrace", "Less", "LessEqual", "Greater",
"GreaterEqual", "LeftShift", "RightShift", "Plus",
"PlusPlus", "Minus", "MinusMinus", "Star", "Div",
"Mod", "And", "Or", "AndAnd", "OrOr", "Caret", "Not",
"Tilde", "Question", "Colon", "Semi", "Comma", "Assign",
"StarAssign", "DivAssign", "ModAssign", "PlusAssign",
"MinusAssign", "LeftShiftAssign", "RightShiftAssign",
"AndAssign", "XorAssign", "OrAssign", "Equal", "NotEqual",
"Arrow", "Dot", "Ellipsis", "Identifier", "Constant",
"StringLiteral", "LineDirective", "PragmaDirective",
"Whitespace", "Newline", "BlockComment", "LineComment" ]
RULE_primaryExpression = 0
RULE_genericSelection = 1
RULE_genericAssocList = 2
RULE_genericAssociation = 3
RULE_postfixExpression = 4
RULE_argumentExpressionList = 5
RULE_unaryExpression = 6
RULE_unaryOperator = 7
RULE_castExpression = 8
RULE_multiplicativeExpression = 9
RULE_additiveExpression = 10
RULE_shiftExpression = 11
RULE_relationalExpression = 12
RULE_equalityExpression = 13
RULE_andExpression = 14
RULE_exclusiveOrExpression = 15
RULE_inclusiveOrExpression = 16
RULE_logicalAndExpression = 17
RULE_logicalOrExpression = 18
RULE_conditionalExpression = 19
RULE_assignmentExpression = 20
RULE_assignmentOperator = 21
RULE_expression = 22
RULE_constantExpression = 23
RULE_declaration = 24
RULE_declarationSpecifiers = 25
RULE_declarationSpecifiers2 = 26
RULE_declarationSpecifier = 27
RULE_initDeclaratorList = 28
RULE_initDeclarator = 29
RULE_storageClassSpecifier = 30
RULE_typeSpecifier = 31
RULE_structOrUnionSpecifier = 32
RULE_structOrUnion = 33
RULE_structDeclarationList = 34
RULE_structDeclaration = 35
RULE_specifierQualifierList = 36
RULE_structDeclaratorList = 37
RULE_structDeclarator = 38
RULE_enumSpecifier = 39
RULE_enumeratorList = 40
RULE_enumerator = 41
RULE_enumerationConstant = 42
RULE_atomicTypeSpecifier = 43
RULE_typeQualifier = 44
RULE_functionSpecifier = 45
RULE_alignmentSpecifier = 46
RULE_declarator = 47
RULE_directDeclarator = 48
RULE_gccDeclaratorExtension = 49
RULE_gccAttributeSpecifier = 50
RULE_gccAttributeList = 51
RULE_gccAttribute = 52
RULE_nestedParenthesesBlock = 53
RULE_pointer = 54
RULE_typeQualifierList = 55
RULE_parameterTypeList = 56
RULE_parameterList = 57
RULE_parameterDeclaration = 58
RULE_identifierList = 59
RULE_typeName = 60
RULE_abstractDeclarator = 61
RULE_directAbstractDeclarator = 62
RULE_typedefName = 63
RULE_initializer = 64
RULE_initializerList = 65
RULE_designation = 66
RULE_designatorList = 67
RULE_designator = 68
RULE_staticAssertDeclaration = 69
RULE_statement = 70
RULE_labeledStatement = 71
RULE_compoundStatement = 72
RULE_blockItemList = 73
RULE_blockItem = 74
RULE_expressionStatement = 75
RULE_selectionStatement = 76
RULE_iterationStatement = 77
RULE_jumpStatement = 78
RULE_compilationUnit = 79
RULE_translationUnit = 80
RULE_externalDeclaration = 81
RULE_functionDefinition = 82
RULE_declarationList = 83
ruleNames = [ "primaryExpression", "genericSelection", "genericAssocList",
"genericAssociation", "postfixExpression", "argumentExpressionList",
"unaryExpression", "unaryOperator", "castExpression",
"multiplicativeExpression", "additiveExpression", "shiftExpression",
"relationalExpression", "equalityExpression", "andExpression",
"exclusiveOrExpression", "inclusiveOrExpression", "logicalAndExpression",
"logicalOrExpression", "conditionalExpression", "assignmentExpression",
"assignmentOperator", "expression", "constantExpression",
"declaration", "declarationSpecifiers", "declarationSpecifiers2",
"declarationSpecifier", "initDeclaratorList", "initDeclarator",
"storageClassSpecifier", "typeSpecifier", "structOrUnionSpecifier",
"structOrUnion", "structDeclarationList", "structDeclaration",
"specifierQualifierList", "structDeclaratorList", "structDeclarator",
"enumSpecifier", "enumeratorList", "enumerator", "enumerationConstant",
"atomicTypeSpecifier", "typeQualifier", "functionSpecifier",
"alignmentSpecifier", "declarator", "directDeclarator",
"gccDeclaratorExtension", "gccAttributeSpecifier", "gccAttributeList",
"gccAttribute", "nestedParenthesesBlock", "pointer",
"typeQualifierList", "parameterTypeList", "parameterList",
"parameterDeclaration", "identifierList", "typeName",
"abstractDeclarator", "directAbstractDeclarator", "typedefName",
"initializer", "initializerList", "designation", "designatorList",
"designator", "staticAssertDeclaration", "statement",
"labeledStatement", "compoundStatement", "blockItemList",
"blockItem", "expressionStatement", "selectionStatement",
"iterationStatement", "jumpStatement", "compilationUnit",
"translationUnit", "externalDeclaration", "functionDefinition",
"declarationList" ]
EOF = Token.EOF
T__0=1
T__1=2
T__2=3
T__3=4
T__4=5
T__5=6
T__6=7
T__7=8
T__8=9
T__9=10
T__10=11
T__11=12
T__12=13
T__13=14
Auto=15
Break=16
Case=17
Char=18
Const=19
Continue=20
Default=21
Do=22
Double=23
Else=24
Enum=25
Extern=26
Float=27
For=28
Goto=29
If=30
Inline=31
Int=32
Long=33
Register=34
Restrict=35
Return=36
Short=37
Signed=38
Sizeof=39
Static=40
Struct=41
Switch=42
Typedef=43
Union=44
Unsigned=45
Void=46
Volatile=47
While=48
Alignas=49
Alignof=50
Atomic=51
Bool=52
Complex=53
Generic=54
Imaginary=55
Noreturn=56
StaticAssert=57
ThreadLocal=58
LeftParen=59
RightParen=60
LeftBracket=61
RightBracket=62
LeftBrace=63
RightBrace=64
Less=65
LessEqual=66
Greater=67
GreaterEqual=68
LeftShift=69
RightShift=70
Plus=71
PlusPlus=72
Minus=73
MinusMinus=74
Star=75
Div=76
Mod=77
And=78
Or=79
AndAnd=80
OrOr=81
Caret=82
Not=83
Tilde=84
Question=85
Colon=86
Semi=87
Comma=88
Assign=89
StarAssign=90
DivAssign=91
ModAssign=92
PlusAssign=93
MinusAssign=94
LeftShiftAssign=95
RightShiftAssign=96
AndAssign=97
XorAssign=98
OrAssign=99
Equal=100
NotEqual=101
Arrow=102
Dot=103
Ellipsis=104
Identifier=105
Constant=106
StringLiteral=107
LineDirective=108
PragmaDirective=109
Whitespace=110
Newline=111
BlockComment=112
LineComment=113
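    # Token type constants, mirroring the lexer's. The anonymous T__0..T__13
    # types correspond, in order, to the first fourteen literals above,
    # '__extension__' through '__volatile__'.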
def __init__(self, input:TokenStream):
super().__init__(input)
self.checkVersion("4.9")
self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)
self._predicates = None
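    # A minimal usage sketch (not part of the generated code). It assumes the
    # companion lexer generated from the same grammar is importable as CLexer;
    # adjust the module name to your build layout:
    #
    #     from antlr4 import FileStream, CommonTokenStream
    #     from CLexer import CLexer
    #
    #     stream = FileStream("input.c")
    #     tokens = CommonTokenStream(CLexer(stream))
    #     parser = CParser(tokens)
    #     tree = parser.compilationUnit()  # top-level rule (RULE_compilationUnit)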
class PrimaryExpressionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def Identifier(self):
return self.getToken(CParser.Identifier, 0)
def Constant(self):
return self.getToken(CParser.Constant, 0)
def StringLiteral(self, i:int=None):
if i is None:
return self.getTokens(CParser.StringLiteral)
else:
return self.getToken(CParser.StringLiteral, i)
def expression(self):
return self.getTypedRuleContext(CParser.ExpressionContext,0)
def genericSelection(self):
return self.getTypedRuleContext(CParser.GenericSelectionContext,0)
def compoundStatement(self):
return self.getTypedRuleContext(CParser.CompoundStatementContext,0)
def unaryExpression(self):
return self.getTypedRuleContext(CParser.UnaryExpressionContext,0)
def typeName(self):
return self.getTypedRuleContext(CParser.TypeNameContext,0)
def getRuleIndex(self):
return CParser.RULE_primaryExpression
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterPrimaryExpression" ):
listener.enterPrimaryExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitPrimaryExpression" ):
listener.exitPrimaryExpression(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitPrimaryExpression" ):
return visitor.visitPrimaryExpression(self)
else:
return visitor.visitChildren(self)
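    # Every *Context class below follows this generated pattern: accessor
    # methods that look up child tokens or subrule contexts in the parse
    # tree, plus enterRule/exitRule/accept hooks that dispatch through
    # hasattr(), so listeners and visitors may implement only the callbacks
    # they care about.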
def primaryExpression(self):
localctx = CParser.PrimaryExpressionContext(self, self._ctx, self.state)
self.enterRule(localctx, 0, self.RULE_primaryExpression)
self._la = 0 # Token type
try:
self.state = 201
la_ = self._interp.adaptivePredict(self._input,2,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 168
self.match(CParser.Identifier)
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 169
self.match(CParser.Constant)
pass
elif la_ == 3:
self.enterOuterAlt(localctx, 3)
self.state = 171
self._errHandler.sync(self)
_alt = 1
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt == 1:
self.state = 170
self.match(CParser.StringLiteral)
else:
raise NoViableAltException(self)
self.state = 173
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,0,self._ctx)
pass
elif la_ == 4:
self.enterOuterAlt(localctx, 4)
self.state = 175
self.match(CParser.LeftParen)
self.state = 176
self.expression(0)
self.state = 177
self.match(CParser.RightParen)
pass
elif la_ == 5:
self.enterOuterAlt(localctx, 5)
self.state = 179
self.genericSelection()
pass
elif la_ == 6:
self.enterOuterAlt(localctx, 6)
self.state = 181
_la = self._input.LA(1)
if _la==CParser.T__0:
self.state = 180
self.match(CParser.T__0)
self.state = 183
self.match(CParser.LeftParen)
self.state = 184
self.compoundStatement()
self.state = 185
self.match(CParser.RightParen)
pass
elif la_ == 7:
self.enterOuterAlt(localctx, 7)
self.state = 187
self.match(CParser.T__1)
self.state = 188
self.match(CParser.LeftParen)
self.state = 189
self.unaryExpression()
self.state = 190
self.match(CParser.Comma)
self.state = 191
self.typeName()
self.state = 192
self.match(CParser.RightParen)
pass
elif la_ == 8:
self.enterOuterAlt(localctx, 8)
self.state = 194
self.match(CParser.T__2)
self.state = 195
self.match(CParser.LeftParen)
self.state = 196
self.typeName()
self.state = 197
self.match(CParser.Comma)
self.state = 198
self.unaryExpression()
self.state = 199
self.match(CParser.RightParen)
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
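    # All rule methods share this skeleton: adaptivePredict() selects an
    # alternative via the decision DFA, match() and subrule calls consume
    # input, any RecognitionException is recorded on the context and handed
    # to the error strategy (reportError/recover), and exitRule() runs in the
    # finally clause so the rule-context stack unwinds even after an error.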
class GenericSelectionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def assignmentExpression(self):
return self.getTypedRuleContext(CParser.AssignmentExpressionContext,0)
def genericAssocList(self):
return self.getTypedRuleContext(CParser.GenericAssocListContext,0)
def getRuleIndex(self):
return CParser.RULE_genericSelection
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterGenericSelection" ):
listener.enterGenericSelection(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitGenericSelection" ):
listener.exitGenericSelection(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitGenericSelection" ):
return visitor.visitGenericSelection(self)
else:
return visitor.visitChildren(self)
def genericSelection(self):
localctx = CParser.GenericSelectionContext(self, self._ctx, self.state)
self.enterRule(localctx, 2, self.RULE_genericSelection)
try:
self.enterOuterAlt(localctx, 1)
self.state = 203
self.match(CParser.Generic)
self.state = 204
self.match(CParser.LeftParen)
self.state = 205
self.assignmentExpression()
self.state = 206
self.match(CParser.Comma)
self.state = 207
self.genericAssocList(0)
self.state = 208
self.match(CParser.RightParen)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
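    # Parses a C11 generic selection:
    #     _Generic ( assignment-expression , generic-assoc-list )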
class GenericAssocListContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def genericAssociation(self):
return self.getTypedRuleContext(CParser.GenericAssociationContext,0)
def genericAssocList(self):
return self.getTypedRuleContext(CParser.GenericAssocListContext,0)
def getRuleIndex(self):
return CParser.RULE_genericAssocList
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterGenericAssocList" ):
listener.enterGenericAssocList(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitGenericAssocList" ):
listener.exitGenericAssocList(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitGenericAssocList" ):
return visitor.visitGenericAssocList(self)
else:
return visitor.visitChildren(self)
def genericAssocList(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = CParser.GenericAssocListContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 4
self.enterRecursionRule(localctx, 4, self.RULE_genericAssocList, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 211
self.genericAssociation()
self._ctx.stop = self._input.LT(-1)
self.state = 218
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,3,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
localctx = CParser.GenericAssocListContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_genericAssocList)
self.state = 213
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 214
self.match(CParser.Comma)
self.state = 215
self.genericAssociation()
self.state = 220
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,3,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
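    # genericAssocList comes from a left-recursive rule (roughly
    #     genericAssocList : genericAssociation
    #                      | genericAssocList ',' genericAssociation ).
    # ANTLR rewrites such rules into the loop above: enterRecursionRule and
    # pushNewRecursionContext maintain the context chain, and precpred()
    # guards each recursive alternative with its precedence.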
class GenericAssociationContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def typeName(self):
return self.getTypedRuleContext(CParser.TypeNameContext,0)
def assignmentExpression(self):
return self.getTypedRuleContext(CParser.AssignmentExpressionContext,0)
def getRuleIndex(self):
return CParser.RULE_genericAssociation
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterGenericAssociation" ):
listener.enterGenericAssociation(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitGenericAssociation" ):
listener.exitGenericAssociation(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitGenericAssociation" ):
return visitor.visitGenericAssociation(self)
else:
return visitor.visitChildren(self)
def genericAssociation(self):
localctx = CParser.GenericAssociationContext(self, self._ctx, self.state)
self.enterRule(localctx, 6, self.RULE_genericAssociation)
try:
self.state = 228
token = self._input.LA(1)
if token in [CParser.T__0, CParser.T__3, CParser.T__4, CParser.T__5, CParser.T__6, CParser.Char, CParser.Const, CParser.Double, CParser.Enum, CParser.Float, CParser.Int, CParser.Long, CParser.Restrict, CParser.Short, CParser.Signed, CParser.Struct, CParser.Union, CParser.Unsigned, CParser.Void, CParser.Volatile, CParser.Atomic, CParser.Bool, CParser.Complex, CParser.Identifier]:
self.enterOuterAlt(localctx, 1)
self.state = 221
self.typeName()
self.state = 222
self.match(CParser.Colon)
self.state = 223
self.assignmentExpression()
elif token in [CParser.Default]:
self.enterOuterAlt(localctx, 2)
self.state = 225
self.match(CParser.Default)
self.state = 226
self.match(CParser.Colon)
self.state = 227
self.assignmentExpression()
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class PostfixExpressionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def primaryExpression(self):
return self.getTypedRuleContext(CParser.PrimaryExpressionContext,0)
def typeName(self):
return self.getTypedRuleContext(CParser.TypeNameContext,0)
def initializerList(self):
return self.getTypedRuleContext(CParser.InitializerListContext,0)
def postfixExpression(self):
return self.getTypedRuleContext(CParser.PostfixExpressionContext,0)
def expression(self):
return self.getTypedRuleContext(CParser.ExpressionContext,0)
def argumentExpressionList(self):
return self.getTypedRuleContext(CParser.ArgumentExpressionListContext,0)
def Identifier(self):
return self.getToken(CParser.Identifier, 0)
def getRuleIndex(self):
return CParser.RULE_postfixExpression
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterPostfixExpression" ):
listener.enterPostfixExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitPostfixExpression" ):
listener.exitPostfixExpression(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitPostfixExpression" ):
return visitor.visitPostfixExpression(self)
else:
return visitor.visitChildren(self)
def postfixExpression(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = CParser.PostfixExpressionContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 8
self.enterRecursionRule(localctx, 8, self.RULE_postfixExpression, _p)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 264
la_ = self._interp.adaptivePredict(self._input,5,self._ctx)
if la_ == 1:
self.state = 231
self.primaryExpression()
pass
elif la_ == 2:
self.state = 232
self.match(CParser.LeftParen)
self.state = 233
self.typeName()
self.state = 234
self.match(CParser.RightParen)
self.state = 235
self.match(CParser.LeftBrace)
self.state = 236
self.initializerList(0)
self.state = 237
self.match(CParser.RightBrace)
pass
elif la_ == 3:
self.state = 239
self.match(CParser.LeftParen)
self.state = 240
self.typeName()
self.state = 241
self.match(CParser.RightParen)
self.state = 242
self.match(CParser.LeftBrace)
self.state = 243
self.initializerList(0)
self.state = 244
self.match(CParser.Comma)
self.state = 245
self.match(CParser.RightBrace)
pass
elif la_ == 4:
self.state = 247
self.match(CParser.T__0)
self.state = 248
self.match(CParser.LeftParen)
self.state = 249
self.typeName()
self.state = 250
self.match(CParser.RightParen)
self.state = 251
self.match(CParser.LeftBrace)
self.state = 252
self.initializerList(0)
self.state = 253
self.match(CParser.RightBrace)
pass
elif la_ == 5:
self.state = 255
self.match(CParser.T__0)
self.state = 256
self.match(CParser.LeftParen)
self.state = 257
self.typeName()
self.state = 258
self.match(CParser.RightParen)
self.state = 259
self.match(CParser.LeftBrace)
self.state = 260
self.initializerList(0)
self.state = 261
self.match(CParser.Comma)
self.state = 262
self.match(CParser.RightBrace)
pass
self._ctx.stop = self._input.LT(-1)
self.state = 289
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,8,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
self.state = 287
la_ = self._interp.adaptivePredict(self._input,7,self._ctx)
if la_ == 1:
localctx = CParser.PostfixExpressionContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_postfixExpression)
self.state = 266
if not self.precpred(self._ctx, 10):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 10)")
self.state = 267
self.match(CParser.LeftBracket)
self.state = 268
self.expression(0)
self.state = 269
self.match(CParser.RightBracket)
pass
elif la_ == 2:
localctx = CParser.PostfixExpressionContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_postfixExpression)
self.state = 271
if not self.precpred(self._ctx, 9):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 9)")
self.state = 272
self.match(CParser.LeftParen)
self.state = 274
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.T__0) | (1 << CParser.T__1) | (1 << CParser.T__2) | (1 << CParser.Sizeof) | (1 << CParser.Alignof) | (1 << CParser.Generic) | (1 << CParser.LeftParen))) != 0) or ((((_la - 71)) & ~0x3f) == 0 and ((1 << (_la - 71)) & ((1 << (CParser.Plus - 71)) | (1 << (CParser.PlusPlus - 71)) | (1 << (CParser.Minus - 71)) | (1 << (CParser.MinusMinus - 71)) | (1 << (CParser.Star - 71)) | (1 << (CParser.And - 71)) | (1 << (CParser.AndAnd - 71)) | (1 << (CParser.Not - 71)) | (1 << (CParser.Tilde - 71)) | (1 << (CParser.Identifier - 71)) | (1 << (CParser.Constant - 71)) | (1 << (CParser.StringLiteral - 71)))) != 0):
self.state = 273
self.argumentExpressionList(0)
self.state = 276
self.match(CParser.RightParen)
pass
elif la_ == 3:
localctx = CParser.PostfixExpressionContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_postfixExpression)
self.state = 277
if not self.precpred(self._ctx, 8):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 8)")
self.state = 278
self.match(CParser.Dot)
self.state = 279
self.match(CParser.Identifier)
pass
elif la_ == 4:
localctx = CParser.PostfixExpressionContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_postfixExpression)
self.state = 280
if not self.precpred(self._ctx, 7):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 7)")
self.state = 281
self.match(CParser.Arrow)
self.state = 282
self.match(CParser.Identifier)
pass
elif la_ == 5:
localctx = CParser.PostfixExpressionContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_postfixExpression)
self.state = 283
if not self.precpred(self._ctx, 6):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 6)")
self.state = 284
self.match(CParser.PlusPlus)
pass
elif la_ == 6:
localctx = CParser.PostfixExpressionContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_postfixExpression)
self.state = 285
if not self.precpred(self._ctx, 5):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 5)")
self.state = 286
self.match(CParser.MinusMinus)
pass
self.state = 291
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,8,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
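    # The loop above consumes postfix suffixes left-recursively: indexing
    # ('[' expression ']'), calls with an optional argumentExpressionList,
    # member access via '.' or '->' plus an Identifier, and postfix
    # '++'/'--'. The integers handed to precpred() (10 down to 5) are the
    # precedence levels ANTLR assigned to these alternatives when it
    # eliminated the left recursion.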
class ArgumentExpressionListContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def assignmentExpression(self):
return self.getTypedRuleContext(CParser.AssignmentExpressionContext,0)
def argumentExpressionList(self):
return self.getTypedRuleContext(CParser.ArgumentExpressionListContext,0)
def getRuleIndex(self):
return CParser.RULE_argumentExpressionList
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterArgumentExpressionList" ):
listener.enterArgumentExpressionList(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitArgumentExpressionList" ):
listener.exitArgumentExpressionList(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitArgumentExpressionList" ):
return visitor.visitArgumentExpressionList(self)
else:
return visitor.visitChildren(self)
def argumentExpressionList(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = CParser.ArgumentExpressionListContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 10
self.enterRecursionRule(localctx, 10, self.RULE_argumentExpressionList, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 293
self.assignmentExpression()
self._ctx.stop = self._input.LT(-1)
self.state = 300
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,9,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
localctx = CParser.ArgumentExpressionListContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_argumentExpressionList)
self.state = 295
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 296
self.match(CParser.Comma)
self.state = 297
self.assignmentExpression()
self.state = 302
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,9,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class UnaryExpressionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def postfixExpression(self):
return self.getTypedRuleContext(CParser.PostfixExpressionContext,0)
def unaryExpression(self):
return self.getTypedRuleContext(CParser.UnaryExpressionContext,0)
def unaryOperator(self):
return self.getTypedRuleContext(CParser.UnaryOperatorContext,0)
def castExpression(self):
return self.getTypedRuleContext(CParser.CastExpressionContext,0)
def typeName(self):
return self.getTypedRuleContext(CParser.TypeNameContext,0)
def Identifier(self):
return self.getToken(CParser.Identifier, 0)
def getRuleIndex(self):
return CParser.RULE_unaryExpression
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterUnaryExpression" ):
listener.enterUnaryExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitUnaryExpression" ):
listener.exitUnaryExpression(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitUnaryExpression" ):
return visitor.visitUnaryExpression(self)
else:
return visitor.visitChildren(self)
def unaryExpression(self):
localctx = CParser.UnaryExpressionContext(self, self._ctx, self.state)
self.enterRule(localctx, 12, self.RULE_unaryExpression)
try:
self.state = 325
la_ = self._interp.adaptivePredict(self._input,10,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 303
self.postfixExpression(0)
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 304
self.match(CParser.PlusPlus)
self.state = 305
self.unaryExpression()
pass
elif la_ == 3:
self.enterOuterAlt(localctx, 3)
self.state = 306
self.match(CParser.MinusMinus)
self.state = 307
self.unaryExpression()
pass
elif la_ == 4:
self.enterOuterAlt(localctx, 4)
self.state = 308
self.unaryOperator()
self.state = 309
self.castExpression()
pass
elif la_ == 5:
self.enterOuterAlt(localctx, 5)
self.state = 311
self.match(CParser.Sizeof)
self.state = 312
self.unaryExpression()
pass
elif la_ == 6:
self.enterOuterAlt(localctx, 6)
self.state = 313
self.match(CParser.Sizeof)
self.state = 314
self.match(CParser.LeftParen)
self.state = 315
self.typeName()
self.state = 316
self.match(CParser.RightParen)
pass
elif la_ == 7:
self.enterOuterAlt(localctx, 7)
self.state = 318
self.match(CParser.Alignof)
self.state = 319
self.match(CParser.LeftParen)
self.state = 320
self.typeName()
self.state = 321
self.match(CParser.RightParen)
pass
elif la_ == 8:
self.enterOuterAlt(localctx, 8)
self.state = 323
self.match(CParser.AndAnd)
self.state = 324
self.match(CParser.Identifier)
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class UnaryOperatorContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return CParser.RULE_unaryOperator
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterUnaryOperator" ):
listener.enterUnaryOperator(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitUnaryOperator" ):
listener.exitUnaryOperator(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitUnaryOperator" ):
return visitor.visitUnaryOperator(self)
else:
return visitor.visitChildren(self)
def unaryOperator(self):
localctx = CParser.UnaryOperatorContext(self, self._ctx, self.state)
self.enterRule(localctx, 14, self.RULE_unaryOperator)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 327
_la = self._input.LA(1)
if not(((((_la - 71)) & ~0x3f) == 0 and ((1 << (_la - 71)) & ((1 << (CParser.Plus - 71)) | (1 << (CParser.Minus - 71)) | (1 << (CParser.Star - 71)) | (1 << (CParser.And - 71)) | (1 << (CParser.Not - 71)) | (1 << (CParser.Tilde - 71)))) != 0)):
self._errHandler.recoverInline(self)
else:
self.consume()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
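    # The condition inside not(...) above is a generated bitset membership
    # check: token types are shifted down by 71 so the six unary-operator
    # tokens fit into one 64-bit mask. It is equivalent to
    #     _la in (CParser.Plus, CParser.Minus, CParser.Star,
    #             CParser.And, CParser.Not, CParser.Tilde)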
class CastExpressionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def unaryExpression(self):
return self.getTypedRuleContext(CParser.UnaryExpressionContext,0)
def typeName(self):
return self.getTypedRuleContext(CParser.TypeNameContext,0)
def castExpression(self):
return self.getTypedRuleContext(CParser.CastExpressionContext,0)
def getRuleIndex(self):
return CParser.RULE_castExpression
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterCastExpression" ):
listener.enterCastExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitCastExpression" ):
listener.exitCastExpression(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitCastExpression" ):
return visitor.visitCastExpression(self)
else:
return visitor.visitChildren(self)
def castExpression(self):
localctx = CParser.CastExpressionContext(self, self._ctx, self.state)
self.enterRule(localctx, 16, self.RULE_castExpression)
try:
self.state = 341
la_ = self._interp.adaptivePredict(self._input,11,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 329
self.unaryExpression()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 330
self.match(CParser.LeftParen)
self.state = 331
self.typeName()
self.state = 332
self.match(CParser.RightParen)
self.state = 333
self.castExpression()
pass
elif la_ == 3:
self.enterOuterAlt(localctx, 3)
self.state = 335
self.match(CParser.T__0)
self.state = 336
self.match(CParser.LeftParen)
self.state = 337
self.typeName()
self.state = 338
self.match(CParser.RightParen)
self.state = 339
self.castExpression()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
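    # The third alternative permits a GNU-style '__extension__' (token T__0,
    # per the literalNames table) in front of an ordinary cast.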
class MultiplicativeExpressionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def castExpression(self):
return self.getTypedRuleContext(CParser.CastExpressionContext,0)
def multiplicativeExpression(self):
return self.getTypedRuleContext(CParser.MultiplicativeExpressionContext,0)
def getRuleIndex(self):
return CParser.RULE_multiplicativeExpression
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterMultiplicativeExpression" ):
listener.enterMultiplicativeExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitMultiplicativeExpression" ):
listener.exitMultiplicativeExpression(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitMultiplicativeExpression" ):
return visitor.visitMultiplicativeExpression(self)
else:
return visitor.visitChildren(self)
def multiplicativeExpression(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = CParser.MultiplicativeExpressionContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 18
self.enterRecursionRule(localctx, 18, self.RULE_multiplicativeExpression, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 344
self.castExpression()
self._ctx.stop = self._input.LT(-1)
self.state = 357
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,13,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
self.state = 355
la_ = self._interp.adaptivePredict(self._input,12,self._ctx)
if la_ == 1:
localctx = CParser.MultiplicativeExpressionContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_multiplicativeExpression)
self.state = 346
if not self.precpred(self._ctx, 3):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 3)")
self.state = 347
self.match(CParser.Star)
self.state = 348
self.castExpression()
pass
elif la_ == 2:
localctx = CParser.MultiplicativeExpressionContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_multiplicativeExpression)
self.state = 349
if not self.precpred(self._ctx, 2):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 2)")
self.state = 350
self.match(CParser.Div)
self.state = 351
self.castExpression()
pass
elif la_ == 3:
localctx = CParser.MultiplicativeExpressionContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_multiplicativeExpression)
self.state = 352
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 353
self.match(CParser.Mod)
self.state = 354
self.castExpression()
pass
self.state = 359
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,13,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
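    # multiplicativeExpression is the template for every binary-precedence
    # level that follows (additive, shift, relational, equality, then the
    # bitwise and logical chains): parse the tighter-binding operand first,
    # then loop on adaptivePredict, wrapping the previous context in a fresh
    # one for each operator consumed.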
class AdditiveExpressionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def multiplicativeExpression(self):
return self.getTypedRuleContext(CParser.MultiplicativeExpressionContext,0)
def additiveExpression(self):
return self.getTypedRuleContext(CParser.AdditiveExpressionContext,0)
def getRuleIndex(self):
return CParser.RULE_additiveExpression
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterAdditiveExpression" ):
listener.enterAdditiveExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitAdditiveExpression" ):
listener.exitAdditiveExpression(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitAdditiveExpression" ):
return visitor.visitAdditiveExpression(self)
else:
return visitor.visitChildren(self)
def additiveExpression(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = CParser.AdditiveExpressionContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 20
self.enterRecursionRule(localctx, 20, self.RULE_additiveExpression, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 361
self.multiplicativeExpression(0)
self._ctx.stop = self._input.LT(-1)
self.state = 371
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,15,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
self.state = 369
la_ = self._interp.adaptivePredict(self._input,14,self._ctx)
if la_ == 1:
localctx = CParser.AdditiveExpressionContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_additiveExpression)
self.state = 363
if not self.precpred(self._ctx, 2):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 2)")
self.state = 364
self.match(CParser.Plus)
self.state = 365
self.multiplicativeExpression(0)
pass
elif la_ == 2:
localctx = CParser.AdditiveExpressionContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_additiveExpression)
self.state = 366
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 367
self.match(CParser.Minus)
self.state = 368
self.multiplicativeExpression(0)
pass
self.state = 373
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,15,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class ShiftExpressionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def additiveExpression(self):
return self.getTypedRuleContext(CParser.AdditiveExpressionContext,0)
def shiftExpression(self):
return self.getTypedRuleContext(CParser.ShiftExpressionContext,0)
def getRuleIndex(self):
return CParser.RULE_shiftExpression
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterShiftExpression" ):
listener.enterShiftExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitShiftExpression" ):
listener.exitShiftExpression(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitShiftExpression" ):
return visitor.visitShiftExpression(self)
else:
return visitor.visitChildren(self)
def shiftExpression(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = CParser.ShiftExpressionContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 22
self.enterRecursionRule(localctx, 22, self.RULE_shiftExpression, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 375
self.additiveExpression(0)
self._ctx.stop = self._input.LT(-1)
self.state = 385
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,17,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
self.state = 383
la_ = self._interp.adaptivePredict(self._input,16,self._ctx)
if la_ == 1:
localctx = CParser.ShiftExpressionContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_shiftExpression)
self.state = 377
if not self.precpred(self._ctx, 2):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 2)")
self.state = 378
self.match(CParser.LeftShift)
self.state = 379
self.additiveExpression(0)
pass
elif la_ == 2:
localctx = CParser.ShiftExpressionContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_shiftExpression)
self.state = 380
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 381
self.match(CParser.RightShift)
self.state = 382
self.additiveExpression(0)
pass
self.state = 387
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,17,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class RelationalExpressionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def shiftExpression(self):
return self.getTypedRuleContext(CParser.ShiftExpressionContext,0)
def relationalExpression(self):
return self.getTypedRuleContext(CParser.RelationalExpressionContext,0)
def getRuleIndex(self):
return CParser.RULE_relationalExpression
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterRelationalExpression" ):
listener.enterRelationalExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitRelationalExpression" ):
listener.exitRelationalExpression(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitRelationalExpression" ):
return visitor.visitRelationalExpression(self)
else:
return visitor.visitChildren(self)
def relationalExpression(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = CParser.RelationalExpressionContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 24
self.enterRecursionRule(localctx, 24, self.RULE_relationalExpression, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 389
self.shiftExpression(0)
self._ctx.stop = self._input.LT(-1)
self.state = 405
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,19,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
self.state = 403
la_ = self._interp.adaptivePredict(self._input,18,self._ctx)
if la_ == 1:
localctx = CParser.RelationalExpressionContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_relationalExpression)
self.state = 391
if not self.precpred(self._ctx, 4):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 4)")
self.state = 392
self.match(CParser.Less)
self.state = 393
self.shiftExpression(0)
pass
elif la_ == 2:
localctx = CParser.RelationalExpressionContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_relationalExpression)
self.state = 394
if not self.precpred(self._ctx, 3):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 3)")
self.state = 395
self.match(CParser.Greater)
self.state = 396
self.shiftExpression(0)
pass
elif la_ == 3:
localctx = CParser.RelationalExpressionContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_relationalExpression)
self.state = 397
if not self.precpred(self._ctx, 2):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 2)")
self.state = 398
self.match(CParser.LessEqual)
self.state = 399
self.shiftExpression(0)
pass
elif la_ == 4:
localctx = CParser.RelationalExpressionContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_relationalExpression)
self.state = 400
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 401
self.match(CParser.GreaterEqual)
self.state = 402
self.shiftExpression(0)
pass
self.state = 407
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,19,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class EqualityExpressionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def relationalExpression(self):
return self.getTypedRuleContext(CParser.RelationalExpressionContext,0)
def equalityExpression(self):
return self.getTypedRuleContext(CParser.EqualityExpressionContext,0)
def getRuleIndex(self):
return CParser.RULE_equalityExpression
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterEqualityExpression" ):
listener.enterEqualityExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitEqualityExpression" ):
listener.exitEqualityExpression(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitEqualityExpression" ):
return visitor.visitEqualityExpression(self)
else:
return visitor.visitChildren(self)
def equalityExpression(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = CParser.EqualityExpressionContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 26
self.enterRecursionRule(localctx, 26, self.RULE_equalityExpression, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 409
self.relationalExpression(0)
self._ctx.stop = self._input.LT(-1)
self.state = 419
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,21,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
self.state = 417
la_ = self._interp.adaptivePredict(self._input,20,self._ctx)
if la_ == 1:
localctx = CParser.EqualityExpressionContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_equalityExpression)
self.state = 411
if not self.precpred(self._ctx, 2):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 2)")
self.state = 412
self.match(CParser.Equal)
self.state = 413
self.relationalExpression(0)
pass
elif la_ == 2:
localctx = CParser.EqualityExpressionContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_equalityExpression)
self.state = 414
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 415
self.match(CParser.NotEqual)
self.state = 416
self.relationalExpression(0)
pass
self.state = 421
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,21,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class AndExpressionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def equalityExpression(self):
return self.getTypedRuleContext(CParser.EqualityExpressionContext,0)
def andExpression(self):
return self.getTypedRuleContext(CParser.AndExpressionContext,0)
def getRuleIndex(self):
return CParser.RULE_andExpression
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterAndExpression" ):
listener.enterAndExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitAndExpression" ):
listener.exitAndExpression(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitAndExpression" ):
return visitor.visitAndExpression(self)
else:
return visitor.visitChildren(self)
def andExpression(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = CParser.AndExpressionContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 28
self.enterRecursionRule(localctx, 28, self.RULE_andExpression, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 423
self.equalityExpression(0)
self._ctx.stop = self._input.LT(-1)
self.state = 430
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,22,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
localctx = CParser.AndExpressionContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_andExpression)
self.state = 425
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 426
self.match(CParser.And)
self.state = 427
self.equalityExpression(0)
self.state = 432
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,22,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class ExclusiveOrExpressionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def andExpression(self):
return self.getTypedRuleContext(CParser.AndExpressionContext,0)
def exclusiveOrExpression(self):
return self.getTypedRuleContext(CParser.ExclusiveOrExpressionContext,0)
def getRuleIndex(self):
return CParser.RULE_exclusiveOrExpression
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterExclusiveOrExpression" ):
listener.enterExclusiveOrExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitExclusiveOrExpression" ):
listener.exitExclusiveOrExpression(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitExclusiveOrExpression" ):
return visitor.visitExclusiveOrExpression(self)
else:
return visitor.visitChildren(self)
def exclusiveOrExpression(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = CParser.ExclusiveOrExpressionContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 30
self.enterRecursionRule(localctx, 30, self.RULE_exclusiveOrExpression, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 434
self.andExpression(0)
self._ctx.stop = self._input.LT(-1)
self.state = 441
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,23,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
localctx = CParser.ExclusiveOrExpressionContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_exclusiveOrExpression)
self.state = 436
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 437
self.match(CParser.Caret)
self.state = 438
self.andExpression(0)
self.state = 443
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,23,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class InclusiveOrExpressionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def exclusiveOrExpression(self):
return self.getTypedRuleContext(CParser.ExclusiveOrExpressionContext,0)
def inclusiveOrExpression(self):
return self.getTypedRuleContext(CParser.InclusiveOrExpressionContext,0)
def getRuleIndex(self):
return CParser.RULE_inclusiveOrExpression
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterInclusiveOrExpression" ):
listener.enterInclusiveOrExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitInclusiveOrExpression" ):
listener.exitInclusiveOrExpression(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitInclusiveOrExpression" ):
return visitor.visitInclusiveOrExpression(self)
else:
return visitor.visitChildren(self)
def inclusiveOrExpression(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = CParser.InclusiveOrExpressionContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 32
self.enterRecursionRule(localctx, 32, self.RULE_inclusiveOrExpression, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 445
self.exclusiveOrExpression(0)
self._ctx.stop = self._input.LT(-1)
self.state = 452
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,24,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
localctx = CParser.InclusiveOrExpressionContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_inclusiveOrExpression)
self.state = 447
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 448
self.match(CParser.Or)
self.state = 449
self.exclusiveOrExpression(0)
self.state = 454
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,24,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class LogicalAndExpressionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def inclusiveOrExpression(self):
return self.getTypedRuleContext(CParser.InclusiveOrExpressionContext,0)
def logicalAndExpression(self):
return self.getTypedRuleContext(CParser.LogicalAndExpressionContext,0)
def getRuleIndex(self):
return CParser.RULE_logicalAndExpression
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLogicalAndExpression" ):
listener.enterLogicalAndExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLogicalAndExpression" ):
listener.exitLogicalAndExpression(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLogicalAndExpression" ):
return visitor.visitLogicalAndExpression(self)
else:
return visitor.visitChildren(self)
def logicalAndExpression(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = CParser.LogicalAndExpressionContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 34
self.enterRecursionRule(localctx, 34, self.RULE_logicalAndExpression, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 456
self.inclusiveOrExpression(0)
self._ctx.stop = self._input.LT(-1)
self.state = 463
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,25,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
localctx = CParser.LogicalAndExpressionContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_logicalAndExpression)
self.state = 458
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 459
self.match(CParser.AndAnd)
self.state = 460
self.inclusiveOrExpression(0)
self.state = 465
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,25,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class LogicalOrExpressionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def logicalAndExpression(self):
return self.getTypedRuleContext(CParser.LogicalAndExpressionContext,0)
def logicalOrExpression(self):
return self.getTypedRuleContext(CParser.LogicalOrExpressionContext,0)
def getRuleIndex(self):
return CParser.RULE_logicalOrExpression
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLogicalOrExpression" ):
listener.enterLogicalOrExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLogicalOrExpression" ):
listener.exitLogicalOrExpression(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLogicalOrExpression" ):
return visitor.visitLogicalOrExpression(self)
else:
return visitor.visitChildren(self)
def logicalOrExpression(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = CParser.LogicalOrExpressionContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 36
self.enterRecursionRule(localctx, 36, self.RULE_logicalOrExpression, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 467
self.logicalAndExpression(0)
self._ctx.stop = self._input.LT(-1)
self.state = 474
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,26,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
localctx = CParser.LogicalOrExpressionContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_logicalOrExpression)
self.state = 469
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 470
self.match(CParser.OrOr)
self.state = 471
self.logicalAndExpression(0)
self.state = 476
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,26,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class ConditionalExpressionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def logicalOrExpression(self):
return self.getTypedRuleContext(CParser.LogicalOrExpressionContext,0)
def expression(self):
return self.getTypedRuleContext(CParser.ExpressionContext,0)
def conditionalExpression(self):
return self.getTypedRuleContext(CParser.ConditionalExpressionContext,0)
def getRuleIndex(self):
return CParser.RULE_conditionalExpression
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterConditionalExpression" ):
listener.enterConditionalExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitConditionalExpression" ):
listener.exitConditionalExpression(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitConditionalExpression" ):
return visitor.visitConditionalExpression(self)
else:
return visitor.visitChildren(self)
def conditionalExpression(self):
localctx = CParser.ConditionalExpressionContext(self, self._ctx, self.state)
self.enterRule(localctx, 38, self.RULE_conditionalExpression)
try:
self.enterOuterAlt(localctx, 1)
self.state = 477
self.logicalOrExpression(0)
self.state = 483
la_ = self._interp.adaptivePredict(self._input,27,self._ctx)
if la_ == 1:
self.state = 478
self.match(CParser.Question)
self.state = 479
self.expression(0)
self.state = 480
self.match(CParser.Colon)
self.state = 481
self.conditionalExpression()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
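# The '?' expression ':' conditionalExpression tail above is optional, so the
# generated code asks adaptivePredict (decision 27) whether alternative 1 (the
# tail is present) applies and simply falls through otherwise; the empty
# alternative is always viable, so no NoViableAltException path is needed.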
class AssignmentExpressionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def conditionalExpression(self):
return self.getTypedRuleContext(CParser.ConditionalExpressionContext,0)
def unaryExpression(self):
return self.getTypedRuleContext(CParser.UnaryExpressionContext,0)
def assignmentOperator(self):
return self.getTypedRuleContext(CParser.AssignmentOperatorContext,0)
def assignmentExpression(self):
return self.getTypedRuleContext(CParser.AssignmentExpressionContext,0)
def getRuleIndex(self):
return CParser.RULE_assignmentExpression
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterAssignmentExpression" ):
listener.enterAssignmentExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitAssignmentExpression" ):
listener.exitAssignmentExpression(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitAssignmentExpression" ):
return visitor.visitAssignmentExpression(self)
else:
return visitor.visitChildren(self)
def assignmentExpression(self):
localctx = CParser.AssignmentExpressionContext(self, self._ctx, self.state)
self.enterRule(localctx, 40, self.RULE_assignmentExpression)
try:
self.state = 490
la_ = self._interp.adaptivePredict(self._input,28,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 485
self.conditionalExpression()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 486
self.unaryExpression()
self.state = 487
self.assignmentOperator()
self.state = 488
self.assignmentExpression()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
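# assignmentExpression cannot be decided with fixed lookahead: an Identifier
# can begin both a conditionalExpression and a "unaryExpression
# assignmentOperator assignmentExpression" chain. adaptivePredict (decision 28)
# simulates the ATN over the upcoming tokens to choose between the two
# alternatives, which is why no LA(1) token test appears in that method.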
class AssignmentOperatorContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return CParser.RULE_assignmentOperator
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterAssignmentOperator" ):
listener.enterAssignmentOperator(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitAssignmentOperator" ):
listener.exitAssignmentOperator(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitAssignmentOperator" ):
return visitor.visitAssignmentOperator(self)
else:
return visitor.visitChildren(self)
def assignmentOperator(self):
localctx = CParser.AssignmentOperatorContext(self, self._ctx, self.state)
self.enterRule(localctx, 42, self.RULE_assignmentOperator)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 492
_la = self._input.LA(1)
if not(((((_la - 89)) & ~0x3f) == 0 and ((1 << (_la - 89)) & ((1 << (CParser.Assign - 89)) | (1 << (CParser.StarAssign - 89)) | (1 << (CParser.DivAssign - 89)) | (1 << (CParser.ModAssign - 89)) | (1 << (CParser.PlusAssign - 89)) | (1 << (CParser.MinusAssign - 89)) | (1 << (CParser.LeftShiftAssign - 89)) | (1 << (CParser.RightShiftAssign - 89)) | (1 << (CParser.AndAssign - 89)) | (1 << (CParser.XorAssign - 89)) | (1 << (CParser.OrAssign - 89)))) != 0)):
self._errHandler.recoverInline(self)
else:
self.consume()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
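# The guard in assignmentOperator is ANTLR's bitset membership test: token
# types are shifted down by 89 (the value of CParser.Assign, the lowest type
# in the set) so the whole operator set fits one 64-bit word, and the test
# reduces to a single mask-and-compare. An illustrative equivalent, with
# hypothetical names:
#
#   offset = _la - 89                 # position of the token's bit
#   in_set = 0 <= offset < 64 and (mask >> offset) & 1 != 0
#
# where mask carries one bit per assignment operator; this matches
# "_la in {Assign, StarAssign, ...}" without a Python set lookup.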
class ExpressionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def assignmentExpression(self):
return self.getTypedRuleContext(CParser.AssignmentExpressionContext,0)
def expression(self):
return self.getTypedRuleContext(CParser.ExpressionContext,0)
def getRuleIndex(self):
return CParser.RULE_expression
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterExpression" ):
listener.enterExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitExpression" ):
listener.exitExpression(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitExpression" ):
return visitor.visitExpression(self)
else:
return visitor.visitChildren(self)
def expression(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = CParser.ExpressionContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 44
self.enterRecursionRule(localctx, 44, self.RULE_expression, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 495
self.assignmentExpression()
self._ctx.stop = self._input.LT(-1)
self.state = 502
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,29,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
localctx = CParser.ExpressionContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_expression)
self.state = 497
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 498
self.match(CParser.Comma)
self.state = 499
self.assignmentExpression()
self.state = 504
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,29,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
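# The while-loop above is the generated (...)* closure over ',' assignment-
# expression pairs: adaptivePredict returns 1 to take another iteration and
# 2 (or INVALID_ALT_NUMBER) to exit, and _errHandler.sync() runs before each
# prediction so error recovery can resynchronise at the loop boundary.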
class ConstantExpressionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def conditionalExpression(self):
return self.getTypedRuleContext(CParser.ConditionalExpressionContext,0)
def getRuleIndex(self):
return CParser.RULE_constantExpression
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterConstantExpression" ):
listener.enterConstantExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitConstantExpression" ):
listener.exitConstantExpression(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitConstantExpression" ):
return visitor.visitConstantExpression(self)
else:
return visitor.visitChildren(self)
def constantExpression(self):
localctx = CParser.ConstantExpressionContext(self, self._ctx, self.state)
self.enterRule(localctx, 46, self.RULE_constantExpression)
try:
self.enterOuterAlt(localctx, 1)
self.state = 505
self.conditionalExpression()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class DeclarationContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def declarationSpecifiers(self):
return self.getTypedRuleContext(CParser.DeclarationSpecifiersContext,0)
def initDeclaratorList(self):
return self.getTypedRuleContext(CParser.InitDeclaratorListContext,0)
def staticAssertDeclaration(self):
return self.getTypedRuleContext(CParser.StaticAssertDeclarationContext,0)
def getRuleIndex(self):
return CParser.RULE_declaration
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterDeclaration" ):
listener.enterDeclaration(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitDeclaration" ):
listener.exitDeclaration(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitDeclaration" ):
return visitor.visitDeclaration(self)
else:
return visitor.visitChildren(self)
def declaration(self):
localctx = CParser.DeclarationContext(self, self._ctx, self.state)
self.enterRule(localctx, 48, self.RULE_declaration)
self._la = 0 # Token type
try:
self.state = 514
token = self._input.LA(1)
if token in [CParser.T__0, CParser.T__3, CParser.T__4, CParser.T__5, CParser.T__6, CParser.T__7, CParser.T__8, CParser.T__9, CParser.T__11, CParser.Auto, CParser.Char, CParser.Const, CParser.Double, CParser.Enum, CParser.Extern, CParser.Float, CParser.Inline, CParser.Int, CParser.Long, CParser.Register, CParser.Restrict, CParser.Short, CParser.Signed, CParser.Static, CParser.Struct, CParser.Typedef, CParser.Union, CParser.Unsigned, CParser.Void, CParser.Volatile, CParser.Alignas, CParser.Atomic, CParser.Bool, CParser.Complex, CParser.Noreturn, CParser.ThreadLocal, CParser.Identifier]:
self.enterOuterAlt(localctx, 1)
self.state = 507
self.declarationSpecifiers()
self.state = 509
_la = self._input.LA(1)
if ((((_la - 59)) & ~0x3f) == 0 and ((1 << (_la - 59)) & ((1 << (CParser.LeftParen - 59)) | (1 << (CParser.Star - 59)) | (1 << (CParser.Caret - 59)) | (1 << (CParser.Identifier - 59)))) != 0):
self.state = 508
self.initDeclaratorList(0)
self.state = 511
self.match(CParser.Semi)
elif token in [CParser.StaticAssert]:
self.enterOuterAlt(localctx, 2)
self.state = 513
self.staticAssertDeclaration()
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
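# declaration is a fixed-lookahead decision: a single LA(1) token separates an
# ordinary declaration from a staticAssertDeclaration, so the generator emits
# a plain "token in [...]" dispatch instead of adaptivePredict and raises
# NoViableAltException when the token fits neither set. The nested LA(1)
# bitmask makes initDeclaratorList optional, covering "int;" as well as
# "int x;".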
class DeclarationSpecifiersContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def declarationSpecifier(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(CParser.DeclarationSpecifierContext)
else:
return self.getTypedRuleContext(CParser.DeclarationSpecifierContext,i)
def getRuleIndex(self):
return CParser.RULE_declarationSpecifiers
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterDeclarationSpecifiers" ):
listener.enterDeclarationSpecifiers(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitDeclarationSpecifiers" ):
listener.exitDeclarationSpecifiers(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitDeclarationSpecifiers" ):
return visitor.visitDeclarationSpecifiers(self)
else:
return visitor.visitChildren(self)
def declarationSpecifiers(self):
localctx = CParser.DeclarationSpecifiersContext(self, self._ctx, self.state)
self.enterRule(localctx, 50, self.RULE_declarationSpecifiers)
try:
self.enterOuterAlt(localctx, 1)
self.state = 517
self._errHandler.sync(self)
_alt = 1
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt == 1:
self.state = 516
self.declarationSpecifier()
else:
raise NoViableAltException(self)
self.state = 519
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,32,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
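# declarationSpecifiers is a (...)+ positive closure: _alt is seeded with 1 so
# the body runs at least once, and each subsequent iteration consults
# adaptivePredict (decision 32) to continue (1) or stop (2). The defensive
# else branch raises NoViableAltException if prediction ever yields an
# unexpected alternative.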
class DeclarationSpecifiers2Context(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def declarationSpecifier(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(CParser.DeclarationSpecifierContext)
else:
return self.getTypedRuleContext(CParser.DeclarationSpecifierContext,i)
def getRuleIndex(self):
return CParser.RULE_declarationSpecifiers2
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterDeclarationSpecifiers2" ):
listener.enterDeclarationSpecifiers2(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitDeclarationSpecifiers2" ):
listener.exitDeclarationSpecifiers2(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitDeclarationSpecifiers2" ):
return visitor.visitDeclarationSpecifiers2(self)
else:
return visitor.visitChildren(self)
def declarationSpecifiers2(self):
localctx = CParser.DeclarationSpecifiers2Context(self, self._ctx, self.state)
self.enterRule(localctx, 52, self.RULE_declarationSpecifiers2)
try:
self.enterOuterAlt(localctx, 1)
self.state = 522
self._errHandler.sync(self)
_alt = 1
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt == 1:
self.state = 521
self.declarationSpecifier()
else:
raise NoViableAltException(self)
self.state = 524
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,33,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class DeclarationSpecifierContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def storageClassSpecifier(self):
return self.getTypedRuleContext(CParser.StorageClassSpecifierContext,0)
def typeSpecifier(self):
return self.getTypedRuleContext(CParser.TypeSpecifierContext,0)
def typeQualifier(self):
return self.getTypedRuleContext(CParser.TypeQualifierContext,0)
def functionSpecifier(self):
return self.getTypedRuleContext(CParser.FunctionSpecifierContext,0)
def alignmentSpecifier(self):
return self.getTypedRuleContext(CParser.AlignmentSpecifierContext,0)
def getRuleIndex(self):
return CParser.RULE_declarationSpecifier
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterDeclarationSpecifier" ):
listener.enterDeclarationSpecifier(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitDeclarationSpecifier" ):
listener.exitDeclarationSpecifier(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitDeclarationSpecifier" ):
return visitor.visitDeclarationSpecifier(self)
else:
return visitor.visitChildren(self)
def declarationSpecifier(self):
localctx = CParser.DeclarationSpecifierContext(self, self._ctx, self.state)
self.enterRule(localctx, 54, self.RULE_declarationSpecifier)
try:
self.state = 531
la_ = self._interp.adaptivePredict(self._input,34,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 526
self.storageClassSpecifier()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 527
self.typeSpecifier()
pass
elif la_ == 3:
self.enterOuterAlt(localctx, 3)
self.state = 528
self.typeQualifier()
pass
elif la_ == 4:
self.enterOuterAlt(localctx, 4)
self.state = 529
self.functionSpecifier()
pass
elif la_ == 5:
self.enterOuterAlt(localctx, 5)
self.state = 530
self.alignmentSpecifier()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class InitDeclaratorListContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def initDeclarator(self):
return self.getTypedRuleContext(CParser.InitDeclaratorContext,0)
def initDeclaratorList(self):
return self.getTypedRuleContext(CParser.InitDeclaratorListContext,0)
def getRuleIndex(self):
return CParser.RULE_initDeclaratorList
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterInitDeclaratorList" ):
listener.enterInitDeclaratorList(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitInitDeclaratorList" ):
listener.exitInitDeclaratorList(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitInitDeclaratorList" ):
return visitor.visitInitDeclaratorList(self)
else:
return visitor.visitChildren(self)
def initDeclaratorList(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = CParser.InitDeclaratorListContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 56
self.enterRecursionRule(localctx, 56, self.RULE_initDeclaratorList, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 534
self.initDeclarator()
self._ctx.stop = self._input.LT(-1)
self.state = 541
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,35,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
localctx = CParser.InitDeclaratorListContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_initDeclaratorList)
self.state = 536
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 537
self.match(CParser.Comma)
self.state = 538
self.initDeclarator()
self.state = 543
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,35,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class InitDeclaratorContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def declarator(self):
return self.getTypedRuleContext(CParser.DeclaratorContext,0)
def initializer(self):
return self.getTypedRuleContext(CParser.InitializerContext,0)
def getRuleIndex(self):
return CParser.RULE_initDeclarator
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterInitDeclarator" ):
listener.enterInitDeclarator(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitInitDeclarator" ):
listener.exitInitDeclarator(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitInitDeclarator" ):
return visitor.visitInitDeclarator(self)
else:
return visitor.visitChildren(self)
def initDeclarator(self):
localctx = CParser.InitDeclaratorContext(self, self._ctx, self.state)
self.enterRule(localctx, 58, self.RULE_initDeclarator)
try:
self.state = 549
la_ = self._interp.adaptivePredict(self._input,36,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 544
self.declarator()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 545
self.declarator()
self.state = 546
self.match(CParser.Assign)
self.state = 547
self.initializer()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class StorageClassSpecifierContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return CParser.RULE_storageClassSpecifier
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterStorageClassSpecifier" ):
listener.enterStorageClassSpecifier(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitStorageClassSpecifier" ):
listener.exitStorageClassSpecifier(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitStorageClassSpecifier" ):
return visitor.visitStorageClassSpecifier(self)
else:
return visitor.visitChildren(self)
def storageClassSpecifier(self):
localctx = CParser.StorageClassSpecifierContext(self, self._ctx, self.state)
self.enterRule(localctx, 60, self.RULE_storageClassSpecifier)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 551
_la = self._input.LA(1)
if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.Auto) | (1 << CParser.Extern) | (1 << CParser.Register) | (1 << CParser.Static) | (1 << CParser.Typedef) | (1 << CParser.ThreadLocal))) != 0)):
self._errHandler.recoverInline(self)
else:
self.consume()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class TypeSpecifierContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def atomicTypeSpecifier(self):
return self.getTypedRuleContext(CParser.AtomicTypeSpecifierContext,0)
def structOrUnionSpecifier(self):
return self.getTypedRuleContext(CParser.StructOrUnionSpecifierContext,0)
def enumSpecifier(self):
return self.getTypedRuleContext(CParser.EnumSpecifierContext,0)
def typedefName(self):
return self.getTypedRuleContext(CParser.TypedefNameContext,0)
def constantExpression(self):
return self.getTypedRuleContext(CParser.ConstantExpressionContext,0)
def getRuleIndex(self):
return CParser.RULE_typeSpecifier
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterTypeSpecifier" ):
listener.enterTypeSpecifier(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitTypeSpecifier" ):
listener.exitTypeSpecifier(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitTypeSpecifier" ):
return visitor.visitTypeSpecifier(self)
else:
return visitor.visitChildren(self)
def typeSpecifier(self):
localctx = CParser.TypeSpecifierContext(self, self._ctx, self.state)
self.enterRule(localctx, 62, self.RULE_typeSpecifier)
self._la = 0 # Token type
try:
self.state = 567
token = self._input.LA(1)
if token in [CParser.T__3, CParser.T__4, CParser.T__5, CParser.Char, CParser.Double, CParser.Float, CParser.Int, CParser.Long, CParser.Short, CParser.Signed, CParser.Unsigned, CParser.Void, CParser.Bool, CParser.Complex]:
self.enterOuterAlt(localctx, 1)
self.state = 553
_la = self._input.LA(1)
if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.T__3) | (1 << CParser.T__4) | (1 << CParser.T__5) | (1 << CParser.Char) | (1 << CParser.Double) | (1 << CParser.Float) | (1 << CParser.Int) | (1 << CParser.Long) | (1 << CParser.Short) | (1 << CParser.Signed) | (1 << CParser.Unsigned) | (1 << CParser.Void) | (1 << CParser.Bool) | (1 << CParser.Complex))) != 0)):
self._errHandler.recoverInline(self)
else:
self.consume()
elif token in [CParser.T__0]:
self.enterOuterAlt(localctx, 2)
self.state = 554
self.match(CParser.T__0)
self.state = 555
self.match(CParser.LeftParen)
self.state = 556
_la = self._input.LA(1)
if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.T__3) | (1 << CParser.T__4) | (1 << CParser.T__5))) != 0)):
self._errHandler.recoverInline(self)
else:
self.consume()
self.state = 557
self.match(CParser.RightParen)
elif token in [CParser.Atomic]:
self.enterOuterAlt(localctx, 3)
self.state = 558
self.atomicTypeSpecifier()
elif token in [CParser.Struct, CParser.Union]:
self.enterOuterAlt(localctx, 4)
self.state = 559
self.structOrUnionSpecifier()
elif token in [CParser.Enum]:
self.enterOuterAlt(localctx, 5)
self.state = 560
self.enumSpecifier()
elif token in [CParser.Identifier]:
self.enterOuterAlt(localctx, 6)
self.state = 561
self.typedefName()
elif token in [CParser.T__6]:
self.enterOuterAlt(localctx, 7)
self.state = 562
self.match(CParser.T__6)
self.state = 563
self.match(CParser.LeftParen)
self.state = 564
self.constantExpression()
self.state = 565
self.match(CParser.RightParen)
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
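# typeSpecifier fans out over seven alternatives that are all separable by
# their first token, so a chain of "token in [...]" tests replaces adaptive
# prediction here. The T__N names are the grammar's implicitly defined literal
# tokens; their spellings live in the generated literalNames table, not in
# this method.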
class StructOrUnionSpecifierContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def structOrUnion(self):
return self.getTypedRuleContext(CParser.StructOrUnionContext,0)
def structDeclarationList(self):
return self.getTypedRuleContext(CParser.StructDeclarationListContext,0)
def Identifier(self):
return self.getToken(CParser.Identifier, 0)
def getRuleIndex(self):
return CParser.RULE_structOrUnionSpecifier
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterStructOrUnionSpecifier" ):
listener.enterStructOrUnionSpecifier(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitStructOrUnionSpecifier" ):
listener.exitStructOrUnionSpecifier(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitStructOrUnionSpecifier" ):
return visitor.visitStructOrUnionSpecifier(self)
else:
return visitor.visitChildren(self)
def structOrUnionSpecifier(self):
localctx = CParser.StructOrUnionSpecifierContext(self, self._ctx, self.state)
self.enterRule(localctx, 64, self.RULE_structOrUnionSpecifier)
self._la = 0 # Token type
try:
self.state = 580
la_ = self._interp.adaptivePredict(self._input,39,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 569
self.structOrUnion()
self.state = 571
_la = self._input.LA(1)
if _la==CParser.Identifier:
self.state = 570
self.match(CParser.Identifier)
self.state = 573
self.match(CParser.LeftBrace)
self.state = 574
self.structDeclarationList(0)
self.state = 575
self.match(CParser.RightBrace)
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 577
self.structOrUnion()
self.state = 578
self.match(CParser.Identifier)
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class StructOrUnionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return CParser.RULE_structOrUnion
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterStructOrUnion" ):
listener.enterStructOrUnion(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitStructOrUnion" ):
listener.exitStructOrUnion(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitStructOrUnion" ):
return visitor.visitStructOrUnion(self)
else:
return visitor.visitChildren(self)
def structOrUnion(self):
localctx = CParser.StructOrUnionContext(self, self._ctx, self.state)
self.enterRule(localctx, 66, self.RULE_structOrUnion)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 582
_la = self._input.LA(1)
if not(_la==CParser.Struct or _la==CParser.Union):
self._errHandler.recoverInline(self)
else:
self.consume()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class StructDeclarationListContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def structDeclaration(self):
return self.getTypedRuleContext(CParser.StructDeclarationContext,0)
def structDeclarationList(self):
return self.getTypedRuleContext(CParser.StructDeclarationListContext,0)
def getRuleIndex(self):
return CParser.RULE_structDeclarationList
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterStructDeclarationList" ):
listener.enterStructDeclarationList(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitStructDeclarationList" ):
listener.exitStructDeclarationList(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitStructDeclarationList" ):
return visitor.visitStructDeclarationList(self)
else:
return visitor.visitChildren(self)
def structDeclarationList(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = CParser.StructDeclarationListContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 68
self.enterRecursionRule(localctx, 68, self.RULE_structDeclarationList, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 585
self.structDeclaration()
self._ctx.stop = self._input.LT(-1)
self.state = 591
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,40,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
localctx = CParser.StructDeclarationListContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_structDeclarationList)
self.state = 587
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 588
self.structDeclaration()
self.state = 593
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,40,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class StructDeclarationContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def specifierQualifierList(self):
return self.getTypedRuleContext(CParser.SpecifierQualifierListContext,0)
def structDeclaratorList(self):
return self.getTypedRuleContext(CParser.StructDeclaratorListContext,0)
def staticAssertDeclaration(self):
return self.getTypedRuleContext(CParser.StaticAssertDeclarationContext,0)
def getRuleIndex(self):
return CParser.RULE_structDeclaration
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterStructDeclaration" ):
listener.enterStructDeclaration(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitStructDeclaration" ):
listener.exitStructDeclaration(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitStructDeclaration" ):
return visitor.visitStructDeclaration(self)
else:
return visitor.visitChildren(self)
def structDeclaration(self):
localctx = CParser.StructDeclarationContext(self, self._ctx, self.state)
self.enterRule(localctx, 70, self.RULE_structDeclaration)
self._la = 0 # Token type
try:
self.state = 601
token = self._input.LA(1)
if token in [CParser.T__0, CParser.T__3, CParser.T__4, CParser.T__5, CParser.T__6, CParser.Char, CParser.Const, CParser.Double, CParser.Enum, CParser.Float, CParser.Int, CParser.Long, CParser.Restrict, CParser.Short, CParser.Signed, CParser.Struct, CParser.Union, CParser.Unsigned, CParser.Void, CParser.Volatile, CParser.Atomic, CParser.Bool, CParser.Complex, CParser.Identifier]:
self.enterOuterAlt(localctx, 1)
self.state = 594
self.specifierQualifierList()
self.state = 596
_la = self._input.LA(1)
if ((((_la - 59)) & ~0x3f) == 0 and ((1 << (_la - 59)) & ((1 << (CParser.LeftParen - 59)) | (1 << (CParser.Star - 59)) | (1 << (CParser.Caret - 59)) | (1 << (CParser.Colon - 59)) | (1 << (CParser.Identifier - 59)))) != 0):
self.state = 595
self.structDeclaratorList(0)
self.state = 598
self.match(CParser.Semi)
elif token in [CParser.StaticAssert]:
self.enterOuterAlt(localctx, 2)
self.state = 600
self.staticAssertDeclaration()
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class SpecifierQualifierListContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def typeSpecifier(self):
return self.getTypedRuleContext(CParser.TypeSpecifierContext,0)
def specifierQualifierList(self):
return self.getTypedRuleContext(CParser.SpecifierQualifierListContext,0)
def typeQualifier(self):
return self.getTypedRuleContext(CParser.TypeQualifierContext,0)
def getRuleIndex(self):
return CParser.RULE_specifierQualifierList
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterSpecifierQualifierList" ):
listener.enterSpecifierQualifierList(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitSpecifierQualifierList" ):
listener.exitSpecifierQualifierList(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitSpecifierQualifierList" ):
return visitor.visitSpecifierQualifierList(self)
else:
return visitor.visitChildren(self)
def specifierQualifierList(self):
localctx = CParser.SpecifierQualifierListContext(self, self._ctx, self.state)
self.enterRule(localctx, 72, self.RULE_specifierQualifierList)
try:
self.state = 611
la_ = self._interp.adaptivePredict(self._input,45,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 603
self.typeSpecifier()
self.state = 605
la_ = self._interp.adaptivePredict(self._input,43,self._ctx)
if la_ == 1:
self.state = 604
self.specifierQualifierList()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 607
self.typeQualifier()
self.state = 609
la_ = self._interp.adaptivePredict(self._input,44,self._ctx)
if la_ == 1:
self.state = 608
self.specifierQualifierList()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class StructDeclaratorListContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def structDeclarator(self):
return self.getTypedRuleContext(CParser.StructDeclaratorContext,0)
def structDeclaratorList(self):
return self.getTypedRuleContext(CParser.StructDeclaratorListContext,0)
def getRuleIndex(self):
return CParser.RULE_structDeclaratorList
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterStructDeclaratorList" ):
listener.enterStructDeclaratorList(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitStructDeclaratorList" ):
listener.exitStructDeclaratorList(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitStructDeclaratorList" ):
return visitor.visitStructDeclaratorList(self)
else:
return visitor.visitChildren(self)
def structDeclaratorList(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = CParser.StructDeclaratorListContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 74
self.enterRecursionRule(localctx, 74, self.RULE_structDeclaratorList, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 614
self.structDeclarator()
self._ctx.stop = self._input.LT(-1)
self.state = 621
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,46,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
localctx = CParser.StructDeclaratorListContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_structDeclaratorList)
self.state = 616
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 617
self.match(CParser.Comma)
self.state = 618
self.structDeclarator()
self.state = 623
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,46,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class StructDeclaratorContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def declarator(self):
return self.getTypedRuleContext(CParser.DeclaratorContext,0)
def constantExpression(self):
return self.getTypedRuleContext(CParser.ConstantExpressionContext,0)
def getRuleIndex(self):
return CParser.RULE_structDeclarator
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterStructDeclarator" ):
listener.enterStructDeclarator(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitStructDeclarator" ):
listener.exitStructDeclarator(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitStructDeclarator" ):
return visitor.visitStructDeclarator(self)
else:
return visitor.visitChildren(self)
def structDeclarator(self):
localctx = CParser.StructDeclaratorContext(self, self._ctx, self.state)
self.enterRule(localctx, 76, self.RULE_structDeclarator)
self._la = 0 # Token type
try:
self.state = 630
la_ = self._interp.adaptivePredict(self._input,48,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 624
self.declarator()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 626
_la = self._input.LA(1)
if ((((_la - 59)) & ~0x3f) == 0 and ((1 << (_la - 59)) & ((1 << (CParser.LeftParen - 59)) | (1 << (CParser.Star - 59)) | (1 << (CParser.Caret - 59)) | (1 << (CParser.Identifier - 59)))) != 0):
self.state = 625
self.declarator()
self.state = 628
self.match(CParser.Colon)
self.state = 629
self.constantExpression()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
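# Alternative 2 of structDeclarator parses a bit-field member, where the
# declarator before ':' is optional (e.g. "int : 3;" inserts anonymous
# padding). The inline LA(1) bitmask stands in for the optional subrule,
# mirroring the grammar's "declarator? ':' constantExpression" shape.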
class EnumSpecifierContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def enumeratorList(self):
return self.getTypedRuleContext(CParser.EnumeratorListContext,0)
def Identifier(self):
return self.getToken(CParser.Identifier, 0)
def getRuleIndex(self):
return CParser.RULE_enumSpecifier
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterEnumSpecifier" ):
listener.enterEnumSpecifier(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitEnumSpecifier" ):
listener.exitEnumSpecifier(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitEnumSpecifier" ):
return visitor.visitEnumSpecifier(self)
else:
return visitor.visitChildren(self)
def enumSpecifier(self):
localctx = CParser.EnumSpecifierContext(self, self._ctx, self.state)
self.enterRule(localctx, 78, self.RULE_enumSpecifier)
self._la = 0 # Token type
try:
self.state = 651
la_ = self._interp.adaptivePredict(self._input,51,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 632
self.match(CParser.Enum)
self.state = 634
_la = self._input.LA(1)
if _la==CParser.Identifier:
self.state = 633
self.match(CParser.Identifier)
self.state = 636
self.match(CParser.LeftBrace)
self.state = 637
self.enumeratorList(0)
self.state = 638
self.match(CParser.RightBrace)
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 640
self.match(CParser.Enum)
self.state = 642
_la = self._input.LA(1)
if _la==CParser.Identifier:
self.state = 641
self.match(CParser.Identifier)
self.state = 644
self.match(CParser.LeftBrace)
self.state = 645
self.enumeratorList(0)
self.state = 646
self.match(CParser.Comma)
self.state = 647
self.match(CParser.RightBrace)
pass
elif la_ == 3:
self.enterOuterAlt(localctx, 3)
self.state = 649
self.match(CParser.Enum)
self.state = 650
self.match(CParser.Identifier)
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
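# Alternatives 1 and 2 of enumSpecifier differ only by a trailing comma just
# before '}', so no amount of leading-token lookahead can separate them:
# adaptivePredict (decision 51) scans ahead through the whole enumerator list
# to tell "enum E { A, B }" from "enum E { A, B, }".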
class EnumeratorListContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def enumerator(self):
return self.getTypedRuleContext(CParser.EnumeratorContext,0)
def enumeratorList(self):
return self.getTypedRuleContext(CParser.EnumeratorListContext,0)
def getRuleIndex(self):
return CParser.RULE_enumeratorList
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterEnumeratorList" ):
listener.enterEnumeratorList(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitEnumeratorList" ):
listener.exitEnumeratorList(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitEnumeratorList" ):
return visitor.visitEnumeratorList(self)
else:
return visitor.visitChildren(self)
def enumeratorList(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = CParser.EnumeratorListContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 80
self.enterRecursionRule(localctx, 80, self.RULE_enumeratorList, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 654
self.enumerator()
self._ctx.stop = self._input.LT(-1)
self.state = 661
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,52,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
localctx = CParser.EnumeratorListContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_enumeratorList)
self.state = 656
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 657
self.match(CParser.Comma)
self.state = 658
self.enumerator()
self.state = 663
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,52,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class EnumeratorContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def enumerationConstant(self):
return self.getTypedRuleContext(CParser.EnumerationConstantContext,0)
def constantExpression(self):
return self.getTypedRuleContext(CParser.ConstantExpressionContext,0)
def getRuleIndex(self):
return CParser.RULE_enumerator
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterEnumerator" ):
listener.enterEnumerator(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitEnumerator" ):
listener.exitEnumerator(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitEnumerator" ):
return visitor.visitEnumerator(self)
else:
return visitor.visitChildren(self)
def enumerator(self):
localctx = CParser.EnumeratorContext(self, self._ctx, self.state)
self.enterRule(localctx, 82, self.RULE_enumerator)
try:
self.state = 669
la_ = self._interp.adaptivePredict(self._input,53,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 664
self.enumerationConstant()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 665
self.enumerationConstant()
self.state = 666
self.match(CParser.Assign)
self.state = 667
self.constantExpression()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class EnumerationConstantContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def Identifier(self):
return self.getToken(CParser.Identifier, 0)
def getRuleIndex(self):
return CParser.RULE_enumerationConstant
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterEnumerationConstant" ):
listener.enterEnumerationConstant(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitEnumerationConstant" ):
listener.exitEnumerationConstant(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitEnumerationConstant" ):
return visitor.visitEnumerationConstant(self)
else:
return visitor.visitChildren(self)
def enumerationConstant(self):
localctx = CParser.EnumerationConstantContext(self, self._ctx, self.state)
self.enterRule(localctx, 84, self.RULE_enumerationConstant)
try:
self.enterOuterAlt(localctx, 1)
self.state = 671
self.match(CParser.Identifier)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class AtomicTypeSpecifierContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def typeName(self):
return self.getTypedRuleContext(CParser.TypeNameContext,0)
def getRuleIndex(self):
return CParser.RULE_atomicTypeSpecifier
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterAtomicTypeSpecifier" ):
listener.enterAtomicTypeSpecifier(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitAtomicTypeSpecifier" ):
listener.exitAtomicTypeSpecifier(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitAtomicTypeSpecifier" ):
return visitor.visitAtomicTypeSpecifier(self)
else:
return visitor.visitChildren(self)
def atomicTypeSpecifier(self):
localctx = CParser.AtomicTypeSpecifierContext(self, self._ctx, self.state)
self.enterRule(localctx, 86, self.RULE_atomicTypeSpecifier)
try:
self.enterOuterAlt(localctx, 1)
self.state = 673
self.match(CParser.Atomic)
self.state = 674
self.match(CParser.LeftParen)
self.state = 675
self.typeName()
self.state = 676
self.match(CParser.RightParen)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
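# atomicTypeSpecifier is a straight-line sequence, so its body is just match()
# calls: match() consumes the current token when it has the expected type and
# otherwise hands control to the error handler, which may recover by
# single-token insertion or deletion before parsing resumes.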
class TypeQualifierContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return CParser.RULE_typeQualifier
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterTypeQualifier" ):
listener.enterTypeQualifier(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitTypeQualifier" ):
listener.exitTypeQualifier(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitTypeQualifier" ):
return visitor.visitTypeQualifier(self)
else:
return visitor.visitChildren(self)
def typeQualifier(self):
localctx = CParser.TypeQualifierContext(self, self._ctx, self.state)
self.enterRule(localctx, 88, self.RULE_typeQualifier)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 678
_la = self._input.LA(1)
if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.Const) | (1 << CParser.Restrict) | (1 << CParser.Volatile) | (1 << CParser.Atomic))) != 0)):
self._errHandler.recoverInline(self)
else:
self.consume()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class FunctionSpecifierContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def gccAttributeSpecifier(self):
return self.getTypedRuleContext(CParser.GccAttributeSpecifierContext,0)
def Identifier(self):
return self.getToken(CParser.Identifier, 0)
def getRuleIndex(self):
return CParser.RULE_functionSpecifier
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterFunctionSpecifier" ):
listener.enterFunctionSpecifier(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitFunctionSpecifier" ):
listener.exitFunctionSpecifier(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitFunctionSpecifier" ):
return visitor.visitFunctionSpecifier(self)
else:
return visitor.visitChildren(self)
def functionSpecifier(self):
localctx = CParser.FunctionSpecifierContext(self, self._ctx, self.state)
self.enterRule(localctx, 90, self.RULE_functionSpecifier)
self._la = 0 # Token type
try:
self.state = 686
token = self._input.LA(1)
if token in [CParser.T__7, CParser.T__8, CParser.Inline, CParser.Noreturn]:
self.enterOuterAlt(localctx, 1)
self.state = 680
_la = self._input.LA(1)
if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.T__7) | (1 << CParser.T__8) | (1 << CParser.Inline) | (1 << CParser.Noreturn))) != 0)):
self._errHandler.recoverInline(self)
else:
self.consume()
elif token in [CParser.T__11]:
self.enterOuterAlt(localctx, 2)
self.state = 681
self.gccAttributeSpecifier()
elif token in [CParser.T__9]:
self.enterOuterAlt(localctx, 3)
self.state = 682
self.match(CParser.T__9)
self.state = 683
self.match(CParser.LeftParen)
self.state = 684
self.match(CParser.Identifier)
self.state = 685
self.match(CParser.RightParen)
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class AlignmentSpecifierContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def typeName(self):
return self.getTypedRuleContext(CParser.TypeNameContext,0)
def constantExpression(self):
return self.getTypedRuleContext(CParser.ConstantExpressionContext,0)
def getRuleIndex(self):
return CParser.RULE_alignmentSpecifier
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterAlignmentSpecifier" ):
listener.enterAlignmentSpecifier(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitAlignmentSpecifier" ):
listener.exitAlignmentSpecifier(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitAlignmentSpecifier" ):
return visitor.visitAlignmentSpecifier(self)
else:
return visitor.visitChildren(self)
def alignmentSpecifier(self):
localctx = CParser.AlignmentSpecifierContext(self, self._ctx, self.state)
self.enterRule(localctx, 92, self.RULE_alignmentSpecifier)
try:
self.state = 698
la_ = self._interp.adaptivePredict(self._input,55,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 688
self.match(CParser.Alignas)
self.state = 689
self.match(CParser.LeftParen)
self.state = 690
self.typeName()
self.state = 691
self.match(CParser.RightParen)
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 693
self.match(CParser.Alignas)
self.state = 694
self.match(CParser.LeftParen)
self.state = 695
self.constantExpression()
self.state = 696
self.match(CParser.RightParen)
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class DeclaratorContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def directDeclarator(self):
return self.getTypedRuleContext(CParser.DirectDeclaratorContext,0)
def pointer(self):
return self.getTypedRuleContext(CParser.PointerContext,0)
def gccDeclaratorExtension(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(CParser.GccDeclaratorExtensionContext)
else:
return self.getTypedRuleContext(CParser.GccDeclaratorExtensionContext,i)
def getRuleIndex(self):
return CParser.RULE_declarator
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterDeclarator" ):
listener.enterDeclarator(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitDeclarator" ):
listener.exitDeclarator(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitDeclarator" ):
return visitor.visitDeclarator(self)
else:
return visitor.visitChildren(self)
def declarator(self):
localctx = CParser.DeclaratorContext(self, self._ctx, self.state)
self.enterRule(localctx, 94, self.RULE_declarator)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 701
_la = self._input.LA(1)
if _la==CParser.Star or _la==CParser.Caret:
self.state = 700
self.pointer()
self.state = 703
self.directDeclarator(0)
self.state = 707
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,57,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
self.state = 704
self.gccDeclaratorExtension()
self.state = 709
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,57,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
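# declarator follows "pointer? directDeclarator gccDeclaratorExtension*": the
# LA(1) test on Star/Caret makes the pointer prefix optional (Caret being the
# '^' pointer form the grammar appears to allow for the Blocks extension), and
# the trailing (...)* loop collects the GCC-style asm/attribute extensions the
# grammar permits after a declarator.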
class DirectDeclaratorContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def Identifier(self):
return self.getToken(CParser.Identifier, 0)
def declarator(self):
return self.getTypedRuleContext(CParser.DeclaratorContext,0)
def directDeclarator(self):
return self.getTypedRuleContext(CParser.DirectDeclaratorContext,0)
def typeQualifierList(self):
return self.getTypedRuleContext(CParser.TypeQualifierListContext,0)
def assignmentExpression(self):
return self.getTypedRuleContext(CParser.AssignmentExpressionContext,0)
def parameterTypeList(self):
return self.getTypedRuleContext(CParser.ParameterTypeListContext,0)
def identifierList(self):
return self.getTypedRuleContext(CParser.IdentifierListContext,0)
def getRuleIndex(self):
return CParser.RULE_directDeclarator
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterDirectDeclarator" ):
listener.enterDirectDeclarator(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitDirectDeclarator" ):
listener.exitDirectDeclarator(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitDirectDeclarator" ):
return visitor.visitDirectDeclarator(self)
else:
return visitor.visitChildren(self)
def directDeclarator(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = CParser.DirectDeclaratorContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 96
self.enterRecursionRule(localctx, 96, self.RULE_directDeclarator, _p)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 716
token = self._input.LA(1)
if token in [CParser.Identifier]:
self.state = 711
self.match(CParser.Identifier)
elif token in [CParser.LeftParen]:
self.state = 712
self.match(CParser.LeftParen)
self.state = 713
self.declarator()
self.state = 714
self.match(CParser.RightParen)
else:
raise NoViableAltException(self)
self._ctx.stop = self._input.LT(-1)
self.state = 763
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,65,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
self.state = 761
la_ = self._interp.adaptivePredict(self._input,64,self._ctx)
if la_ == 1:
localctx = CParser.DirectDeclaratorContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_directDeclarator)
self.state = 718
if not self.precpred(self._ctx, 6):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 6)")
self.state = 719
self.match(CParser.LeftBracket)
self.state = 721
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.Const) | (1 << CParser.Restrict) | (1 << CParser.Volatile) | (1 << CParser.Atomic))) != 0):
self.state = 720
self.typeQualifierList(0)
self.state = 724
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.T__0) | (1 << CParser.T__1) | (1 << CParser.T__2) | (1 << CParser.Sizeof) | (1 << CParser.Alignof) | (1 << CParser.Generic) | (1 << CParser.LeftParen))) != 0) or ((((_la - 71)) & ~0x3f) == 0 and ((1 << (_la - 71)) & ((1 << (CParser.Plus - 71)) | (1 << (CParser.PlusPlus - 71)) | (1 << (CParser.Minus - 71)) | (1 << (CParser.MinusMinus - 71)) | (1 << (CParser.Star - 71)) | (1 << (CParser.And - 71)) | (1 << (CParser.AndAnd - 71)) | (1 << (CParser.Not - 71)) | (1 << (CParser.Tilde - 71)) | (1 << (CParser.Identifier - 71)) | (1 << (CParser.Constant - 71)) | (1 << (CParser.StringLiteral - 71)))) != 0):
self.state = 723
self.assignmentExpression()
self.state = 726
self.match(CParser.RightBracket)
pass
elif la_ == 2:
localctx = CParser.DirectDeclaratorContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_directDeclarator)
self.state = 727
if not self.precpred(self._ctx, 5):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 5)")
self.state = 728
self.match(CParser.LeftBracket)
self.state = 729
self.match(CParser.Static)
self.state = 731
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.Const) | (1 << CParser.Restrict) | (1 << CParser.Volatile) | (1 << CParser.Atomic))) != 0):
self.state = 730
self.typeQualifierList(0)
self.state = 733
self.assignmentExpression()
self.state = 734
self.match(CParser.RightBracket)
pass
elif la_ == 3:
localctx = CParser.DirectDeclaratorContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_directDeclarator)
self.state = 736
if not self.precpred(self._ctx, 4):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 4)")
self.state = 737
self.match(CParser.LeftBracket)
self.state = 738
self.typeQualifierList(0)
self.state = 739
self.match(CParser.Static)
self.state = 740
self.assignmentExpression()
self.state = 741
self.match(CParser.RightBracket)
pass
elif la_ == 4:
localctx = CParser.DirectDeclaratorContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_directDeclarator)
self.state = 743
if not self.precpred(self._ctx, 3):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 3)")
self.state = 744
self.match(CParser.LeftBracket)
self.state = 746
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.Const) | (1 << CParser.Restrict) | (1 << CParser.Volatile) | (1 << CParser.Atomic))) != 0):
self.state = 745
self.typeQualifierList(0)
self.state = 748
self.match(CParser.Star)
self.state = 749
self.match(CParser.RightBracket)
pass
elif la_ == 5:
localctx = CParser.DirectDeclaratorContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_directDeclarator)
self.state = 750
if not self.precpred(self._ctx, 2):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 2)")
self.state = 751
self.match(CParser.LeftParen)
self.state = 752
self.parameterTypeList()
self.state = 753
self.match(CParser.RightParen)
pass
elif la_ == 6:
localctx = CParser.DirectDeclaratorContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_directDeclarator)
self.state = 755
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 756
self.match(CParser.LeftParen)
self.state = 758
_la = self._input.LA(1)
if _la==CParser.Identifier:
self.state = 757
self.identifierList(0)
self.state = 760
self.match(CParser.RightParen)
pass
self.state = 765
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,65,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class GccDeclaratorExtensionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def StringLiteral(self, i:int=None):
if i is None:
return self.getTokens(CParser.StringLiteral)
else:
return self.getToken(CParser.StringLiteral, i)
def gccAttributeSpecifier(self):
return self.getTypedRuleContext(CParser.GccAttributeSpecifierContext,0)
def getRuleIndex(self):
return CParser.RULE_gccDeclaratorExtension
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterGccDeclaratorExtension" ):
listener.enterGccDeclaratorExtension(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitGccDeclaratorExtension" ):
listener.exitGccDeclaratorExtension(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitGccDeclaratorExtension" ):
return visitor.visitGccDeclaratorExtension(self)
else:
return visitor.visitChildren(self)
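    # gccDeclaratorExtension
    #   : T__10 '(' StringLiteral+ ')'
    #   | gccAttributeSpecifier
    # T__10 and T__11 are implicit literal tokens from the grammar; judging by
    # the surrounding rule names they presumably spell gcc's '__asm' and
    # '__attribute__' keywords.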
def gccDeclaratorExtension(self):
localctx = CParser.GccDeclaratorExtensionContext(self, self._ctx, self.state)
self.enterRule(localctx, 98, self.RULE_gccDeclaratorExtension)
self._la = 0 # Token type
try:
self.state = 775
token = self._input.LA(1)
if token in [CParser.T__10]:
self.enterOuterAlt(localctx, 1)
self.state = 766
self.match(CParser.T__10)
self.state = 767
self.match(CParser.LeftParen)
self.state = 769
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 768
self.match(CParser.StringLiteral)
self.state = 771
self._errHandler.sync(self)
_la = self._input.LA(1)
if not (_la==CParser.StringLiteral):
break
self.state = 773
self.match(CParser.RightParen)
elif token in [CParser.T__11]:
self.enterOuterAlt(localctx, 2)
self.state = 774
self.gccAttributeSpecifier()
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class GccAttributeSpecifierContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def gccAttributeList(self):
return self.getTypedRuleContext(CParser.GccAttributeListContext,0)
def getRuleIndex(self):
return CParser.RULE_gccAttributeSpecifier
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterGccAttributeSpecifier" ):
listener.enterGccAttributeSpecifier(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitGccAttributeSpecifier" ):
listener.exitGccAttributeSpecifier(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitGccAttributeSpecifier" ):
return visitor.visitGccAttributeSpecifier(self)
else:
return visitor.visitChildren(self)
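    # gccAttributeSpecifier
    #   : T__11 '(' '(' gccAttributeList ')' ')'
    # i.e. the double-parenthesized __attribute__((...)) form, matched as two
    # separate LeftParen/RightParen pairs.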
def gccAttributeSpecifier(self):
localctx = CParser.GccAttributeSpecifierContext(self, self._ctx, self.state)
self.enterRule(localctx, 100, self.RULE_gccAttributeSpecifier)
try:
self.enterOuterAlt(localctx, 1)
self.state = 777
self.match(CParser.T__11)
self.state = 778
self.match(CParser.LeftParen)
self.state = 779
self.match(CParser.LeftParen)
self.state = 780
self.gccAttributeList()
self.state = 781
self.match(CParser.RightParen)
self.state = 782
self.match(CParser.RightParen)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class GccAttributeListContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def gccAttribute(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(CParser.GccAttributeContext)
else:
return self.getTypedRuleContext(CParser.GccAttributeContext,i)
def getRuleIndex(self):
return CParser.RULE_gccAttributeList
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterGccAttributeList" ):
listener.enterGccAttributeList(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitGccAttributeList" ):
listener.exitGccAttributeList(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitGccAttributeList" ):
return visitor.visitGccAttributeList(self)
else:
return visitor.visitChildren(self)
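    # gccAttributeList
    #   : gccAttribute (',' gccAttribute)*
    #   |                                    (empty)
    # adaptivePredict (decision 69) chooses between a populated list and the
    # empty alternative.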
def gccAttributeList(self):
localctx = CParser.GccAttributeListContext(self, self._ctx, self.state)
self.enterRule(localctx, 102, self.RULE_gccAttributeList)
self._la = 0 # Token type
try:
self.state = 793
la_ = self._interp.adaptivePredict(self._input,69,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 784
self.gccAttribute()
self.state = 789
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==CParser.Comma:
self.state = 785
self.match(CParser.Comma)
self.state = 786
self.gccAttribute()
self.state = 791
self._errHandler.sync(self)
_la = self._input.LA(1)
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class GccAttributeContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def argumentExpressionList(self):
return self.getTypedRuleContext(CParser.ArgumentExpressionListContext,0)
def getRuleIndex(self):
return CParser.RULE_gccAttribute
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterGccAttribute" ):
listener.enterGccAttribute(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitGccAttribute" ):
listener.exitGccAttribute(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitGccAttribute" ):
return visitor.visitGccAttribute(self)
else:
return visitor.visitChildren(self)
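    # gccAttribute
    #   : ~('(' | ')' | ',') ('(' argumentExpressionList? ')')?
    #   |                                    (empty)
    # The long "token in [...]" list below enumerates every token type except
    # LeftParen, RightParen and Comma; the inverted-bitset test that follows
    # enforces the same ~(...) set when the token is actually consumed.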
def gccAttribute(self):
localctx = CParser.GccAttributeContext(self, self._ctx, self.state)
self.enterRule(localctx, 104, self.RULE_gccAttribute)
self._la = 0 # Token type
try:
self.state = 804
token = self._input.LA(1)
if token in [CParser.T__0, CParser.T__1, CParser.T__2, CParser.T__3, CParser.T__4, CParser.T__5, CParser.T__6, CParser.T__7, CParser.T__8, CParser.T__9, CParser.T__10, CParser.T__11, CParser.T__12, CParser.T__13, CParser.Auto, CParser.Break, CParser.Case, CParser.Char, CParser.Const, CParser.Continue, CParser.Default, CParser.Do, CParser.Double, CParser.Else, CParser.Enum, CParser.Extern, CParser.Float, CParser.For, CParser.Goto, CParser.If, CParser.Inline, CParser.Int, CParser.Long, CParser.Register, CParser.Restrict, CParser.Return, CParser.Short, CParser.Signed, CParser.Sizeof, CParser.Static, CParser.Struct, CParser.Switch, CParser.Typedef, CParser.Union, CParser.Unsigned, CParser.Void, CParser.Volatile, CParser.While, CParser.Alignas, CParser.Alignof, CParser.Atomic, CParser.Bool, CParser.Complex, CParser.Generic, CParser.Imaginary, CParser.Noreturn, CParser.StaticAssert, CParser.ThreadLocal, CParser.LeftBracket, CParser.RightBracket, CParser.LeftBrace, CParser.RightBrace, CParser.Less, CParser.LessEqual, CParser.Greater, CParser.GreaterEqual, CParser.LeftShift, CParser.RightShift, CParser.Plus, CParser.PlusPlus, CParser.Minus, CParser.MinusMinus, CParser.Star, CParser.Div, CParser.Mod, CParser.And, CParser.Or, CParser.AndAnd, CParser.OrOr, CParser.Caret, CParser.Not, CParser.Tilde, CParser.Question, CParser.Colon, CParser.Semi, CParser.Assign, CParser.StarAssign, CParser.DivAssign, CParser.ModAssign, CParser.PlusAssign, CParser.MinusAssign, CParser.LeftShiftAssign, CParser.RightShiftAssign, CParser.AndAssign, CParser.XorAssign, CParser.OrAssign, CParser.Equal, CParser.NotEqual, CParser.Arrow, CParser.Dot, CParser.Ellipsis, CParser.Identifier, CParser.Constant, CParser.StringLiteral, CParser.LineDirective, CParser.PragmaDirective, CParser.Whitespace, CParser.Newline, CParser.BlockComment, CParser.LineComment]:
self.enterOuterAlt(localctx, 1)
self.state = 795
_la = self._input.LA(1)
if _la <= 0 or ((((_la - 59)) & ~0x3f) == 0 and ((1 << (_la - 59)) & ((1 << (CParser.LeftParen - 59)) | (1 << (CParser.RightParen - 59)) | (1 << (CParser.Comma - 59)))) != 0):
self._errHandler.recoverInline(self)
else:
self.consume()
self.state = 801
_la = self._input.LA(1)
if _la==CParser.LeftParen:
self.state = 796
self.match(CParser.LeftParen)
self.state = 798
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.T__0) | (1 << CParser.T__1) | (1 << CParser.T__2) | (1 << CParser.Sizeof) | (1 << CParser.Alignof) | (1 << CParser.Generic) | (1 << CParser.LeftParen))) != 0) or ((((_la - 71)) & ~0x3f) == 0 and ((1 << (_la - 71)) & ((1 << (CParser.Plus - 71)) | (1 << (CParser.PlusPlus - 71)) | (1 << (CParser.Minus - 71)) | (1 << (CParser.MinusMinus - 71)) | (1 << (CParser.Star - 71)) | (1 << (CParser.And - 71)) | (1 << (CParser.AndAnd - 71)) | (1 << (CParser.Not - 71)) | (1 << (CParser.Tilde - 71)) | (1 << (CParser.Identifier - 71)) | (1 << (CParser.Constant - 71)) | (1 << (CParser.StringLiteral - 71)))) != 0):
self.state = 797
self.argumentExpressionList(0)
self.state = 800
self.match(CParser.RightParen)
elif token in [CParser.RightParen, CParser.Comma]:
self.enterOuterAlt(localctx, 2)
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class NestedParenthesesBlockContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def nestedParenthesesBlock(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(CParser.NestedParenthesesBlockContext)
else:
return self.getTypedRuleContext(CParser.NestedParenthesesBlockContext,i)
def getRuleIndex(self):
return CParser.RULE_nestedParenthesesBlock
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterNestedParenthesesBlock" ):
listener.enterNestedParenthesesBlock(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitNestedParenthesesBlock" ):
listener.exitNestedParenthesesBlock(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitNestedParenthesesBlock" ):
return visitor.visitNestedParenthesesBlock(self)
else:
return visitor.visitChildren(self)
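    # nestedParenthesesBlock
    #   : ( ~('(' | ')') | '(' nestedParenthesesBlock ')' )*
    # Consumes arbitrary tokens, recursing only to keep parentheses balanced;
    # the loop stops at an unmatched ')' or at end of input.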
def nestedParenthesesBlock(self):
localctx = CParser.NestedParenthesesBlockContext(self, self._ctx, self.state)
self.enterRule(localctx, 106, self.RULE_nestedParenthesesBlock)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 813
self._errHandler.sync(self)
_la = self._input.LA(1)
while (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.T__0) | (1 << CParser.T__1) | (1 << CParser.T__2) | (1 << CParser.T__3) | (1 << CParser.T__4) | (1 << CParser.T__5) | (1 << CParser.T__6) | (1 << CParser.T__7) | (1 << CParser.T__8) | (1 << CParser.T__9) | (1 << CParser.T__10) | (1 << CParser.T__11) | (1 << CParser.T__12) | (1 << CParser.T__13) | (1 << CParser.Auto) | (1 << CParser.Break) | (1 << CParser.Case) | (1 << CParser.Char) | (1 << CParser.Const) | (1 << CParser.Continue) | (1 << CParser.Default) | (1 << CParser.Do) | (1 << CParser.Double) | (1 << CParser.Else) | (1 << CParser.Enum) | (1 << CParser.Extern) | (1 << CParser.Float) | (1 << CParser.For) | (1 << CParser.Goto) | (1 << CParser.If) | (1 << CParser.Inline) | (1 << CParser.Int) | (1 << CParser.Long) | (1 << CParser.Register) | (1 << CParser.Restrict) | (1 << CParser.Return) | (1 << CParser.Short) | (1 << CParser.Signed) | (1 << CParser.Sizeof) | (1 << CParser.Static) | (1 << CParser.Struct) | (1 << CParser.Switch) | (1 << CParser.Typedef) | (1 << CParser.Union) | (1 << CParser.Unsigned) | (1 << CParser.Void) | (1 << CParser.Volatile) | (1 << CParser.While) | (1 << CParser.Alignas) | (1 << CParser.Alignof) | (1 << CParser.Atomic) | (1 << CParser.Bool) | (1 << CParser.Complex) | (1 << CParser.Generic) | (1 << CParser.Imaginary) | (1 << CParser.Noreturn) | (1 << CParser.StaticAssert) | (1 << CParser.ThreadLocal) | (1 << CParser.LeftParen) | (1 << CParser.LeftBracket) | (1 << CParser.RightBracket) | (1 << CParser.LeftBrace))) != 0) or ((((_la - 64)) & ~0x3f) == 0 and ((1 << (_la - 64)) & ((1 << (CParser.RightBrace - 64)) | (1 << (CParser.Less - 64)) | (1 << (CParser.LessEqual - 64)) | (1 << (CParser.Greater - 64)) | (1 << (CParser.GreaterEqual - 64)) | (1 << (CParser.LeftShift - 64)) | (1 << (CParser.RightShift - 64)) | (1 << (CParser.Plus - 64)) | (1 << (CParser.PlusPlus - 64)) | (1 << (CParser.Minus - 64)) | (1 << (CParser.MinusMinus - 64)) | (1 << (CParser.Star - 64)) | (1 << (CParser.Div - 64)) | (1 << (CParser.Mod - 64)) | (1 << (CParser.And - 64)) | (1 << (CParser.Or - 64)) | (1 << (CParser.AndAnd - 64)) | (1 << (CParser.OrOr - 64)) | (1 << (CParser.Caret - 64)) | (1 << (CParser.Not - 64)) | (1 << (CParser.Tilde - 64)) | (1 << (CParser.Question - 64)) | (1 << (CParser.Colon - 64)) | (1 << (CParser.Semi - 64)) | (1 << (CParser.Comma - 64)) | (1 << (CParser.Assign - 64)) | (1 << (CParser.StarAssign - 64)) | (1 << (CParser.DivAssign - 64)) | (1 << (CParser.ModAssign - 64)) | (1 << (CParser.PlusAssign - 64)) | (1 << (CParser.MinusAssign - 64)) | (1 << (CParser.LeftShiftAssign - 64)) | (1 << (CParser.RightShiftAssign - 64)) | (1 << (CParser.AndAssign - 64)) | (1 << (CParser.XorAssign - 64)) | (1 << (CParser.OrAssign - 64)) | (1 << (CParser.Equal - 64)) | (1 << (CParser.NotEqual - 64)) | (1 << (CParser.Arrow - 64)) | (1 << (CParser.Dot - 64)) | (1 << (CParser.Ellipsis - 64)) | (1 << (CParser.Identifier - 64)) | (1 << (CParser.Constant - 64)) | (1 << (CParser.StringLiteral - 64)) | (1 << (CParser.LineDirective - 64)) | (1 << (CParser.PragmaDirective - 64)) | (1 << (CParser.Whitespace - 64)) | (1 << (CParser.Newline - 64)) | (1 << (CParser.BlockComment - 64)) | (1 << (CParser.LineComment - 64)))) != 0):
self.state = 811
token = self._input.LA(1)
if token in [CParser.T__0, CParser.T__1, CParser.T__2, CParser.T__3, CParser.T__4, CParser.T__5, CParser.T__6, CParser.T__7, CParser.T__8, CParser.T__9, CParser.T__10, CParser.T__11, CParser.T__12, CParser.T__13, CParser.Auto, CParser.Break, CParser.Case, CParser.Char, CParser.Const, CParser.Continue, CParser.Default, CParser.Do, CParser.Double, CParser.Else, CParser.Enum, CParser.Extern, CParser.Float, CParser.For, CParser.Goto, CParser.If, CParser.Inline, CParser.Int, CParser.Long, CParser.Register, CParser.Restrict, CParser.Return, CParser.Short, CParser.Signed, CParser.Sizeof, CParser.Static, CParser.Struct, CParser.Switch, CParser.Typedef, CParser.Union, CParser.Unsigned, CParser.Void, CParser.Volatile, CParser.While, CParser.Alignas, CParser.Alignof, CParser.Atomic, CParser.Bool, CParser.Complex, CParser.Generic, CParser.Imaginary, CParser.Noreturn, CParser.StaticAssert, CParser.ThreadLocal, CParser.LeftBracket, CParser.RightBracket, CParser.LeftBrace, CParser.RightBrace, CParser.Less, CParser.LessEqual, CParser.Greater, CParser.GreaterEqual, CParser.LeftShift, CParser.RightShift, CParser.Plus, CParser.PlusPlus, CParser.Minus, CParser.MinusMinus, CParser.Star, CParser.Div, CParser.Mod, CParser.And, CParser.Or, CParser.AndAnd, CParser.OrOr, CParser.Caret, CParser.Not, CParser.Tilde, CParser.Question, CParser.Colon, CParser.Semi, CParser.Comma, CParser.Assign, CParser.StarAssign, CParser.DivAssign, CParser.ModAssign, CParser.PlusAssign, CParser.MinusAssign, CParser.LeftShiftAssign, CParser.RightShiftAssign, CParser.AndAssign, CParser.XorAssign, CParser.OrAssign, CParser.Equal, CParser.NotEqual, CParser.Arrow, CParser.Dot, CParser.Ellipsis, CParser.Identifier, CParser.Constant, CParser.StringLiteral, CParser.LineDirective, CParser.PragmaDirective, CParser.Whitespace, CParser.Newline, CParser.BlockComment, CParser.LineComment]:
self.state = 806
_la = self._input.LA(1)
if _la <= 0 or _la==CParser.LeftParen or _la==CParser.RightParen:
self._errHandler.recoverInline(self)
else:
self.consume()
elif token in [CParser.LeftParen]:
self.state = 807
self.match(CParser.LeftParen)
self.state = 808
self.nestedParenthesesBlock()
self.state = 809
self.match(CParser.RightParen)
else:
raise NoViableAltException(self)
self.state = 815
self._errHandler.sync(self)
_la = self._input.LA(1)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class PointerContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def typeQualifierList(self):
return self.getTypedRuleContext(CParser.TypeQualifierListContext,0)
def pointer(self):
return self.getTypedRuleContext(CParser.PointerContext,0)
def getRuleIndex(self):
return CParser.RULE_pointer
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterPointer" ):
listener.enterPointer(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitPointer" ):
listener.exitPointer(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitPointer" ):
return visitor.visitPointer(self)
else:
return visitor.visitChildren(self)
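    # pointer
    #   : '*' typeQualifierList?
    #   | '*' typeQualifierList? pointer
    #   | '^' typeQualifierList?
    #   | '^' typeQualifierList? pointer
    # The '^' alternatives appear to cover block pointers from the Clang
    # Blocks extension. Decision 79 picks the alternative; decisions 75/77
    # decide whether a trailing qualifier list is present in the
    # non-recursive alternatives.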
def pointer(self):
localctx = CParser.PointerContext(self, self._ctx, self.state)
self.enterRule(localctx, 108, self.RULE_pointer)
self._la = 0 # Token type
try:
self.state = 834
la_ = self._interp.adaptivePredict(self._input,79,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 816
self.match(CParser.Star)
self.state = 818
la_ = self._interp.adaptivePredict(self._input,75,self._ctx)
if la_ == 1:
self.state = 817
self.typeQualifierList(0)
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 820
self.match(CParser.Star)
self.state = 822
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.Const) | (1 << CParser.Restrict) | (1 << CParser.Volatile) | (1 << CParser.Atomic))) != 0):
self.state = 821
self.typeQualifierList(0)
self.state = 824
self.pointer()
pass
elif la_ == 3:
self.enterOuterAlt(localctx, 3)
self.state = 825
self.match(CParser.Caret)
self.state = 827
la_ = self._interp.adaptivePredict(self._input,77,self._ctx)
if la_ == 1:
self.state = 826
self.typeQualifierList(0)
pass
elif la_ == 4:
self.enterOuterAlt(localctx, 4)
self.state = 829
self.match(CParser.Caret)
self.state = 831
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.Const) | (1 << CParser.Restrict) | (1 << CParser.Volatile) | (1 << CParser.Atomic))) != 0):
self.state = 830
self.typeQualifierList(0)
self.state = 833
self.pointer()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class TypeQualifierListContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def typeQualifier(self):
return self.getTypedRuleContext(CParser.TypeQualifierContext,0)
def typeQualifierList(self):
return self.getTypedRuleContext(CParser.TypeQualifierListContext,0)
def getRuleIndex(self):
return CParser.RULE_typeQualifierList
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterTypeQualifierList" ):
listener.enterTypeQualifierList(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitTypeQualifierList" ):
listener.exitTypeQualifierList(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitTypeQualifierList" ):
return visitor.visitTypeQualifierList(self)
else:
return visitor.visitChildren(self)
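    # typeQualifierList (left-recursive)
    #   : typeQualifier
    #   | typeQualifierList typeQualifier
    # Effectively typeQualifier+, built through the recursion-context loop.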
def typeQualifierList(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = CParser.TypeQualifierListContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 110
self.enterRecursionRule(localctx, 110, self.RULE_typeQualifierList, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 837
self.typeQualifier()
self._ctx.stop = self._input.LT(-1)
self.state = 843
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,80,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
localctx = CParser.TypeQualifierListContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_typeQualifierList)
self.state = 839
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 840
self.typeQualifier()
self.state = 845
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,80,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class ParameterTypeListContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def parameterList(self):
return self.getTypedRuleContext(CParser.ParameterListContext,0)
def getRuleIndex(self):
return CParser.RULE_parameterTypeList
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterParameterTypeList" ):
listener.enterParameterTypeList(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitParameterTypeList" ):
listener.exitParameterTypeList(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitParameterTypeList" ):
return visitor.visitParameterTypeList(self)
else:
return visitor.visitChildren(self)
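    # parameterTypeList
    #   : parameterList
    #   | parameterList ',' '...'
    # The second alternative is the variadic form; adaptivePredict (decision
    # 81) looks past the parameter list to spot the trailing ellipsis.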
def parameterTypeList(self):
localctx = CParser.ParameterTypeListContext(self, self._ctx, self.state)
self.enterRule(localctx, 112, self.RULE_parameterTypeList)
try:
self.state = 851
la_ = self._interp.adaptivePredict(self._input,81,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 846
self.parameterList(0)
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 847
self.parameterList(0)
self.state = 848
self.match(CParser.Comma)
self.state = 849
self.match(CParser.Ellipsis)
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ParameterListContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def parameterDeclaration(self):
return self.getTypedRuleContext(CParser.ParameterDeclarationContext,0)
def parameterList(self):
return self.getTypedRuleContext(CParser.ParameterListContext,0)
def getRuleIndex(self):
return CParser.RULE_parameterList
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterParameterList" ):
listener.enterParameterList(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitParameterList" ):
listener.exitParameterList(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitParameterList" ):
return visitor.visitParameterList(self)
else:
return visitor.visitChildren(self)
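    # parameterList (left-recursive)
    #   : parameterDeclaration
    #   | parameterList ',' parameterDeclaration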
def parameterList(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = CParser.ParameterListContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 114
self.enterRecursionRule(localctx, 114, self.RULE_parameterList, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 854
self.parameterDeclaration()
self._ctx.stop = self._input.LT(-1)
self.state = 861
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,82,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
localctx = CParser.ParameterListContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_parameterList)
self.state = 856
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 857
self.match(CParser.Comma)
self.state = 858
self.parameterDeclaration()
self.state = 863
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,82,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class ParameterDeclarationContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def declarationSpecifiers(self):
return self.getTypedRuleContext(CParser.DeclarationSpecifiersContext,0)
def declarator(self):
return self.getTypedRuleContext(CParser.DeclaratorContext,0)
def declarationSpecifiers2(self):
return self.getTypedRuleContext(CParser.DeclarationSpecifiers2Context,0)
def abstractDeclarator(self):
return self.getTypedRuleContext(CParser.AbstractDeclaratorContext,0)
def getRuleIndex(self):
return CParser.RULE_parameterDeclaration
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterParameterDeclaration" ):
listener.enterParameterDeclaration(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitParameterDeclaration" ):
listener.exitParameterDeclaration(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitParameterDeclaration" ):
return visitor.visitParameterDeclaration(self)
else:
return visitor.visitChildren(self)
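    # parameterDeclaration
    #   : declarationSpecifiers declarator
    #   | declarationSpecifiers2 abstractDeclarator?
    # Distinguishing the two takes full ATN prediction (decision 84), since a
    # concrete declarator and an abstract one can share long prefixes.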
def parameterDeclaration(self):
localctx = CParser.ParameterDeclarationContext(self, self._ctx, self.state)
self.enterRule(localctx, 116, self.RULE_parameterDeclaration)
try:
self.state = 871
la_ = self._interp.adaptivePredict(self._input,84,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 864
self.declarationSpecifiers()
self.state = 865
self.declarator()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 867
self.declarationSpecifiers2()
self.state = 869
la_ = self._interp.adaptivePredict(self._input,83,self._ctx)
if la_ == 1:
self.state = 868
self.abstractDeclarator()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class IdentifierListContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def Identifier(self):
return self.getToken(CParser.Identifier, 0)
def identifierList(self):
return self.getTypedRuleContext(CParser.IdentifierListContext,0)
def getRuleIndex(self):
return CParser.RULE_identifierList
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterIdentifierList" ):
listener.enterIdentifierList(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitIdentifierList" ):
listener.exitIdentifierList(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitIdentifierList" ):
return visitor.visitIdentifierList(self)
else:
return visitor.visitChildren(self)
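    # identifierList (left-recursive)
    #   : Identifier
    #   | identifierList ',' Identifier
    # Used by directDeclarator for K&R-style parameter name lists.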
def identifierList(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = CParser.IdentifierListContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 118
self.enterRecursionRule(localctx, 118, self.RULE_identifierList, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 874
self.match(CParser.Identifier)
self._ctx.stop = self._input.LT(-1)
self.state = 881
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,85,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
localctx = CParser.IdentifierListContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_identifierList)
self.state = 876
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 877
self.match(CParser.Comma)
self.state = 878
self.match(CParser.Identifier)
self.state = 883
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,85,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class TypeNameContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def specifierQualifierList(self):
return self.getTypedRuleContext(CParser.SpecifierQualifierListContext,0)
def abstractDeclarator(self):
return self.getTypedRuleContext(CParser.AbstractDeclaratorContext,0)
def getRuleIndex(self):
return CParser.RULE_typeName
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterTypeName" ):
listener.enterTypeName(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitTypeName" ):
listener.exitTypeName(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitTypeName" ):
return visitor.visitTypeName(self)
else:
return visitor.visitChildren(self)
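    # typeName
    #   : specifierQualifierList abstractDeclarator?
    # The optional abstract declarator is predicted on one token of lookahead
    # ('(', '[', '*' or '^').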
def typeName(self):
localctx = CParser.TypeNameContext(self, self._ctx, self.state)
self.enterRule(localctx, 120, self.RULE_typeName)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 884
self.specifierQualifierList()
self.state = 886
_la = self._input.LA(1)
if ((((_la - 59)) & ~0x3f) == 0 and ((1 << (_la - 59)) & ((1 << (CParser.LeftParen - 59)) | (1 << (CParser.LeftBracket - 59)) | (1 << (CParser.Star - 59)) | (1 << (CParser.Caret - 59)))) != 0):
self.state = 885
self.abstractDeclarator()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class AbstractDeclaratorContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def pointer(self):
return self.getTypedRuleContext(CParser.PointerContext,0)
def directAbstractDeclarator(self):
return self.getTypedRuleContext(CParser.DirectAbstractDeclaratorContext,0)
def gccDeclaratorExtension(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(CParser.GccDeclaratorExtensionContext)
else:
return self.getTypedRuleContext(CParser.GccDeclaratorExtensionContext,i)
def getRuleIndex(self):
return CParser.RULE_abstractDeclarator
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterAbstractDeclarator" ):
listener.enterAbstractDeclarator(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitAbstractDeclarator" ):
listener.exitAbstractDeclarator(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitAbstractDeclarator" ):
return visitor.visitAbstractDeclarator(self)
else:
return visitor.visitChildren(self)
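    # abstractDeclarator
    #   : pointer
    #   | pointer? directAbstractDeclarator gccDeclaratorExtension*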
def abstractDeclarator(self):
localctx = CParser.AbstractDeclaratorContext(self, self._ctx, self.state)
self.enterRule(localctx, 122, self.RULE_abstractDeclarator)
self._la = 0 # Token type
try:
self.state = 899
la_ = self._interp.adaptivePredict(self._input,89,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 888
self.pointer()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 890
_la = self._input.LA(1)
if _la==CParser.Star or _la==CParser.Caret:
self.state = 889
self.pointer()
self.state = 892
self.directAbstractDeclarator(0)
self.state = 896
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,88,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
self.state = 893
self.gccDeclaratorExtension()
self.state = 898
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,88,self._ctx)
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class DirectAbstractDeclaratorContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def abstractDeclarator(self):
return self.getTypedRuleContext(CParser.AbstractDeclaratorContext,0)
def gccDeclaratorExtension(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(CParser.GccDeclaratorExtensionContext)
else:
return self.getTypedRuleContext(CParser.GccDeclaratorExtensionContext,i)
def typeQualifierList(self):
return self.getTypedRuleContext(CParser.TypeQualifierListContext,0)
def assignmentExpression(self):
return self.getTypedRuleContext(CParser.AssignmentExpressionContext,0)
def parameterTypeList(self):
return self.getTypedRuleContext(CParser.ParameterTypeListContext,0)
def directAbstractDeclarator(self):
return self.getTypedRuleContext(CParser.DirectAbstractDeclaratorContext,0)
def getRuleIndex(self):
return CParser.RULE_directAbstractDeclarator
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterDirectAbstractDeclarator" ):
listener.enterDirectAbstractDeclarator(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitDirectAbstractDeclarator" ):
listener.exitDirectAbstractDeclarator(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitDirectAbstractDeclarator" ):
return visitor.visitDirectAbstractDeclarator(self)
else:
return visitor.visitChildren(self)
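    # directAbstractDeclarator (left-recursive)
    #   : '(' abstractDeclarator ')' gccDeclaratorExtension*
    #   | '[' typeQualifierList? assignmentExpression? ']'
    #   | '[' 'static' typeQualifierList? assignmentExpression ']'
    #   | '[' typeQualifierList 'static' assignmentExpression ']'
    #   | '[' '*' ']'
    #   | '(' parameterTypeList? ')' gccDeclaratorExtension*
    #   | directAbstractDeclarator '[' typeQualifierList? assignmentExpression? ']'
    #   | directAbstractDeclarator '[' 'static' typeQualifierList? assignmentExpression ']'
    #   | directAbstractDeclarator '[' typeQualifierList 'static' assignmentExpression ']'
    #   | directAbstractDeclarator '[' '*' ']'
    #   | directAbstractDeclarator '(' parameterTypeList? ')' gccDeclaratorExtension*
    # Mirrors directDeclarator, but with no Identifier in the base case.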
def directAbstractDeclarator(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = CParser.DirectAbstractDeclaratorContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 124
self.enterRecursionRule(localctx, 124, self.RULE_directAbstractDeclarator, _p)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 947
la_ = self._interp.adaptivePredict(self._input,96,self._ctx)
if la_ == 1:
self.state = 902
self.match(CParser.LeftParen)
self.state = 903
self.abstractDeclarator()
self.state = 904
self.match(CParser.RightParen)
self.state = 908
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,90,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
self.state = 905
self.gccDeclaratorExtension()
self.state = 910
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,90,self._ctx)
pass
elif la_ == 2:
self.state = 911
self.match(CParser.LeftBracket)
self.state = 913
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.Const) | (1 << CParser.Restrict) | (1 << CParser.Volatile) | (1 << CParser.Atomic))) != 0):
self.state = 912
self.typeQualifierList(0)
self.state = 916
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.T__0) | (1 << CParser.T__1) | (1 << CParser.T__2) | (1 << CParser.Sizeof) | (1 << CParser.Alignof) | (1 << CParser.Generic) | (1 << CParser.LeftParen))) != 0) or ((((_la - 71)) & ~0x3f) == 0 and ((1 << (_la - 71)) & ((1 << (CParser.Plus - 71)) | (1 << (CParser.PlusPlus - 71)) | (1 << (CParser.Minus - 71)) | (1 << (CParser.MinusMinus - 71)) | (1 << (CParser.Star - 71)) | (1 << (CParser.And - 71)) | (1 << (CParser.AndAnd - 71)) | (1 << (CParser.Not - 71)) | (1 << (CParser.Tilde - 71)) | (1 << (CParser.Identifier - 71)) | (1 << (CParser.Constant - 71)) | (1 << (CParser.StringLiteral - 71)))) != 0):
self.state = 915
self.assignmentExpression()
self.state = 918
self.match(CParser.RightBracket)
pass
elif la_ == 3:
self.state = 919
self.match(CParser.LeftBracket)
self.state = 920
self.match(CParser.Static)
self.state = 922
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.Const) | (1 << CParser.Restrict) | (1 << CParser.Volatile) | (1 << CParser.Atomic))) != 0):
self.state = 921
self.typeQualifierList(0)
self.state = 924
self.assignmentExpression()
self.state = 925
self.match(CParser.RightBracket)
pass
elif la_ == 4:
self.state = 927
self.match(CParser.LeftBracket)
self.state = 928
self.typeQualifierList(0)
self.state = 929
self.match(CParser.Static)
self.state = 930
self.assignmentExpression()
self.state = 931
self.match(CParser.RightBracket)
pass
elif la_ == 5:
self.state = 933
self.match(CParser.LeftBracket)
self.state = 934
self.match(CParser.Star)
self.state = 935
self.match(CParser.RightBracket)
pass
elif la_ == 6:
self.state = 936
self.match(CParser.LeftParen)
self.state = 938
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.T__0) | (1 << CParser.T__3) | (1 << CParser.T__4) | (1 << CParser.T__5) | (1 << CParser.T__6) | (1 << CParser.T__7) | (1 << CParser.T__8) | (1 << CParser.T__9) | (1 << CParser.T__11) | (1 << CParser.Auto) | (1 << CParser.Char) | (1 << CParser.Const) | (1 << CParser.Double) | (1 << CParser.Enum) | (1 << CParser.Extern) | (1 << CParser.Float) | (1 << CParser.Inline) | (1 << CParser.Int) | (1 << CParser.Long) | (1 << CParser.Register) | (1 << CParser.Restrict) | (1 << CParser.Short) | (1 << CParser.Signed) | (1 << CParser.Static) | (1 << CParser.Struct) | (1 << CParser.Typedef) | (1 << CParser.Union) | (1 << CParser.Unsigned) | (1 << CParser.Void) | (1 << CParser.Volatile) | (1 << CParser.Alignas) | (1 << CParser.Atomic) | (1 << CParser.Bool) | (1 << CParser.Complex) | (1 << CParser.Noreturn) | (1 << CParser.ThreadLocal))) != 0) or _la==CParser.Identifier:
self.state = 937
self.parameterTypeList()
self.state = 940
self.match(CParser.RightParen)
self.state = 944
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,95,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
self.state = 941
self.gccDeclaratorExtension()
self.state = 946
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,95,self._ctx)
pass
self._ctx.stop = self._input.LT(-1)
self.state = 992
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,103,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
self.state = 990
la_ = self._interp.adaptivePredict(self._input,102,self._ctx)
if la_ == 1:
localctx = CParser.DirectAbstractDeclaratorContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_directAbstractDeclarator)
self.state = 949
if not self.precpred(self._ctx, 5):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 5)")
self.state = 950
self.match(CParser.LeftBracket)
self.state = 952
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.Const) | (1 << CParser.Restrict) | (1 << CParser.Volatile) | (1 << CParser.Atomic))) != 0):
self.state = 951
self.typeQualifierList(0)
self.state = 955
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.T__0) | (1 << CParser.T__1) | (1 << CParser.T__2) | (1 << CParser.Sizeof) | (1 << CParser.Alignof) | (1 << CParser.Generic) | (1 << CParser.LeftParen))) != 0) or ((((_la - 71)) & ~0x3f) == 0 and ((1 << (_la - 71)) & ((1 << (CParser.Plus - 71)) | (1 << (CParser.PlusPlus - 71)) | (1 << (CParser.Minus - 71)) | (1 << (CParser.MinusMinus - 71)) | (1 << (CParser.Star - 71)) | (1 << (CParser.And - 71)) | (1 << (CParser.AndAnd - 71)) | (1 << (CParser.Not - 71)) | (1 << (CParser.Tilde - 71)) | (1 << (CParser.Identifier - 71)) | (1 << (CParser.Constant - 71)) | (1 << (CParser.StringLiteral - 71)))) != 0):
self.state = 954
self.assignmentExpression()
self.state = 957
self.match(CParser.RightBracket)
pass
elif la_ == 2:
localctx = CParser.DirectAbstractDeclaratorContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_directAbstractDeclarator)
self.state = 958
if not self.precpred(self._ctx, 4):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 4)")
self.state = 959
self.match(CParser.LeftBracket)
self.state = 960
self.match(CParser.Static)
self.state = 962
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.Const) | (1 << CParser.Restrict) | (1 << CParser.Volatile) | (1 << CParser.Atomic))) != 0):
self.state = 961
self.typeQualifierList(0)
self.state = 964
self.assignmentExpression()
self.state = 965
self.match(CParser.RightBracket)
pass
elif la_ == 3:
localctx = CParser.DirectAbstractDeclaratorContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_directAbstractDeclarator)
self.state = 967
if not self.precpred(self._ctx, 3):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 3)")
self.state = 968
self.match(CParser.LeftBracket)
self.state = 969
self.typeQualifierList(0)
self.state = 970
self.match(CParser.Static)
self.state = 971
self.assignmentExpression()
self.state = 972
self.match(CParser.RightBracket)
pass
elif la_ == 4:
localctx = CParser.DirectAbstractDeclaratorContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_directAbstractDeclarator)
self.state = 974
if not self.precpred(self._ctx, 2):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 2)")
self.state = 975
self.match(CParser.LeftBracket)
self.state = 976
self.match(CParser.Star)
self.state = 977
self.match(CParser.RightBracket)
pass
elif la_ == 5:
localctx = CParser.DirectAbstractDeclaratorContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_directAbstractDeclarator)
self.state = 978
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 979
self.match(CParser.LeftParen)
self.state = 981
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.T__0) | (1 << CParser.T__3) | (1 << CParser.T__4) | (1 << CParser.T__5) | (1 << CParser.T__6) | (1 << CParser.T__7) | (1 << CParser.T__8) | (1 << CParser.T__9) | (1 << CParser.T__11) | (1 << CParser.Auto) | (1 << CParser.Char) | (1 << CParser.Const) | (1 << CParser.Double) | (1 << CParser.Enum) | (1 << CParser.Extern) | (1 << CParser.Float) | (1 << CParser.Inline) | (1 << CParser.Int) | (1 << CParser.Long) | (1 << CParser.Register) | (1 << CParser.Restrict) | (1 << CParser.Short) | (1 << CParser.Signed) | (1 << CParser.Static) | (1 << CParser.Struct) | (1 << CParser.Typedef) | (1 << CParser.Union) | (1 << CParser.Unsigned) | (1 << CParser.Void) | (1 << CParser.Volatile) | (1 << CParser.Alignas) | (1 << CParser.Atomic) | (1 << CParser.Bool) | (1 << CParser.Complex) | (1 << CParser.Noreturn) | (1 << CParser.ThreadLocal))) != 0) or _la==CParser.Identifier:
self.state = 980
self.parameterTypeList()
self.state = 983
self.match(CParser.RightParen)
self.state = 987
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,101,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
self.state = 984
self.gccDeclaratorExtension()
self.state = 989
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,101,self._ctx)
pass
self.state = 994
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,103,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class TypedefNameContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def Identifier(self):
return self.getToken(CParser.Identifier, 0)
def getRuleIndex(self):
return CParser.RULE_typedefName
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterTypedefName" ):
listener.enterTypedefName(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitTypedefName" ):
listener.exitTypedefName(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitTypedefName" ):
return visitor.visitTypedefName(self)
else:
return visitor.visitChildren(self)
def typedefName(self):
localctx = CParser.TypedefNameContext(self, self._ctx, self.state)
self.enterRule(localctx, 126, self.RULE_typedefName)
try:
self.enterOuterAlt(localctx, 1)
self.state = 995
self.match(CParser.Identifier)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class InitializerContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def assignmentExpression(self):
return self.getTypedRuleContext(CParser.AssignmentExpressionContext,0)
def initializerList(self):
return self.getTypedRuleContext(CParser.InitializerListContext,0)
def getRuleIndex(self):
return CParser.RULE_initializer
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterInitializer" ):
listener.enterInitializer(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitInitializer" ):
listener.exitInitializer(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitInitializer" ):
return visitor.visitInitializer(self)
else:
return visitor.visitChildren(self)
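    # initializer
    #   : assignmentExpression
    #   | '{' initializerList '}'
    #   | '{' initializerList ',' '}'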
def initializer(self):
localctx = CParser.InitializerContext(self, self._ctx, self.state)
self.enterRule(localctx, 128, self.RULE_initializer)
try:
self.state = 1007
la_ = self._interp.adaptivePredict(self._input,104,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 997
self.assignmentExpression()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 998
self.match(CParser.LeftBrace)
self.state = 999
self.initializerList(0)
self.state = 1000
self.match(CParser.RightBrace)
pass
elif la_ == 3:
self.enterOuterAlt(localctx, 3)
self.state = 1002
self.match(CParser.LeftBrace)
self.state = 1003
self.initializerList(0)
self.state = 1004
self.match(CParser.Comma)
self.state = 1005
self.match(CParser.RightBrace)
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class InitializerListContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def initializer(self):
return self.getTypedRuleContext(CParser.InitializerContext,0)
def designation(self):
return self.getTypedRuleContext(CParser.DesignationContext,0)
def initializerList(self):
return self.getTypedRuleContext(CParser.InitializerListContext,0)
def getRuleIndex(self):
return CParser.RULE_initializerList
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterInitializerList" ):
listener.enterInitializerList(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitInitializerList" ):
listener.exitInitializerList(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitInitializerList" ):
return visitor.visitInitializerList(self)
else:
return visitor.visitChildren(self)
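    # initializerList (left-recursive)
    #   : designation? initializer
    #   | initializerList ',' designation? initializer
    # A designation is predicted by one token of lookahead ('[' or '.').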
def initializerList(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = CParser.InitializerListContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 130
self.enterRecursionRule(localctx, 130, self.RULE_initializerList, _p)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 1011
_la = self._input.LA(1)
if _la==CParser.LeftBracket or _la==CParser.Dot:
self.state = 1010
self.designation()
self.state = 1013
self.initializer()
self._ctx.stop = self._input.LT(-1)
self.state = 1023
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,107,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
localctx = CParser.InitializerListContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_initializerList)
self.state = 1015
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 1016
self.match(CParser.Comma)
self.state = 1018
_la = self._input.LA(1)
if _la==CParser.LeftBracket or _la==CParser.Dot:
self.state = 1017
self.designation()
self.state = 1020
self.initializer()
self.state = 1025
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,107,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class DesignationContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def designatorList(self):
return self.getTypedRuleContext(CParser.DesignatorListContext,0)
def getRuleIndex(self):
return CParser.RULE_designation
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterDesignation" ):
listener.enterDesignation(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitDesignation" ):
listener.exitDesignation(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitDesignation" ):
return visitor.visitDesignation(self)
else:
return visitor.visitChildren(self)
def designation(self):
localctx = CParser.DesignationContext(self, self._ctx, self.state)
self.enterRule(localctx, 132, self.RULE_designation)
try:
self.enterOuterAlt(localctx, 1)
self.state = 1026
self.designatorList(0)
self.state = 1027
self.match(CParser.Assign)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class DesignatorListContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def designator(self):
return self.getTypedRuleContext(CParser.DesignatorContext,0)
def designatorList(self):
return self.getTypedRuleContext(CParser.DesignatorListContext,0)
def getRuleIndex(self):
return CParser.RULE_designatorList
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterDesignatorList" ):
listener.enterDesignatorList(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitDesignatorList" ):
listener.exitDesignatorList(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitDesignatorList" ):
return visitor.visitDesignatorList(self)
else:
return visitor.visitChildren(self)
def designatorList(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = CParser.DesignatorListContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 134
self.enterRecursionRule(localctx, 134, self.RULE_designatorList, _p)
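# Left-recursive rule: the grammar's "designatorList : designator | designatorList designator"
# is rewritten by ANTLR into an iteration; adaptivePredict decides whether another
# designator follows, and a precedence predicate (precpred) guards each re-entry.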
try:
self.enterOuterAlt(localctx, 1)
self.state = 1030
self.designator()
self._ctx.stop = self._input.LT(-1)
self.state = 1036
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,108,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
localctx = CParser.DesignatorListContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_designatorList)
self.state = 1032
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 1033
self.designator()
self.state = 1038
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,108,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class DesignatorContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def constantExpression(self):
return self.getTypedRuleContext(CParser.ConstantExpressionContext,0)
def Identifier(self):
return self.getToken(CParser.Identifier, 0)
def getRuleIndex(self):
return CParser.RULE_designator
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterDesignator" ):
listener.enterDesignator(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitDesignator" ):
listener.exitDesignator(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitDesignator" ):
return visitor.visitDesignator(self)
else:
return visitor.visitChildren(self)
def designator(self):
localctx = CParser.DesignatorContext(self, self._ctx, self.state)
self.enterRule(localctx, 136, self.RULE_designator)
try:
self.state = 1045
token = self._input.LA(1)
if token in [CParser.LeftBracket]:
self.enterOuterAlt(localctx, 1)
self.state = 1039
self.match(CParser.LeftBracket)
self.state = 1040
self.constantExpression()
self.state = 1041
self.match(CParser.RightBracket)
elif token in [CParser.Dot]:
self.enterOuterAlt(localctx, 2)
self.state = 1043
self.match(CParser.Dot)
self.state = 1044
self.match(CParser.Identifier)
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class StaticAssertDeclarationContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def constantExpression(self):
return self.getTypedRuleContext(CParser.ConstantExpressionContext,0)
def StringLiteral(self, i:int=None):
if i is None:
return self.getTokens(CParser.StringLiteral)
else:
return self.getToken(CParser.StringLiteral, i)
def getRuleIndex(self):
return CParser.RULE_staticAssertDeclaration
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterStaticAssertDeclaration" ):
listener.enterStaticAssertDeclaration(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitStaticAssertDeclaration" ):
listener.exitStaticAssertDeclaration(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitStaticAssertDeclaration" ):
return visitor.visitStaticAssertDeclaration(self)
else:
return visitor.visitChildren(self)
def staticAssertDeclaration(self):
localctx = CParser.StaticAssertDeclarationContext(self, self._ctx, self.state)
self.enterRule(localctx, 138, self.RULE_staticAssertDeclaration)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 1047
self.match(CParser.StaticAssert)
self.state = 1048
self.match(CParser.LeftParen)
self.state = 1049
self.constantExpression()
self.state = 1050
self.match(CParser.Comma)
self.state = 1052
self._errHandler.sync(self)
_la = self._input.LA(1)
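# StringLiteral+ compiles to a do-while: match one literal unconditionally,
# then keep looping while the next lookahead token is still a StringLiteral.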
while True:
self.state = 1051
self.match(CParser.StringLiteral)
self.state = 1054
self._errHandler.sync(self)
_la = self._input.LA(1)
if not (_la==CParser.StringLiteral):
break
self.state = 1056
self.match(CParser.RightParen)
self.state = 1057
self.match(CParser.Semi)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class StatementContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def labeledStatement(self):
return self.getTypedRuleContext(CParser.LabeledStatementContext,0)
def compoundStatement(self):
return self.getTypedRuleContext(CParser.CompoundStatementContext,0)
def expressionStatement(self):
return self.getTypedRuleContext(CParser.ExpressionStatementContext,0)
def selectionStatement(self):
return self.getTypedRuleContext(CParser.SelectionStatementContext,0)
def iterationStatement(self):
return self.getTypedRuleContext(CParser.IterationStatementContext,0)
def jumpStatement(self):
return self.getTypedRuleContext(CParser.JumpStatementContext,0)
def logicalOrExpression(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(CParser.LogicalOrExpressionContext)
else:
return self.getTypedRuleContext(CParser.LogicalOrExpressionContext,i)
def getRuleIndex(self):
return CParser.RULE_statement
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterStatement" ):
listener.enterStatement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitStatement" ):
listener.exitStatement(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitStatement" ):
return visitor.visitStatement(self)
else:
return visitor.visitChildren(self)
def statement(self):
localctx = CParser.StatementContext(self, self._ctx, self.state)
self.enterRule(localctx, 140, self.RULE_statement)
self._la = 0 # Token type
try:
self.state = 1096
la_ = self._interp.adaptivePredict(self._input,116,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 1059
self.labeledStatement()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 1060
self.compoundStatement()
pass
elif la_ == 3:
self.enterOuterAlt(localctx, 3)
self.state = 1061
self.expressionStatement()
pass
elif la_ == 4:
self.enterOuterAlt(localctx, 4)
self.state = 1062
self.selectionStatement()
pass
elif la_ == 5:
self.enterOuterAlt(localctx, 5)
self.state = 1063
self.iterationStatement()
pass
elif la_ == 6:
self.enterOuterAlt(localctx, 6)
self.state = 1064
self.jumpStatement()
pass
elif la_ == 7:
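# Alternative 7 appears to be the grammar's GCC extended-asm statement form,
# roughly ('__asm'|'__asm__') ('volatile'|'__volatile__') '(' exprs (':' exprs?)* ')' ';'.
# The anonymous T__ constants are the implicit tokens ANTLR creates for
# literals that the grammar never names.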
self.enterOuterAlt(localctx, 7)
self.state = 1065
_la = self._input.LA(1)
if not(_la==CParser.T__10 or _la==CParser.T__12):
self._errHandler.recoverInline(self)
else:
self.consume()
self.state = 1066
_la = self._input.LA(1)
if not(_la==CParser.T__13 or _la==CParser.Volatile):
self._errHandler.recoverInline(self)
else:
self.consume()
self.state = 1067
self.match(CParser.LeftParen)
self.state = 1076
_la = self._input.LA(1)
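# FIRST-set test via bitsets: token types 0-63 are checked against one 64-bit
# mask and the higher types (offset by 71) against a second, so "can an
# expression start here?" costs two shift-and-AND operations instead of a
# long chain of equality comparisons.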
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.T__0) | (1 << CParser.T__1) | (1 << CParser.T__2) | (1 << CParser.Sizeof) | (1 << CParser.Alignof) | (1 << CParser.Generic) | (1 << CParser.LeftParen))) != 0) or ((((_la - 71)) & ~0x3f) == 0 and ((1 << (_la - 71)) & ((1 << (CParser.Plus - 71)) | (1 << (CParser.PlusPlus - 71)) | (1 << (CParser.Minus - 71)) | (1 << (CParser.MinusMinus - 71)) | (1 << (CParser.Star - 71)) | (1 << (CParser.And - 71)) | (1 << (CParser.AndAnd - 71)) | (1 << (CParser.Not - 71)) | (1 << (CParser.Tilde - 71)) | (1 << (CParser.Identifier - 71)) | (1 << (CParser.Constant - 71)) | (1 << (CParser.StringLiteral - 71)))) != 0):
self.state = 1068
self.logicalOrExpression(0)
self.state = 1073
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==CParser.Comma:
self.state = 1069
self.match(CParser.Comma)
self.state = 1070
self.logicalOrExpression(0)
self.state = 1075
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 1091
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==CParser.Colon:
self.state = 1078
self.match(CParser.Colon)
self.state = 1087
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.T__0) | (1 << CParser.T__1) | (1 << CParser.T__2) | (1 << CParser.Sizeof) | (1 << CParser.Alignof) | (1 << CParser.Generic) | (1 << CParser.LeftParen))) != 0) or ((((_la - 71)) & ~0x3f) == 0 and ((1 << (_la - 71)) & ((1 << (CParser.Plus - 71)) | (1 << (CParser.PlusPlus - 71)) | (1 << (CParser.Minus - 71)) | (1 << (CParser.MinusMinus - 71)) | (1 << (CParser.Star - 71)) | (1 << (CParser.And - 71)) | (1 << (CParser.AndAnd - 71)) | (1 << (CParser.Not - 71)) | (1 << (CParser.Tilde - 71)) | (1 << (CParser.Identifier - 71)) | (1 << (CParser.Constant - 71)) | (1 << (CParser.StringLiteral - 71)))) != 0):
self.state = 1079
self.logicalOrExpression(0)
self.state = 1084
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==CParser.Comma:
self.state = 1080
self.match(CParser.Comma)
self.state = 1081
self.logicalOrExpression(0)
self.state = 1086
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 1093
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 1094
self.match(CParser.RightParen)
self.state = 1095
self.match(CParser.Semi)
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class LabeledStatementContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def Identifier(self):
return self.getToken(CParser.Identifier, 0)
def statement(self):
return self.getTypedRuleContext(CParser.StatementContext,0)
def constantExpression(self):
return self.getTypedRuleContext(CParser.ConstantExpressionContext,0)
def getRuleIndex(self):
return CParser.RULE_labeledStatement
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLabeledStatement" ):
listener.enterLabeledStatement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLabeledStatement" ):
listener.exitLabeledStatement(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLabeledStatement" ):
return visitor.visitLabeledStatement(self)
else:
return visitor.visitChildren(self)
def labeledStatement(self):
localctx = CParser.LabeledStatementContext(self, self._ctx, self.state)
self.enterRule(localctx, 142, self.RULE_labeledStatement)
try:
self.state = 1109
token = self._input.LA(1)
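# The three labeledStatement alternatives start with distinct tokens
# (Identifier, Case, Default), so a plain LA(1) switch suffices here and no
# adaptivePredict call is generated.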
if token in [CParser.Identifier]:
self.enterOuterAlt(localctx, 1)
self.state = 1098
self.match(CParser.Identifier)
self.state = 1099
self.match(CParser.Colon)
self.state = 1100
self.statement()
elif token in [CParser.Case]:
self.enterOuterAlt(localctx, 2)
self.state = 1101
self.match(CParser.Case)
self.state = 1102
self.constantExpression()
self.state = 1103
self.match(CParser.Colon)
self.state = 1104
self.statement()
elif token in [CParser.Default]:
self.enterOuterAlt(localctx, 3)
self.state = 1106
self.match(CParser.Default)
self.state = 1107
self.match(CParser.Colon)
self.state = 1108
self.statement()
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class CompoundStatementContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def blockItemList(self):
return self.getTypedRuleContext(CParser.BlockItemListContext,0)
def getRuleIndex(self):
return CParser.RULE_compoundStatement
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterCompoundStatement" ):
listener.enterCompoundStatement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitCompoundStatement" ):
listener.exitCompoundStatement(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitCompoundStatement" ):
return visitor.visitCompoundStatement(self)
else:
return visitor.visitChildren(self)
def compoundStatement(self):
localctx = CParser.CompoundStatementContext(self, self._ctx, self.state)
self.enterRule(localctx, 144, self.RULE_compoundStatement)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 1111
self.match(CParser.LeftBrace)
self.state = 1113
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.T__0) | (1 << CParser.T__1) | (1 << CParser.T__2) | (1 << CParser.T__3) | (1 << CParser.T__4) | (1 << CParser.T__5) | (1 << CParser.T__6) | (1 << CParser.T__7) | (1 << CParser.T__8) | (1 << CParser.T__9) | (1 << CParser.T__10) | (1 << CParser.T__11) | (1 << CParser.T__12) | (1 << CParser.Auto) | (1 << CParser.Break) | (1 << CParser.Case) | (1 << CParser.Char) | (1 << CParser.Const) | (1 << CParser.Continue) | (1 << CParser.Default) | (1 << CParser.Do) | (1 << CParser.Double) | (1 << CParser.Enum) | (1 << CParser.Extern) | (1 << CParser.Float) | (1 << CParser.For) | (1 << CParser.Goto) | (1 << CParser.If) | (1 << CParser.Inline) | (1 << CParser.Int) | (1 << CParser.Long) | (1 << CParser.Register) | (1 << CParser.Restrict) | (1 << CParser.Return) | (1 << CParser.Short) | (1 << CParser.Signed) | (1 << CParser.Sizeof) | (1 << CParser.Static) | (1 << CParser.Struct) | (1 << CParser.Switch) | (1 << CParser.Typedef) | (1 << CParser.Union) | (1 << CParser.Unsigned) | (1 << CParser.Void) | (1 << CParser.Volatile) | (1 << CParser.While) | (1 << CParser.Alignas) | (1 << CParser.Alignof) | (1 << CParser.Atomic) | (1 << CParser.Bool) | (1 << CParser.Complex) | (1 << CParser.Generic) | (1 << CParser.Noreturn) | (1 << CParser.StaticAssert) | (1 << CParser.ThreadLocal) | (1 << CParser.LeftParen) | (1 << CParser.LeftBrace))) != 0) or ((((_la - 71)) & ~0x3f) == 0 and ((1 << (_la - 71)) & ((1 << (CParser.Plus - 71)) | (1 << (CParser.PlusPlus - 71)) | (1 << (CParser.Minus - 71)) | (1 << (CParser.MinusMinus - 71)) | (1 << (CParser.Star - 71)) | (1 << (CParser.And - 71)) | (1 << (CParser.AndAnd - 71)) | (1 << (CParser.Not - 71)) | (1 << (CParser.Tilde - 71)) | (1 << (CParser.Semi - 71)) | (1 << (CParser.Identifier - 71)) | (1 << (CParser.Constant - 71)) | (1 << (CParser.StringLiteral - 71)))) != 0):
self.state = 1112
self.blockItemList(0)
self.state = 1115
self.match(CParser.RightBrace)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class BlockItemListContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def blockItem(self):
return self.getTypedRuleContext(CParser.BlockItemContext,0)
def blockItemList(self):
return self.getTypedRuleContext(CParser.BlockItemListContext,0)
def getRuleIndex(self):
return CParser.RULE_blockItemList
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterBlockItemList" ):
listener.enterBlockItemList(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitBlockItemList" ):
listener.exitBlockItemList(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitBlockItemList" ):
return visitor.visitBlockItemList(self)
else:
return visitor.visitChildren(self)
def blockItemList(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = CParser.BlockItemListContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 146
self.enterRecursionRule(localctx, 146, self.RULE_blockItemList, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 1118
self.blockItem()
self._ctx.stop = self._input.LT(-1)
self.state = 1124
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,119,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
localctx = CParser.BlockItemListContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_blockItemList)
self.state = 1120
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 1121
self.blockItem()
self.state = 1126
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,119,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class BlockItemContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def declaration(self):
return self.getTypedRuleContext(CParser.DeclarationContext,0)
def statement(self):
return self.getTypedRuleContext(CParser.StatementContext,0)
def getRuleIndex(self):
return CParser.RULE_blockItem
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterBlockItem" ):
listener.enterBlockItem(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitBlockItem" ):
listener.exitBlockItem(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitBlockItem" ):
return visitor.visitBlockItem(self)
else:
return visitor.visitChildren(self)
def blockItem(self):
localctx = CParser.BlockItemContext(self, self._ctx, self.state)
self.enterRule(localctx, 148, self.RULE_blockItem)
try:
self.state = 1129
la_ = self._interp.adaptivePredict(self._input,120,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 1127
self.declaration()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 1128
self.statement()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ExpressionStatementContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def expression(self):
return self.getTypedRuleContext(CParser.ExpressionContext,0)
def getRuleIndex(self):
return CParser.RULE_expressionStatement
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterExpressionStatement" ):
listener.enterExpressionStatement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitExpressionStatement" ):
listener.exitExpressionStatement(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitExpressionStatement" ):
return visitor.visitExpressionStatement(self)
else:
return visitor.visitChildren(self)
def expressionStatement(self):
localctx = CParser.ExpressionStatementContext(self, self._ctx, self.state)
self.enterRule(localctx, 150, self.RULE_expressionStatement)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 1132
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.T__0) | (1 << CParser.T__1) | (1 << CParser.T__2) | (1 << CParser.Sizeof) | (1 << CParser.Alignof) | (1 << CParser.Generic) | (1 << CParser.LeftParen))) != 0) or ((((_la - 71)) & ~0x3f) == 0 and ((1 << (_la - 71)) & ((1 << (CParser.Plus - 71)) | (1 << (CParser.PlusPlus - 71)) | (1 << (CParser.Minus - 71)) | (1 << (CParser.MinusMinus - 71)) | (1 << (CParser.Star - 71)) | (1 << (CParser.And - 71)) | (1 << (CParser.AndAnd - 71)) | (1 << (CParser.Not - 71)) | (1 << (CParser.Tilde - 71)) | (1 << (CParser.Identifier - 71)) | (1 << (CParser.Constant - 71)) | (1 << (CParser.StringLiteral - 71)))) != 0):
self.state = 1131
self.expression(0)
self.state = 1134
self.match(CParser.Semi)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class SelectionStatementContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def expression(self):
return self.getTypedRuleContext(CParser.ExpressionContext,0)
def statement(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(CParser.StatementContext)
else:
return self.getTypedRuleContext(CParser.StatementContext,i)
def getRuleIndex(self):
return CParser.RULE_selectionStatement
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterSelectionStatement" ):
listener.enterSelectionStatement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitSelectionStatement" ):
listener.exitSelectionStatement(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitSelectionStatement" ):
return visitor.visitSelectionStatement(self)
else:
return visitor.visitChildren(self)
def selectionStatement(self):
localctx = CParser.SelectionStatementContext(self, self._ctx, self.state)
self.enterRule(localctx, 152, self.RULE_selectionStatement)
try:
self.state = 1151
token = self._input.LA(1)
if token in [CParser.If]:
self.enterOuterAlt(localctx, 1)
self.state = 1136
self.match(CParser.If)
self.state = 1137
self.match(CParser.LeftParen)
self.state = 1138
self.expression(0)
self.state = 1139
self.match(CParser.RightParen)
self.state = 1140
self.statement()
self.state = 1143
la_ = self._interp.adaptivePredict(self._input,122,self._ctx)
if la_ == 1:
self.state = 1141
self.match(CParser.Else)
self.state = 1142
self.statement()
elif token in [CParser.Switch]:
self.enterOuterAlt(localctx, 2)
self.state = 1145
self.match(CParser.Switch)
self.state = 1146
self.match(CParser.LeftParen)
self.state = 1147
self.expression(0)
self.state = 1148
self.match(CParser.RightParen)
self.state = 1149
self.statement()
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class IterationStatementContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def expression(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(CParser.ExpressionContext)
else:
return self.getTypedRuleContext(CParser.ExpressionContext,i)
def statement(self):
return self.getTypedRuleContext(CParser.StatementContext,0)
def declaration(self):
return self.getTypedRuleContext(CParser.DeclarationContext,0)
def getRuleIndex(self):
return CParser.RULE_iterationStatement
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterIterationStatement" ):
listener.enterIterationStatement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitIterationStatement" ):
listener.exitIterationStatement(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitIterationStatement" ):
return visitor.visitIterationStatement(self)
else:
return visitor.visitChildren(self)
def iterationStatement(self):
localctx = CParser.IterationStatementContext(self, self._ctx, self.state)
self.enterRule(localctx, 154, self.RULE_iterationStatement)
self._la = 0 # Token type
try:
self.state = 1195
la_ = self._interp.adaptivePredict(self._input,129,self._ctx)
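# Full adaptive prediction is needed here: the two 'for' alternatives share
# the prefix "For LeftParen" and only diverge at declaration vs. expression,
# which a fixed amount of lookahead cannot always resolve.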
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 1153
self.match(CParser.While)
self.state = 1154
self.match(CParser.LeftParen)
self.state = 1155
self.expression(0)
self.state = 1156
self.match(CParser.RightParen)
self.state = 1157
self.statement()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 1159
self.match(CParser.Do)
self.state = 1160
self.statement()
self.state = 1161
self.match(CParser.While)
self.state = 1162
self.match(CParser.LeftParen)
self.state = 1163
self.expression(0)
self.state = 1164
self.match(CParser.RightParen)
self.state = 1165
self.match(CParser.Semi)
pass
elif la_ == 3:
self.enterOuterAlt(localctx, 3)
self.state = 1167
self.match(CParser.For)
self.state = 1168
self.match(CParser.LeftParen)
self.state = 1170
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.T__0) | (1 << CParser.T__1) | (1 << CParser.T__2) | (1 << CParser.Sizeof) | (1 << CParser.Alignof) | (1 << CParser.Generic) | (1 << CParser.LeftParen))) != 0) or ((((_la - 71)) & ~0x3f) == 0 and ((1 << (_la - 71)) & ((1 << (CParser.Plus - 71)) | (1 << (CParser.PlusPlus - 71)) | (1 << (CParser.Minus - 71)) | (1 << (CParser.MinusMinus - 71)) | (1 << (CParser.Star - 71)) | (1 << (CParser.And - 71)) | (1 << (CParser.AndAnd - 71)) | (1 << (CParser.Not - 71)) | (1 << (CParser.Tilde - 71)) | (1 << (CParser.Identifier - 71)) | (1 << (CParser.Constant - 71)) | (1 << (CParser.StringLiteral - 71)))) != 0):
self.state = 1169
self.expression(0)
self.state = 1172
self.match(CParser.Semi)
self.state = 1174
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.T__0) | (1 << CParser.T__1) | (1 << CParser.T__2) | (1 << CParser.Sizeof) | (1 << CParser.Alignof) | (1 << CParser.Generic) | (1 << CParser.LeftParen))) != 0) or ((((_la - 71)) & ~0x3f) == 0 and ((1 << (_la - 71)) & ((1 << (CParser.Plus - 71)) | (1 << (CParser.PlusPlus - 71)) | (1 << (CParser.Minus - 71)) | (1 << (CParser.MinusMinus - 71)) | (1 << (CParser.Star - 71)) | (1 << (CParser.And - 71)) | (1 << (CParser.AndAnd - 71)) | (1 << (CParser.Not - 71)) | (1 << (CParser.Tilde - 71)) | (1 << (CParser.Identifier - 71)) | (1 << (CParser.Constant - 71)) | (1 << (CParser.StringLiteral - 71)))) != 0):
self.state = 1173
self.expression(0)
self.state = 1176
self.match(CParser.Semi)
self.state = 1178
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.T__0) | (1 << CParser.T__1) | (1 << CParser.T__2) | (1 << CParser.Sizeof) | (1 << CParser.Alignof) | (1 << CParser.Generic) | (1 << CParser.LeftParen))) != 0) or ((((_la - 71)) & ~0x3f) == 0 and ((1 << (_la - 71)) & ((1 << (CParser.Plus - 71)) | (1 << (CParser.PlusPlus - 71)) | (1 << (CParser.Minus - 71)) | (1 << (CParser.MinusMinus - 71)) | (1 << (CParser.Star - 71)) | (1 << (CParser.And - 71)) | (1 << (CParser.AndAnd - 71)) | (1 << (CParser.Not - 71)) | (1 << (CParser.Tilde - 71)) | (1 << (CParser.Identifier - 71)) | (1 << (CParser.Constant - 71)) | (1 << (CParser.StringLiteral - 71)))) != 0):
self.state = 1177
self.expression(0)
self.state = 1180
self.match(CParser.RightParen)
self.state = 1181
self.statement()
pass
elif la_ == 4:
self.enterOuterAlt(localctx, 4)
self.state = 1182
self.match(CParser.For)
self.state = 1183
self.match(CParser.LeftParen)
self.state = 1184
self.declaration()
self.state = 1186
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.T__0) | (1 << CParser.T__1) | (1 << CParser.T__2) | (1 << CParser.Sizeof) | (1 << CParser.Alignof) | (1 << CParser.Generic) | (1 << CParser.LeftParen))) != 0) or ((((_la - 71)) & ~0x3f) == 0 and ((1 << (_la - 71)) & ((1 << (CParser.Plus - 71)) | (1 << (CParser.PlusPlus - 71)) | (1 << (CParser.Minus - 71)) | (1 << (CParser.MinusMinus - 71)) | (1 << (CParser.Star - 71)) | (1 << (CParser.And - 71)) | (1 << (CParser.AndAnd - 71)) | (1 << (CParser.Not - 71)) | (1 << (CParser.Tilde - 71)) | (1 << (CParser.Identifier - 71)) | (1 << (CParser.Constant - 71)) | (1 << (CParser.StringLiteral - 71)))) != 0):
self.state = 1185
self.expression(0)
self.state = 1188
self.match(CParser.Semi)
self.state = 1190
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.T__0) | (1 << CParser.T__1) | (1 << CParser.T__2) | (1 << CParser.Sizeof) | (1 << CParser.Alignof) | (1 << CParser.Generic) | (1 << CParser.LeftParen))) != 0) or ((((_la - 71)) & ~0x3f) == 0 and ((1 << (_la - 71)) & ((1 << (CParser.Plus - 71)) | (1 << (CParser.PlusPlus - 71)) | (1 << (CParser.Minus - 71)) | (1 << (CParser.MinusMinus - 71)) | (1 << (CParser.Star - 71)) | (1 << (CParser.And - 71)) | (1 << (CParser.AndAnd - 71)) | (1 << (CParser.Not - 71)) | (1 << (CParser.Tilde - 71)) | (1 << (CParser.Identifier - 71)) | (1 << (CParser.Constant - 71)) | (1 << (CParser.StringLiteral - 71)))) != 0):
self.state = 1189
self.expression(0)
self.state = 1192
self.match(CParser.RightParen)
self.state = 1193
self.statement()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class JumpStatementContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def Identifier(self):
return self.getToken(CParser.Identifier, 0)
def expression(self):
return self.getTypedRuleContext(CParser.ExpressionContext,0)
def unaryExpression(self):
return self.getTypedRuleContext(CParser.UnaryExpressionContext,0)
def getRuleIndex(self):
return CParser.RULE_jumpStatement
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterJumpStatement" ):
listener.enterJumpStatement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitJumpStatement" ):
listener.exitJumpStatement(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitJumpStatement" ):
return visitor.visitJumpStatement(self)
else:
return visitor.visitChildren(self)
def jumpStatement(self):
localctx = CParser.JumpStatementContext(self, self._ctx, self.state)
self.enterRule(localctx, 156, self.RULE_jumpStatement)
self._la = 0 # Token type
try:
self.state = 1213
la_ = self._interp.adaptivePredict(self._input,131,self._ctx)
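# Alternatives 1 and 5 both begin with Goto: plain "goto Identifier ;" versus
# the GCC computed-goto extension "goto unaryExpression ;", hence the
# adaptivePredict decision rather than a one-token switch.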
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 1197
self.match(CParser.Goto)
self.state = 1198
self.match(CParser.Identifier)
self.state = 1199
self.match(CParser.Semi)
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 1200
self.match(CParser.Continue)
self.state = 1201
self.match(CParser.Semi)
pass
elif la_ == 3:
self.enterOuterAlt(localctx, 3)
self.state = 1202
self.match(CParser.Break)
self.state = 1203
self.match(CParser.Semi)
pass
elif la_ == 4:
self.enterOuterAlt(localctx, 4)
self.state = 1204
self.match(CParser.Return)
self.state = 1206
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.T__0) | (1 << CParser.T__1) | (1 << CParser.T__2) | (1 << CParser.Sizeof) | (1 << CParser.Alignof) | (1 << CParser.Generic) | (1 << CParser.LeftParen))) != 0) or ((((_la - 71)) & ~0x3f) == 0 and ((1 << (_la - 71)) & ((1 << (CParser.Plus - 71)) | (1 << (CParser.PlusPlus - 71)) | (1 << (CParser.Minus - 71)) | (1 << (CParser.MinusMinus - 71)) | (1 << (CParser.Star - 71)) | (1 << (CParser.And - 71)) | (1 << (CParser.AndAnd - 71)) | (1 << (CParser.Not - 71)) | (1 << (CParser.Tilde - 71)) | (1 << (CParser.Identifier - 71)) | (1 << (CParser.Constant - 71)) | (1 << (CParser.StringLiteral - 71)))) != 0):
self.state = 1205
self.expression(0)
self.state = 1208
self.match(CParser.Semi)
pass
elif la_ == 5:
self.enterOuterAlt(localctx, 5)
self.state = 1209
self.match(CParser.Goto)
self.state = 1210
self.unaryExpression()
self.state = 1211
self.match(CParser.Semi)
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class CompilationUnitContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def EOF(self):
return self.getToken(CParser.EOF, 0)
def translationUnit(self):
return self.getTypedRuleContext(CParser.TranslationUnitContext,0)
def getRuleIndex(self):
return CParser.RULE_compilationUnit
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterCompilationUnit" ):
listener.enterCompilationUnit(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitCompilationUnit" ):
listener.exitCompilationUnit(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitCompilationUnit" ):
return visitor.visitCompilationUnit(self)
else:
return visitor.visitChildren(self)
def compilationUnit(self):
localctx = CParser.CompilationUnitContext(self, self._ctx, self.state)
self.enterRule(localctx, 158, self.RULE_compilationUnit)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 1216
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.T__0) | (1 << CParser.T__3) | (1 << CParser.T__4) | (1 << CParser.T__5) | (1 << CParser.T__6) | (1 << CParser.T__7) | (1 << CParser.T__8) | (1 << CParser.T__9) | (1 << CParser.T__11) | (1 << CParser.Auto) | (1 << CParser.Char) | (1 << CParser.Const) | (1 << CParser.Double) | (1 << CParser.Enum) | (1 << CParser.Extern) | (1 << CParser.Float) | (1 << CParser.Inline) | (1 << CParser.Int) | (1 << CParser.Long) | (1 << CParser.Register) | (1 << CParser.Restrict) | (1 << CParser.Short) | (1 << CParser.Signed) | (1 << CParser.Static) | (1 << CParser.Struct) | (1 << CParser.Typedef) | (1 << CParser.Union) | (1 << CParser.Unsigned) | (1 << CParser.Void) | (1 << CParser.Volatile) | (1 << CParser.Alignas) | (1 << CParser.Atomic) | (1 << CParser.Bool) | (1 << CParser.Complex) | (1 << CParser.Noreturn) | (1 << CParser.StaticAssert) | (1 << CParser.ThreadLocal) | (1 << CParser.LeftParen))) != 0) or ((((_la - 75)) & ~0x3f) == 0 and ((1 << (_la - 75)) & ((1 << (CParser.Star - 75)) | (1 << (CParser.Caret - 75)) | (1 << (CParser.Semi - 75)) | (1 << (CParser.Identifier - 75)))) != 0):
self.state = 1215
self.translationUnit(0)
self.state = 1218
self.match(CParser.EOF)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class TranslationUnitContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def externalDeclaration(self):
return self.getTypedRuleContext(CParser.ExternalDeclarationContext,0)
def translationUnit(self):
return self.getTypedRuleContext(CParser.TranslationUnitContext,0)
def getRuleIndex(self):
return CParser.RULE_translationUnit
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterTranslationUnit" ):
listener.enterTranslationUnit(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitTranslationUnit" ):
listener.exitTranslationUnit(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitTranslationUnit" ):
return visitor.visitTranslationUnit(self)
else:
return visitor.visitChildren(self)
def translationUnit(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = CParser.TranslationUnitContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 160
self.enterRecursionRule(localctx, 160, self.RULE_translationUnit, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 1221
self.externalDeclaration()
self._ctx.stop = self._input.LT(-1)
self.state = 1227
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,133,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
localctx = CParser.TranslationUnitContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_translationUnit)
self.state = 1223
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 1224
self.externalDeclaration()
self.state = 1229
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,133,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class ExternalDeclarationContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def functionDefinition(self):
return self.getTypedRuleContext(CParser.FunctionDefinitionContext,0)
def declaration(self):
return self.getTypedRuleContext(CParser.DeclarationContext,0)
def getRuleIndex(self):
return CParser.RULE_externalDeclaration
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterExternalDeclaration" ):
listener.enterExternalDeclaration(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitExternalDeclaration" ):
listener.exitExternalDeclaration(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitExternalDeclaration" ):
return visitor.visitExternalDeclaration(self)
else:
return visitor.visitChildren(self)
def externalDeclaration(self):
localctx = CParser.ExternalDeclarationContext(self, self._ctx, self.state)
self.enterRule(localctx, 162, self.RULE_externalDeclaration)
try:
self.state = 1233
la_ = self._interp.adaptivePredict(self._input,134,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 1230
self.functionDefinition()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 1231
self.declaration()
pass
elif la_ == 3:
self.enterOuterAlt(localctx, 3)
self.state = 1232
self.match(CParser.Semi)
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class FunctionDefinitionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def declarator(self):
return self.getTypedRuleContext(CParser.DeclaratorContext,0)
def compoundStatement(self):
return self.getTypedRuleContext(CParser.CompoundStatementContext,0)
def declarationSpecifiers(self):
return self.getTypedRuleContext(CParser.DeclarationSpecifiersContext,0)
def declarationList(self):
return self.getTypedRuleContext(CParser.DeclarationListContext,0)
def getRuleIndex(self):
return CParser.RULE_functionDefinition
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterFunctionDefinition" ):
listener.enterFunctionDefinition(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitFunctionDefinition" ):
listener.exitFunctionDefinition(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitFunctionDefinition" ):
return visitor.visitFunctionDefinition(self)
else:
return visitor.visitChildren(self)
def functionDefinition(self):
localctx = CParser.FunctionDefinitionContext(self, self._ctx, self.state)
self.enterRule(localctx, 164, self.RULE_functionDefinition)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 1236
la_ = self._interp.adaptivePredict(self._input,135,self._ctx)
if la_ == 1:
self.state = 1235
self.declarationSpecifiers()
self.state = 1238
self.declarator()
self.state = 1240
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.T__0) | (1 << CParser.T__3) | (1 << CParser.T__4) | (1 << CParser.T__5) | (1 << CParser.T__6) | (1 << CParser.T__7) | (1 << CParser.T__8) | (1 << CParser.T__9) | (1 << CParser.T__11) | (1 << CParser.Auto) | (1 << CParser.Char) | (1 << CParser.Const) | (1 << CParser.Double) | (1 << CParser.Enum) | (1 << CParser.Extern) | (1 << CParser.Float) | (1 << CParser.Inline) | (1 << CParser.Int) | (1 << CParser.Long) | (1 << CParser.Register) | (1 << CParser.Restrict) | (1 << CParser.Short) | (1 << CParser.Signed) | (1 << CParser.Static) | (1 << CParser.Struct) | (1 << CParser.Typedef) | (1 << CParser.Union) | (1 << CParser.Unsigned) | (1 << CParser.Void) | (1 << CParser.Volatile) | (1 << CParser.Alignas) | (1 << CParser.Atomic) | (1 << CParser.Bool) | (1 << CParser.Complex) | (1 << CParser.Noreturn) | (1 << CParser.StaticAssert) | (1 << CParser.ThreadLocal))) != 0) or _la==CParser.Identifier:
self.state = 1239
self.declarationList(0)
self.state = 1242
self.compoundStatement()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class DeclarationListContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def declaration(self):
return self.getTypedRuleContext(CParser.DeclarationContext,0)
def declarationList(self):
return self.getTypedRuleContext(CParser.DeclarationListContext,0)
def getRuleIndex(self):
return CParser.RULE_declarationList
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterDeclarationList" ):
listener.enterDeclarationList(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitDeclarationList" ):
listener.exitDeclarationList(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitDeclarationList" ):
return visitor.visitDeclarationList(self)
else:
return visitor.visitChildren(self)
def declarationList(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = CParser.DeclarationListContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 166
self.enterRecursionRule(localctx, 166, self.RULE_declarationList, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 1245
self.declaration()
self._ctx.stop = self._input.LT(-1)
self.state = 1251
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,137,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
localctx = CParser.DeclarationListContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_declarationList)
self.state = 1247
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 1248
self.declaration()
self.state = 1253
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,137,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
def sempred(self, localctx:RuleContext, ruleIndex:int, predIndex:int):
if self._predicates is None:
self._predicates = dict()
self._predicates[2] = self.genericAssocList_sempred
self._predicates[4] = self.postfixExpression_sempred
self._predicates[5] = self.argumentExpressionList_sempred
self._predicates[9] = self.multiplicativeExpression_sempred
self._predicates[10] = self.additiveExpression_sempred
self._predicates[11] = self.shiftExpression_sempred
self._predicates[12] = self.relationalExpression_sempred
self._predicates[13] = self.equalityExpression_sempred
self._predicates[14] = self.andExpression_sempred
self._predicates[15] = self.exclusiveOrExpression_sempred
self._predicates[16] = self.inclusiveOrExpression_sempred
self._predicates[17] = self.logicalAndExpression_sempred
self._predicates[18] = self.logicalOrExpression_sempred
self._predicates[22] = self.expression_sempred
self._predicates[28] = self.initDeclaratorList_sempred
self._predicates[34] = self.structDeclarationList_sempred
self._predicates[37] = self.structDeclaratorList_sempred
self._predicates[40] = self.enumeratorList_sempred
self._predicates[48] = self.directDeclarator_sempred
self._predicates[55] = self.typeQualifierList_sempred
self._predicates[57] = self.parameterList_sempred
self._predicates[59] = self.identifierList_sempred
self._predicates[62] = self.directAbstractDeclarator_sempred
self._predicates[65] = self.initializerList_sempred
self._predicates[67] = self.designatorList_sempred
self._predicates[73] = self.blockItemList_sempred
self._predicates[80] = self.translationUnit_sempred
self._predicates[83] = self.declarationList_sempred
pred = self._predicates.get(ruleIndex, None)
if pred is None:
raise Exception("No predicate with index:" + str(ruleIndex))
else:
return pred(localctx, predIndex)
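# The table above is built lazily on first use and maps a rule index to its
# bound *_sempred method; the ATN simulator calls back through sempred()
# whenever a precedence predicate must be evaluated during prediction.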
def genericAssocList_sempred(self, localctx:GenericAssocListContext, predIndex:int):
if predIndex == 0:
return self.precpred(self._ctx, 1)
def postfixExpression_sempred(self, localctx:PostfixExpressionContext, predIndex:int):
if predIndex == 1:
return self.precpred(self._ctx, 10)
if predIndex == 2:
return self.precpred(self._ctx, 9)
if predIndex == 3:
return self.precpred(self._ctx, 8)
if predIndex == 4:
return self.precpred(self._ctx, 7)
if predIndex == 5:
return self.precpred(self._ctx, 6)
if predIndex == 6:
return self.precpred(self._ctx, 5)
def argumentExpressionList_sempred(self, localctx:ArgumentExpressionListContext, predIndex:int):
if predIndex == 7:
return self.precpred(self._ctx, 1)
def multiplicativeExpression_sempred(self, localctx:MultiplicativeExpressionContext, predIndex:int):
if predIndex == 8:
return self.precpred(self._ctx, 3)
if predIndex == 9:
return self.precpred(self._ctx, 2)
if predIndex == 10:
return self.precpred(self._ctx, 1)
def additiveExpression_sempred(self, localctx:AdditiveExpressionContext, predIndex:int):
if predIndex == 11:
return self.precpred(self._ctx, 2)
if predIndex == 12:
return self.precpred(self._ctx, 1)
def shiftExpression_sempred(self, localctx:ShiftExpressionContext, predIndex:int):
if predIndex == 13:
return self.precpred(self._ctx, 2)
if predIndex == 14:
return self.precpred(self._ctx, 1)
def relationalExpression_sempred(self, localctx:RelationalExpressionContext, predIndex:int):
if predIndex == 15:
return self.precpred(self._ctx, 4)
if predIndex == 16:
return self.precpred(self._ctx, 3)
if predIndex == 17:
return self.precpred(self._ctx, 2)
if predIndex == 18:
return self.precpred(self._ctx, 1)
def equalityExpression_sempred(self, localctx:EqualityExpressionContext, predIndex:int):
if predIndex == 19:
return self.precpred(self._ctx, 2)
if predIndex == 20:
return self.precpred(self._ctx, 1)
def andExpression_sempred(self, localctx:AndExpressionContext, predIndex:int):
if predIndex == 21:
return self.precpred(self._ctx, 1)
def exclusiveOrExpression_sempred(self, localctx:ExclusiveOrExpressionContext, predIndex:int):
if predIndex == 22:
return self.precpred(self._ctx, 1)
def inclusiveOrExpression_sempred(self, localctx:InclusiveOrExpressionContext, predIndex:int):
if predIndex == 23:
return self.precpred(self._ctx, 1)
def logicalAndExpression_sempred(self, localctx:LogicalAndExpressionContext, predIndex:int):
if predIndex == 24:
return self.precpred(self._ctx, 1)
def logicalOrExpression_sempred(self, localctx:LogicalOrExpressionContext, predIndex:int):
if predIndex == 25:
return self.precpred(self._ctx, 1)
def expression_sempred(self, localctx:ExpressionContext, predIndex:int):
if predIndex == 26:
return self.precpred(self._ctx, 1)
def initDeclaratorList_sempred(self, localctx:InitDeclaratorListContext, predIndex:int):
if predIndex == 27:
return self.precpred(self._ctx, 1)
def structDeclarationList_sempred(self, localctx:StructDeclarationListContext, predIndex:int):
if predIndex == 28:
return self.precpred(self._ctx, 1)
def structDeclaratorList_sempred(self, localctx:StructDeclaratorListContext, predIndex:int):
if predIndex == 29:
return self.precpred(self._ctx, 1)
def enumeratorList_sempred(self, localctx:EnumeratorListContext, predIndex:int):
if predIndex == 30:
return self.precpred(self._ctx, 1)
def directDeclarator_sempred(self, localctx:DirectDeclaratorContext, predIndex:int):
if predIndex == 31:
return self.precpred(self._ctx, 6)
if predIndex == 32:
return self.precpred(self._ctx, 5)
if predIndex == 33:
return self.precpred(self._ctx, 4)
if predIndex == 34:
return self.precpred(self._ctx, 3)
if predIndex == 35:
return self.precpred(self._ctx, 2)
if predIndex == 36:
return self.precpred(self._ctx, 1)
def typeQualifierList_sempred(self, localctx:TypeQualifierListContext, predIndex:int):
if predIndex == 37:
return self.precpred(self._ctx, 1)
def parameterList_sempred(self, localctx:ParameterListContext, predIndex:int):
if predIndex == 38:
return self.precpred(self._ctx, 1)
def identifierList_sempred(self, localctx:IdentifierListContext, predIndex:int):
if predIndex == 39:
return self.precpred(self._ctx, 1)
def directAbstractDeclarator_sempred(self, localctx:DirectAbstractDeclaratorContext, predIndex:int):
if predIndex == 40:
return self.precpred(self._ctx, 5)
if predIndex == 41:
return self.precpred(self._ctx, 4)
if predIndex == 42:
return self.precpred(self._ctx, 3)
if predIndex == 43:
return self.precpred(self._ctx, 2)
if predIndex == 44:
return self.precpred(self._ctx, 1)
def initializerList_sempred(self, localctx:InitializerListContext, predIndex:int):
if predIndex == 45:
return self.precpred(self._ctx, 1)
def designatorList_sempred(self, localctx:DesignatorListContext, predIndex:int):
if predIndex == 46:
return self.precpred(self._ctx, 1)
def blockItemList_sempred(self, localctx:BlockItemListContext, predIndex:int):
if predIndex == 47:
return self.precpred(self._ctx, 1)
def translationUnit_sempred(self, localctx:TranslationUnitContext, predIndex:int):
if predIndex == 48:
return self.precpred(self._ctx, 1)
def declarationList_sempred(self, localctx:DeclarationListContext, predIndex:int):
if predIndex == 49:
return self.precpred(self._ctx, 1)
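# Usage sketch (assumes a companion CLexer generated from the same C.g4
# grammar; neither it nor this driver is emitted by the ANTLR tool):
#
#   from antlr4 import InputStream, CommonTokenStream
#   from CLexer import CLexer
#   from CParser import CParser
#
#   source = InputStream("int main(void) { return 0; }")
#   parser = CParser(CommonTokenStream(CLexer(source)))
#   tree = parser.compilationUnit()          # entry rule defined above
#   print(tree.toStringTree(recog=parser))   # LISP-style parse tree dump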
antlr4-python3-runtime-4.9.1/test/run.py
import sys
import os
src_path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'src')
sys.path.insert(0, src_path)
from TestTokenStreamRewriter import TestTokenStreamRewriter
from xpathtest import XPathTest
from TestFileStream import TestFileStream
from TestInputStream import TestInputStream
from TestIntervalSet import TestIntervalSet
from TestRecognizer import TestRecognizer
import unittest
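# unittest.main() collects every TestCase subclass visible in this module's
# namespace (the classes imported above) and runs their test methods.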
unittest.main()
antlr4-python3-runtime-4.9.1/test/xpathtest.py
import antlr4
from antlr4 import InputStream, CommonTokenStream, TerminalNode
from antlr4.xpath.XPath import XPath
import unittest
from expr.ExprParser import ExprParser
from expr.ExprLexer import ExprLexer
def tokenToString(token, ruleNames):
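# Terminal nodes render as their token text; rule contexts render as the name
# of the rule that produced them (looked up in the parser's ruleNames table).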
if isinstance(token, TerminalNode):
return str(token)
else:
return ruleNames[token.getRuleIndex()]
class XPathTest(unittest.TestCase):
def setUp(self):
self.input_stream = InputStream(
"def f(x,y) { x = 3+4; y; ; }\n"
"def g(x) { return 1+2*x; }\n"
)
# Create the Token Stream
self.lexer = ExprLexer(self.input_stream)
self.stream = CommonTokenStream(self.lexer)
self.stream.fill()
# Create the parser and expression parse tree
self.parser = ExprParser(self.stream)
self.tree = self.parser.prog()
def testValidPaths(self):
valid_paths = [
"/prog/func", # all funcs under prog at root
"/prog/*", # all children of prog at root
"/*/func", # all func kids of any root node
"prog", # prog must be root node
"/prog", # prog must be root node
"/*", # any root
"*", # any root
"//ID", # any ID in tree
"//expr/primary/ID", # any ID child of a primary under any expr
"//body//ID", # any ID under a body
"//'return'", # any 'return' literal in tree, matched by literal name
"//RETURN", # any 'return' literal in tree, matched by symbolic name
"//primary/*", # all kids of any primary
"//func/*/stat", # all stat nodes grandkids of any func node
"/prog/func/'def'", # all def literal kids of func kid of prog
"//stat/';'", # all ';' under any stat node
"//expr/primary/!ID",# anything but ID under primary under any expr node
"//expr/!primary", # anything but primary under any expr node
"//!*", # nothing anywhere
"/!*", # nothing at root
"//expr//ID" # any ID under any expression (tests antlr/antlr4#370)
]
expected_results = [
"[func, func]",
"[func, func]",
"[func, func]",
"[prog]",
"[prog]",
"[prog]",
"[prog]",
"[f, x, y, x, y, g, x, x]",
"[y, x]",
"[x, y, x]",
"[return]",
"[return]",
"[3, 4, y, 1, 2, x]",
"[stat, stat, stat, stat]",
"[def, def]",
"[;, ;, ;, ;]",
"[3, 4, 1, 2]",
"[expr, expr, expr, expr, expr, expr]",
"[]",
"[]",
"[y, x]",
]
for path, expected in zip(valid_paths, expected_results):
# Run the query and render the matches as a bracketed, comma-separated list
res = XPath.findAll(self.tree, path, self.parser)
res_str = ", ".join([tokenToString(token, self.parser.ruleNames) for token in res])
res_str = "[%s]" % res_str
# Test against expected output
self.assertEqual(res_str, expected, "Failed test %s" % path)
if __name__ == '__main__':
unittest.main()