pax_global_header00006660000000000000000000000064116146236550014523gustar00rootroot0000000000000052 comment=b3894d7fc2f1367fe7f2659b0fd75b7d154d5a01 nant-0.9.5~git20110729.r1.202a430/000077500000000000000000000000001161462365500154415ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/.gitignore000066400000000000000000000001211161462365500174230ustar00rootroot00000000000000/build.properties /build /ide-build test-results *.pidb *.suo *.user _ReSharper* nant-0.9.5~git20110729.r1.202a430/.project000077500000000000000000000007641161462365500171220ustar00rootroot00000000000000 boo booclipse.core.booBuilder monolipse.core.booBuilder booclipse.core.booNature monolipse.core.booNature nant-0.9.5~git20110729.r1.202a430/.settings/000077500000000000000000000000001161462365500173575ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/.settings/org.eclipse.core.resources.prefs000066400000000000000000000010111161462365500255630ustar00rootroot00000000000000#Wed Apr 07 09:59:42 BRT 2010 eclipse.preferences.version=1 encoding//src/Boo.Lang.Compiler/.monolipse=utf-8 encoding//src/Boo.Lang.Extensions/.monolipse=utf-8 encoding//src/Boo.Lang.Interpreter/.monolipse=utf-8 encoding//src/Boo.Lang.Parser/.monolipse=utf-8 encoding//src/Boo.Lang.PatternMatching/.monolipse=utf-8 encoding//src/Boo.Lang.Useful/.monolipse=utf-8 encoding//src/Boo.Lang/.monolipse=utf-8 encoding//tests/Boo.Lang.PatternMatching.Tests/.monolipse=utf-8 encoding//tests/Boo.Lang.Useful.Tests/.monolipse=utf-8 nant-0.9.5~git20110729.r1.202a430/AUTHORS000066400000000000000000000006201161462365500165070ustar00rootroot00000000000000Rodrigo B. 
De Oliveira Georges Benatti Jr Daniel Grunwald Doug Holton Ian MacLean Peter Johanson Arron Washington Avishay Lavie Cedric Vivier Marcus Griep Jb Evain nant-0.9.5~git20110729.r1.202a430/Makefile.am000066400000000000000000000000251161462365500174720ustar00rootroot00000000000000SUBDIRS = bin extras nant-0.9.5~git20110729.r1.202a430/ast.model.boo000077500000000000000000000263251161462365500200430ustar00rootroot00000000000000""" The classes in this module model the entire AST for the language. The actual AST classes and supporting modules are generated by a boo script. """ namespace Boo.Ast class CompileUnit(Node): Modules as ModuleCollection [Flags] enum TypeMemberModifiers: None = 0 Private = 1 Internal = 2 Protected = 4 Public = 8 Transient = 16 Static = 32 Final = 64 Virtual = 128 Override = 256 Abstract = 512 Partial = 1024 New = 2048 VisibilityMask = 15 enum MethodImplementationFlags: None = 0 Runtime = 1 enum ParameterModifiers: None = 0 Val = 0 Ref = 1 [Flags] enum ExceptionHandlerFlags: None = 0 Anonymous = 1 Untyped = 2 Filter = 4 [Flags] enum GenericParameterConstraints: None = 0 ValueType = 1 ReferenceType = 2 Constructable = 4 Covariant = 8 Contravariant = 16 abstract class TypeMember(Node, INodeWithAttributes): Modifiers as TypeMemberModifiers Name as string Attributes as AttributeCollection class TypeMemberStatement(Statement): TypeMember as TypeMember class ExplicitMemberInfo(Node): InterfaceType as SimpleTypeReference [collection(TypeMember)] class TypeMemberCollection: pass abstract class TypeReference(Node): IsPointer as bool class SimpleTypeReference(TypeReference): Name as string class ArrayTypeReference(TypeReference): ElementType as TypeReference Rank as IntegerLiteralExpression class CallableTypeReference(TypeReference, INodeWithParameters): Parameters as ParameterDeclarationCollection ReturnType as TypeReference class GenericTypeReference(SimpleTypeReference): GenericArguments as TypeReferenceCollection class 
GenericTypeDefinitionReference(SimpleTypeReference): GenericPlaceholders as int [collection(TypeReference)] class TypeReferenceCollection: pass class CallableDefinition(TypeMember, INodeWithParameters, INodeWithGenericParameters): Parameters as ParameterDeclarationCollection GenericParameters as GenericParameterDeclarationCollection ReturnType as TypeReference ReturnTypeAttributes as AttributeCollection abstract class TypeDefinition(TypeMember, INodeWithGenericParameters): Members as TypeMemberCollection BaseTypes as TypeReferenceCollection GenericParameters as GenericParameterDeclarationCollection [collection(TypeDefinition)] class TypeDefinitionCollection: pass class NamespaceDeclaration(Node): Name as string class Import(Node): Namespace as string AssemblyReference as ReferenceExpression Alias as ReferenceExpression [collection(Import)] class ImportCollection: pass class Module(TypeDefinition): Namespace as NamespaceDeclaration Imports as ImportCollection [auto] Globals as Block AssemblyAttributes as AttributeCollection [collection(Module)] class ModuleCollection: pass class ClassDefinition(TypeDefinition): pass class StructDefinition(TypeDefinition): pass class InterfaceDefinition(TypeDefinition): pass class EnumDefinition(TypeDefinition): pass class EnumMember(TypeMember): Initializer as Expression class Field(TypeMember): Type as TypeReference Initializer as Expression IsVolatile as bool class Property(TypeMember, INodeWithParameters, IExplicitMember): Parameters as ParameterDeclarationCollection Getter as Method Setter as Method Type as TypeReference ExplicitInfo as ExplicitMemberInfo class Event(TypeMember): Add as Method Remove as Method Raise as Method Type as TypeReference class Local(Node): Name as string [collection(Local)] class LocalCollection: pass class BlockExpression(Expression, INodeWithParameters, INodeWithBody): Parameters as ParameterDeclarationCollection ReturnType as TypeReference [auto] Body as Block class Method(CallableDefinition, 
IExplicitMember, INodeWithBody): [auto] Body as Block Locals as LocalCollection ImplementationFlags as MethodImplementationFlags ExplicitInfo as ExplicitMemberInfo class Constructor(Method): pass class Destructor(Method): pass class ParameterDeclaration(Node, INodeWithAttributes): Name as string Type as TypeReference Modifiers as ParameterModifiers Attributes as AttributeCollection [collection(ParameterDeclaration)] class ParameterDeclarationCollection: pass class GenericParameterDeclaration(Node): Name as string BaseTypes as TypeReferenceCollection Constraints as GenericParameterConstraints [collection(GenericParameterDeclaration)] class GenericParameterDeclarationCollection: pass class Declaration(Node): Name as string Type as TypeReference [collection(Declaration)] class DeclarationCollection: pass class Attribute(Node, INodeWithArguments): Name as string Arguments as ExpressionCollection NamedArguments as ExpressionPairCollection [collection(Attribute)] class AttributeCollection: pass enum StatementModifierType: None If Unless While class StatementModifier(Node): Type as StatementModifierType Condition as Expression abstract class Statement(Node): Modifier as StatementModifier class GotoStatement(Statement): Label as ReferenceExpression class LabelStatement(Statement): Name as string class Block(Statement): Statements as StatementCollection [collection(Statement)] class StatementCollection: pass class DeclarationStatement(Statement): Declaration as Declaration Initializer as Expression class MacroStatement(Statement, INodeWithBody): Name as string Arguments as ExpressionCollection [auto] Body as Block class TryStatement(Statement): [auto] ProtectedBlock as Block ExceptionHandlers as ExceptionHandlerCollection FailureBlock as Block EnsureBlock as Block class ExceptionHandler(Node): Declaration as Declaration FilterCondition as Expression Flags as ExceptionHandlerFlags [auto] Block as Block [collection(ExceptionHandler)] class ExceptionHandlerCollection: pass 
abstract class ConditionalStatement(Statement): Condition as Expression class IfStatement(ConditionalStatement): TrueBlock as Block FalseBlock as Block class UnlessStatement(ConditionalStatement): [auto] Block as Block class ForStatement(Statement): Declarations as DeclarationCollection Iterator as Expression [auto] Block as Block OrBlock as Block ThenBlock as Block class WhileStatement(ConditionalStatement): [auto] Block as Block OrBlock as Block ThenBlock as Block class BreakStatement(Statement): pass class ContinueStatement(Statement): pass class ReturnStatement(Statement): Expression as Expression class YieldStatement(Statement): Expression as Expression class RaiseStatement(Statement): Exception as Expression class UnpackStatement(Statement): Declarations as DeclarationCollection Expression as Expression class ExpressionStatement(Statement): [LexicalInfo] Expression as Expression abstract class Expression(Node): pass [collection(Expression)] class ExpressionCollection: pass [ignore] class OmittedExpression(Expression): pass class ExpressionPair(Node): First as Expression Second as Expression [collection(ExpressionPair)] class ExpressionPairCollection: pass class MethodInvocationExpression(Expression, INodeWithArguments): Target as Expression Arguments as ExpressionCollection NamedArguments as ExpressionPairCollection enum BinaryOperatorType: None Addition Subtraction Multiply Division Modulus Exponentiation LessThan LessThanOrEqual GreaterThan GreaterThanOrEqual Equality Inequality Match NotMatch Assign InPlaceAddition InPlaceSubtraction InPlaceMultiply InPlaceDivision InPlaceModulus InPlaceBitwiseAnd InPlaceBitwiseOr ReferenceEquality ReferenceInequality TypeTest Member NotMember Or And BitwiseOr BitwiseAnd ExclusiveOr InPlaceExclusiveOr ShiftLeft InPlaceShiftLeft ShiftRight InPlaceShiftRight #values are ready to be used as mask if/when BinaryOperatorType #is changed as in patch attached to BOO-1123 (breaking change) enum BinaryOperatorKind: Arithmetic = 0xF 
Comparison = 0xFF0 TypeComparison = 0xF00 Assignment = 0xFF000 InPlaceAssignment = 0xF0000 Logical = 0x0F00000 Bitwise = 0xF000000 enum UnaryOperatorType: None UnaryNegation Increment Decrement PostIncrement PostDecrement LogicalNot Explode OnesComplement AddressOf Indirection class UnaryExpression(Expression): Operator as UnaryOperatorType Operand as Expression class BinaryExpression(Expression): Operator as BinaryOperatorType Left as Expression Right as Expression class ConditionalExpression(Expression): Condition as Expression TrueValue as Expression FalseValue as Expression class ReferenceExpression(Expression): Name as string class MemberReferenceExpression(ReferenceExpression): Target as Expression class GenericReferenceExpression(Expression): Target as Expression GenericArguments as TypeReferenceCollection abstract class LiteralExpression(Expression): pass class QuasiquoteExpression(LiteralExpression): Node as Node class StringLiteralExpression(LiteralExpression): Value as string class CharLiteralExpression(StringLiteralExpression): pass class TimeSpanLiteralExpression(LiteralExpression): Value as System.TimeSpan class IntegerLiteralExpression(LiteralExpression): Value as long IsLong as bool class DoubleLiteralExpression(LiteralExpression): Value as double IsSingle as bool class NullLiteralExpression(LiteralExpression): pass class SelfLiteralExpression(LiteralExpression): pass class SuperLiteralExpression(LiteralExpression): pass class BoolLiteralExpression(LiteralExpression): Value as bool class RELiteralExpression(LiteralExpression): Value as string class SpliceExpression(Expression): Expression as Expression class SpliceTypeReference(TypeReference): Expression as Expression class SpliceMemberReferenceExpression(Expression): Target as Expression NameExpression as Expression class SpliceTypeMember(TypeMember): TypeMember as TypeMember NameExpression as Expression class SpliceTypeDefinitionBody(TypeMember): Expression as Expression class 
SpliceParameterDeclaration(ParameterDeclaration): ParameterDeclaration as ParameterDeclaration NameExpression as Expression class ExpressionInterpolationExpression(Expression): Expressions as ExpressionCollection class HashLiteralExpression(LiteralExpression): Items as ExpressionPairCollection class ListLiteralExpression(LiteralExpression): Items as ExpressionCollection class CollectionInitializationExpression(Expression): Collection as Expression Initializer as Expression class ArrayLiteralExpression(ListLiteralExpression): Type as ArrayTypeReference class GeneratorExpression(Expression): Expression as Expression Declarations as DeclarationCollection Iterator as Expression Filter as StatementModifier class ExtendedGeneratorExpression(Expression): Items as GeneratorExpressionCollection [collection(GeneratorExpression)] class GeneratorExpressionCollection: pass class Slice(Node): Begin as Expression End as Expression Step as Expression [collection(Slice)] class SliceCollection: pass class SlicingExpression(Expression): Target as Expression Indices as SliceCollection class TryCastExpression(Expression): Target as Expression Type as TypeReference class CastExpression(Expression): Target as Expression Type as TypeReference class TypeofExpression(Expression): Type as TypeReference class CustomStatement(Statement): pass class CustomExpression(Expression): pass class StatementTypeMember(TypeMember): """ Allow for macros and initializing statements inside type definition bodies. 
""" Statement as Statement nant-0.9.5~git20110729.r1.202a430/bin/000077500000000000000000000000001161462365500162115ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/bin/.gitignore000066400000000000000000000000061161462365500201750ustar00rootroot00000000000000*.mdb nant-0.9.5~git20110729.r1.202a430/bin/Boo.Microsoft.Build.targets000077500000000000000000000205241161462365500233330ustar00rootroot00000000000000 $(MSBuildAllProjects);$(BooBinPath)\Boo.Microsoft.Build.targets $(MSBuildAllProjects);$(MSBuildBinPath)\Boo.Microsoft.Build.targets .boo Boo <_Temporary Remove="@(_Temporary)" /> <_Temporary Remove="@(_Temporary)" /> false true false _ComputeNonExistentFileProperty $(ReferencePath);$(BoocToolPath) $(ReferencePath);$(BooBinPath) nant-0.9.5~git20110729.r1.202a430/bin/Makefile.am000066400000000000000000000011521161462365500202440ustar00rootroot00000000000000boodir = $(libdir)/boo boo_DATA = booc.exe booi.exe booish.exe booc.rsp booc.exe.config booi.exe.config booish.exe.config Boo.NAnt.Tasks.dll BOO_GAC_DLLS = Boo.Lang.dll \ Boo.Lang.Useful.dll \ Boo.Lang.Extensions.dll \ Boo.Lang.Compiler.dll \ Boo.Lang.Parser.dll \ Boo.Lang.Interpreter.dll \ Boo.Lang.PatternMatching.dll \ Boo.Lang.CodeDom.dll install-data-hook: for lib in $(BOO_GAC_DLLS); do \ echo "$(GACUTIL) /i $${lib} $(GACUTIL_FLAGS)" ; \ $(GACUTIL) /i $${lib} $(GACUTIL_FLAGS) || exit 1 ; \ done EXTRA_DIST = $(BOO_GAC_DLLS) $(boo_DATA) nant-0.9.5~git20110729.r1.202a430/bin/booc.exe.config000077500000000000000000000003251161462365500211050ustar00rootroot00000000000000 nant-0.9.5~git20110729.r1.202a430/bin/booc.rsp000077500000000000000000000010701161462365500176620ustar00rootroot00000000000000# This file contains command-line options that the Boo # command-line compiler (booc) will process as part # of every compilation, unless the "-noconfig" option # is specified. 
# Reference the common Framework libraries #-r:System (already included by boo) #-r:System.Data -r:System.Drawing #-r:System.Management #-r:System.Messaging #-r:System.Runtime.Remoting #-r:System.Runtime.Serialization.Formatters.Soap # does not work nicely on mono -r:System.Security #-r:System.ServiceProcess #-r:System.Web #-r:System.Web.Services #-r:System.Windows.Forms -r:System.Xml nant-0.9.5~git20110729.r1.202a430/bin/booi.exe.config000077500000000000000000000003251161462365500211130ustar00rootroot00000000000000 nant-0.9.5~git20110729.r1.202a430/bin/booish.exe.config000077500000000000000000000003251161462365500214460ustar00rootroot00000000000000 nant-0.9.5~git20110729.r1.202a430/booc000077500000000000000000000000471161462365500163120ustar00rootroot00000000000000#!/bin/sh env mono build/booc.exe "$@" nant-0.9.5~git20110729.r1.202a430/booi000077500000000000000000000000471161462365500163200ustar00rootroot00000000000000#!/bin/sh env mono build/booi.exe "$@" nant-0.9.5~git20110729.r1.202a430/configure.in.in000066400000000000000000000012641161462365500203620ustar00rootroot00000000000000AC_INIT(boo, VERSION) AM_INIT_AUTOMAKE(boo, VERSION) AC_PATH_PROG(PKG_CONFIG, pkg-config, no) MIME_PREFIX=`$PKG_CONFIG --variable=prefix shared-mime-info` GTKSOURCEVIEW_PREFIX=`$PKG_CONFIG --variable=prefix gtksourceview-1.0` AC_SUBST(MIME_PREFIX) AC_SUBST(GTKSOURCEVIEW_PREFIX) AC_PATH_PROG(RUNTIME, mono) AC_SUBST(RUNTIME) AC_PATH_PROG(GACUTIL, gacutil, no) GACUTIL_FLAGS='/package $(PACKAGE) /gacdir $(DESTDIR)/$(libdir)' AC_SUBST(GACUTIL_FLAGS) AC_OUTPUT([ Makefile bin/Makefile extras/Makefile extras/booc extras/booi extras/booish extras/boo.pc ]) echo " Summary: shared-mime-info prefix: $MIME_PREFIX gtksourceview-1.0 prefix: $GTKSOURCEVIEW_PREFIX boo prefix: $prefix " nant-0.9.5~git20110729.r1.202a430/default.build000077500000000000000000001126311161462365500201150ustar00rootroot00000000000000 print("Hello from boo task!") print("Framework directory: 
${Project.TargetFramework.FrameworkAssemblyDirectory}") for p as System.Collections.DictionaryEntry in Project.Properties: print("${p.Key}: ${p.Value}") Please set the property 'mono.prefix' to point to the prefix of your mono installation (example: c:\dotnet\mono-1.1.8). Use the make-bin-dist or install targets instead on non-win32 platforms. import System.IO fname = "src/Boo.Lang/Builtins.cs" builtins = File.ReadAllText(fname) version = Project.Properties["boo.version"] newBuiltins = @/new System.Version\(".+"\)/.Replace(builtins, "new System.Version(\"${version}\")") if builtins != newBuiltins: print fname File.WriteAllText(fname, newBuiltins) *** In the event of detected defects, a detailed report will be available at: file://${path::combine(path::get-full-path(build.dir), 'gendarme-report.html')} *** nant-0.9.5~git20110729.r1.202a430/docs/000077500000000000000000000000001161462365500163715ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/docs/BooManifesto.sxw000066400000000000000000000354271161462365500215340ustar00rootroot00000000000000PKb219mimetypeapplication/vnd.sun.xml.writerPKb2neTT layout-cachepPP  P P CP oP P P PKb2 content.xml=rFrc]ڍ%J]i+G֮}WyRW!0 0 E'Wuɯk$orO)+jwI77QBR/wڭ&bO2껽o/^ Hٞ f~=R3__1gcksmyreɋ}Ө4A뙸ɖ//u_ҷy~ʷakء7ۧf;#^&8B/_ \Aor^7+ a[6xoߌ <-+7Ǜܓ>g*Nb{v/yOe׼{)_xDf/7¯ר[;[g x/A|{xZ~'cf_Aq{SfYa'(&pTgPا5 e|'M8jX.P?=*Pqpҝr;⫡0cǨù ̎Xqfms]54 S>/f7*9C%W0'*TLV3ܮD!Zj[5d-3gBAYMorڛ#%YZkklkyeFAޥ1Vy5ϰyso*p |Fyn[9l[n6wfif8o3\Ԅo:ʜa-2wÍ;duYĬz5g!P:Wstj'%pSoSʫ9UVsIڜhZtD%r$rE~|!6}1'\ *\?)Eͣ mRCcp),&mRDlSM*:6sf'%Oc76G u,ʔp۪c9'}Z^>> hk]ֱkX'•uyEx^1180D,¿5M|< 'Gʿu֕b^NO;g2EO`㷜|=N_:Ix".WnT~͝z͝E]C!$Gc&C~?[yNFVxufHWzڝu^{Ir'؊ .wk< }_s{7%|&!< e,B1 =6 <4Lb\Z]%Q lUP뛟V+pvU_RV=ͣz^{Nҷ&N\PɈ.vxpp*-Zd@HUĽK>%7q8Ý"fIFY3z/xϺ n%l Vo`%P`!׏A/@ESe!X-a!`/A|/MXhZ*g &}^8LjoPv5(V7:q*OvrwsݘW 4 &r X|(l?aއO@\@i@opxАA4.4RČ8!ұHqT`]=nT"Q*G(Q&P dhh ^_x׸'=`ޑ|02@׀,  [Ȅga^@?IC[`PHӁ Ő$r; $xz kxΠ@HcLs봴HQ/v hX%YIQ@n j  H  }kOJޯ"ע7Ȁ޺KdL۵$asm=zVdz@;K=:-ah$DuqpZ5 z/@a`4;BzF̢! 
& [D[OK | 7j<ɴ5Su Df.*iX :-aƓYKՑehW#s^.4A~gFM(K~BDg:]?=A!TSO!SDgM8X{)y6j[RI4 6ipb aljۥԏUDxH3~hdAA'H2OwfNO E7Hb]ؔ  F?nSCD7ѩz$ ^_R,{_y;g/pl3?W.ΙysvAF/MW~s`OA E3v -wA8=yPQЬ/SB *xԍ2HՇeQ}xi?|j5<10aZ<͌@~\Dǡ!WR ʍOw'bh,{0e8>B(ħ/Fi)\'A'PN0_@v~Qq#<\?0T?)+cچ"։pW=QQNGYpbUks:w8Qx8l|se9ŐTy€O4gܢn @&413D* $c $(zؠIndT+DZ-@l%뫁JBej$A˼so" ڷ&:ɪ^'H*rF0`B!FEBZA9Nw#؀Ӡא0g!L;A]ybޏ㺗 }Xbm+nC>7}\ &ue3 âQ!)kx৮Mʴ_Sbc W9xD8Ⱥ@8N%T$hL2WG-[>v<~M ' i!ڋf nB_#;&Kk~"xϲ~q+V }sgqì\/"}/, ყbbU\ 7bC3~m|ƾjafn`RG<\6bA,xHy~GMYbPc6/pytYؾ x8V='+C͸}UT$n%;|'ZGԐn!ΌX-@WWG:xy6I%`1DcV6 }ČƢA{MXdױ0C5-ƥ)2и*.ɸeVQ?0ŝpzZOX$P.)LMf"cfBَ9.a\ecILǩD 2%M^b,/W&d_!չS[^nAo[޶mD;jz,rqѝ-&JӄXA%{pJ4LTvAma󷩄 őPQ5]c.]TCbx*V3c{*`Q?~'*'M,j30銪s+ea<aj{ƪk+!nߗcPjYa\ByhXLcĒ{Ͼa Q-pyTɫda5!SЬ?R3bj9^5+ЕǵQWAb$E;/;gbEt\}*EIwaV\T& dž]W[aa_bb hs4VϜRo!6l'|$zO"+A`/)c(1?| LOoСZgI"%Gf6T&'w"ōI *eb \Uɮfrxi0zWG&) NTcO0M2TRb|ڀRdgS8&Rz:t\rwqM'Aw< r=lF̃QyE{`[+mXS2ܻfWԱ)}Vc*ݢa {eWaIGUZB\.-LO|e߇*W|T+y2[1xi"Wrկ@ ;ml4:asAr8b]nL soIUe~? qVϸ[yíVGBXBt4o'[XӄIePƂULcɖxv޲RݶX̔M/h 6#X41<>F,7vmE[`00azi7]vZςaC 9Fӡ)p7#E5Wbuw0D׉%&~bC͜VA3o /|?cXve?;y'$y\u p'[]u3]ՁK:ٗq<%7GE}3 aJg24U$~Ƚ1GhѣcJI֬i`9,N~T/hWoUcL.LT4+=HtٮV(!ќEG v+ kڥ#EEةTJ8 h0DYHRW& »B zkjZQ7:{bAfrM+nIh0}K+yi4h? kM#a6Ϊԁ;;g67Hk9 ԈQ7YU>w /?<)(B r7 Ո->:k8 bmv~u~Gudʫ f!l(J?lK,ώ5Y8JH9I+ukxR䚼dh?] Z!uw߸* x.r`bq_ŦFBw@ E&nlw\c7ASb`9XC0G,Xu.E\^04UbX~gvnSb=B]0PVA'XGӝ0S-mi"^G2Kf8\S>5yĀY6ujtĹYSM&vA7NZESdel.H RA*7!3IV)DUEyLe [l(° JaxOf|!.6Z-6B2C=jDZEqd!soT4*$oVAq^AUj\9In>\nJfvA`@ĜV ʃ5H$\&iPnP,$nG؉nC$/(C 5jНVf-lR`^Y&'<\J@̄*]KC,Ry}o\'rk S8k}De-RѸbjm|33  Þ :X  s6ީu}zҤe%-G*foT8м<峓 r\M"Ñ=+- Dx|&& my8ðʹ1\Qʜ|blBVC<)9knP7#,_gFAvqȶj9tjd SAeWj7eӧyPkM)L&sySOՎj L 4D")aPK y&;PKb2 styles.xmlZo6_idY]bmW@%GZl)TߑԧMYv yh w;#yū,u0ѥLibBK7w_]ponÒDe̢"TzB>X8_n__}x㸞1&}[~[Nru\cnؽ RF3/t:ogRj֨c ^'n  ձܧ~J Ñ :͸9 ~? 
r rWʰMG>^5qP/$?Qq+wvKڄccB،87EpZ"~X: 3J HЕl(=aaWEDbaT0]?ߺZfר{kL1'X9DFȨ-%PdBkZ+!Nn6,C.Iu:%ڷ##,D+x["Đgr*'Hˊ^-cYsoH$J9 $` u {"GIo8NQtp~~SSƢ}ULcZl =(1zfﲔAŕ4#7*;0".QXA%nzHDVf+1nJF-]Dˈey V*T TFc4褭8FpB ̮}o9*-ݔ{rU3o*˅.)az81ڨ>}`|)GR8Csfnb3NJNԐ)411\U`8^L.R$AS 0]0Įo44Fjf5{>jb`7e`˜Lbܥ Y\U,vd~J Jc&gH1HNsLSeY/H=ʎZckl$џbѼm0F8Јc6Wm":]`D^%52$u\=eLؒX} lgMy/&Y*GeHwoNgɾD%e8 0l0L륩.U٧T8di0oT-@oti`A dêbulq{JIsEl8n \Oo51fDs"Q`gP`OhNo;mN[JQܩU堲ԡnӶk?d#~az_PK# #PKb2JMmeta.xml OpenOffice.org 1.1.3 (Linux)The boo Programming LanguageRodrigo B. de Oliveira2004-04-24T13:17:012005-06-29T09:22:01en-US348PT15H58M35SPKb2 settings.xmlY[wH~_ǀ$80DxkM74AoJ8g}@WUTu3߶. \Jʅ"qNrCV%pP;r ٠4$e'rd5j~[I(TR ;R6]XItNZs2z?z齴߰.ټGQtيrz)yqWȩt;~0y./~1AxWNy/?4tX;_KJhGC? ?]_!O]O}uY~w>iZoTö4#XGKJ1 "59 |`ӵAg4  dRʋ! 8GsdjYf9șN}U K E\HJYg|Fsꝴ[׮/(FqkbAMds2`/  6d}ᙵh~Ħ7!C\Rk5 FH]L,Ç7pQ8cAT:8Q 0z> i%|K8RL$x>EbE8~A$FE-^k%Ik!dAEZ(;U]w b;9_=!+^z>]I^iXq֭ɿN5I冩,5Tw6m wT:NŬwiOv64UDp7bzvc/EOx04g]"p7_kC:#-x2豥}W`Yn3]A-eȮnM!ŸWǪ|[jl~> |߈堍FI ̮ghnk! )2j- tӠ+icD#H\Hѝ;0."{{rЩB*UjK:΅XrȾPu@%Db|scĉ|f0Og *Wj{MazX>؊FZ]QŰm# AܵdwU}.>F~(-eF). 
jv"]OWR %d2ᾥ d$[ɖilC<\Θ\g2)Vy SixV~od:NT5ML`:3!tWnP;!w%1.Ky_PK4RPKb219mimetypePKb2neTT Dlayout-cachePKb2 y&; content.xmlPKb2# # t'styles.xmlPKb2JM_.meta.xmlPKb2yɲ +3settings.xmlPKb24R8META-INF/manifest.xmlPKm9nant-0.9.5~git20110729.r1.202a430/docs/meta-programming.txt000077500000000000000000000043701161462365500224070ustar00rootroot00000000000000Meta Methods ============ Content published at http://blogs.codehaus.org/people/bamboo/archives/001593_boo_meta_methods.html http://blogs.codehaus.org/people/bamboo/archives/001594_boo_meta_methods_ii.html Context specific quasi-quotation -------------------------------- It is still possible to specify the exact context of quasi-quotation by using special delimiters for: * Expressions - [e| a as int |] # try cast expression * Statements - [s| a as int |] # variable declaration * Type Members - [t| a as int |] # field declaration * Parameters - [p| a as int |] # parameter declaration * Modules - [m| namespace MyNamespace import System.Console WriteLine "hello" |] * Attributes - [a| [property(Name)] |] # attribute ============================================= BEGIN GRAY AREA ============================================= It's an arguable point that a more keyword oriented approach to quasi-quotation would be more in boo's spirit. 
This should be achievable with a very simple meta method definition: meta def code(tree as Node): // returns a code tree that reconstructs // the original tree return CodeSerializer().Serialize(tree) Now our 'using' implementation could be rewritten as the more palatable: meta def using(e as Expression, block as BlockExpression): temp = uniqueName() return code: $temp = $e try: $(block.Body) ensure: if $temp isa IDisposable: ($temp as IDisposable).Dispose() ============================================= END GRAY AREA ============================================= ============================================= BEGIN HAIRY GRAY AREA ============================================= Meta extension methods ====================== [extension(IEnumerable)] meta def each(e as Expression, b as BlockExpression): """ Automatically defines the 'it' variable for iterators Example: (1, 2, 3).each: print it """ return [| for it in $e: $(b.Body) |] Meta operators ======================== meta def =>(x as ReferenceExpression, y as Expression): """ c# lambda syntax operator. 
a => b > 2 """ return [| { $(x.Name) | return $y } |] ============================================= END HAIRY GRAY AREA ============================================= nant-0.9.5~git20110729.r1.202a430/examples/000077500000000000000000000000001161462365500172575ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/examples/BuildingLanguages/000077500000000000000000000000001161462365500226435ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/examples/BuildingLanguages/UsingCSharp/000077500000000000000000000000001161462365500250315ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/examples/BuildingLanguages/UsingCSharp/.gitignore000066400000000000000000000000101161462365500270100ustar00rootroot00000000000000bin obj nant-0.9.5~git20110729.r1.202a430/examples/BuildingLanguages/UsingCSharp/Nih/000077500000000000000000000000001161462365500255475ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/examples/BuildingLanguages/UsingCSharp/Nih/Nih.Tests/000077500000000000000000000000001161462365500273665ustar00rootroot00000000000000CompilerTest.cs000077500000000000000000000012671161462365500322610ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/examples/BuildingLanguages/UsingCSharp/Nih/Nih.Testsusing System; using System.IO; using NUnit.Framework; namespace Nih.Tests { [TestFixture] public class CompilerTest { [Test] public void SayStatement() { var assembly = Nih.Compiler.CompileString("say 3"); var output = CapturingStandardOutput(() => assembly.EntryPoint.Invoke(null, new object[] { null })); Assert.AreEqual("nih! nih! 
nih!", output.Trim()); } private string CapturingStandardOutput(System.Action action) { var oldOut = Console.Out; var newOut = new StringWriter(); Console.SetOut(newOut); try { action(); } finally { Console.SetOut(oldOut); } return newOut.ToString(); } } } Nih.Tests.csproj000077500000000000000000000053111161462365500323530ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/examples/BuildingLanguages/UsingCSharp/Nih/Nih.Tests Debug AnyCPU 9.0.30729 2.0 {A754F13A-CE24-4142-ACD5-A4EB886F7A29} Library Properties Nih.Tests Nih.Tests v3.5 512 true full false bin\Debug\ DEBUG;TRACE prompt 4 pdbonly true bin\Release\ TRACE prompt 4 3.5 {7EAC855C-AB70-4CEB-9B9A-8B60712277E2} Boo.Lang.Compiler {9473BB00-3676-488E-8229-4E197FFB29C5} Nih nant-0.9.5~git20110729.r1.202a430/examples/BuildingLanguages/UsingCSharp/Nih/Nih.Tests/ParserTest.cs000077500000000000000000000013201161462365500320100ustar00rootroot00000000000000using Boo.Lang.Compiler.Ast; using NUnit.Framework; namespace Nih.Tests { [TestFixture] public class ParserTest { [Test] public void Say42IsParsedAsMethodInvocationExpression() { var module = Nih.Parser.ParseModule("say 42"); Assert.AreEqual(0, module.Members.Count); Assert.AreEqual(1, module.Globals.Statements.Count); var stmt = (ExpressionStatement)module.Globals.Statements[0]; var invocation = (MethodInvocationExpression)stmt.Expression; Assert.AreEqual("say", ((ReferenceExpression)invocation.Target).Name); Assert.AreEqual(1, invocation.Arguments.Count); Assert.AreEqual(42, ((IntegerLiteralExpression)invocation.Arguments[0]).Value); } } } nant-0.9.5~git20110729.r1.202a430/examples/BuildingLanguages/UsingCSharp/Nih/Nih.Tests/Properties/000077500000000000000000000000001161462365500315225ustar00rootroot00000000000000AssemblyInfo.cs000077500000000000000000000026261161462365500343760ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/examples/BuildingLanguages/UsingCSharp/Nih/Nih.Tests/Propertiesusing System.Reflection; using 
System.Runtime.CompilerServices; using System.Runtime.InteropServices; // General Information about an assembly is controlled through the following // set of attributes. Change these attribute values to modify the information // associated with an assembly. [assembly: AssemblyTitle("Nih.Tests")] [assembly: AssemblyDescription("")] [assembly: AssemblyConfiguration("")] [assembly: AssemblyCompany("")] [assembly: AssemblyProduct("Nih.Tests")] [assembly: AssemblyCopyright("Copyright © 2010")] [assembly: AssemblyTrademark("")] [assembly: AssemblyCulture("")] // Setting ComVisible to false makes the types in this assembly not visible // to COM components. If you need to access a type in this assembly from // COM, set the ComVisible attribute to true on that type. [assembly: ComVisible(false)] // The following GUID is for the ID of the typelib if this project is exposed to COM [assembly: Guid("0cc494b0-be0f-48d0-9d07-cecbb744ad8d")] // Version information for an assembly consists of the following four values: // // Major Version // Minor Version // Build Number // Revision // // You can specify all the values or you can default the Build and Revision Numbers // by using the '*' as shown below: // [assembly: AssemblyVersion("1.0.*")] [assembly: AssemblyVersion("1.0.0.0")] [assembly: AssemblyFileVersion("1.0.0.0")] nant-0.9.5~git20110729.r1.202a430/examples/BuildingLanguages/UsingCSharp/Nih/Nih.sln000077500000000000000000000103531161462365500270100ustar00rootroot00000000000000 Microsoft Visual Studio Solution File, Format Version 10.00 # Visual Studio 2008 Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Nih", "Nih\Nih.csproj", "{9473BB00-3676-488E-8229-4E197FFB29C5}" EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Nih.Tests", "Nih.Tests\Nih.Tests.csproj", "{A754F13A-CE24-4142-ACD5-A4EB886F7A29}" EndProject Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Boo", "Boo", "{A08C55B5-22EB-4C6A-8937-FA2C8BDC420B}" EndProject 
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Boo.Lang", "..\..\..\..\src\Boo.Lang\Boo.Lang.csproj", "{A359E52E-5E90-40F3-A5F0-257FE2D545EE}" EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Boo.Lang.Compiler", "..\..\..\..\src\Boo.Lang.Compiler\Boo.Lang.Compiler.csproj", "{7EAC855C-AB70-4CEB-9B9A-8B60712277E2}" EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Boo.Lang.Parser", "..\..\..\..\src\Boo.Lang.Parser\Boo.Lang.Parser.csproj", "{BA0306B9-A10B-4D29-B219-0DFBE24741E8}" EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution Debug Net-1.1|Any CPU = Debug Net-1.1|Any CPU Debug|Any CPU = Debug|Any CPU Release|Any CPU = Release|Any CPU EndGlobalSection GlobalSection(ProjectConfigurationPlatforms) = postSolution {9473BB00-3676-488E-8229-4E197FFB29C5}.Debug Net-1.1|Any CPU.ActiveCfg = Debug|Any CPU {9473BB00-3676-488E-8229-4E197FFB29C5}.Debug Net-1.1|Any CPU.Build.0 = Debug|Any CPU {9473BB00-3676-488E-8229-4E197FFB29C5}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {9473BB00-3676-488E-8229-4E197FFB29C5}.Debug|Any CPU.Build.0 = Debug|Any CPU {9473BB00-3676-488E-8229-4E197FFB29C5}.Release|Any CPU.ActiveCfg = Release|Any CPU {9473BB00-3676-488E-8229-4E197FFB29C5}.Release|Any CPU.Build.0 = Release|Any CPU {A754F13A-CE24-4142-ACD5-A4EB886F7A29}.Debug Net-1.1|Any CPU.ActiveCfg = Debug|Any CPU {A754F13A-CE24-4142-ACD5-A4EB886F7A29}.Debug Net-1.1|Any CPU.Build.0 = Debug|Any CPU {A754F13A-CE24-4142-ACD5-A4EB886F7A29}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {A754F13A-CE24-4142-ACD5-A4EB886F7A29}.Debug|Any CPU.Build.0 = Debug|Any CPU {A754F13A-CE24-4142-ACD5-A4EB886F7A29}.Release|Any CPU.ActiveCfg = Release|Any CPU {A754F13A-CE24-4142-ACD5-A4EB886F7A29}.Release|Any CPU.Build.0 = Release|Any CPU {A359E52E-5E90-40F3-A5F0-257FE2D545EE}.Debug Net-1.1|Any CPU.ActiveCfg = Debug Net-1.1|Any CPU {A359E52E-5E90-40F3-A5F0-257FE2D545EE}.Debug Net-1.1|Any CPU.Build.0 = Debug Net-1.1|Any CPU 
{A359E52E-5E90-40F3-A5F0-257FE2D545EE}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {A359E52E-5E90-40F3-A5F0-257FE2D545EE}.Debug|Any CPU.Build.0 = Debug|Any CPU {A359E52E-5E90-40F3-A5F0-257FE2D545EE}.Release|Any CPU.ActiveCfg = Release|Any CPU {A359E52E-5E90-40F3-A5F0-257FE2D545EE}.Release|Any CPU.Build.0 = Release|Any CPU {7EAC855C-AB70-4CEB-9B9A-8B60712277E2}.Debug Net-1.1|Any CPU.ActiveCfg = Debug Net-1.1|Any CPU {7EAC855C-AB70-4CEB-9B9A-8B60712277E2}.Debug Net-1.1|Any CPU.Build.0 = Debug Net-1.1|Any CPU {7EAC855C-AB70-4CEB-9B9A-8B60712277E2}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {7EAC855C-AB70-4CEB-9B9A-8B60712277E2}.Debug|Any CPU.Build.0 = Debug|Any CPU {7EAC855C-AB70-4CEB-9B9A-8B60712277E2}.Release|Any CPU.ActiveCfg = Release|Any CPU {7EAC855C-AB70-4CEB-9B9A-8B60712277E2}.Release|Any CPU.Build.0 = Release|Any CPU {BA0306B9-A10B-4D29-B219-0DFBE24741E8}.Debug Net-1.1|Any CPU.ActiveCfg = Debug Net-1.1|Any CPU {BA0306B9-A10B-4D29-B219-0DFBE24741E8}.Debug Net-1.1|Any CPU.Build.0 = Debug Net-1.1|Any CPU {BA0306B9-A10B-4D29-B219-0DFBE24741E8}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {BA0306B9-A10B-4D29-B219-0DFBE24741E8}.Debug|Any CPU.Build.0 = Debug|Any CPU {BA0306B9-A10B-4D29-B219-0DFBE24741E8}.Release|Any CPU.ActiveCfg = Release|Any CPU {BA0306B9-A10B-4D29-B219-0DFBE24741E8}.Release|Any CPU.Build.0 = Release|Any CPU EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE EndGlobalSection GlobalSection(NestedProjects) = preSolution {A359E52E-5E90-40F3-A5F0-257FE2D545EE} = {A08C55B5-22EB-4C6A-8937-FA2C8BDC420B} {7EAC855C-AB70-4CEB-9B9A-8B60712277E2} = {A08C55B5-22EB-4C6A-8937-FA2C8BDC420B} {BA0306B9-A10B-4D29-B219-0DFBE24741E8} = {A08C55B5-22EB-4C6A-8937-FA2C8BDC420B} EndGlobalSection EndGlobal 
nant-0.9.5~git20110729.r1.202a430/examples/BuildingLanguages/UsingCSharp/Nih/Nih/000077500000000000000000000000001161462365500262655ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/examples/BuildingLanguages/UsingCSharp/Nih/Nih/Compiler.cs000077500000000000000000000012421161462365500303700ustar00rootroot00000000000000using System; using System.Reflection; using Boo.Lang.Compiler; using Boo.Lang.Compiler.IO; using Boo.Lang.Compiler.MetaProgramming; namespace Nih { public class Compiler { public static Assembly CompileString(string code) { var compiler = new BooCompiler(new CompilerParameters(false)); compiler.Parameters.GenerateInMemory = true; compiler.Parameters.Pipeline = new NihPipeline(); compiler.Parameters.Input.Add(new StringInput("string.nih", code)); var result = compiler.Run(); if (result.Errors.Count > 0) throw new CompilationErrorsException(result.Errors); return result.GeneratedAssembly; } } } nant-0.9.5~git20110729.r1.202a430/examples/BuildingLanguages/UsingCSharp/Nih/Nih/Nih.csproj000077500000000000000000000057031161462365500302350ustar00rootroot00000000000000 Debug AnyCPU 9.0.30729 2.0 {9473BB00-3676-488E-8229-4E197FFB29C5} Library Properties Nih Nih v3.5 512 true full false bin\Debug\ DEBUG;TRACE prompt 4 pdbonly true bin\Release\ TRACE prompt 4 3.5 {7EAC855C-AB70-4CEB-9B9A-8B60712277E2} Boo.Lang.Compiler {BA0306B9-A10B-4D29-B219-0DFBE24741E8} Boo.Lang.Parser {A359E52E-5E90-40F3-A5F0-257FE2D545EE} Boo.Lang nant-0.9.5~git20110729.r1.202a430/examples/BuildingLanguages/UsingCSharp/Nih/Nih/NihPipeline.cs000077500000000000000000000014521161462365500310250ustar00rootroot00000000000000using Boo.Lang.Compiler.Ast; using Boo.Lang.Compiler.Steps; namespace Nih { public class NihPipeline : Boo.Lang.Compiler.Pipelines.CompileToMemory { public NihPipeline() { Replace(typeof(Parsing), new NihParsingStep()); InsertAfter(typeof(NihParsingStep), new AddRuntimeImport()); } public class NihParsingStep : AbstractCompilerStep { public override void Run() 
{ foreach (var input in Parameters.Input) using (var reader = input.Open()) CompileUnit.Modules.Add(Parser.ParseModule(reader.ReadToEnd())); } } public class AddRuntimeImport : AbstractCompilerStep { public override void Run() { foreach (var module in CompileUnit.Modules) module.Imports.Add(new Import { Namespace = "Nih.Runtime" }); } } } }nant-0.9.5~git20110729.r1.202a430/examples/BuildingLanguages/UsingCSharp/Nih/Nih/Parser.cs000077500000000000000000000027531161462365500300620ustar00rootroot00000000000000using System; using System.Text.RegularExpressions; using Boo.Lang.Compiler; using Boo.Lang.Compiler.Ast; namespace Nih { /// /// /// Simple recursive descent parser for the nih language which has only a single statement: /// /// say (times)? /// /// public class Parser { public static Module ParseModule(string code) { var parser = new Parser(code); return parser.Parse(); } private string _code; private int _position; private Parser(string code) { _code = code; } private Module Parse() { var module = new Module(); var globals = module.Globals; while (_position < _code.Length) { var expression = ParseNextExpression(); if (expression == null) break; globals.Add(expression); } return module; } private Expression ParseNextExpression() { var sayMatch = Match(SayPattern); var integerMatch = Match(IntegerPattern); return new MethodInvocationExpression( new ReferenceExpression(sayMatch.Value.Trim()), new IntegerLiteralExpression(long.Parse(integerMatch.Value.Trim()))); } private Match Match(Regex pattern) { var m = pattern.Match(_code, _position); if (!m.Success) throw new CompilerError(LexicalInfo.Empty, "Expecting '" + pattern + "'"); _position += m.Length; return m; } static Regex SayPattern = new Regex(@"\s*say"); static Regex IntegerPattern = new Regex(@"\s*\d+"); } } 
nant-0.9.5~git20110729.r1.202a430/examples/BuildingLanguages/UsingCSharp/Nih/Nih/Properties/000077500000000000000000000000001161462365500304215ustar00rootroot00000000000000AssemblyInfo.cs000077500000000000000000000026121161462365500332700ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/examples/BuildingLanguages/UsingCSharp/Nih/Nih/Propertiesusing System.Reflection; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; // General Information about an assembly is controlled through the following // set of attributes. Change these attribute values to modify the information // associated with an assembly. [assembly: AssemblyTitle("Nih")] [assembly: AssemblyDescription("")] [assembly: AssemblyConfiguration("")] [assembly: AssemblyCompany("")] [assembly: AssemblyProduct("Nih")] [assembly: AssemblyCopyright("Copyright © 2010")] [assembly: AssemblyTrademark("")] [assembly: AssemblyCulture("")] // Setting ComVisible to false makes the types in this assembly not visible // to COM components. If you need to access a type in this assembly from // COM, set the ComVisible attribute to true on that type. 
[assembly: ComVisible(false)] // The following GUID is for the ID of the typelib if this project is exposed to COM [assembly: Guid("d7bdeba2-114b-4d79-b8e7-e8d7b6a21ebe")] // Version information for an assembly consists of the following four values: // // Major Version // Minor Version // Build Number // Revision // // You can specify all the values or you can default the Build and Revision Numbers // by using the '*' as shown below: // [assembly: AssemblyVersion("1.0.*")] [assembly: AssemblyVersion("1.0.0.0")] [assembly: AssemblyFileVersion("1.0.0.0")] nant-0.9.5~git20110729.r1.202a430/examples/BuildingLanguages/UsingCSharp/Nih/Nih/Runtime.cs000077500000000000000000000002771161462365500302500ustar00rootroot00000000000000using System; namespace Nih { public static class Runtime { public static void say(int times) { for (int i = 0; i < times; ++i) Console.Write("nih! "); } } } nant-0.9.5~git20110729.r1.202a430/examples/asp.net/000077500000000000000000000000001161462365500206275ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/examples/asp.net/Calendar.aspx000066400000000000000000000027771161462365500232520ustar00rootroot00000000000000

Boo for .NET running in ASP.NET

Please pick a date

nant-0.9.5~git20110729.r1.202a430/examples/asp.net/HelloAspNet.aspx000066400000000000000000000002671161462365500237070ustar00rootroot00000000000000<%@Page Inherits="Boo.Examples.Web.HelloAspNet" %>
nant-0.9.5~git20110729.r1.202a430/examples/asp.net/HelloAspNet.aspx.boo000066400000000000000000000035531161462365500244660ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion namespace Boo.Examples.Web import System import System.Web import System.Web.UI import System.Web.UI.WebControls class HelloAspNet(Page): _message as Label def Page_Load(sender, args as EventArgs): _message.Text = "Hello, asp.net!" 
nant-0.9.5~git20110729.r1.202a430/examples/asp.net/InlineBooButtonClick.aspx000066400000000000000000000006011161462365500255410ustar00rootroot00000000000000

nant-0.9.5~git20110729.r1.202a430/examples/asp.net/InlineBooExpression.aspx000066400000000000000000000003721161462365500254640ustar00rootroot00000000000000<%@ Page Language="Boo" %> Inline Boo Expression

<%= "Hello World from an inline Boo expression!" %>

3*4 is: <%= 3 * 4 %>

nant-0.9.5~git20110729.r1.202a430/examples/asp.net/InlineBooHelloAspNet.aspx000066400000000000000000000006111161462365500254770ustar00rootroot00000000000000<%@ Page Language="Boo" %> ASP.NET Hello World

nant-0.9.5~git20110729.r1.202a430/examples/asp.net/Math.asmx000066400000000000000000000001271161462365500224120ustar00rootroot00000000000000<%@WebService Class="MathService" Language="C#" %> public class MathService : Math { } nant-0.9.5~git20110729.r1.202a430/examples/asp.net/Math.asmx.boo000066400000000000000000000034541161462365500231760ustar00rootroot00000000000000#region license // Copyright (c) 2003, 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#endregion import System.Web.Services [WebService] class Math: [WebMethod] def Add(a as int, b as int): return a+b [WebMethod] def Multiply(a as int, b as int): return a*b nant-0.9.5~git20110729.r1.202a430/examples/asp.net/PrettyPrinter.aspx000066400000000000000000000013151161462365500243570ustar00rootroot00000000000000<%@Page Inherits="Boo.Examples.Web.PrettyPrinterPage" ValidateRequest="False" %>
Type in some boo code


nant-0.9.5~git20110729.r1.202a430/examples/asp.net/PrettyPrinter.aspx.boo000066400000000000000000000066261161462365500251470ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#endregion namespace Boo.Examples.Web import System import System.IO import System.Web import System.Web.UI import System.Web.UI.WebControls import System.Web.UI.HtmlControls import Boo.Lang.Parser import Boo.Lang.Compiler.Ast import Boo.Lang.Compiler.Ast.Visitors class PrettyPrinter(BooPrinterVisitor): Server = HttpContext.Current.Server def constructor(writer as TextWriter): super(writer) override def Write(text as string): Server.HtmlEncode(text, _writer) override def WriteLine(): _writer.Write("
") super() override def WriteKeyword(text as string): _writer.Write("${text}") override def WriteOperator(text as string): _writer.Write("${Server.HtmlEncode(text)}") override def OnExpressionInterpolationExpression(node as ExpressionInterpolationExpression): _writer.Write("") super(node) _writer.Write("") override def WriteStringLiteral(text as string): _writer.Write("") buffer = StringWriter() BooPrinterVisitor.WriteStringLiteral(text, buffer) Server.HtmlEncode(buffer.ToString(), _writer) _writer.Write("") override def OnIntegerLiteralExpression(node as IntegerLiteralExpression): _writer.Write("${node.Value}") class PrettyPrinterPage(Page): _src as TextBox _pretty as HtmlContainerControl def Page_Load(sender, args as EventArgs): PrettyPrint() if Page.IsPostBack def PrettyPrint(): printer = PrettyPrinter(StringWriter(), IndentText: "  ") printer.Print(Parse()) _pretty.InnerHtml = printer.Writer.ToString() def Parse(): return BooParser.ParseString("", _src.Text) nant-0.9.5~git20110729.r1.202a430/examples/asp.net/ScriptRunner.aspx000066400000000000000000000013441161462365500241640ustar00rootroot00000000000000<%@Page Inherits="Boo.Examples.Web.ScriptRunnerPage" ValidateRequest="False" %>
Type in some boo code and press run
Try print(Request.Url), for instance


nant-0.9.5~git20110729.r1.202a430/examples/asp.net/ScriptRunner.aspx.boo000066400000000000000000000072111161462365500247410ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#endregion namespace Boo.Examples.Web import Boo.Lang.Compiler import Boo.Lang.Compiler.Ast import Boo.Lang.Compiler.IO import Boo.Lang.Compiler.Pipelines import Boo.Lang.Compiler.Steps import System import System.IO import System.Web import System.Web.UI import System.Web.UI.WebControls import System.Web.UI.HtmlControls class WebMacro: Console = StringWriter() Context = HttpContext.Current Request = Context.Request Response = Context.Response virtual def print(text): Console.WriteLine(text) override def ToString(): return Console.ToString() abstract def Run(): pass class CreateMacroStep(AbstractCompilerStep): override def Run(): module = CompileUnit.Modules[0] method = Method(Name: "Run", Modifiers: TypeMemberModifiers.Override, Body: module.Globals) module.Globals = Block() macro = ClassDefinition(Name: "__Macro__") macro.BaseTypes.Add(SimpleTypeReference("Boo.Examples.Web.WebMacro")) macro.Members.Add(method) for member in module.Members: macro.Members.Add(member) module.Members.Clear() module.Members.Add(macro) class ScriptRunnerPage(Page): _code as TextBox _console as HtmlGenericControl def _run_Click(sender, args as EventArgs): result = CompileMacro(_code.Text) if len(result.Errors): WriteConsole(join(result.Errors, "\n")) else: type = result.GeneratedAssembly.GetType("__Macro__") macro = cast(WebMacro, type()) macro.Run() WriteConsole(macro.ToString()) def CompileMacro(code): compiler = BooCompiler() compiler.Parameters.Pipeline = CompileToMemory() compiler.Parameters.Input.Add(StringInput("", code)) compiler.Parameters.OutputType = CompilerOutputType.Library compiler.Parameters.References.Add(typeof(WebMacro).Assembly) pipeline = compiler.Parameters.Pipeline pipeline.Insert(1, CreateMacroStep()) return compiler.Run() def WriteConsole(text as string): _console.InnerHtml = Server.HtmlEncode(text).Replace("\n", "
") nant-0.9.5~git20110729.r1.202a430/examples/asp.net/YourName.aspx000066400000000000000000000004661161462365500232710ustar00rootroot00000000000000<%@Page Inherits="Boo.Examples.Web.YourName" %>
What's your name?


nant-0.9.5~git20110729.r1.202a430/examples/asp.net/YourName.aspx.boo000066400000000000000000000036061161462365500240460ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion namespace Boo.Examples.Web import System import System.Web.UI import System.Web.UI.WebControls class YourName(Page): _name as TextBox _message as Label def Page_Load(sender, args as EventArgs): _message.Text = "Hello, ${_name.Text}!" 
if Page.IsPostBack nant-0.9.5~git20110729.r1.202a430/examples/asp.net/default.build000066400000000000000000000030421161462365500232730ustar00rootroot00000000000000 nant-0.9.5~git20110729.r1.202a430/examples/asp.net/web.config000066400000000000000000000006341161462365500225760ustar00rootroot00000000000000 nant-0.9.5~git20110729.r1.202a430/examples/attributes/000077500000000000000000000000001161462365500214455ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/examples/attributes/Async/000077500000000000000000000000001161462365500225225ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/examples/attributes/Async/AsyncAttribute.boo000066400000000000000000000132461161462365500261720ustar00rootroot00000000000000#region license // Copyright (c) 2005, Sorin Ionescu (sorin.ionescu@gmail.com) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Sorin Ionescu nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion namespace Boo.Examples.Attributes import System; import System.Runtime.Remoting.Messaging import Boo.Lang.Compiler import Boo.Lang.Compiler.Ast import Boo.Lang.Compiler.Steps class AsyncAttribute(AbstractAstAttribute): """ Adds asynchronous helpers Begin/End for a method. """ _method as Method _accessModifiers as TypeMemberModifiers _disposed as ReferenceExpression def constructor(): pass def constructor(disposed as ReferenceExpression): _disposed = disposed override def Apply(node as Node): assert node isa Method _method = node if _method.IsProtected and _method.IsInternal: _accessModifiers = TypeMemberModifiers.Protected _accessModifiers = _accessModifiers | TypeMemberModifiers.Internal elif _method.IsPublic: _accessModifiers = TypeMemberModifiers.Public elif _method.IsProtected: _accessModifiers = TypeMemberModifiers.Protected elif _method.IsInternal: _accessModifiers = TypeMemberModifiers.Internal elif _method.IsPrivate: _accessModifiers = TypeMemberModifiers.Private EmitBeginMethod() EmitEndMethod() private def EmitBeginMethod(): beginMethod = Method( self.LexicalInfo, Name: "Begin" + _method.Name, Modifiers: _accessModifiers, ReturnType: CodeBuilder.CreateTypeReference(typeof(IAsyncResult))) beginMethod.Parameters.ExtendWithClones(_method.Parameters) beginMethod.Parameters.Add( ParameterDeclaration( "callback", CodeBuilder.CreateTypeReference(typeof(AsyncCallback)))) beginMethod.Parameters.Add( ParameterDeclaration( "state", 
CodeBuilder.CreateTypeReference(typeof(object)))) asyncInvocation = MethodInvocationExpression( Target: MemberReferenceExpression( ReferenceExpression(_method.Name), "BeginInvoke")) for parameter in beginMethod.Parameters: asyncInvocation.Arguments.Add( ReferenceExpression(parameter.Name)) EmitDisposedObjectCheck(beginMethod) if _disposed is not null beginMethod.Body.Add(ReturnStatement(asyncInvocation)) _method.DeclaringType.Members.Add(beginMethod) private def EmitEndMethod(): endMethod = Method( self.LexicalInfo, Name: "End" + _method.Name, Modifiers: _accessModifiers) endMethod.Parameters.Add( ParameterDeclaration( "result", CodeBuilder.CreateTypeReference(typeof(IAsyncResult)))) asyncInvocation = MethodInvocationExpression( Target: MemberReferenceExpression( ReferenceExpression(_method.Name), "EndInvoke")) asyncInvocation.Arguments.Add(ReferenceExpression("result")) EmitDisposedObjectCheck(endMethod) if _disposed is not null endMethod.Body.Add(ReturnStatement(asyncInvocation)) _method.DeclaringType.Members.Add(endMethod) # cache the voidType reference because we are going # to lose the context after this method returns # (see AbstractCompilerComponent.Dispose) voidType = Context.TypeSystemServices.VoidType Context.Parameters.Pipeline.AfterStep += def (sender, e as CompilerStepEventArgs): if e.Step isa ProcessMethodBodies: if _method.ReturnType.Entity is voidType: returnStatement = endMethod.Body.Statements[-1] as ReturnStatement endMethod.Body.Statements.Replace( returnStatement, ExpressionStatement(returnStatement.Expression)) private def EmitDisposedObjectCheck(method as Method): exceptionCreation = MethodInvocationExpression( Target: MemberReferenceExpression( ReferenceExpression("System"), "ObjectDisposedException")) exceptionCreation.Arguments.Add(StringLiteralExpression(_method.DeclaringType.Name)) # TODO: Access Boo resources to get the exception message. 
exceptionCreation.Arguments.Add(StringLiteralExpression("")) trueBlock = Block() trueBlock.Add(RaiseStatement(exceptionCreation)) method.Body.Add(IfStatement(_disposed.CloneNode(), trueBlock, null)) nant-0.9.5~git20110729.r1.202a430/examples/attributes/Async/AttributeUsage.boo000066400000000000000000000043511161462365500261560ustar00rootroot00000000000000#region license // Copyright (c) 2003, 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#endregion import System import Boo.Examples.Attributes class Foo(IDisposable): _disposed = false [async] def Bar(): print "Foo.Bar" [async] def Baz(): return "Foo.Baz" [async(_disposed)] def Spam(): return "Foo.Spam" def Dispose(): _disposed = true f = Foo() resultBar = f.BeginBar({ print "Bar ended" }, null) resultBaz = f.BeginBaz({ print "Baz ended" }, null) resultSpam = f.BeginSpam({ print "Spam ended" }, null) f.EndBar(resultBar) print f.EndBaz(resultBaz) print f.EndSpam(resultSpam) f.Dispose() try: f.BeginSpam(null, null) print "should never got here!" except x as ObjectDisposedException: print x nant-0.9.5~git20110729.r1.202a430/examples/attributes/Async/default.build000066400000000000000000000013531161462365500251710ustar00rootroot00000000000000 nant-0.9.5~git20110729.r1.202a430/examples/attributes/ViewState/000077500000000000000000000000001161462365500233605ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/examples/attributes/ViewState/AttributeUsage.boo000066400000000000000000000033761161462365500270220ustar00rootroot00000000000000#region license // Copyright (c) 2003, 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. 
// // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion import Boo.Web import System.Web.UI class MyControl(UserControl): [ViewState(Default: 70)] Value as int [ViewState] Text as string nant-0.9.5~git20110729.r1.202a430/examples/attributes/ViewState/ViewStateAttribute.boo000066400000000000000000000064341161462365500276670ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. 
// // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion namespace Boo.Web import Boo.Lang.Compiler import Boo.Lang.Compiler.Ast class ViewStateAttribute(AbstractAstAttribute): [property(Default)] _default as Expression override def Apply(node as Node): assert node isa Field f as Field = node p = Property(Name: f.Name, Type: f.Type) p.Getter = CreateGetter(f) p.Setter = CreateSetter(f) f.ParentNode.Replace(f, p) protected def CreateGetter(f as Field): getter = Method() getter.ReturnType = f.Type if _default: // value = ViewState[] getter.Body.Add(BinaryExpression( BinaryOperatorType.Assign, ReferenceExpression("value"), CreateViewStateSlice(f))) // return value if value getter.Body.Add( ReturnStatement( ReferenceExpression("value"), StatementModifier( StatementModifierType.If, ReferenceExpression("value")))) // return getter.Body.Add(ReturnStatement(_default)) else: // return ViewState[] getter.Body.Add(ReturnStatement(CreateViewStateSlice(f))) return getter protected def CreateSetter(f as Field): setter = Method() // ViewState[] = value setter.Body.Add( BinaryExpression( BinaryOperatorType.Assign, CreateViewStateSlice(f), ReferenceExpression("value"))) return setter protected def CreateViewStateSlice(f as Field): // ViewState[""] slice 
= SlicingExpression() slice.Target = ReferenceExpression("ViewState") slice.Indices.Add( Slice(StringLiteralExpression(f.Name))) return slice nant-0.9.5~git20110729.r1.202a430/examples/attributes/ViewState/default.build000066400000000000000000000012261161462365500260260ustar00rootroot00000000000000 nant-0.9.5~git20110729.r1.202a430/examples/attributes/coroutine/000077500000000000000000000000001161462365500234545ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/examples/attributes/coroutine/CoroutineAttribute.boo000066400000000000000000000365711161462365500300240ustar00rootroot00000000000000#region license // Copyright (c) 2003, 2004, 2005 Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion # @author Cedric Vivier # # [coroutine] AST attribute # # Compile with: booc -t:library -o:Coroutine.dll CoroutineAttribute.dll # # Look at tests/testcases/attributes/coroutine-*.boo for usage examples. # # OPTIONS: # # Looping: bool - Does the coroutine loop? (default: true) # # Default: expression # The value to return whenever the coroutine is terminated # (Looping:false) or when Timeout has expired. # If Default has not been set the coroutine will raise # the corresponding exception (see 'Exceptions' below) # # DefaultLastValue: bool # When true default value is the last value. (default :false) # # Future: bool - Compute the future before it is requested (default: false) # NB: 1st call to the coroutine will be synchronous # # Timeout: long - Timeout (in ms) before a blocking future call will return # Default or raise a CoroutineFutureNotReadyException. # (default: -1 means no timeout) # # ThreadSafe: bool # If true, there will be no lock on the method when Future is # enabled. (default: false) # # Parallel: int - Number of futures to compute in parallel (default: 1) # UNSUPPORTED TODO: # # # EXCEPTIONS: # # CoroutineTerminatedException: # This exception is raised whenever the coroutine has finished # processing and Looping is false # # CoroutineFutureNotReadyException # This exception is raised whenever a Timeout is set and the # result of the coroutine is not yet available. 
# namespace Coroutine import System import System.Collections.Generic import Boo.Lang.Compiler import Boo.Lang.Compiler.Ast import Boo.Lang.Compiler.TypeSystem [AttributeUsage(AttributeTargets.Method)] public class CoroutineAttribute(AbstractAstAttribute): Looping as BoolLiteralExpression: set: _looping = value.Value _looping = true Default as Expression: set: _default = value _defaultSet = true _default as Expression = null _defaultSet = false DefaultLastValue as BoolLiteralExpression: set: _defaultLastValue = value.Value _defaultLastValue = false Future as BoolLiteralExpression: set: _future = value.Value _future = false Timeout as IntegerLiteralExpression: set: _timeout = value.Value _timeout = -1L ThreadSafe as BoolLiteralExpression: set: _threadSafe = value.Value _threadSafe = false Parallel as IntegerLiteralExpression: #TODO: compute parallel futures set: raise NotSupportedException("parallel futures are not yet supported!") _parallel = 1 final ResetEventImplementationTypeName = "System.Threading.AutoResetEvent" final ThreadImplementationTypeName = "System.Threading.Thread" _m as Method _generatorName as string public override def Apply(node as Node): _m = node as Method return if not SanityCheck() _generatorName = "__"+_m.Name #enumerator type if _m.ReturnType is null: _m.ReturnType = SimpleTypeReference("System.Object") et = GenericTypeReference() et.Name = "System.Collections.Generic.IEnumerator" et.GenericArguments.Add(_m.ReturnType) eRef = GetReferenceForNewField("__enumerator", et)#ref to enumerator if _defaultSet or _defaultLastValue: f = CreateField("__default", _m.ReturnType) if _defaultSet: f.Initializer = _default _m.DeclaringType.Members.Add(f) dRef = ReferenceExpression(f.Name)#ref to default value if _future: rRef = GetReferenceForNewField("__reset", SimpleTypeReference(ResetEventImplementationTypeName)) rtRef = GetReferenceForNewField("__resetThread", SimpleTypeReference(ResetEventImplementationTypeName)) tRef = 
GetReferenceForNewField("__thread", SimpleTypeReference(ThreadImplementationTypeName)) f = CreateField("__lock", SimpleTypeReference("System.Object")) f.Initializer = [| System.Object() |] _m.DeclaringType.Members.Add(f) lRef = ReferenceExpression(f.Name)#ref to lock object #generator invocation gInvoc = MethodInvocationExpression(MemberReferenceExpression(SelfLiteralExpression(), _generatorName)) for parameter in _m.Parameters: gInvoc.Arguments.Add(ReferenceExpression(parameter.Name)) facade = _m.Clone() as Method facade.IsSynthetic = true if not _future: facade.Body = [| block: if $eRef is null: $eRef = $gInvoc |].Block moveNext = [| $(eRef).MoveNext() |] if _looping: #$(eRef).Reset() not used to handle new arguments passed in at reset b = [| block: if not $moveNext: $eRef = $gInvoc $moveNext |].Block facade.Body.Add(b) elif _defaultSet or _defaultLastValue: b = [| block: if not $moveNext: return $dRef |].Block facade.Body.Add(b) else: b = [| block: if not $moveNext: raise CoroutineTerminatedException() |].Block facade.Body.Add(b) if _defaultLastValue: b = [| block: $dRef = $(eRef).Current return $dRef |].Block facade.Body.Add(b) else: facade.Body.Add([| return $(eRef).Current |]) #future else: t = Method(tRef.Name+"__callable") t.IsSynthetic = true if _m.IsStatic or _m.DeclaringType isa Module: t.Modifiers |= TypeMemberModifiers.Static t.Modifiers |= TypeMemberModifiers.Private t.Body = [| block: while true: $(rtRef).WaitOne($_timeout, false) break if not $(eRef).MoveNext() $(rRef).Set() $tRef = null $(rRef).Set() |].Block _m.DeclaringType.Members.Add(t) callableRef = ReferenceExpression(t.Name) if _defaultSet: facade.Body = [| block: future as $(_m.ReturnType) = $dRef |].Block else: facade.Body = [| block: future as $(_m.ReturnType) |].Block b = [| block: :init if $eRef is null: $eRef = $gInvoc $rRef = System.Threading.AutoResetEvent(false) $rtRef = System.Threading.AutoResetEvent(true) $tRef = Thread($callableRef) $(tRef).IsBackground = true $(tRef).Name = 
"coroutine ${$(facade.Name)}" $(tRef).Start() if $tRef is not null and $(tRef).IsAlive: gotIt = $(rRef).WaitOne($_timeout, false) |].Block if _looping: b2 = [| block: if $tRef is null or not $(tRef).IsAlive: $eRef = null goto init |].Block elif _defaultSet or _defaultLastValue: b2 = [| block: if $tRef is null or not $(tRef).IsAlive: return $dRef |].Block else: b2 = [| block: if $tRef is null or not $(tRef).IsAlive: raise CoroutineTerminatedException() |].Block b.Add(b2) if _defaultSet or _defaultLastValue: b2 = [| block: if not gotIt: return $dRef |].Block else: b2 = [| block: if not gotIt: raise CoroutineFutureNotReadyException() |].Block b.Add(b2) if _defaultLastValue: b.Add([| $dRef = $(eRef).Current |]) b.Add([| future = $(eRef).Current |]) b.Add([| $(rtRef).Set() |]) if not _threadSafe: lockBlock = [| block: lock $lRef: $b |].Block facade.Body.Add(lockBlock) else: facade.Body.Add(b) facade.Body.Add([| return future |]) #annotate the facade as a coroutine facade.Annotate("boo.coroutine", null) #hide the generator implementation _m.Name = _generatorName _m.Modifiers |= TypeMemberModifiers.Private _m.ReturnType = et #promote the facade _m.DeclaringType.Members.Add(facade) private def CreateField(name as string, type as TypeReference) as Field: f = Field() f.Name = Context.GetUniqueName(name, _generatorName) f.IsSynthetic = true if _m.IsStatic or _m.DeclaringType isa Module: f.Modifiers |= TypeMemberModifiers.Static f.Modifiers |= TypeMemberModifiers.Private f.Type = type return f private def GetReferenceForNewField(name as string, type as TypeReference) as ReferenceExpression: f = CreateField(name, type) _m.DeclaringType.Members.Add(f) return ReferenceExpression(f.Name) private def SanityCheck() as bool: if _m is null: InvalidNodeForAttribute('Method') return false #check arguments if _looping and _defaultSet and not _future: Errors.Add(CompilerErrorFactory.CustomError(self.LexicalInfo, "Looping and Default are mutually exclusive in a non-future context")) return 
false if _defaultSet and _defaultLastValue: Errors.Add(CompilerErrorFactory.CustomError(self.LexicalInfo, "DefaultLastValue and Default are mutually exclusive")) return false if -1L != _timeout and not _future: Errors.Add(CompilerErrorFactory.CustomError(self.LexicalInfo, "Timeout requires Future:true")) return false if 1 != _parallel and not _future: Errors.Add(CompilerErrorFactory.CustomError(self.LexicalInfo, "Parallel requires Future:true")) return false #check if there is at least one yield in the method finder = YieldFinder() finder.Visit(_m) if not finder.Found: Errors.Add(CompilerErrorFactory.CustomError(self.LexicalInfo, "There is no yield statement in the coroutine")) return false return true #HACK: should be a simple way to restart the bind attribute step (?) public def SetCompilerContext(context as CompilerContext): _context = context class CoroutineTerminatedException(System.Exception): pass class CoroutineFutureNotReadyException(System.Exception): pass # # SPAWN # # FIXME: move into their own files blablabla... # # interface ISpawnable: def Execute() as bool static class CoroutineSchedulerManager: Coroutines: get: #FIXME: synchronized dictionary(?) 
return _coroutines _coroutines = List[of ISpawnable]() Slices: get: return _slices _slices = Dictionary[of ISpawnable, int]() Scheduler as ICoroutineScheduler: get: if _scheduler is null: _scheduler = DeadlineCoroutineScheduler() return _scheduler set: if _scheduler and _scheduler.IsRunning: raise InvalidOperationException("Cannot change the scheduler while it is running.") _scheduler = value _scheduler as ICoroutineScheduler = null interface ICoroutineScheduler: IsRunning as bool: get def JoinStart(): pass class DeadlineCoroutineScheduler(ICoroutineScheduler): _lock_IsRunning = object() IsRunning as bool: get: lock _lock_IsRunning: return _isRunning _isRunning = false def JoinStart(): lock _lock_IsRunning: if _isRunning: raise InvalidOperationException("Scheduler is already running.") _isRunning = true coroutines = CoroutineSchedulerManager.Coroutines #FIXME: sync copy slices = CoroutineSchedulerManager.Slices try: :runSlices toRemove as List[of ISpawnable] = null while 0 != coroutines.Count: for c in coroutines: s = slices[c] try: if 1 == s: c.Execute() else: for i in range(0, s): c.Execute() except e as CoroutineTerminatedException: if toRemove is null: toRemove = List[of ISpawnable]() toRemove.Add(c) except e as CoroutineFutureNotReadyException: pass if toRemove is not null: for c in toRemove: coroutines.Remove(c) goto runSlices ensure: lock _lock_IsRunning: _isRunning = false def GetSpawnable(spawnable as MethodInvocationExpression, nss as NameResolutionService, context as CompilerContext): if spawnable is null: raise ArgumentException("first argument must be a ISpawnable instance") spawnClass = nss.Resolve((spawnable.Target as ReferenceExpression).Name, EntityType.Type) as InternalClass if spawnClass is null: raise ArgumentException("spawn is supported on internal types only") ifaces = spawnClass.GetInterfaces() foundISpawnable = false for iface in ifaces: if "Coroutine.ISpawnable" == iface.FullName: foundISpawnable = true break if not foundISpawnable: raise 
ArgumentException("spawn first argument must implement ISpawnable") classDef = spawnClass.Node as ClassDefinition execute = classDef.Members["Execute"] as Method if not execute.ContainsAnnotation("boo.coroutine"): YieldInserter().Visit(execute) execute.ToCodeString() astAttr = CoroutineAttribute(Looping: BoolLiteralExpression(false)) astAttr.SetCompilerContext(context)#HACK: should restart the step somehow astAttr.Apply(execute) return spawnable internal class YieldFinder(DepthFirstVisitor): [property(Found)] _found = false override def OnYieldStatement(node as YieldStatement): _found = true internal class YieldInserter(DepthFirstVisitor): [property(Inserted)] _inserted = 0 override def LeaveBlock(node as Block): node.Add(YieldStatement()) # # spawn macro # Usage: spawn [ISpawnable_instance[, slices]] (slices=1 by default) # Use spawn with no argument to launch execution of your spawnables. # macro spawn: slices = 1 if 0 == len(spawn.Arguments): return [| block: CoroutineSchedulerManager.Scheduler.JoinStart() |].Block if 1 <= len(spawn.Arguments): spawnable = GetSpawnable(spawn.Arguments[0], NameResolutionService, Context) if 2 <= len(spawn.Arguments): if not spawn.Arguments[1] isa IntegerLiteralExpression: raise ArgumentException("second argument 'slices' must be an integer literal") slices = (spawn.Arguments[1] as IntegerLiteralExpression).Value if 3 <= len(spawn.Arguments): raise ArgumentException("Usage is: spawn [ISpawnable instance, [slices]]") return [| block: tmp = $spawnable CoroutineSchedulerManager.Coroutines.Add(tmp) CoroutineSchedulerManager.Slices.Add(tmp, $slices) |].Block nant-0.9.5~git20110729.r1.202a430/examples/attributes/coroutine/coroutine-0.boo000066400000000000000000000040041161462365500263170ustar00rootroot00000000000000#region license // Copyright (c) 2003, 2004, 2005 Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. 
// // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion """ Apply [coroutine] to a method is the 1st step ??? 
is the 2nd step PROFIT is the 3rd step """ import System import Coroutine [coroutine] def Step(): Console.Write("Apply [coroutine] to a method") yield 1 Console.Write("???") yield 2 Console.Write("PROFIT") yield 3 print " is the ${Step()}st step" print " is the ${Step()}nd step" print " is the ${Step()}rd step" nant-0.9.5~git20110729.r1.202a430/examples/attributes/coroutine/coroutine-1.boo000066400000000000000000000041011161462365500263160ustar00rootroot00000000000000#region license // Copyright (c) 2003, 2004, 2005 Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion """ Apply [coroutine] to a method is the 1st step ??? is the 2nd step PROFIT is the 3rd step Goodbye! """ import System import Coroutine [coroutine(Looping: false, Default: "Goodbye!")] def Step(): Console.Write("Apply [coroutine] to a method") yield 1 Console.Write("???") yield 2 Console.Write("PROFIT") yield 3 print " is the ${Step()}st step" print " is the ${Step()}nd step" print " is the ${Step()}rd step" print Step() nant-0.9.5~git20110729.r1.202a430/examples/attributes/coroutine/coroutine-2.boo000066400000000000000000000046471161462365500263360ustar00rootroot00000000000000#region license // Copyright (c) 2003, 2004, 2005 Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. 
// // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion """ 2007 is the past! Let's count up to three : 1 2 3 2008 is the future! Last year was 2007 """ import System import System.Threading import Coroutine [coroutine(Future: true)] def LongComputation(): yield 2007 Thread.Sleep(3.3s) #long computation yield 2008 print LongComputation()+" is the past!" #first call to future is synchronous Console.Write("Let's count up to three :") for i in range(1, 4): Thread.Sleep(1s) Console.Write(" "+i) Console.Write(Environment.NewLine) print LongComputation()+" is the future!" #this will return almost instantaneously (~.3s) #delay is the delta between the time we've been counting and the duration of computation in the future #if you add Timeout:0L in the coroutine arguments, you will get a CoroutineFutureNotReadyException print "Last year was "+LongComputation() nant-0.9.5~git20110729.r1.202a430/examples/attributes/coroutine/coroutine-3.boo000066400000000000000000000043541161462365500263320ustar00rootroot00000000000000#region license // Copyright (c) 2003, 2004, 2005 Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. 
// // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion """ 2007 is the past! Let's count up to three : 1 2 3 2008 is the future! OK! Terminated! """ import System import System.Threading import Coroutine [coroutine(Future: true, Looping: false)] def LongComputation(): yield 2007 Thread.Sleep(3.3s) #long computation yield 2008 print LongComputation()+" is the past!" 
#first call to future is synchronous Console.Write("Let's count up to three :") for i in range(1, 4): Thread.Sleep(1s) Console.Write(" "+i) Console.Write(Environment.NewLine) print LongComputation()+" is the future!" try: LongComputation() except e as CoroutineTerminatedException: print "OK! Terminated!" nant-0.9.5~git20110729.r1.202a430/examples/attributes/coroutine/coroutine-4.boo000066400000000000000000000043411161462365500263270ustar00rootroot00000000000000#region license // Copyright (c) 2003, 2004, 2005 Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion """ 2007 is the past! Let's count up to three : 1 2 3 The future is not yet ready! """ import System import System.Threading import Coroutine [coroutine(Future: true, Timeout: 100L)] def LongComputation(): yield 2007 Thread.Sleep(5s) #long computation yield 2008 print LongComputation()+" is the past!" #first call to future is synchronous Console.Write("Let's count up to three :") for i in range(1, 4): Thread.Sleep(1s) Console.Write(" "+i) Console.Write(Environment.NewLine) try: print LongComputation()+" is the future!" except e as CoroutineFutureNotReadyException: print "The future is not yet ready!" nant-0.9.5~git20110729.r1.202a430/examples/attributes/coroutine/coroutine-5.boo000066400000000000000000000047041161462365500263330ustar00rootroot00000000000000#region license // Copyright (c) 2003, 2004, 2005 Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. 
de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion """ 2007 is the past! Let's count up to three : 1 2 3 2008 is the future! We are in 2008 """ import System import System.Threading import Coroutine [coroutine(Future: true, Looping: false, DefaultLastValue: true)] def LongComputation(): yield 2007 Thread.Sleep(3.3s) #long computation yield 2008 print LongComputation()+" is the past!" #first call to future is synchronous Console.Write("Let's count up to three :") for i in range(1, 4): Thread.Sleep(1s) Console.Write(" "+i) Console.Write(Environment.NewLine) print LongComputation()+" is the future!" 
#this will return almost instantaneously (~.3s) #it is the delta between the time we've been counting and the duration of computation in the future #if you add Timeout:0L in the coroutine arguments, you will get a CoroutineFutureNotReadyException print "We are in "+LongComputation() nant-0.9.5~git20110729.r1.202a430/examples/attributes/coroutine/spawn-0.boo000066400000000000000000000047061161462365500254510ustar00rootroot00000000000000#region license // Copyright (c) 2003, 2004, 2005 Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion """ id:0 / count:1 id:0 / count:2 << id0 light-thread will count twice faster than the others! id:1 / count:1 id:2 / count:1 ... id:10 / count:1 id:0 / count:3 id:0 / count:4 id:1 / count:2 id:2 / count:2 ... id:9 / count:10 id:10 / count:10 bye! """ import Coroutine class CountToTen(ISpawnable): [property(Id)] _id as int #look ma! no yield! #if you want control over the placement of yields, just apply [coroutine] as usual def Execute() as bool: for i in range(1, 11): print "id:${_id} / count:${i}" spawn CountToTen(Id:0), 2 #spawn the first coroutine with a double scheduler slice for i in range(1, 11): #we could spawn thousands of threads here without pain spawn CountToTen(Id:i) spawn #with no argument, it will execute the spawned coroutines and block until all #coroutines have terminated. print "bye!" nant-0.9.5~git20110729.r1.202a430/examples/attributes/coroutine/spawn-1.boo000066400000000000000000000070301161462365500254430ustar00rootroot00000000000000#region license // Copyright (c) 2003, 2004, 2005 Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. 
// * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion """RANDOM SCHEDULING the coroutines will be executed in any order, but they will still run their sliceshare at each round. Output could be : id:2 / count:1 id:5 / count:1 id:6 / count:1 id:0 / count:1 id:0 / count:2 ... id:2 / count:10 bye! """ import System import System.Collections.Generic import Coroutine class CountToTen(ISpawnable): [property(Id)] _id as int #look ma! no yield! 
#if you want control over the placement of yields, just apply [coroutine] as usual def Execute() as bool: for i in range(1, 11): print "id:${_id} / count:${i}" class LotteryCoroutineScheduler(ICoroutineScheduler,IComparer of ISpawnable): _lock_IsRunning = object() IsRunning as bool: get: lock _lock_IsRunning: return _isRunning _isRunning = false _rand as Random def Compare(a as ISpawnable, b as ISpawnable) as int: return _rand.Next(-10, 10) def JoinStart(): lock _lock_IsRunning: if _isRunning: raise InvalidOperationException("Scheduler is already running.") _isRunning = true _rand = Random() coroutines = CoroutineSchedulerManager.Coroutines #FIXME: sync copy slices = CoroutineSchedulerManager.Slices try: :runSlices toRemove as List[of ISpawnable] = null while 0 != coroutines.Count: coroutines.Sort(self) for c in coroutines: s = slices[c] try: if 1 == s: c.Execute() else: for i in range(0, s): c.Execute() except e as CoroutineTerminatedException: if toRemove is null: toRemove = List[of ISpawnable]() toRemove.Add(c) except e as CoroutineFutureNotReadyException: pass if toRemove is not null: for c in toRemove: coroutines.Remove(c) goto runSlices ensure: lock _lock_IsRunning: _isRunning = false spawn CountToTen(Id:0), 2 for i in range(1, 11): spawn CountToTen(Id:i) CoroutineSchedulerManager.Scheduler = LotteryCoroutineScheduler() spawn print "bye!" nant-0.9.5~git20110729.r1.202a430/examples/duck-typing/000077500000000000000000000000001161462365500215155ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/examples/duck-typing/XmlObject.boo000066400000000000000000000066111161462365500241110ustar00rootroot00000000000000#region license // Copyright (c) 2003, 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. 
// // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion """ This example shows how to use IQuackFu.QuackGet to provide transparent access to xml elements. 
""" import System import System.Xml from System.Xml import System.Reflection [DefaultMember("Item")] class XmlObject(IQuackFu): _element as XmlElement def constructor(element as XmlElement): _element = element def constructor(text as string): doc = XmlDocument() doc.LoadXml(text) _element = doc.DocumentElement def QuackInvoke(name as string, args as (object)) as object: if name == "op_Addition": doc as XmlDocument = _element.ParentNode.CloneNode(true) tmp = XmlObject(doc.DocumentElement) docFrag = doc.CreateDocumentFragment() docFrag.InnerXml = args[1] tmp._element.AppendChild(docFrag) return tmp else: raise System.InvalidOperationException("Method ${name} not found in class ${self.GetType()}") def QuackSet(name as string, parameters as (object), value) as object: pass def QuackGet(name as string, parameters as (object)) as object: if name == "": assert len(parameters) == 1 return GetAttribute(parameters[0]) elements = _element.SelectNodes(name) if elements is not null: return XmlObject(elements[0]) if elements.Count == 1 return XmlObject(e) for e as XmlElement in elements def GetAttribute(name as string): item = _element.Attributes.GetNamedItem(name) return item.InnerText if item override def ToString(): return _element.InnerText xml = """ John Cleese 1111-111-111 2222-222-222 """ person = XmlObject(xml) print person.FirstName print person.LastName person += "3333-333-333" for phone as XmlObject in person.Phone: print phone['place'], phone nant-0.9.5~git20110729.r1.202a430/examples/duck-typing/ie.boo000066400000000000000000000037501161462365500226200ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. 
// * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion import System.Threading def CreateInstance(progid): type = System.Type.GetTypeFromProgID(progid) return type() ie as duck = CreateInstance("InternetExplorer.Application") ie.Visible = true ie.Navigate2("http://www.go-mono.com/monologue/") Thread.Sleep(50ms) while ie.Busy document = ie.Document print("${document.title} is ${document.fileSize} bytes long.") nant-0.9.5~git20110729.r1.202a430/examples/duck-typing/multimethods.boo000077500000000000000000000051651161462365500247460ustar00rootroot00000000000000#region license // Copyright (c) 2003, 2004, 2005 Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. 
// // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion class GameObject: pass class Ship(GameObject): pass class Missile(GameObject): pass class CollisionHandler: """ Takes advantage of runtime dispatching on all arguments to handle collisions between concrete game objects. 
""" def handle(ship as Ship, missile as Missile): print "a ship collided with a missile" def handle(missile as Missile, ship as Ship): print "a missile collided with a ship" def handle(missile1 as Missile, missile2 as Missile): print "a missile collided with another missile" def handle(ship1 as Ship, ship2 as Ship): print "a ship collided with another ship" def gameLoop(): objects = [Ship(), Missile(), Missile(), Ship(), Missile()] random = System.Random() select = def: return objects[random.Next(len(objects))] # duck enables runtime dispatch handler as duck = CollisionHandler() while true: handler.handle(select(), select()) yield for _ in zip(range(10), gameLoop()): pass nant-0.9.5~git20110729.r1.202a430/examples/gac/000077500000000000000000000000001161462365500200115ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/examples/gac/GacLibrary/000077500000000000000000000000001161462365500220305ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/examples/gac/GacLibrary/GacLibrary.snk000066400000000000000000000011241161462365500245620ustar00rootroot00000000000000$RSA2ftfx!.*40XTSz`wS-7pAx Acpr*fU7d6v v8絀#"G'Bs%nc xcqІOo3Q;ޙٝ¿6= H[-%Y.&qDF$j,~`Թkaq=IerXN\c@<B0AibJNF+] nant-0.9.5~git20110729.r1.202a430/examples/gac/test/000077500000000000000000000000001161462365500207705ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/examples/gac/test/test.boo000066400000000000000000000041371161462365500224550ustar00rootroot00000000000000#region license // Copyright (c) 2003, 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. 
// * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#endregion import System import GacLibrary from GacLibrary class LocalType: pass def test(typeName as string): yac = GacType() report(typeName, yac.Load(typeName)) def report(typeName, type): print "${typeName}: ${type}" test("GacLibrary.GacType") test("GacLibrary.GacType, GacLibrary") test("LocalType") test("LocalType, test") test("PrivateType") test("PrivateType, PrivateLibrary") report("LocalType", Type.GetType("LocalType")) report("LocalType, test", Type.GetType("LocalType, test")) nant-0.9.5~git20110729.r1.202a430/examples/gtk/000077500000000000000000000000001161462365500200445ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/examples/gtk/gsv.boo000066400000000000000000000056731161462365500213570ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion import System import Gtk from "gtk-sharp" import GtkSourceView from "gtksourceview-sharp" import Pango from "pango-sharp" Application.Init() booSourceLanguage = SourceLanguagesManager().GetLanguageFromMimeType("text/x-boo") buffer = SourceBuffer(booSourceLanguage, Highlight: true) sourceView = SourceView(buffer, ShowLineNumbers: true, AutoIndent: true, TabsWidth: 4) sourceView.ModifyFont(FontDescription(Family: "Lucida Console")) accelGroup = AccelGroup() menuBar = MenuBar() fileMenu = Menu() fileMenuOpen = ImageMenuItem(Stock.Open, accelGroup) fileMenuOpen.Activated += do: fs = FileSelection("Open file", SelectMultiple: false) fs.Complete("*.boo") try: if cast(int, ResponseType.Ok) == fs.Run(): selected, = fs.Selections using reader = System.IO.File.OpenText(selected): buffer.Text = reader.ReadToEnd() ensure: fs.Hide() fileMenu.Append(fileMenuOpen) menuBar.Append(MenuItem("_File", Submenu: fileMenu)) vbox = VBox(false, 2) vbox.PackStart(menuBar, false, false, 0) scrolledSourceView = ScrolledWindow() scrolledSourceView.Add(sourceView) vbox.PackStart(scrolledSourceView, true, true, 0) window = Window("Simple Boo Editor", DefaultWidth: 600, DefaultHeight: 400, DeleteEvent: Application.Quit) window.AddAccelGroup(accelGroup) window.Add(vbox) window.ShowAll() Application.Run() nant-0.9.5~git20110729.r1.202a430/examples/gtk/gsvshell.boo000066400000000000000000000067301161462365500224020ustar00rootroot00000000000000#region license // 
Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#endregion import System import Gtk from "gtk-sharp" import GtkSourceView from "gtksourceview-sharp" import Gdk from "gdk-sharp" as Gdk import Pango from "pango-sharp" as Pango import Boo.Lang.Interpreter from Boo.Lang.Interpreter class PromptView(SourceView): _interpreter = InteractiveInterpreter(RememberLastValue: true, Print: print) def constructor(): super( SourceBuffer( SourceLanguagesManager().GetLanguageFromMimeType("text/x-boo"), Highlight: true)) self.WrapMode = Gtk.WrapMode.Word //if Environment.OSVersion.Platform in (PlatformID.Win32NT, PlatformID.Win32Windows): self.ModifyFont(Pango.FontDescription(Family: "Lucida Console")) _interpreter.References.Add(typeof(TextView).Assembly) _interpreter.References.Add(typeof(Gdk.Key).Assembly) _interpreter.SetValue("cls", { Buffer.Text = "" }) _interpreter.SetValue("view", self) prompt() override def OnKeyPressEvent(ev as Gdk.EventKey): if Gdk.Key.Return == ev.Key: try: EvalCurrentLine() except x: print(x) prompt() return true elif ev.Key in (Gdk.Key.BackSpace, Gdk.Key.Left): if Buffer.GetIterAtMark(Buffer.InsertMark).LineOffset < 5: return true return super(ev) def print(obj): Buffer.InsertAtCursor("${obj}\n") def prompt(): Buffer.MoveMark(Buffer.InsertMark, Buffer.EndIter) Buffer.InsertAtCursor(">>> ") ScrollMarkOnscreen(Buffer.InsertMark) def EvalCurrentLine(): start = Buffer.GetIterAtLine(Buffer.LineCount) line = Buffer.GetText(start, Buffer.EndIter, false) print("") _interpreter.LoopEval(line[4:]) class MainWindow(Window): def constructor(): super("booish") window = ScrolledWindow() window.Add(PromptView()) self.Add(window) self.DeleteEvent += Application.Quit Application.Init() window = MainWindow(DefaultWidth: 400, DefaultHeight: 250) window.ShowAll() Application.Run() nant-0.9.5~git20110729.r1.202a430/examples/gtk/gtk.boo000066400000000000000000000036201161462365500213330ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. 
// // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion import System import Gtk from "gtk-sharp" Application.Init() window = Window("Button Tester", DefaultWidth: 200, DefaultHeight: 150, DeleteEvent: Application.Quit) window.Add(Button("Click Me!", Clicked: { print("button clicked!") })) window.ShowAll() Application.Run() nant-0.9.5~git20110729.r1.202a430/examples/gtk/gtkshell.boo000066400000000000000000000064651161462365500223750ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. 
de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#endregion import System import Gtk from "gtk-sharp" import Gdk from "gdk-sharp" as Gdk import Pango from "pango-sharp" as Pango import Boo.Lang.Interpreter from Boo.Lang.Interpreter class PromptView(TextView): _interpreter = InteractiveInterpreter(RememberLastValue: true, Print: print) def constructor(): self.WrapMode = Gtk.WrapMode.Word //if Environment.OSVersion.Platform in (PlatformID.Win32NT, PlatformID.Win32Windows): self.ModifyFont(Pango.FontDescription(Family: "Lucida Console")) _interpreter.References.Add(typeof(TextView).Assembly) _interpreter.References.Add(typeof(Gdk.Key).Assembly) _interpreter.SetValue("cls", { Buffer.Text = "" }) _interpreter.SetValue("view", self) prompt() override def OnKeyPressEvent(ev as Gdk.EventKey): if Gdk.Key.Return == ev.Key: try: EvalCurrentLine() except x: print(x) prompt() return true elif ev.Key in (Gdk.Key.BackSpace, Gdk.Key.Left): if Buffer.GetIterAtMark(Buffer.InsertMark).LineOffset < 5: return true return super(ev) def print(obj): Buffer.InsertAtCursor("${obj}\n") def prompt(): Buffer.MoveMark(Buffer.InsertMark, Buffer.EndIter) Buffer.InsertAtCursor(">>> ") ScrollMarkOnscreen(Buffer.InsertMark) def EvalCurrentLine(): start = Buffer.GetIterAtLine(Buffer.LineCount) line = Buffer.GetText(start, Buffer.EndIter, false) print("") _interpreter.LoopEval(line[4:]) class MainWindow(Window): def constructor(): super("booish") window = ScrolledWindow() window.Add(PromptView()) self.Add(window) self.DeleteEvent += Application.Quit Application.Init() window = MainWindow(DefaultWidth: 400, DefaultHeight: 250) window.ShowAll() Application.Run() 
nant-0.9.5~git20110729.r1.202a430/examples/macros/000077500000000000000000000000001161462365500205435ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/examples/macros/PerformTransaction/000077500000000000000000000000001161462365500243635ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/examples/macros/PerformTransaction/MacroUsage.boo000066400000000000000000000033041161462365500271120ustar00rootroot00000000000000#region license // Copyright (c) 2003, 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion performTransaction connection: connection.Execute(cmd1) connection.Execute(cmd2) nant-0.9.5~git20110729.r1.202a430/examples/macros/PerformTransaction/PerformTransactionMacro.boo000066400000000000000000000042111161462365500316640ustar00rootroot00000000000000#region license // Copyright (c) 2003, 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion macro performTransaction(connection as Boo.Lang.Compiler.Ast.Expression): """ performTransaction connection: connection.Execute(cmd1) connection.Execute(cmd2) transaction=connection.BeginTransaction() try: // transaction logic transaction.Commit() except: transaction.Revert() raise ensure: transaction.End() """ yield [| transaction = $connection.BeginTransaction() |] yield [| try: $(performTransaction.Body) transaction.Commit() except: transaction.Revert() raise ensure: transaction.End() |] nant-0.9.5~git20110729.r1.202a430/examples/macros/PerformTransaction/default.build000066400000000000000000000012101161462365500270220ustar00rootroot00000000000000 nant-0.9.5~git20110729.r1.202a430/examples/macros/With/000077500000000000000000000000001161462365500214565ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/examples/macros/With/MacroUsage.boo000066400000000000000000000032751161462365500242140ustar00rootroot00000000000000#region license // Copyright (c) 2003, 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. 
// * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion with fooInstanceWithReallyLongName: _f1 = 100 _f2 = "abc" _DoSomething() nant-0.9.5~git20110729.r1.202a430/examples/macros/With/WithMacro.boo000066400000000000000000000053501161462365500240570ustar00rootroot00000000000000#region license // Copyright (c) 2003, 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. 
// * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#endregion import Boo.Lang.Compiler import Boo.Lang.Compiler.Ast import Boo.Lang.Compiler.Ast.Visitors class WithMacro(AbstractAstMacro): private class NameExpander(DepthFirstTransformer): _inst as ReferenceExpression def constructor(inst as ReferenceExpression): _inst = inst override def OnReferenceExpression(node as ReferenceExpression): // if the name of the reference begins with '_' // then convert the reference to a member reference // of the provided instance if node.Name.StartsWith('_'): // create the new member reference and set it up mre = MemberReferenceExpression(node.LexicalInfo) mre.Name = node.Name[1:] mre.Target = _inst.CloneNode() // replace the original reference in the AST // with the new member-reference ReplaceCurrentNode(mre) override def Expand(macro as MacroStatement) as Statement: assert 1 == macro.Arguments.Count assert macro.Arguments[0] isa ReferenceExpression inst = macro.Arguments[0] as ReferenceExpression // convert all _ to inst. block = macro.Body ne = NameExpander(inst) ne.Visit(block) return block nant-0.9.5~git20110729.r1.202a430/examples/macros/With/default.build000066400000000000000000000011621161462365500241230ustar00rootroot00000000000000 nant-0.9.5~git20110729.r1.202a430/examples/macros/alias/000077500000000000000000000000001161462365500216345ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/examples/macros/alias/AliasMacro.boo000077500000000000000000000046651161462365500243660ustar00rootroot00000000000000#region license // Copyright (c) 2003, 2004, 2005 Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. 
// * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#endregion import Boo.Lang.Compiler import Boo.Lang.Compiler.Ast import Boo.Lang.Compiler.Ast.Visitors class AliasMacro(AbstractAstMacro): static final Usage = "Usage: alias as " override def Expand(macro as MacroStatement): if not CheckUsage(macro): Errors.Add( CompilerErrorFactory.CustomError(macro.LexicalInfo, Usage)) return null argument as TryCastExpression = macro.Arguments[0] reference = ReferenceExpression(Name: argument.Type.ToString()) macro.ParentNode.ReplaceNodes(reference, argument.Target) def CheckUsage(macro as MacroStatement): if len(macro.Body.Statements) > 0: return false if len(macro.Arguments) != 1: return false expression = macro.Arguments[0] as TryCastExpression if expression is null: return false return expression.Type isa SimpleTypeReference nant-0.9.5~git20110729.r1.202a430/examples/macros/alias/MacroUsage.boo000077500000000000000000000034751161462365500243770ustar00rootroot00000000000000#region license // Copyright (c) 2003, 2004, 2005 Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion def foo(ref paramWithReallyLongName as int, j): alias paramWithReallyLongName as i i = 3*i if false: alias i as j print j print j # won't be replaced i = 4 foo(i, "bar") print i nant-0.9.5~git20110729.r1.202a430/examples/macros/alias/default.build000077500000000000000000000011671161462365500243110ustar00rootroot00000000000000 nant-0.9.5~git20110729.r1.202a430/examples/macros/ifdef.boo000066400000000000000000000042061161462365500223230ustar00rootroot00000000000000#region license // Copyright (c) 2003, 2004, 2005 Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. 
// // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion """ Will show the text below only if compiled with booc -d:BOO woohoo BOO is defined okthxbye """ import System import Boo.Lang.Compiler import Boo.Lang.Compiler.Ast macro ifdef: if not ifdef.Arguments[0] isa StringLiteralExpression: raise ArgumentException("ifdef argument must be a string literal.") if Context.Parameters.Defines.ContainsKey((ifdef.Arguments[0] as StringLiteralExpression).Value): return [| $(ifdef.Block) |] ifdef "BOO": print "woohoo BOO is defined" print "okthxbye" nant-0.9.5~git20110729.r1.202a430/examples/macros/trace/000077500000000000000000000000001161462365500216415ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/examples/macros/trace/TraceService.boo000066400000000000000000000050531161462365500247240ustar00rootroot00000000000000#region license // Copyright (c) 2003, 2004, 2005 Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. 
// * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion """ Description: TraceService class is the compiler service used by `trace` macro. Read `trace.boo` for more information. """ import System import System.IO import Boo.Lang.Compiler import Boo.Lang.Compiler.Ast class TraceService (IDisposable): context as CompilerContext locations = List[of string]() def constructor(context as CompilerContext): .context = context def AddLocation(lexicalInfo as LexicalInfo, method as Method): locations.Add("${lexicalInfo.ToString()} : ${method.ToString()}") def Dispose(): if len(context.Errors): return #do not write file if there was an error during compilation output = context.Parameters.OutputAssembly return unless output #do not write file if there no output assembly (eg. 
compilation in memory) print "NOTICE: writing trace locations to file `${output}.traces`" using writer = StreamWriter(output+".traces"): for location in locations: writer.WriteLine(location) nant-0.9.5~git20110729.r1.202a430/examples/macros/trace/trace.boo000066400000000000000000000060311161462365500234400ustar00rootroot00000000000000#region license // Copyright (c) 2003, 2004, 2005 Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#endregion """ Description: This examples shows how to register and use a lightweight compiler service that maintains a list of locations of `trace` macro usages and write that list to a file _only_ when compilation succeeds. First you need to compile the library containing the service with: `booc -t:library TraceService.boo` Then compile with `booc -r:TraceService.dll -o:trace.exe trace.boo`. A file `trace.exe.traces` will be created with this content: trace.boo(37,9) : Example.Foo trace.boo(40,9) : Example.Bar Exercise: Modify the macro (and/or TraceService) to allow at most one `trace` by method and return a compiler error otherwise. (hints: TraceServices.locations and CompilerContext.Errors) """ import Boo.Lang.Compiler.Ast import Boo.Lang.PatternMatching macro trace(message as string): try: service = Context.GetService[of TraceService]() except as System.ArgumentException: Context.RegisterService[of TraceService](service = TraceService(Context)) service.AddLocation(trace.LexicalInfo, trace.GetAncestor[of Method]()) yield [| System.Diagnostics.Trace.WriteLine($message) |] static class Example: def Foo(): trace "entered foo..." def Bar(): trace "entered bar..." #NB: if you execute this example you won't see anything displayed unless you #have compiled with -d:TRACE and add a trace listener. #http://msdn.microsoft.com/en-us/library/system.diagnostics.trace.aspx Example.Foo() Example.Bar() nant-0.9.5~git20110729.r1.202a430/examples/meta-programming/000077500000000000000000000000001161462365500225255ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/examples/meta-programming/PetDSL.boo000077500000000000000000000044211161462365500243250ustar00rootroot00000000000000#region license // Copyright (c) 2003, 2004, 2005 Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. 
// // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#endregion import Compiler.MetaProgramming dsl = [| namespace PetDSL import Compiler.MetaProgramming import Compiler.Ast class Pet: public name = "I need a name" [meta] def onCreate(reference as ReferenceExpression, block as BlockExpression): return [| $reference = $(pascalCase(reference))() $(block.Body) |] def pascalCase(r as ReferenceExpression): return ReferenceExpression(Name: pascalCase(r.Name)) def pascalCase(s as string): return s[:1].ToUpper() + s[1:] |] app = [| import PetDSL onCreate pet: print pet.name |] asm = compile(app, compile(dsl)) asm.EntryPoint.Invoke(null, (null,)) nant-0.9.5~git20110729.r1.202a430/examples/misc/000077500000000000000000000000001161462365500202125ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/examples/misc/FormatterServices.boo000066400000000000000000000054621161462365500243710ustar00rootroot00000000000000#region license // Copyright (c) 2003, 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion import System import System.IO import System.Runtime.Serialization import System.Runtime.Serialization.Formatters.Binary class Person: [property(FirstName)] _fname = "" [property(LastName)] _lname = "" class PersonProxy(Person, ISerializable): transient _caboosh = 0 def constructor(): pass def constructor(info as SerializationInfo, context as StreamingContext): data = info.GetValue("Person.Data", typeof((object))) FormatterServices.PopulateObjectMembers(self, GetSerializableMembers(), data) def GetObjectData(info as SerializationInfo, context as StreamingContext): members = GetSerializableMembers() info.AddValue("Person.Data", FormatterServices.GetObjectData(self, members)) def GetSerializableMembers(): return FormatterServices.GetSerializableMembers(Person) def serialize(o): stream = MemoryStream() BinaryFormatter().Serialize(stream, o) return stream.GetBuffer() def deserialize(buffer as (byte)): return BinaryFormatter().Deserialize(MemoryStream(buffer)) p = PersonProxy(FirstName: "John", LastName: "Cleese") p = deserialize(serialize(p)) assert "John" == p.FirstName assert "Cleese" == p.LastName nant-0.9.5~git20110729.r1.202a430/examples/misc/GetOptions.boo000077500000000000000000000047111161462365500230140ustar00rootroot00000000000000#region license // Copyright (c) 2003, 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. 
// // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#endregion import Mono.GetOptions from Mono.GetOptions class CommandLineOptions(Options): def constructor(argv): ProcessArgs(argv) [Option("thumbnail width", "width")] public Width = 70 [Option("thumbnail height", "height")] public Height = 70 [Option("output file", "output")] public OutputFileName = "" [Option("input file", "input")] public InputFileName = "" [Option("encoding quality level (1-100), default is 75", "encoding-quality")] public EncodingQuality = 75L IsValid as bool: get: return (0 == len(RemainingArguments) and len(OutputFileName) > 0 and len(InputFileName) > 0 and Width > 0 and Height > 0) options = CommandLineOptions(argv) if options.IsValid: for field in typeof(CommandLineOptions).GetFields(): print("${field.Name}: ${field.GetValue(options)}") else: options.DoHelp() nant-0.9.5~git20110729.r1.202a430/examples/misc/ObjectConstruction.boo000066400000000000000000000037341161462365500245430ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. 
// // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion import System class Button: [property(Text)] _text = "" public Width = 0 event Click as EventHandler def RaiseClick(): Click(self, EventArgs.Empty) # you can set property and field values and even # bind to events when constructing an object b = Button(Text: "Click me", Width: 10, Click: { print("clicked!") }) print(b.Text) print(b.Width) b.RaiseClick() nant-0.9.5~git20110729.r1.202a430/examples/misc/arrayperformance.boo000066400000000000000000000035671161462365500242660ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. 
de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion def test(): items = 2000000 a = array(object, range(items)) collect = [] start = date.Now for i in range(items): collect.Add(a[i]) print("${(date.Now-start).TotalMilliseconds} elapsed.") test() test() test() nant-0.9.5~git20110729.r1.202a430/examples/misc/arrayperformance.cs000066400000000000000000000042311161462365500241010ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. 
de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion using System; using Boo.Lang; public class App { public static void Main() { Test(); Test(); Test(); } private static void Test() { const int items = 2000000; object[] array = (object[])new List(Builtins.range(items)).ToArray(typeof(object)); List collect = new List(); DateTime start = DateTime.Now; foreach (int i in Builtins.range(items)) { collect.Add(array[i]); } TimeSpan elapsed = DateTime.Now.Subtract(start); Console.WriteLine("{0} elapsed.", elapsed.TotalMilliseconds); } } nant-0.9.5~git20110729.r1.202a430/examples/misc/arrayperformance.java000066400000000000000000000036461161462365500244260ustar00rootroot00000000000000//#region license // boo - an extensible programming language for the CLI // Copyright (C) 2004 Rodrigo Barreto de Oliveira // // This library is free software; you can redistribute it and/or // modify it under the terms of the GNU Lesser General Public // License as published by the Free Software Foundation; either // version 2.1 of the License, or (at your option) any later version. 
// // This library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU // Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public // License along with this library; if not, write to the Free Software // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA // // Contact Information // // mailto:rbo@acm.org //#endregion import java.util.ArrayList; import java.util.Iterator; class xrange implements Iterator { int _max; int _current; public xrange(int max) { _max = max; _current = 0; } public boolean hasNext() { return _current < _max; } public Object next() { return new Integer(_current++); } public void remove() { } } public class arrayperformance { public static final int items = 2000000; public static void main(String[] args) { test(); test(); test(); } private static void test() { Object[] array = new Object[items]; for (int i = 0; i < array.length; i++) { array[i] = new Object(); } ArrayList collect = new ArrayList(); Iterator i = new xrange(items); long start = System.currentTimeMillis(); while (i.hasNext()) { int index = ((Integer)i.next()).intValue(); collect.add(array[index]); } System.out.println((System.currentTimeMillis() - start) + " elapsed."); } } nant-0.9.5~git20110729.r1.202a430/examples/misc/arrayperformance.py000066400000000000000000000004221161462365500241220ustar00rootroot00000000000000from time import time def test(): items = 2000000 array = tuple(range(items)) collect = [] start = time() for i in xrange(items): collect.append(array[i]) elapsed = time() - start print elapsed*1000, " elapsed." test() test() test() nant-0.9.5~git20110729.r1.202a430/examples/misc/ast-to-string.boo000066400000000000000000000034571161462365500234370ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. 
de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion import Boo.Lang.Compiler import Boo.Lang.Compiler.Ast import System.IO mie=MethodInvocationExpression(ReferenceExpression("print")) mie.Arguments.Add(StringLiteralExpression("Hello!")) print mie nant-0.9.5~git20110729.r1.202a430/examples/misc/ast-to-xml.boo000066400000000000000000000060711161462365500227240ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. 
// // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion """ Like booi except it only spits out the XML representation of the AST. 
booi.exe examples\ast-to-xml.boo file1.boo file2.boo To show the AST after the first parsing step only use the -parse option: booi.exe examples\ast-to-xml.boo -parse file1.boo file2.boo If you want to save the output to a file: booi.exe examples\ast-to-xml.boo file1.boo file2.boo > output.xml """ import System import System.IO import System.Xml.Serialization from System.Xml import Boo.Lang.Compiler from Boo.Lang.Compiler import Boo.Lang.Compiler.IO import Boo.Lang.Compiler.Pipelines def PrintAST([required]result as CompilerContext, [required]out as TextWriter): astobject = result.CompileUnit try: s = XmlSerializer( astobject.GetType() ) except e: print e.Message return try: s.Serialize( out, astobject ) except e: print "\n", e.ToString() compiler = BooCompiler() files as (string) if argv[0] == "-parse": compiler.Parameters.Pipeline = Parse() files = argv[1:] else: compiler.Parameters.Pipeline = Compile() files = argv[:] if len(files) == 0: print "please specify at least one boo file as a parameter" return for filename in files: compiler.Parameters.Input.Add(FileInput(filename)) try: result = compiler.Run() if len(result.Errors) > 0: print "There were errors compiling the boo file(s)" else: PrintAST(result, Console.Out) except e: print e.GetType(), ":", e.Message nant-0.9.5~git20110729.r1.202a430/examples/misc/ast.boo000066400000000000000000000050371161462365500215070ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. 
// * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#endregion import Boo.Lang.Compiler.Ast import Boo.Lang.Compiler.Ast.Visitors import System def print(node as Node): BooPrinterVisitor(Console.Out).Switch(node) def CreateNotExpression(e as Expression): return UnaryExpression(Operand: e, Operator: UnaryOperatorType.Not) e = ExpressionStatement( Expression: be = BinaryExpression(BinaryOperatorType.Assign, ReferenceExpression("a"), IntegerLiteralExpression(3) ) ) print(e) be.ParentNode.Replace(be, MethodInvocationExpression(Target: ReferenceExpression("a"))) print(e) i = IfStatement(Condition: be = BinaryExpression(BinaryOperatorType.NotMatch, StringLiteralExpression("foo"), StringLiteralExpression("bar"))) i.TrueBlock = Block() //be.ReplaceBy(CreateNotExpression(be)) //i.Expression = CreateNotExpression(be) i.Replace(be, CreateNotExpression(be)) be.Operator = BinaryOperatorType.Match print(i) nant-0.9.5~git20110729.r1.202a430/examples/misc/ast2.boo000066400000000000000000000041401161462365500215630ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. 
// // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion import System import Boo.Lang.Ast import Boo.Lang.Ast.Visitors def printNode(node as Node): BooPrinterVisitor(Console.Out).Switch(node) be = BinaryExpression(BinaryOperatorType.Assign, ReferenceExpression("foo"), ReferenceExpression("bar")) clone as BinaryExpression = be.Clone() print(be.Left.ParentNode is be) print(be.Right.ParentNode is be) print(clone.Left.ParentNode is clone) print(clone.Right.ParentNode is clone) printNode(be) printNode(clone) nant-0.9.5~git20110729.r1.202a430/examples/misc/async.boo000066400000000000000000000035031161462365500220310ustar00rootroot00000000000000#region license // Copyright (c) 2003, 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. 
// * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion import System import System.Threading def run(): print("executing") print("started") result = run.BeginInvoke({ print("called back") }) Thread.Sleep(50ms) run.EndInvoke(result) print("done") nant-0.9.5~git20110729.r1.202a430/examples/misc/buttonclick1.boo000066400000000000000000000035361161462365500233240ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. 
de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion import System.Windows.Forms from System.Windows.Forms f = Form(Text: "Hello, Boo!") f.Controls.Add(Button(Text: "Click Me!", Dock: DockStyle.Fill, Click: { print("clicked!") })) Application.Run(f) nant-0.9.5~git20110729.r1.202a430/examples/misc/changelog.boo000066400000000000000000000051461161462365500226500ustar00rootroot00000000000000#region license // Copyright (c) 2003, 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. 
de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion """ svn change log printer utility. changelog.boo [FROM-REVISION [TO-REVISION]] Example: changelog.boo PREV HEAD changelog.boo 123 changelog.boo """ import System.Xml from System.Xml class LogEntry: Author as string Date as date Message as string def constructor(element as XmlElement): Author = element.SelectSingleNode("author/text()").Value Date = date.Parse(element.SelectSingleNode("date/text()").Value) Message = element.SelectSingleNode("msg/text()").Value override def ToString(): return "${Date} - ${Author}\n${Message}" static def Load(fromRevision, toRevision): doc = XmlDocument() doc.LoadXml(shell("svn", "log --xml -v -r ${fromRevision}:${toRevision}")) return array(LogEntry(e) for e in doc.SelectNodes("//logentry")) if len(argv) > 1: fromRevision, toRevision = argv elif len(argv) > 0: fromRevision, = argv toRevision = "HEAD" else: fromRevision = toRevision = "HEAD" entries = LogEntry.Load(fromRevision, toRevision) print join(entries, "\n") 
nant-0.9.5~git20110729.r1.202a430/examples/misc/client.boo000066400000000000000000000037061161462365500221770ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#endregion import System.IO import System.Net import System.Net.Sockets s = Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp) s.Connect(IPEndPoint(IPAddress.Parse("127.0.0.1"), 8080)) using stream=NetworkStream(s, true): writer=StreamWriter(stream) writer.WriteLine("ping!") writer.Flush() print StreamReader(stream).ReadLine() nant-0.9.5~git20110729.r1.202a430/examples/misc/closure-serialization.boo000066400000000000000000000042421161462365500252440ustar00rootroot00000000000000#region license // Copyright (c) 2003, 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion import NUnit.Framework from nunit.framework import System.IO import System.Runtime.Serialization.Formatters.Binary def save(o): stream=MemoryStream() BinaryFormatter().Serialize(stream, o) return stream.GetBuffer() def load(data as (byte)): return BinaryFormatter().Deserialize(MemoryStream(data)) def make_counter(value as int): return { return ++value } c1 = make_counter(10) c2 = make_counter(20) Assert.AreEqual(11, c1()) Assert.AreEqual(21, c2()) saved = save(c1) Assert.AreEqual(12, c1()) # restore c1 = load(saved) Assert.AreEqual(12, c1()) nant-0.9.5~git20110729.r1.202a430/examples/misc/coroutines.boo000077500000000000000000000041271161462365500231140ustar00rootroot00000000000000#region license // Copyright (c) 2003, 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. 
de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion class XPer: def Test(): while true: print "testing" yield def Code(): while true: print "coding" yield def Refactor(): while true: print "refactoring" yield def Iterate(times as int): assert times > 0 test = Test().GetEnumerator() code = Code().GetEnumerator() refactor = Refactor().GetEnumerator() while times > 0: test.MoveNext() code.MoveNext() refactor.MoveNext() test.MoveNext() --times XPer().Iterate(5) nant-0.9.5~git20110729.r1.202a430/examples/misc/countries.boo000077500000000000000000000046621161462365500227410ustar00rootroot00000000000000#region license // Copyright (c) 2003, 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. 
// * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#endregion import System import System.Xml.Serialization from System.Xml import System.IO class Country: public Name as string public Hex as string override def ToString(): return "${Name} - ${Hex}" # an array of Country objects just to show how XmlSerializer # handles it countries = ( Country(Name: "Foo", Hex: "0x00"), Country(Name: "Bar", Hex: "0x01")) # boo array types are defined with (ElementType) serializer = XmlSerializer(typeof((Country)), XmlRootAttribute("Countries")) serializer.Serialize(Console.Out, countries) # now let's read the file using stream=File.OpenRead("countries.xml"): countries = serializer.Deserialize(stream) print("${len(countries)} countries deserialized.") # print first 3 countries just because we can :-) print(join(countries[:3], "\n")) nant-0.9.5~git20110729.r1.202a430/examples/misc/countries.xml000077500000000000000000000007311161462365500227530ustar00rootroot00000000000000 Unknown 0x007F af 0x0036 af-ZA 0x0436 sq 0x001C sq-AL 0x041C vi-VN 0x042A nant-0.9.5~git20110729.r1.202a430/examples/misc/customcollection.boo000066400000000000000000000046141161462365500243060ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. 
// // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion import System import System.Reflection import System.Collections class Foo: [getter(Name)] _name as string def constructor(name as string): _name = name [DefaultMember("Item")] [EnumeratorItemType(Foo)] class FooCollection(ICollection): _items = [] SyncRoot: get: return _items.SyncRoot Count: get: return len(_items) IsSynchronized: get: return _items.IsSynchronized def CopyTo(target as Array, index as int): _items.CopyTo(target, index) def GetEnumerator(): return _items.GetEnumerator() def Add([required] item as Foo): _items.Add(item) Item(index as int) as Foo: get: return _items[index] c = FooCollection() c.Add(Foo("Homer")) c.Add(Foo("Eric")) for foo in c: print(foo.Name) nant-0.9.5~git20110729.r1.202a430/examples/misc/download.boo000066400000000000000000000035241161462365500225260ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. 
// // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion import System import System.Net import System.Threading url, local = argv client = WebClient() call = client.DownloadFile.BeginInvoke(url, local) while not call.IsCompleted: Console.Write(".") Thread.Sleep(50ms) Console.WriteLine() nant-0.9.5~git20110729.r1.202a430/examples/misc/dsl-1.boo000077500000000000000000000036401161462365500216410ustar00rootroot00000000000000#region license // Copyright (c) 2003, 2004, 2005 Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. 
// // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion callable Block() def blockTag(tagName as string, block as Block): print "<${tagName}>" block() print "" def html(block as Block): blockTag "html", block def body(block as Block): blockTag "body", block def text(s as string): print s html: body: text "Hello, world!" nant-0.9.5~git20110729.r1.202a430/examples/misc/dump.boo000066400000000000000000000037641161462365500216720ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. 
de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion """ Lists the string representation of every object serialized to a file. 
""" import System import System.Console import System.IO import System.Runtime.Serialization.Formatters.Binary _, fname = Environment.GetCommandLineArgs() formatter = BinaryFormatter() using stream = File.OpenRead(fname): WriteLine(formatter.Deserialize(stream)) while stream.Position < stream.Length nant-0.9.5~git20110729.r1.202a430/examples/misc/fibonacci.boo000066400000000000000000000034521161462365500226340ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion def fibonacci(): a, b = 0, 1 while true: yield b a, b = b, a+b for index as int, element in zip(range(5), fibonacci()): print("${index+1}: ${element}") nant-0.9.5~git20110729.r1.202a430/examples/misc/firstline.boo000066400000000000000000000033041161462365500227120ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion using reader=File.OpenText(fname): print(reader.ReadLine()) nant-0.9.5~git20110729.r1.202a430/examples/misc/functor.boo000066400000000000000000000035751161462365500224050ustar00rootroot00000000000000#region license // Copyright (c) 2003, 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion class Functor(ICallable): def Call(args as (object)) as object: print("called with: " + join(args, ", ")) def each(items, function as callable): for item in items: function(item) items = List(range(5)) each(items, Functor()) each(items, { item | print(item) })nant-0.9.5~git20110729.r1.202a430/examples/misc/grep.boo000066400000000000000000000037311161462365500216540ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion """ grep example: grep *.cs Boo.IO """ import System.IO // Directory def ScanFile(fname as string, pattern as string): for index, line as string in enumerate(File.OpenText(fname)): print("${fname}(${index}): ${line}") if line =~ pattern glob, pattern = argv for fname in Directory.GetFiles(".", glob): ScanFile(fname, pattern) nant-0.9.5~git20110729.r1.202a430/examples/misc/helloforms.boo000066400000000000000000000033311161462365500230650ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. 
// // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion import System.Windows.Forms from System.Windows.Forms f = Form(Text: "Hello, Boo!") Application.Run(f) nant-0.9.5~git20110729.r1.202a430/examples/misc/helloforms2.boo000066400000000000000000000033501161462365500231500ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. 
// // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion import System.Windows.Forms from System.Windows.Forms as SWF f = SWF.Form(Text: "Hello, Boo!") SWF.Application.Run(f) nant-0.9.5~git20110729.r1.202a430/examples/misc/helloforms3.boo000066400000000000000000000034251161462365500231540ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. 
// // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion import System.Windows.Forms from System.Windows.Forms f = Form(Text: "Hello!") f.Controls.Add(Button(Text: "Click Me!", Dock: DockStyle.Fill)) Application.Run(f) nant-0.9.5~git20110729.r1.202a430/examples/misc/lines.boo000066400000000000000000000034171161462365500220320ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. 
// // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion import System.IO fname, = argv using stream = File.OpenText(fname): for index, line in enumerate(stream): print "$index: $line" nant-0.9.5~git20110729.r1.202a430/examples/misc/listoperations.boo000066400000000000000000000036541161462365500240020ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. 
// // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion start = date.Now l = [] for i in range(500000): l.Add(i) print("Total time: ${date.Now-start}") start = date.Now for i in [100, -100]*1000: l.RemoveAt(i) print("Total time: ${date.Now-start}") start = date.Now for i as int in range(len(l)): l[i] = l[-i] print("Total time: ${date.Now-start}") nant-0.9.5~git20110729.r1.202a430/examples/misc/lscope.boo000066400000000000000000000032661161462365500222070ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. 
de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion if "y" == prompt("Enter scope? (y/n) "): message = "ok." print(message) nant-0.9.5~git20110729.r1.202a430/examples/misc/now.boo000066400000000000000000000032251161462365500215200ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. 
// // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion print date.Now nant-0.9.5~git20110729.r1.202a430/examples/misc/propertygrid.boo000066400000000000000000000042151161462365500234470ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. 
// // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion import System.Windows.Forms from System.Windows.Forms import System.Drawing from System.Drawing class PropertyEditor(Form): def constructor([required] obj): grid = PropertyGrid(Dock: DockStyle.Fill, SelectedObject: obj) Controls.Add(grid) class Options: [property(Message)] _message as string [property(Font)] _font as System.Drawing.Font options = Options(Message: "Hello!", Font: Font("Lucida Console", 12.0)) editor = PropertyEditor(options) editor.ShowDialog() print(options.Message) nant-0.9.5~git20110729.r1.202a430/examples/misc/replace.boo000066400000000000000000000047741161462365500223420ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. 
// * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion """ Regular expression replacement tool - replaces every occurrence of a regular expression in a group of files. 
Usage: replace.boo Examples: replace.boo *.cs "AssemblyVersion(.*?)" "AssemblyVersion(1.2.3.4)" """ import System import System.Text.RegularExpressions import System.IO def Replace(folder as string, glob as string, expression as Regex, replacement as string): for fname in Directory.GetFiles(folder, glob): contents = File.ReadAllText(fname) newContents = expression.Replace(contents, replacement) if newContents != contents: print(fname) File.WriteAllText(fname, newContents) for subFolder in Directory.GetDirectories(folder): Replace(subFolder, glob, expression, replacement) glob, expression, replacement = argv Replace(".", glob, Regex(expression), replacement) nant-0.9.5~git20110729.r1.202a430/examples/misc/rgrep.boo000066400000000000000000000044201161462365500220320ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion """ Recursive grep tool. rgrep example: grep *.cs Boo.IO """ import System // Environment import System.IO // Directory def ScanFile(fname as string, pattern as string): using stream = File.OpenText(fname): for index, line as string in enumerate(stream): print("${fname}(${index}): ${line.Trim()}") if line =~ pattern def ScanDirectory(path as string, glob as string, pattern as string): for fname in Directory.GetFiles(path, glob): ScanFile(fname, pattern) for dir in Directory.GetDirectories(path): ScanDirectory(dir, glob, pattern) glob, pattern = argv ScanDirectory(".", glob, pattern) nant-0.9.5~git20110729.r1.202a430/examples/misc/run-ast-without-compiler.boo000066400000000000000000000040471161462365500256220ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. 
de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion import Boo.Lang.Compiler import Boo.Lang.Compiler.Ast // print('Hello!') mie = MethodInvocationExpression(ReferenceExpression("print")) mie.Arguments.Add(StringLiteralExpression("Hello!")) // statements and expressions must be inside a code block module = Module() module.Globals.Add(mie) // modules must be inside a CompileUnit cunit = CompileUnit() cunit.Modules.Add(module) pipeline = Pipelines.Run() pipeline.Run(CompilerContext(cunit)) nant-0.9.5~git20110729.r1.202a430/examples/misc/run-ast.boo000066400000000000000000000043151161462365500223070ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. 
// * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion import Boo.Lang.Compiler import Boo.Lang.Compiler.Steps import Boo.Lang.Compiler.Pipelines import Boo.Lang.Compiler.Ast class CodeGeneratorStep(AbstractCompilerStep): override def Run(): // print('Hello!') mie = MethodInvocationExpression(ReferenceExpression("print")) mie.Arguments.Add(StringLiteralExpression("Hello!")) module = Module() module.Globals.Add(mie) CompileUnit.Modules.Add(module) compiler = BooCompiler() compiler.Parameters.Pipeline = Run() compiler.Parameters.Pipeline.Insert(0, CodeGeneratorStep()) result = compiler.Run() for error in result.Errors: print(error) nant-0.9.5~git20110729.r1.202a430/examples/misc/run.boo000066400000000000000000000037761161462365500215340ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. 
de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#endregion import Boo.Lang.Compiler import Boo.Lang.Compiler.IO def run(pipelineName as string, code): compiler = BooCompiler() compiler.Parameters.Input.Add(StringInput("", code)) compiler.Parameters.Pipeline = BooCompiler.GetStandardPipeline(pipelineName) result = compiler.Run() print(join(result.Errors, "\n")) if len(result.Errors) code = "print('Hello!')" run("boo", code) run("booi", code) nant-0.9.5~git20110729.r1.202a430/examples/misc/selectElements.boo000066400000000000000000000041561161462365500236750ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion import System.Xml from System.Xml def selectElements(element as XmlElement, tagName as string): for node in element.ChildNodes: element = node as XmlElement if element is not null and tagName == element.Name: yield element xml = """ """ document = XmlDocument() document.LoadXml(xml) for element in selectElements(document.DocumentElement, "foo"): print(element.GetAttribute("value")) nant-0.9.5~git20110729.r1.202a430/examples/misc/serialize.boo000066400000000000000000000037301161462365500227050ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. 
// // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion import System.IO import System.Runtime.Serialization.Formatters.Binary class Person: _name as string def constructor(name): _name = name override def ToString(): return _name def serialize(fname, obj): using stream=File.OpenWrite(fname): BinaryFormatter().Serialize(stream, obj) serialize("\\temp\\p.dat", Person("Homer Simpson")) nant-0.9.5~git20110729.r1.202a430/examples/misc/serializearray.boo000066400000000000000000000037211161462365500237440ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. 
de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion import System import System.IO import System.Runtime.Serialization.Formatters.Binary def serialize(obj): stream = MemoryStream() BinaryFormatter().Serialize(stream, obj) return stream.ToArray() def deserialize(buffer as (byte)): return BinaryFormatter().Deserialize(MemoryStream(buffer)) arr = (1, 2, 3,) arr = deserialize(serialize(arr)) print(join(arr)) nant-0.9.5~git20110729.r1.202a430/examples/misc/server.boo000066400000000000000000000040231161462365500222200ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. 
// * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion import System.IO import System.Net import System.Net.Sockets server = Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp) server.Bind(IPEndPoint(IPAddress.Any, 8080)) server.Listen(1) while true: socket = server.Accept() using stream=NetworkStream(socket, true): print(StreamReader(stream).ReadLine()) writer=StreamWriter(stream) writer.WriteLine("pong!") writer.Flush() nant-0.9.5~git20110729.r1.202a430/examples/misc/showcompilersteps.boo000066400000000000000000000304231161462365500245070ustar00rootroot00000000000000#region license // Copyright (c) 2003, 2004, 2005 Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. 
// // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion // By Doug Holton. More stuff added by David Piepgrass. // For help, please see the help string below. 
import System import System.IO import System.Xml.Serialization from System.Xml import Boo.Lang.Compiler from Boo.Lang.Compiler import Boo.Lang.Compiler.IO import Boo.Lang.Compiler.Pipelines import Boo.Lang.Compiler.Ast import Boo.Lang.Compiler.Ast.Visitors import Boo.Lang.Compiler.TypeSystem import Boo.Lang.Compiler.Steps import System.Reflection [System.Runtime.CompilerServices.CompilerGlobalScopeAttribute] class Globals: static help = """ This script visits the AST structure after each step in the compile process and converts it to XML or back to boo syntax. If there are visible differences since the previous step, it saves the output to a file in a folder named after the command-line arguments (name of source file plus options). How to use: booi path/to/showcompilersteps.boo [-xml | (-ent | -exp | -nodes) [-short] [-bind]] path/to/your/script.boo -xml: generate XML representation of AST -ent: show entity type names that are associated with AST nodes (node.Entity) -exp: show entity type names and expression types (node.ExpressionType) -nodes: show entity type names and expression types, and for each typed expression, also show the AST type (node.GetType()) -bind: show binding information contained in entities -short: abbreviate the output so that lines hopefully fit on your screen. You can also use the "-r:assembly.dll" flag to add assembly references. ShowSteps will generate a folder in the current directory named after the input file and the options you specified. It generates copies of the script after each compiler step, and puts them in that folder. If you use -exp -nodes -bind, the output can get pretty long and confusing. For example, a simple expression like "_x", that refers to a variable _x in the current class, eventually expands to a the following (all on one line): self._x The -short command-line option will use an abbreviated syntax like this: self._x Here's how to understand it. 
First of all, of course, it's not really XML, it's just an XML-like notation. If you have a text editor that can do XML syntax highlighting, use it. Second, notice that a reference to "self" has been added. Third, the outer tag (InternalField) describes the whole expression, "self._x", whereas the inner tag (InternalMethod) describes only the "self" part. The tags have the following syntax: where N: Class of AST node. For example, "MemberReferenceExpression" refers to the Boo.Lang.Compiler.Ast.MemberReferenceExpression class. T: The value of node.ExpressionType.ToString(), e.g. int E: Type of entity associated with the node, or "_" if the node has no entity. For example, "InternalField" actually refers to the Boo.Lang.Compiler.TypeSystem.InternalField class. It seems that an entity's main purpose is to hold binding information. S: The entity's EntityType (although I don't actually know what it's for.) If the EntityType is EntityType.Type, which is the most common case, then S is omitted. P: Binding Path. For example, X.Y.Z might represent a variable or method "Z" in class "Y" in namespace "X". A tag is not printed at all if there is no entity nor data type associated with a node. That's why you don't see very many tags during the first compiler steps. The "N=T" part is printed only if the node is an Expression and it has a a known data type; the Bind=S:P part is only printed if binding information is available. """ static format = "boo" //or "xml" //format for output static foldername = "compilersteps" //folder where files are saved static showents = false //whether to print entity types static showexp = false //show expression types as well static shownodetypes = false static shorten = false static showbindings = false //used internally: static savefolder as string static n = 0 static laststep as string //basic boo printer visitor, but adds comments if a node is synthetic (generated //by the compiler instead of the user). 
class BooSyntheticPrinterVisitor(BooPrinterVisitor): def constructor(writer as TextWriter): super(writer) override def Visit(node as Node) as bool: if node is not null and node.IsSynthetic: WriteIndented("// synthetic") WriteLine() WriteIndented("") return super(node) class BooTypePrinterVisitor(BooPrinterVisitor): _showexp = false _shownodetypes = false _shorten = false _showbindings = false def constructor(writer as TextWriter, show_expressions as bool, shownodetypes as bool, shorten as bool, showbindings as bool): super(writer) _showexp = show_expressions _shownodetypes = shownodetypes _shorten = shorten _showbindings = showbindings override def Visit(node as Node) as bool: return true if node is null if node.IsSynthetic: WriteIndented("// synthetic") WriteLine() WriteIndented() // Automatically indent iff starting a new line tagname = "" entity = TypeSystemServices.GetOptionalEntity(node) // aka node.Entity if entity is not null: tagname = ShortName(entity.GetType()) s = "<" s += tagname s += ExtraJunk(node) s += ">" Write(s) if _shorten: tagname = InitialsOf(tagname) elif _showexp or _showbindings: junk = ExtraJunk(node) if junk.Length > 0: tagname = "_" s = "<_${junk}>" Write(s) result = super(node) if tagname != "": WriteIndented("") return result def ShortName(t as object): t2 = t.ToString(). \ Replace("Boo.Lang.Compiler.TypeSystem.",""). \ Replace("Boo.Lang.Compiler.Ast.","") return t2 unless _shorten return t2. \ Replace("Expression", "Expr"). \ Replace("Reference", "Refr"). 
\ Replace("Internal", "Itl").Replace("External", "Xtl") def InitialsOf(s as string): s2 = System.Text.StringBuilder() for ch in s: if ch >= char('A') and ch <= char('Z'): s2.Append(ch) if s2.Length>0: return s2.ToString() else: return s def ExtraJunk(node as Node): s = System.Text.StringBuilder() if _showexp: exp = node as Expression if exp is not null and exp.ExpressionType is not null: if _shownodetypes: s.Append(" ") s.Append(ShortName(node.GetType())) s.Append("=") elif _shorten: s.Append(":") else: s.Append(" EType=") s.Append(ShortName(exp.ExpressionType.ToString())) if _showbindings: entity = TypeSystemServices.GetOptionalEntity(node) // aka node.Entity if entity is not null: if _shorten: s.Append(" @") else: s.Append(" Bind=") if entity.EntityType != EntityType.Type: if _shorten: s.Append(InitialsOf(entity.EntityType.ToString())) else: s.Append(entity.EntityType.ToString()) s.Append(char(':')) s.Append(entity.FullName) return s.ToString() def PrintAST([required]result as CompilerContext, [required]o as TextWriter): astobject = result.CompileUnit try: s = XmlSerializer( astobject.GetType() ) s.Serialize( o, astobject ) except e: print print e.GetType(), ":", e.Message def AfterStep(sender, e as CompilerStepEventArgs): ++n stepname = e.Step.ToString().Replace("Boo.Lang.Parser.","").Replace("Boo.Lang.Compiler.Steps.","") tempfile = Path.GetTempFileName() using temp = StreamWriter(tempfile): if format == "xml": PrintAST(e.Context, temp) else: try: printer as BooPrinterVisitor if showents: printer = BooTypePrinterVisitor(temp, showexp, shownodetypes, shorten, showbindings) else: printer = BooSyntheticPrinterVisitor(temp) printer.Print(e.Context.CompileUnit) except e: print e.Message + "\n" + e.StackTrace using r = StreamReader(tempfile): thisstep = r.ReadToEnd() filename = string.Format("STEP{0:D2}-{1}.{2}", n, stepname, format) if thisstep != laststep: File.Move(tempfile, Path.Combine(savefolder, filename)) laststep = thisstep print 
string.Format("STEP{0:D2}-{1}: SAVED TO {2} FILE.", n, stepname, format.ToUpper()) else: File.Delete(tempfile) print string.Format("STEP{0:D2}-{1}: NO CHANGE TO AST.", n, stepname) def LoadAssembly(assemblyName as string) as Assembly: reference as Assembly if File.Exists(Path.GetFullPath(assemblyName)): reference = Assembly.LoadFrom(Path.GetFullPath(assemblyName)) if reference is null: reference = Assembly.LoadWithPartialName(assemblyName) if reference is null: raise ApplicationException( ResourceManager.Format("BooC.UnableToLoadAssembly", assemblyName)) return reference /////////////////////////////////////////////////// if len(argv) == 0: print help return compiler = BooCompiler() compiler.Parameters.Pipeline = Compile() compiler.Parameters.Pipeline.AfterStep += AfterStep foldername_base = foldername_extra = "" for arg in argv: if arg[0:3] == "-r:": compiler.Parameters.References.Add(LoadAssembly(arg[3:])) continue elif arg == "-xml": format = "xml" elif arg == "-ent": showents = true elif arg == "-exp": showents = true showexp = true elif arg == "-ducky": compiler.Parameters.Ducky = true elif arg == "-nodes": showents = true showexp = true shownodetypes = true elif arg == "-short": shorten = true elif arg == "-bind": showbindings = true else: compiler.Parameters.Input.Add(FileInput(arg)) foldername_base += /^(.*?[\/\\])*([^\\\/]+?)(\.[^.\\\/]*)?$/.Match(arg).Groups[2] continue foldername_extra += " " + arg foldername = foldername_base + foldername_extra //delete old folder if running more than once: if Directory.Exists(foldername): Directory.Delete(foldername, true) savedir = Directory.CreateDirectory(foldername) if savedir is null or not Directory.Exists(foldername): print "The directory '${foldername}' could not be created." return savefolder = savedir.FullName try: print print "See boo/src/Boo.Lang.Compiler/Steps/ for the source code for these steps." 
print result = compiler.Run() if len(result.Errors) > 0: print "\nThere were ${len(result.Errors)} errors compiling the boo file(s)" print result.Errors.ToString(true) else: print "\nSuccessful: See the files under: '${savefolder}'" except e: print e.GetType(), ":", e.Message nant-0.9.5~git20110729.r1.202a430/examples/misc/sincos.boo000066400000000000000000000034111161462365500222100ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion import System functions = Math.Sin, Math.Cos a = [] for f in functions: a.Add(f(i) for i in range(3)) for iterator in a: print(join(iterator)) nant-0.9.5~git20110729.r1.202a430/examples/misc/stacktrace.boo000077500000000000000000000033221161462365500230420ustar00rootroot00000000000000#region license // Copyright (c) 2003, 2004, 2005 Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion import System def foo(): print Environment.StackTrace def bar(): foo() bar() nant-0.9.5~git20110729.r1.202a430/examples/misc/toolbar1.boo000066400000000000000000000037071161462365500224450ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion import System.Windows.Forms from System.Windows.Forms f = Form(Text: "Hello, Boo!") tb = ToolBar( ShowToolTips: true, TabIndex: 0, Appearance: ToolBarAppearance.Flat, Cursor: Cursors.Hand) tb.Buttons.Add(ToolBarButton(Text: "Click Me!", ToolTipText: "You heard me.")) f.Controls.Add(tb) Application.Run(f) nant-0.9.5~git20110729.r1.202a430/examples/misc/toolbar2.boo000066400000000000000000000040061161462365500224370ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. 
// // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion import System.Windows.Forms from System.Windows.Forms f = Form(Text: "Hello, Boo!") tb = ToolBar(ShowToolTips: true, TabIndex: 0, Appearance: ToolBarAppearance.Flat, Cursor: Cursors.Hand, Click: { MessageBox.Show("Cool or what?") }) tb.Buttons.Add(ToolBarButton(Text: "Click Me!", ToolTipText: "You heard me.")) f.Controls.Add(tb) Application.Run(f) nant-0.9.5~git20110729.r1.202a430/examples/misc/ugly.boo000066400000000000000000000044701161462365500217000ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. 
de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion def value(x as long, y as long, z as long): return 2**x * 3**y * 5**z def ugly(max as int): uglies = [] counter = 1L dict = {1L : (0L, 0L, 0L)} while len(uglies) < max: uglies.Add(counter) x, y, z = dict[counter] as (long) dict[value(x+1, y, z)] = (x+1, y, z) dict[value(x, y+1, z)] = (x, y+1, z) dict[value(x, y, z+1)] = (x, y, z+1) dict.Remove(counter) keys = array(long, dict.Count) dict.Keys.CopyTo(keys, 0) System.Array.Sort(keys) counter = keys[0] return uglies[-1] iter = 1500 start = date.Now for i in range(10): uvalue = ugly(iter) stop = date.Now print("${iter} ugly value = ${uvalue} in ${(stop-start).TotalMilliseconds}ms") nant-0.9.5~git20110729.r1.202a430/examples/misc/ugly.cs000066400000000000000000000053731161462365500215310ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. 
// // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#endregion using System; using System.Collections; public class App { public static long value(long x, long y, long z) { return (long)(Math.Pow(2, x)*Math.Pow(3, y)*Math.Pow(5, z)); } public static long ugly(int max) { ArrayList uglies = new ArrayList(); long counter = 1; Hashtable dict = new Hashtable(); dict[counter] = new long[] { 0, 0, 0 }; while (uglies.Count < max) { uglies.Add(counter); long[] array = (long[])dict[counter]; long x = array[0]; long y = array[1]; long z = array[2]; dict[value(x+1, y, z)] = new long[] { x+1, y, z }; dict[value(x, y+1, z)] = new long[] { x, y+1, z }; dict[value(x, y, z+1)] = new long[] { x, y, z+1 }; dict.Remove(counter); ArrayList keys = new ArrayList(dict.Keys); keys.Sort(); counter = (long)keys[0]; } return (long)uglies[uglies.Count-1]; } public static void Main() { int iter = 1500; DateTime start = DateTime.Now; long uvalue = 0; for (int i=0; i<10; ++i) { uvalue = ugly(iter); } DateTime end = DateTime.Now; Console.WriteLine("{0} ugly value = {1} in {2}ms", iter, uvalue, (end-start).TotalMilliseconds); } } nant-0.9.5~git20110729.r1.202a430/examples/misc/ugly.py000066400000000000000000000011271161462365500215450ustar00rootroot00000000000000from time import time def value(x, y, z): return 2**x * 3**y * 5**z def ugly(maxValue): uglies = [] counter = 1 d = {1 : (0, 0, 0)} while len(uglies) < maxValue: uglies.append(counter) x, y, z = d[counter] d[value(x+1, y, z)] = (x+1, y, z) d[value(x, y+1, z)] = (x, y+1, z) d[value(x, y, z+1)] = (x, y, z+1) del d[counter] keys = d.keys() keys.sort() counter = keys[0] return uglies[-1] iterations = 1500 start = time() for i in range(10): uvalue = ugly(iterations) end = time() print iterations, "ugly value =", uvalue, "in", (end - start)*1000, "ms" nant-0.9.5~git20110729.r1.202a430/examples/misc/vectors.boo000066400000000000000000000047141161462365500224060ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. 
// // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#endregion class Vector3: _x as double _y as double _z as double def constructor(): r = System.Random() _x = r.Next() _y = r.Next() _z = r.Next() def Distance(other as Vector3): dx = _x - other._x dy = _y - other._y dz = _z - other._z return System.Math.Sqrt(dx*dx+dy*dy+dz*dz) #return 1.0 def createArray(count as int): a = array(Vector3, count) for i in range(count): a[i] = Vector3() return a // array as (Vector3) = array(Vector3() for i in range(length)) a = createArray(25000) start = date.Now total = 0.0 count = 0 for v1 in a: for v2 in a: total += v2.Distance(v1) ++count elapsed = date.Now - start print("Total... ${total}.") // a good ips value is: 13.000.000 print("Done ${count} in ${elapsed.TotalSeconds} secs - ${count/elapsed.TotalSeconds} ips.") nant-0.9.5~git20110729.r1.202a430/examples/misc/vgrep.boo000066400000000000000000000073041161462365500220420ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion import System import System.IO import System.Drawing from System.Drawing import System.Windows.Forms from System.Windows.Forms def ScanFile(lv as ListView, fname as string, pattern as string): using stream=File.OpenText(fname): for index as int, line as string in enumerate(fname): if line =~ pattern: lvItem = lv.Items.Add(fname) lvItem.SubItems.Add(index.ToString()) lvItem.Tag = [fname, index] def ScanDirectory(lv as ListView, path as string, glob as string, pattern as string): for fname in Directory.GetFiles(path, glob): ScanFile(lv, fname, pattern) for path in Directory.GetDirectories(path): ScanDirectory(lv, path, glob, pattern) def fileList_SelectedIndexChanged(sender, args as EventArgs): fileList as ListView = sender txtBox as TextBox = fileList.Tag for item as ListViewItem in fileList.SelectedItems: fname as string, index as int = item.Tag txtBox.Text = File.ReadAllText(fname) txtBox.Focus() txtBox.SelectionLength = 0 txtBox.SelectionStart = index txtBox.ScrollToCaret() fileList = ListView( Dock: DockStyle.Bottom, TabIndex: 0, Size: Size(576, 144), View: View.Details, FullRowSelect: true, SelectedIndexChanged: fileList_SelectedIndexChanged ) fileList.Columns.Add("File", 400, HorizontalAlignment.Left) fileList.Columns.Add("Line", 50, HorizontalAlignment.Left) splitter = Splitter(Dock: DockStyle.Bottom, TabIndex: 1, TabStop: false) fileTab = TabControl(Dock: DockStyle.Fill) textTab = TabPage(TabIndex: 0, Text: "FileName goes 
here") txtBox = TextBox(Dock: DockStyle.Fill, AcceptsTab: true, Multiline: true, ScrollBars: ScrollBars.Vertical, Font: Font("Lucida Console", 12)) textTab.Controls.Add(txtBox) fileTab.Controls.Add(textTab) fileList.Tag = txtBox f = Form(Text: "Visual Grep Utility", Font: Font("Tahoma", 8), Size: Size(800, 600)) f.Controls.Add(fileTab) f.Controls.Add(splitter) f.Controls.Add(fileList) glob, pattern = argv ScanDirectory(fileList, ".", glob, pattern) Application.Run(f) nant-0.9.5~git20110729.r1.202a430/examples/misc/vgrep2.boo000066400000000000000000000101051161462365500221150ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion import System import System.IO import System.Drawing from System.Drawing import System.Windows.Forms from System.Windows.Forms class MainForm(Form): _fileList as ListView _filesTab as TabControl _editor as TextBox _splitter as Splitter def constructor(glob as string, pattern as string): _fileList = ListView( Dock: DockStyle.Bottom, TabIndex: 0, Size: System.Drawing.Size(576, 144), View: View.Details, FullRowSelect: true, SelectedIndexChanged: _fileList_SelectedIndexChanged) _fileList.Columns.Add("File", 400, HorizontalAlignment.Left) _fileList.Columns.Add("Line", 50, HorizontalAlignment.Left) _splitter = Splitter(Dock: DockStyle.Bottom, TabStop: false) _editor = TextBox(Dock: DockStyle.Fill, AcceptsTab: true, Multiline: true, ScrollBars: ScrollBars.Vertical | ScrollBars.Horizontal, Font: System.Drawing.Font("Lucida Console", 12)) editorTab = TabPage(TabIndex: 0, Text: "FileName goes here") editorTab.Controls.Add(_editor) _filesTab = TabControl(Dock: DockStyle.Fill) _filesTab.Controls.Add(editorTab) Controls.Add(_filesTab) Controls.Add(_splitter) Controls.Add(_fileList) ScanDirectory(".", glob, pattern) def ScanFile(fname as string, pattern as string): position = 0 newLineLen = len(Environment.NewLine) using stream=File.OpenText(fname): for index, line as string in enumerate(stream): if line =~ pattern: lvItem = _fileList.Items.Add(fname) lvItem.SubItems.Add(index.ToString()) lvItem.Tag = (fname, position) position = position + 
len(line) + newLineLen def ScanDirectory(path as string, glob as string, pattern as string): for fname in Directory.GetFiles(path, glob): ScanFile(fname, pattern) for path in Directory.GetDirectories(path): ScanDirectory(path, glob, pattern) def _fileList_SelectedIndexChanged(sender, args as EventArgs): for lvItem as ListViewItem in _fileList.SelectedItems: fname as string, position as int = lvItem.Tag _editor.Text = File.ReadAllText(fname) _editor.Focus() _editor.SelectionLength = 0 _editor.SelectionStart = position _editor.SelectionLength = 10 _editor.ScrollToCaret() glob, pattern = argv Application.Run(MainForm( glob, pattern, Text: "Visual Grep Utility", Font: Font("Tahoma", 8), Size: Size(800, 600))) nant-0.9.5~git20110729.r1.202a430/examples/misc/wget.boo000066400000000000000000000042441161462365500216650ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion import System import System.Net import System.IO def GetFileName(url as string): uri = Uri(url) return Path.GetFileName(uri.AbsolutePath) def DownloadTo(url as string, fname as string): using response=WebRequest.Create(url).GetResponse(): reader=response.GetResponseStream() using writer=File.OpenWrite(fname): buffer=array(byte, 1024) while read=reader.Read(buffer, 0, len(buffer)): Console.Write(".") writer.Write(buffer, 0, read) url = argv[0] fname = GetFileName(url) print("${url} => ${fname}") DownloadTo(url, fname) nant-0.9.5~git20110729.r1.202a430/examples/misc/whichserver.boo000066400000000000000000000033351161462365500232500ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. 
de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion import System.Net using response=WebRequest.Create(argv[0]).GetResponse(): print(response.Headers["Server"]) nant-0.9.5~git20110729.r1.202a430/examples/misc/winforms.boo000066400000000000000000000042451161462365500225640ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. 
de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion import System import System.Windows.Forms from System.Windows.Forms class App: [getter(Times)] _times = 0 def Run(): f = Form(Text: "My first boo winforms app") button = Button(Text: "click me!") button.Click += def: print("clicked!") ++_times f.Controls.Add(button) if Application.MessageLoop: # if we are running inside boox # just show a dialog f.ShowDialog() else: Application.Run(f) app = App() app.Run() print("The button was clicked ${app.Times} times.") nant-0.9.5~git20110729.r1.202a430/examples/misc/wrapLines.boo000077500000000000000000000045041161462365500226650ustar00rootroot00000000000000#region license // Copyright (c) 2003, 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. 
// * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#endregion import System.Text.RegularExpressions def getWordBreaks(s as string): return m.Index for m as Match in /\b|$/.Matches(s) def wrapLines(s as string, columns as int): lines = [] nextBreak = columns lastBreak = 0 lineStart = 0 for wb as int in getWordBreaks(s): if wb > nextBreak: line = s[lineStart:lastBreak] lines.Add(line.Trim()) lineStart = lastBreak nextBreak = lastBreak + columns lastBreak = wb lines.Add(s[lineStart:].Trim()) return lines def test(s): print(join(wrapLines(s, 12), "*\n*")) test("this is a very long string") test("It is a long established fact that a reader will be distracted by the readable content of a page when looking at its layout.") nant-0.9.5~git20110729.r1.202a430/examples/misc/xml.boo000066400000000000000000000053761161462365500215260ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion import System import System.Xml.Serialization from System.Xml import System.IO class Address: public Street as string public Number as int override def ToString(): return "${Number}, ${Street}" class Person: _fname as string _lname as string _addresses as (Address) def constructor(): pass def constructor(fname, lname): _fname = fname _lname = lname [XmlAttribute("FirstName")] FirstName: get: return _fname set: _fname = value [XmlAttribute("LastName")] LastName: get: return _lname set: _lname = value Addresses as (Address): get: return _addresses set: _addresses = value p1 = Person("Homer", "Simpson") p1.Addresses = (Address(Street: "Al. Foo", Number: 35), Address(Street: "Al.Bar", Number: 14)) buffer = StringWriter() serializer = XmlSerializer(Person) serializer.Serialize(buffer, p1) Console.WriteLine(buffer.ToString()) p2 as Person = serializer.Deserialize(StringReader(buffer.ToString())) Console.WriteLine("${p2.LastName}, ${p2.FirstName}") for address in p2.Addresses: Console.WriteLine("\t${address}") nant-0.9.5~git20110729.r1.202a430/examples/misc/xmllogdata.boo000066400000000000000000000043601161462365500230520ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. 
// // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#endregion import System import System.Xml.Serialization from System.Xml import System.IO class GameSession: public Name as string def constructor(): pass def constructor(name as string): Name = name class GameLogData: _sessions = [] Sessions as (GameSession): get: return _sessions.ToArray(GameSession) set: _sessions.Clear() _sessions.Extend(value) def Add([required] session as GameSession): _sessions.Add(session) data = GameLogData() data.Add(GameSession("Foo")) data.Add(GameSession("Bar")) serializer = XmlSerializer(GameLogData) serializer.Serialize(Console.Out, data) nant-0.9.5~git20110729.r1.202a430/examples/pipeline/000077500000000000000000000000001161462365500210645ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/examples/pipeline/AutoImport/000077500000000000000000000000001161462365500231675ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/examples/pipeline/AutoImport/client.boo000077500000000000000000000032201161462365500251460ustar00rootroot00000000000000#region license // Copyright (c) 2003, 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. 
// // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion import globals print(Foo()) nant-0.9.5~git20110729.r1.202a430/examples/pipeline/AutoImport/globals.boo000077500000000000000000000033011161462365500253130ustar00rootroot00000000000000#region license // Copyright (c) 2003, 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. 
// // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion namespace globals class Foo: override def ToString(): return "I'm a foo!" nant-0.9.5~git20110729.r1.202a430/examples/pipeline/AutoImport/main.boo000077500000000000000000000055621161462365500246270ustar00rootroot00000000000000#region license // Copyright (c) 2003, 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. 
// // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion import Boo.Lang.Compiler import Boo.Lang.Compiler.Ast import Boo.Lang.Compiler.IO import Boo.Lang.Compiler.Pipelines import Boo.Lang.Compiler.Steps import System.IO class AutoImport(AbstractVisitorCompilerStep): override def OnImport(node as Import): references = self.Parameters.References errors = self.Errors for reference in references: simpleName = @/, /.Split(reference.FullName)[0] return if simpleName == node.Namespace result = compile("${node.Namespace}.boo", CompilerOutputType.Library) if len(result.Errors): for e in result.Errors: errors.Add(e) else: references.Add(result.GeneratedAssembly) override def Run(): Visit(CompileUnit) def compile(fname as string, outputType as CompilerOutputType): pipeline = CompileToMemory() pipeline.Insert(1, AutoImport()) print("compiling ${fname}...") compiler = BooCompiler() compiler.Parameters.OutputType = outputType compiler.Parameters.Input.Add(FileInput(fname)) compiler.Parameters.Pipeline = pipeline result = compiler.Run() print("done.") return result result = compile("client.boo", CompilerOutputType.ConsoleApplication) if len(result.Errors): print(result.Errors.ToString(true)) else: result.GeneratedAssemblyEntryPoint.Invoke(null, 
(null,)) nant-0.9.5~git20110729.r1.202a430/examples/pipeline/EverybodyLovesDucks.boo000077500000000000000000000073271161462365500255540ustar00rootroot00000000000000#region license // Copyright (c) 2003, 2004, 2005 Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#endregion import Boo.Lang.Compiler import Boo.Lang.Compiler.Ast import Boo.Lang.Compiler.Steps import Boo.Lang.Compiler.TypeSystem class EverybodyLovesDucks(ProcessMethodBodiesWithDuckTyping): _getInRuntime as IMethod override def OnReferenceExpression(node as ReferenceExpression): entity = self.NameResolutionService.Resolve(node.Name) if entity is not null: super(node) else: mie = CodeBuilder.CreateMethodInvocation( CodeBuilder.CreateSelfReference(self._currentMethod.DeclaringType), _getInRuntime) mie.Arguments.Add(CodeBuilder.CreateStringLiteral(node.Name)) node.ParentNode.Replace(node, mie) override def InitializeMemberCache(): super() _getInRuntime = TypeSystemServices.Map(typeof(BaseTemplate).GetMethod("GetInRuntime")) class ProcessTemplate(AbstractVisitorCompilerStep): override def Run(): Visit(self.CompileUnit) override def OnModule(node as Module): template = ClassDefinition(Name: "Template") template.BaseTypes.Add(CodeBuilder.CreateTypeReference(BaseTemplate)) template.Members.Add( Method(Name: "Run", Body: node.Globals)) node.Members.Add(template) node.Globals = Block() class BaseTemplate: def GetInRuntime(name as string): return "name is: ${name}" abstract def Run(): pass template = """ print name """ pipeline = Pipelines.CompileToMemory() pipeline.InsertAfter(InitializeTypeSystemServices, ProcessTemplate()) pipeline.Replace(ProcessMethodBodiesWithDuckTyping, EverybodyLovesDucks()) compiler = BooCompiler() compiler.Parameters.Input.Add(IO.StringInput("template.boo", template)) compiler.Parameters.Pipeline = pipeline compiler.Parameters.OutputType = CompilerOutputType.Library try: result = compiler.Run() print result.Errors.ToString(true) if len(result.Errors) print "the generated code looks like this:" print print result.CompileUnit assembly = result.GeneratedAssembly assert assembly is not null templateType = assembly.GetType("Template") templateInstance as BaseTemplate = templateType() templateInstance.Run() except x: print x 
nant-0.9.5~git20110729.r1.202a430/examples/pipeline/Extensibilidade.sxw000066400000000000000000000375711161462365500247410ustar00rootroot00000000000000PKx019mimetypeapplication/vnd.sun.xml.writerPKx0hlOO layout-cachepKP P 3P K9P d P qP 1PKx0 content.xml]_oɑO HQ,K^;emVmlmnCМiJ;3͝d9=`&$WUrf(Rs vENW_PK<&G2|󭧝_ſ05JGʉ}D[ "/;cF"8f]^z.^1qz1ul^@c睫(z Q0;=c"ihakl6惊E xuzZz_\>Oid4i@)/E/:7 $;W<\?jqd׭Ȯ4^͙xHz:c_/sB9ZLiI;Fw=a/<rop}:lL!,rW ͖6O{8<0ۿס%כ E$^ͱC)BV\vs>RY0mvR"\&#9 >Cy#Jzhx_8AwN%@y?R7! *y_pZ !iI]lI`H yx)-O Anw@urB-x~FbK.~mƉn {rS^05_g:kZOd}qwQG[\K,j(䉛ɚydޝbHE$\'(O߫d}W<"Z~aGm7JۂskҧWM>U}Ԙ .5 uaZlwj*OQfo 9.n4-\D@]IKj(O;M$ 0I鳔wIw Wһ˕] @hRaRJݥT_כe9XJ; u.i;KHure2`BKy'khh\02XzyŌ)cZ_ {ӽ' 5y}p`(Zc}?y`U%A|RYN3#ZW&P>nɀ%ְ6fBc9m#>5 UjAQn']ƣ*7ҷy?Os@a$*xb,<版lh^f.3 bKáwLT?l:G@ɵtp?xx<4LU#f6`8{?=`x|?;>x`ػNdēO'niqpbp"K?Nr߹#{\x-lg <[xwXG! V]$o]/;ye_.F)׮f ,)qr@ˁ]J{)yqDrɟ'3h}%T!0U`.ȲfDKW2p20V id >~;pXq|`3F827p&?R1E!xm,ʒ '&8#Ps/!>rHDq,gɴ3q 1 {]v]K"Prn2qԜMg(lGJkc(-ldv 6Zx$p鄙2E_KDЯbj8˯̭N1r8 @Lk8b 3 i@n@"|b,L[`!k 4D 'A' (e46:"RXZ!"X;QVQi(&, ? ' R(!ȁ!%3;{GkYG‘iQqBWB>KԵx Cc`@Bai2Rɞ17[GH~ɊPPgٽRjP*v)Bِ* FC]143%BR@YPD6*:+ +:c}'ƙ&0DžJ`p.75Nwoaj  P3Cо4YS֡C; 9#iJN o Zmi -h4l"fb!\j~.?dHVs&Lu{,A {@؎_",CPIGpPMO`,a7'36s>jujvhq539,R:ϏCf,O,T3rIj$lc Tp4rMg>07U&@*PA=h|2QA# N&SB'l;`7`B6.^&, `jG7&`y%8R[xvjZTJ1 Ђ-Heowk#L%֫}Hx@.+4n#13fHB#:,mEW4C2ņՕ)SE'<~t\L >6Ks_D=B72%h1gK8J 9k*rF"<9B/5a 5Rz5C]*$ 5tdNnr^dGG%hh(ᣱfٞ6'`${_%]`%l͂MfWs#]< gD 4nI\h=V\ 8xՈR=Njl.̃f6 Р TJ>((6`"WUN9ڬ[y9!LL@4N2'ݜ0eLvAF b/F$)l.I&&ÂO*,}q R<&w;:o2|eƊCݼ^L@ "(,HFEMOrLVMu Bo_uj ٙe@"p SddMUV;u:y`<2$2($B- <# I(%+?" 
OLKI WX-6M4g4)Mi̓h B8uVSn f ֳ aLEsUd dbH?::nm{^voez1j_N/G](j ::(9i%9 5*zzqBf4L3 70ozmбr f=CMZi}&z5Jb~H'BӻBMqd+LBsy, Zt(iKu[&MJE$,lMQ6YsU1~n5b x2ќ49Ř'"y,tKppP—m!Hra(.QD*$Ə_2C~3L`rO;@T,: e<]W9Aټz\9~opH E:~o>rnHdއCRxԎzrгFVо̧( Q4qyZRʈ"2Q "3@ي g+ۺ(YB0oШ6y|#e WS f<QN ǗPN3, ӅxA XdB= gؘ`JZiesԘ6E#DT3oc'+= =VIտ`9Kp5^报n2A^o)gXpt\('ii5ZB\U^X4Ɗ2o s*5lrLj캤9kL>'pհ*8)IhbJw_I$9a7"통 YVM 11`O9J[Ch{YSQbU Vbf2beU.ׅ&e9O4q*JPlfN}+&$y2NXiD͡HMUIi .8BJ i t+k3;>>|X-) Y6GXÏ"}HW٦R.ur\alI-G#[3EmxLD.z&ЦCbmq>\ȯϐ1 "J{z|L>Ɏɏb{G*78Hyb eKQ~7CR,`v t߉55*,^>bWDIx!%\byNt)XH7SM"Z*z_<;p٤KV8(&uMUa2K Xr,fT.>*'&b5IzXO)/r̾ ;V~.V@9ZL~sZ gUC>HPcM9tKSE?# ,f|SwUMfNL+H]4 }Bu>#LɾA:@֖ q끍(9#1#ع^{r`fYꕴ'$o dz+ L,qdO3ߤc $hR(:pC_@~vֻ L]c1_$tN;m';▵ilBx:ӕdnvnw,ea){doJ}#>F۲|J [jÉl~)jmGD}:%ӳf5v3[L57$+=&@ 4 J{Yssdua%wY+UIO^zC -JdY Y&?c<;rVJ6 Y[:n*}{HǮ}]NLvJV)-!lr#Ny%b40WK ˞:ˡ RdR;w6-TP0Y..IM_+e'1- (2NAm]1w^8Z/۴Y6Xfz٨R1~l,vrܧ UӶ$0KT9̹~o4@K/+- #s~{@$W3M W,FaqII-Q=̭+C W7eIoB^US3ݻ#B$#Hq4k',><<@&d*˽Tl |xhzрGhteH[!^2 &Zꊅ[mIFO3 k:]Yިlv$MImXGCPqb/SI !TMfDyW*kgR3P{)U\A|RA~ËsbFzu;10`#v 0e}za\__wR ׽A*%1\n`3aɏt+\XK6+j(OG(m^JU|M ]IG FMWyB<೷-(7RGh02߼<..7;洎˘'n^/l9D9xqyf>\;I?rڵPt_(*g[NOxkZR5Ur~"8Xcн2[*vŻӾ`]*V]oXb;Xܦ}Hu#?R\2}/`v~ !x਽> {El K7Ń=pn j.YzҲXoz%e\޸eɒrqw=Kq7g+ yysQܞAk>!lXPK$!PKx0 styles.xml]s۸޿MD=,vt$=;7G$4I˾ ŇDCcbXlu3P㺧= *Ո}({|~y;VD?kTذdzձ)}'_w*Z)[OIa/=k>6ph?;׽e~ (BQFps;ۋ^s X[~c̡-Zx5[~Eb~-yyщ#vaN"4>"c-5n2&8<@}$Sit?Z|]QX 0( y{|.ޓViB[FB48r&_dөW}$}<7#(9Qw֟ETGڌ`&}$5jD׽-03"MvRA5Vkב% ם{[%naRAJE*;$k[G\ K KASnsFOJde hX}lW 09,ܴOhC(9W^*'7 E~quW=./kv=afw@ 66,;)o(# H LJ2?szjjklhh'ZSUS~$B2}Zh Epن b22 2RQؖ^Kgʫ3:wԴ`tnw:~I3 i,VrGMFE<,a*JFn!%'3bq9ryәl-}E4Lq"}C44l^wl @Z!݌'3Eޘ,\FF;4ёWoP0[bޝmC580oqSoOeY4 l>sTabOPvO(:28a뿃N8H8gY>ki~s|ҒjBy`IeJ .e-p>ڣ0Ӓ%bhh$8zB:^Y)3bx˅,5iSP$b<+|<(^͌'aLB=7cWM@q*#mK3^qi[&ߋ *l/w IC/ .x?)YbV f Φ&'Uezq܈r)DAI&C8F⾹w4H%{)ጘg?q0?0{bmdCD-f^Lu/ 2}ǎF/e**^^uzҌ~ѿ0B)O7&L-x\&ZdiSWٸ2+'j}Æj H!´+%J׃UpuV׌quyIp]tWe[+3t-D3W'rMqCfgN3:`SRGIo+Io*;Cv 6FA/EY>\Żuc8xFJ2zG󹔋_Jz7K1^gҥ6~;3/lPřO1ĢjR*wL2ݾP߆.<stqo"ksCI!v mke gJ+P9rVa_(޳ ˡE0_ce0S&LNo 
s^XItWWHըD5pOyTb>*%͓aƭCOTWf %rVȡԧB*a<^Dh2iJ}ZvlbԎݡ܇H*aV'l4꺆N%o.^".nBV-i-7"W#yo[[omm-Fod*p~\7"W#+¤ICJCBJC肥C.m6\DroU}:^Zaڮ`;~~.X;fpyOn 9g~T& 22)8rk:rIׇCV0%ΥxrcmyS+sS-R7+Ȳ&ͪP3 zoDCm^wl B o2~|{Dmn"M&:l8 1k,SVo)Jѭć+,oƞ*NPRqP?O\'`n cgIJvDpPc@*q6?lC-50KxT`Z{nGxBN~^С׮ԫ<cgy(%7nO&i@*(eByna)13~u NQЉZ]apjjwzhQ$DZp)adGٓ^&1\A`eaM/ܱgTqLp_tYK("qhꟐsm)MFX҆dx&hǮ0w"!!h}`KIf:lȃmɴZK WTPuh ~I3+ү>tjD>ODԴͭeb@7lTzqq#,W]4h\%W{_$т"&U<]xE iSWuwj杂/d7%N{ 5[ٹtqGU={RRJgz+^T<{>뒚~YxI:{i9aF>"#F޾iT4ΉG1*J,rV&Ht;_0uЗ •KHյ<4*{!X'SaDq J؟nסr`NU2#WY +0J`/bHJ={Gu&[;3st4~{=~ţ#CntwƋyj3j4ur00SD:Z2O3T8HN:ZNL}PNTAs+;Ĩm ],Ά Pr`yd>=="1 ^7yPK[QPKx0enmeta.xml OpenOffice.org 1.1 (Win32)Rodrigo B. de Oliveira2004-04-15T18:30:36Rodrigo B. de Oliveira2004-04-20T15:51:49en-US244PT14H25M8SPKx0 settings.xmlYKs8rM9@^OWB&ln2t0e!B~ę [Ylj0׭V?˯Qy\I"u %M'V\wcOpQ7n ڤ3jwޑ< C|Y6&F|7ަu#Y6TzܓBJ\W҂BC9RE.ߥ a@YfbtMR%˾YwQg#UJUs>=b'4#|p*GBKyw̫O/*>;SNwþ_Tʭjg)4qBa uqɢ&8Sߍp msDaYmphCqL9jm)a`B_ q)zF9Qu\9>5h"6-nAY.wНBp9xxtǿgW[ sME7 O'n;r( C^Mf(ryU8DRd4$y9To }Џ#t-f4Tjb'!CmXnF}p&[(byVps(שk4lK9*'pv`de1qoOG0Qc!|=+q~5HFBT<Bx0L#͞9L ޼4M"GHGBBZW<9)llϟ~~+֢#6鍑~PhFUs6ׯ_i%+tsjk p5Mg+k39fI׀!M(r&!p}BWm/v AI0%.O]o :4$h2&ԕW^O20CtDnhUE,Z糲Kը9[e;fEZhv-kEQ4vuFxgq@K & n"M7յv y;5>h]`Z G >,F5IE_4rNPKT]˂PKx0META-INF/manifest.xmln0>{{rЩB*UjK:΅XrȾPu@%Db|scĉ|f0Og *Wj{MazX>؊FZ]QŰm# AܵdwU}.>F~(-eF). jv"]OWR %d2ᾥ d$[ɖilC<\Θ\g2)Vy SixV~od:NT5ML`:3!tWnP;!w%1.Ky_PK4RPKx019mimetypePKx0hlOO Dlayout-cachePKx0$! content.xmlPKx0[Q "styles.xmlPKx0en2meta.xmlPKx0T]˂ 7settings.xmlPKx04Ry<META-INF/manifest.xmlPK=nant-0.9.5~git20110729.r1.202a430/examples/pipeline/Extensibilidade.txt000066400000000000000000000021741161462365500247260ustar00rootroot00000000000000A maior parte dos compiladores existentes para as linguagens de programação mais populares são incríveis e funcionais caixas-preta que traduzem código fonte armazenado em arquivos para programas executáveis por uma determinada arquitetura de computador, seja ela real ou virtual. 
Alguns compiladores nem tão incríveis e outros nem tão funcionais, mas uma coisa é certa, não é possível ensiná-los a fazer muito mais. E é esse o estado da arte de uma das mais fundamentais ferramentas ao ofício de programador. Irônico de se pensar pois dada a oportunidade e correta dosagem de cafeína, qualquer programador experiente terá muito a dizer sobre as idades de seus programas e projetos, entre elas, a tal da extensibilidade. Nesta série de artigos serão explorados os três mecanismos principais de extensibilidade embutidos em boo, mais especificamente: * como estender e alterar o processo de compilação utilizando pipelines personalizadas; * como criar micro geradores de código pela definição de atributos ativos; * como adicionar novos tipos de blocos e construções à linguagem através da definição de macros; nant-0.9.5~git20110729.r1.202a430/examples/pipeline/HelloPipeline/000077500000000000000000000000001161462365500236155ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/examples/pipeline/HelloPipeline/default.build000066400000000000000000000023671161462365500262720ustar00rootroot00000000000000 nant-0.9.5~git20110729.r1.202a430/examples/pipeline/HelloPipeline/src/000077500000000000000000000000001161462365500244045ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/examples/pipeline/HelloPipeline/src/HelloPipeline.boo000066400000000000000000000036601161462365500276430ustar00rootroot00000000000000#region license // Copyright (c) 2003, 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. 
// * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion namespace HelloPipeline import Boo.Lang.Compiler import Boo.Lang.Compiler.Steps import Boo.Lang.Compiler.Pipelines class HelloPipelineStep(AbstractCompilerStep): override def Run(): print("Hello from ${GetType()}!") class HelloPipeline(CompileToFile): def constructor(): self.Add(HelloPipelineStep()) nant-0.9.5~git20110729.r1.202a430/examples/pipeline/HelloPipeline/src/HelloWorld.boo000066400000000000000000000032701161462365500271620ustar00rootroot00000000000000#region license // Copyright (c) 2003, 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. 
// // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion """ The (in)famous Hello World example. """ print("Hello, world!")nant-0.9.5~git20110729.r1.202a430/examples/pipeline/NoPrimitives.boo000077500000000000000000000052161161462365500242240ustar00rootroot00000000000000#region license // Copyright (c) 2003, 2004, 2005 Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. 
// // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#endregion import Boo.Lang.Compiler import Boo.Lang.Compiler.Ast import Boo.Lang.Compiler.Steps import Boo.Lang.Compiler.TypeSystem class CustomTypeSystem(TypeSystemServices): def constructor(context as CompilerContext): super(context) override def PreparePrimitives(): self.AddPrimitiveType("string", self.StringType) self.AddPrimitiveType("void", self.VoidType) class InitializeCustomTypeSystem(AbstractCompilerStep): override def Run(): self.Context.TypeSystemServices = CustomTypeSystem(self.Context) pipeline = Pipelines.CompileToMemory() pipeline.Replace(InitializeTypeSystemServices, InitializeCustomTypeSystem()) pipeline.RemoveAt(pipeline.Find(IntroduceGlobalNamespaces)) code = """ import System.Console WriteLine(date.Now) WriteLine(List()) """ compiler = BooCompiler() compiler.Parameters.Input.Add(IO.StringInput("code.boo", code)) compiler.Parameters.Pipeline = pipeline compiler.Parameters.OutputType = CompilerOutputType.Library result = compiler.Run() print result.Errors.ToString(true) nant-0.9.5~git20110729.r1.202a430/examples/pipeline/StyleChecker/000077500000000000000000000000001161462365500234515ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/examples/pipeline/StyleChecker/Example.boo000066400000000000000000000036401161462365500255500ustar00rootroot00000000000000#region license // Copyright (c) 2003, 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. 
de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion """ A module that violates all checked style guidelines. """ namespace Boo.Examples class person: fname as string lname as string def constructor(FName, LName as string): fname = FName lname = LName override def ToString(): return "${lname}, ${fname}" print(person("Eric", "Idle")) nant-0.9.5~git20110729.r1.202a430/examples/pipeline/StyleChecker/StyleChecker.boo000066400000000000000000000051551161462365500265450ustar00rootroot00000000000000#region license // Copyright (c) 2003, 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. 
// * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#endregion namespace StyleChecker import Boo.Lang.Compiler import Boo.Lang.Compiler.Ast import Boo.Lang.Compiler.Steps import Boo.Lang.Compiler.Pipelines class StyleCheckerStep(AbstractVisitorCompilerStep): override def Run(): Visit(CompileUnit) override def LeaveClassDefinition(node as ClassDefinition): if not System.Char.IsUpper(node.Name[0]): AddError(node, "Class name '${node.Name}' does not start with uppercase letter!") override def LeaveField(node as Field): if not node.IsPublic: if not node.Name.StartsWith("_"): AddError(node, "Field name '${node.Name}' does not start with '_'!") override def LeaveParameterDeclaration(node as ParameterDeclaration): if not System.Char.IsLower(node.Name[0]): AddError(node, "Parameter name '${node.Name}' does not start with lowercase letter!") def AddError(node as Node, message as string): Errors.Add(CompilerError(node, message)) class StyleCheckerPipeline(CompileToFile): def constructor(): self.Insert(1, StyleCheckerStep()) nant-0.9.5~git20110729.r1.202a430/examples/pipeline/StyleChecker/default.build000066400000000000000000000021441161462365500261170ustar00rootroot00000000000000 nant-0.9.5~git20110729.r1.202a430/examples/pipeline/Trace/000077500000000000000000000000001161462365500221225ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/examples/pipeline/Trace/HelloWorld.boo000066400000000000000000000037271161462365500247070ustar00rootroot00000000000000#region license // Copyright (c) 2003, 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. 
// * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion class Knight: def Speak(): print("ni!") def foo(): print("inside foo.") for i in range(5000000): pass def bar(): print("inside bar.") for i in range(5000000/2): pass raise "a exception!" def main(): foo() try: bar() except x: print("caught '${x.Message}'.") Knight().Speak() main()nant-0.9.5~git20110729.r1.202a430/examples/pipeline/Trace/TracePipeline.boo000066400000000000000000000070051161462365500253510ustar00rootroot00000000000000#region license // Copyright (c) 2003, 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. 
// // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion namespace TracePipeline import Boo.Lang.Compiler import Boo.Lang.Compiler.Ast import Boo.Lang.Compiler.Pipelines import Boo.Lang.Compiler.Steps class TracePipelineStep(AbstractVisitorCompilerStep): """ Visits every method adding a trace statement at both its very beginning and end. 
""" override def Run(): Visit(CompileUnit) override def LeaveMethod(method as Method): stmt = TryStatement() stmt.ProtectedBlock = method.Body stmt.ProtectedBlock.Insert(0, MethodStart("TRACE: Entering ${method.FullName}")) stmt.EnsureBlock = MethodEnd("TRACE: Leaving ${method.FullName}") method.Body = Block() method.Body.Add(stmt) def MethodStart(msg as string): // { print(msg) block = Block() mie = MethodInvocationExpression(ReferenceExpression("print")) mie.Arguments.Add(StringLiteralExpression(msg)) block.Add(mie) // __start = date.Now } dateNow = MemberReferenceExpression( ReferenceExpression("date"), "Now") block.Add( BinaryExpression(BinaryOperatorType.Assign, ReferenceExpression("__start"), dateNow)) return block def MethodEnd(msg as string): // { __time = date.Now - __start block = Block() dateNow = MemberReferenceExpression( ReferenceExpression("date"), "Now") block.Add(BinaryExpression(BinaryOperatorType.Assign, ReferenceExpression("__time"), BinaryExpression(BinaryOperatorType.Subtraction, dateNow, ReferenceExpression("__start")))) // print(msg + ": " + __time) } msgTime = BinaryExpression(BinaryOperatorType.Addition, StringLiteralExpression(msg + ": "), ReferenceExpression("__time")) mie = MethodInvocationExpression(ReferenceExpression("print")) mie.Arguments.Add(msgTime) block.Add(mie) return block class TracePipeline(CompileToFile): def constructor(): self.Insert(1, TracePipelineStep()) nant-0.9.5~git20110729.r1.202a430/examples/pipeline/Trace/default.build000066400000000000000000000023471161462365500245750ustar00rootroot00000000000000 nant-0.9.5~git20110729.r1.202a430/examples/prevalence/000077500000000000000000000000001161462365500214035ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/examples/prevalence/tasks.boo000066400000000000000000000063211161462365500232330ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. 
// // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#endregion import System import Bamboo.Prevalence from Bamboo.Prevalence class Task: [getter(Id)] _id = -1 [getter(DateCreated)] _dateCreated = date.Now [getter(Summary)] _summary as string [property(Done)] _done = false def constructor([required] summary): _summary = summary internal def Initialize(id): _id = id class TaskList(MarshalByRefObject): _tasks = [] _nextId = 0 Tasks: get: return array(Task, _tasks) PendingTasks: get: return array(task for task as Task in _tasks unless task.Done) def Add([required] task as Task): task.Initialize(++_nextId) _tasks.Add(task) def MarkDone(id as int): for task as Task in _tasks: if id == task.Id: task.Done = true break def Menu(message as string, options as Hash): choice = prompt(message).ToLower() selected as callable = options[choice] if selected: selected() else: print("'${choice}' is not a valid choice") def ShowTasks(tasks as (Task)): print("id\tDate Created\t\tSummary") for task in tasks: print("${task.Id}\t${task.DateCreated}\t\t${task.Summary}") engine = PrevalenceActivator.CreateTransparentEngine(TaskList, "c:\\temp\\data") system as TaskList = engine.PrevalentSystem options = { "a" : { system.Add(Task(prompt("summary: "))) }, "d" : { system.MarkDone(int.Parse(prompt("task id: "))) }, "s" : engine.TakeSnapshot, "q" : { Environment.Exit(-1) } } while true: ShowTasks(system.PendingTasks) Menu("(A)dd task\t(D)one with task\t(S)napshot\t(Q)uit\nyour choice: ", options) nant-0.9.5~git20110729.r1.202a430/examples/remoting/000077500000000000000000000000001161462365500211035ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/examples/remoting/client.boo000066400000000000000000000036631161462365500230720ustar00rootroot00000000000000#region license // Copyright (c) 2003, 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. 
// // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#endregion import System import System.Runtime.Remoting import System.Runtime.Remoting.Channels import System.Runtime.Remoting.Channels.Tcp channel = TcpChannel(); Channels.ChannelServices.RegisterChannel(channel) obj as SampleObject = Activator.GetObject(SampleObject, "tcp://localhost:8080/HelloWorld"); print obj.HelloWorld() nant-0.9.5~git20110729.r1.202a430/examples/remoting/default.build000066400000000000000000000017621161462365500235560ustar00rootroot00000000000000 nant-0.9.5~git20110729.r1.202a430/examples/remoting/library.boo000066400000000000000000000033351161462365500232540ustar00rootroot00000000000000#region license // Copyright (c) 2003, 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion import System class SampleObject(MarshalByRefObject): def HelloWorld(): return "Hello World!" nant-0.9.5~git20110729.r1.202a430/examples/remoting/server.boo000066400000000000000000000037471161462365500231250ustar00rootroot00000000000000#region license // Copyright (c) 2003, 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion import System import System.Runtime.Remoting import System.Runtime.Remoting.Channels import System.Runtime.Remoting.Channels.Tcp channel = Tcp.TcpChannel(8080) Channels.ChannelServices.RegisterChannel(channel) RemotingConfiguration.RegisterWellKnownServiceType(SampleObject, "HelloWorld", WellKnownObjectMode.SingleCall) prompt("Press the enter key to exit...") nant-0.9.5~git20110729.r1.202a430/examples/sharpdevelop/000077500000000000000000000000001161462365500217535ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/examples/sharpdevelop/pascalcase.boo000077500000000000000000000063421161462365500245630ustar00rootroot00000000000000#region license // Copyright (c) 2003, 2004, 2005 Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. 
de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion import System import System.IO import System.Text.RegularExpressions import Useful.IO from Boo.Lang.Useful import ICSharpCode.SharpRefactory.Parser from "ICSharpCode.SharpRefactory" import ICSharpCode.SharpRefactory.Parser.AST import ICSharpCode.SharpRefactory.PrettyPrinter class PascalCasePrinter(PrettyPrintVisitor): def constructor(originalFileName as string): super(originalFileName) override def Visit(method as MethodDeclaration, data): method.Name = ToPascalCase(method.Name) return super(method, data) override def Visit(invocation as InvocationExpression, data): memberRef = invocation.TargetObject as FieldReferenceExpression if memberRef is not null: memberRef.FieldName = ToPascalCase(memberRef.FieldName) else: identifier = invocation.TargetObject as IdentifierExpression if identifier is not null: identifier.Identifier = ToPascalCase(identifier.Identifier) return super(invocation, data) def ToPascalCase(name as string): return name[:1].ToUpper() + name[1:] def preprocess(code as string): pp = PreProcessor() 
pp.Define("foo") return pp.Process(code) code = """ class YapFoo { #if foo // a comment public void bar() { } #endif public string Prop { get { return null; } } public void baz() { this.bar(); bar(); } #if bang void bang() { } #endif } """ p = Parser() p.Parse(Lexer(StringReader(preprocess(code)))) printer = PascalCasePrinter("code.cs") options = printer.PrettyPrintOptions options.MethodBraceStyle = BraceStyle.NextLine printer.Visit(p.compilationUnit, null) print printer.Text nant-0.9.5~git20110729.r1.202a430/examples/silverlight/000077500000000000000000000000001161462365500216135ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/examples/silverlight/tetris/000077500000000000000000000000001161462365500231255ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/examples/silverlight/tetris/AppManifest.xaml000066400000000000000000000006561161462365500262260ustar00rootroot00000000000000 nant-0.9.5~git20110729.r1.202a430/examples/silverlight/tetris/Tetris.boo000066400000000000000000000223341161462365500251040ustar00rootroot00000000000000#region license // Copyright (c) 2003, 2004, 2005 Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. 
// // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion #Silverlight Tetris Example #Author: Vladimir Lazunin # #How to compile: #booc -nostdlib -target:library -lib:"C:\Program Files\Microsoft SDKs\Silverlight\v2.0\Reference Assemblies","C:\Program Files\Microsoft SDKs\Silverlight\v2.0\Libraries\Client" Tetris.boo namespace Tetris import System import System.Windows import System.Windows.Controls import System.Windows.Media import System.Windows.Shapes import System.Windows.Input import System.Windows.Threading ######## Silverlight stuff ######## class MyPage(UserControl): canvas = Canvas() textblock = TextBlock() textblock2 = TextBlock() gColors = [Colors.Black, Colors.Gray, Colors.Red, Colors.Green, Colors.Blue, Colors.Yellow, Colors.Orange, Colors.White, Colors.Magenta] leds as (Rectangle, 2) matr as (int, 2) tserv = TetServer() paused = false timer = DispatcherTimer() def constructor(): #a block of text where we print how many lines have you cleared textblock.FontSize = 24 textblock.Text = "Lines: 0" canvas.Children.Add(textblock) canvas.SetLeft(textblock, 100) canvas.SetTop(textblock, 500) #another one, where we print "pause" and "finish" textblock2.FontSize = 30 textblock2.Text = '' textblock2.Foreground = 
SolidColorBrush(Colors.Yellow) matr = tserv.frame() leds = matrix(Rectangle, len(matr, 0), len(matr, 1)) n_r = 20 for i in range(len(matr, 0)): n_c = 50 for j in range(len(matr, 1)): r = Rectangle() r.Width = 20 r.Height = 20 r.Fill = SolidColorBrush(gColors[matr[i, j]]) canvas.Children.Add(r) canvas.SetLeft(r, n_c) canvas.SetTop(r, n_r) leds[i, j] = r n_c += 20 n_r += 20 canvas.Children.Add(textblock2) canvas.SetLeft(textblock2, 100) canvas.SetTop(textblock2, 200) KeyDown += onKeyDown timer.Interval = TimeSpan(0, 0, 0, 0, 500) timer.Tick += onTimer timer.Start() self.Content = canvas #send e key kommand to the server, receive a matrix, #do the drawing def onKeyDown(sender as object, e as KeyEventArgs): if e.Key == Key.P: paused = not paused if paused: textblock2.Text = "...PAUSED..." else: textblock2.Text = "" if paused: return if e.Key == Key.Left: tserv.move_left() elif e.Key == Key.Right: tserv.move_right() elif e.Key == Key.Up: tserv.rotate() elif e.Key == Key.Down: tserv.fall(3) doDraw() def onTimer(sender as object, e as EventArgs): if paused: return tserv.fall_auto() doDraw() def doDraw(): matr = tserv.frame() for i in range(len(matr, 0)): for j in range(len(matr, 1)): leds[i, j].Fill = SolidColorBrush(gColors[matr[i, j]]) textblock.Text = "Lines: " + tserv.cleared_lines().ToString() if tserv.full(): canvas.KeyDown -= onKeyDown timer.Tick -= onTimer textblock2.Text = "...FINISH..." 
class MyApp(Application): mp = MyPage() def constructor(): Startup += onStartup def onStartup(sender, e): self.RootVisual = mp ######## Tetris stuff ######## def rotate_cc(m as (int, 2)): """ Rotates matrix 90 degrees counter-clockwise """ m_new as (int, 2) = matrix(int, len(m, 1), len(m, 0)) i_n = -1 for j in range(len(m, 1)-1, -1, -1): i_n += 1 j_n = -1 for i in range(len(m, 0)): j_n += 1 m_new[i_n, j_n] = m[i, j] return m_new def does_collide(m_parent as (int, 2), m_child as (int, 2), c_row as int, c_col as int): for i in range(len(m_child, 0)): for j in range(len(m_child, 1)): ni, nj = i+c_row, j+c_col if (0 <= ni) and (ni < len(m_parent, 0)) and (0 <= nj) and (nj < len(m_parent, 1)): if m_child[i, j] != 0 and m_parent[ni, nj] != 0: return true return false def matr_combine(m_parent as (int, 2), m_child as (int, 2), c_row as int, c_col as int): m_new = matr_copy(m_parent) for i in range(len(m_child, 0)): for j in range(len(m_child, 1)): if m_child[i, j] > 0: m_new[i+c_row, j+c_col] = m_child[i, j] return m_new def matr_copy(src as (int, 2)): dst = matrix(int, len(src, 0), len(src, 1)) for i in range(len(src, 0)): for j in range(len(src, 1)): dst[i, j] = src[i, j] return dst class Figure: public matr as (int, 2) public row as int public col as int glass as Glass public settled = false def constructor(m as (int, 2), g as Glass, r as int, c as int): self.glass = g //reference matr = matr_copy(m) row = r col = c def rotate(): matr_new = rotate_cc(matr) if not does_collide(glass.matr, matr_new, row, col): self.matr = matr_new def drop(): if settled: return if not does_collide(glass.matr, self.matr, row+1, col): row += 1 else: settled = true glass.settle(self) def move_left(): if not does_collide(glass.matr, self.matr, row, col-1): col -= 1 def move_right(): if not does_collide(glass.matr, self.matr, row, col+1): col += 1 class Glass: public matr as (int, 2) twall as int = 1 padding as int = 1 public full = false width as int height as int public cleared as int = 0 
def constructor(height as int, width as int): self.width = width self.height = height matr = matrix(int, padding+height+twall+padding, padding+twall+width+twall+padding) for i in range(padding, padding+height): matr[i, padding] = 1 matr[i, padding + twall + width] = 1 //? for i in range(padding, padding+width+twall+1): matr[padding+height, i] = 1 def is_row_full(n_row): for i in range(padding+twall, padding+twall+width): if matr[n_row, i] == 0: return false return true def is_row_empty(n_row): for i in range(padding+twall, padding+twall+width): if matr[n_row, i] != 0: return false return true def destroy_row(n_row): for i in range(n_row, padding, -1): for j in range(padding+twall, padding+twall+width): matr[i, j] = matr[i-1, j] def compact(): for i in range(padding+height): if is_row_full(i): destroy_row(i) cleared += 1 if not(is_row_empty(padding)): full = true def settle(fig as Figure): matr = matr_combine(matr, fig.matr, fig.row, fig.col) compact() class TetServer: public glass as Glass public figure as Figure falling_speed = 0.5 falling_acc = 0.0 rnd = Random() #don't know how to declare the type of this list, so use this 'hack' #to make type inference do the job... fmatrices = [matrix(int, 3, 4)] def full(): return glass.full def constructor(): glass = Glass(20, 10) ###### creating matrices for the figures ###### setting elements is rather tedious - are there literals for 2d arrays? 
// I figure i = matrix(int, 3, 4) for j in range(4): i[1, j] = 2 fmatrices[0] = i // L figure i = matrix(int, 4, 5) i[1, 1] = 3 i[1, 2] = 3 i[1, 3] = 3 i[2, 1] = 3 fmatrices.Add(i) // J figure i = matrix(int, 4, 5) i[1, 1] = 4 i[1, 2] = 4 i[1, 3] = 4 i[2, 3] = 4 fmatrices.Add(i) // O figure i = matrix(int, 4, 4) i[1, 1] = 5 i[1, 2] = 5 i[2, 1] = 5 i[2, 2] = 5 fmatrices.Add(i) // S figure i = matrix(int, 4, 5) i[1, 2] = 6 i[1, 3] = 6 i[2, 1] = 6 i[2, 2] = 6 fmatrices.Add(i) // T figure i = matrix(int, 5, 5) i[1, 2] = 7 i[2, 1] = 7 i[2, 2] = 7 i[2, 3] = 7 fmatrices.Add(i) // Z figure i = matrix(int, 4, 5) i[1, 1] = 8 i[1, 2] = 8 i[2, 2] = 8 i[2, 3] = 8 fmatrices.Add(i) next_figure() def cleared_lines(): return glass.cleared def frame(): return matr_combine(glass.matr, figure.matr, figure.row, figure.col) def next_figure(): i = rnd.Next(len(fmatrices)) figure = Figure(fmatrices[i], glass, 0, 5) def fall_auto(): if figure.settled: next_figure() falling_acc += falling_speed if falling_acc >= 1.0: falling_acc = 0.0 figure.drop() def fall(amount as int): for i in range(amount): figure.drop() def move_left(): figure.move_left() def move_right(): figure.move_right() def rotate(): figure.rotate() nant-0.9.5~git20110729.r1.202a430/examples/svnutils/000077500000000000000000000000001161462365500211465ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/examples/svnutils/default.build000077500000000000000000000023011161462365500236120ustar00rootroot00000000000000 nant-0.9.5~git20110729.r1.202a430/examples/svnutils/src/000077500000000000000000000000001161462365500217355ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/examples/svnutils/src/svn.boo000077500000000000000000000057511161462365500232570ustar00rootroot00000000000000#region license // Copyright (c) 2003, 2004, 2005 Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. 
// // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#endregion """ svn utility functions """ import System import System.IO class ResourceStatus: static def parse(line as string): parts = /\s+/.Split(line.Trim(), 2) return ResourceStatus(code: parts[0], resource: parts[1]) public code as string public resource as string override def ToString(): return "${code}\t${resource}" def svn_status(resource as string): return parse_status(shell("svn", "status ${resource}")) def parse_status(status as string): for line in lines(status): yield ResourceStatus.parse(line) def svn_pg(resource as string, propertyName as string): return shell("svn", "pg ${propertyName} ${resource}") def svn_ps(resource as string, propertyName as string, propertyValue as string): tempFile = ".svn_ignore" File.WriteAllText(tempFile, propertyValue) try: return shell("svn", "ps ${propertyName} --file \"${tempFile}\" ${resource}") ensure: File.Delete(tempFile) def svn_locals(resource as string): return ( status.resource for status in svn_status(resource) if status.code == "?") def svn_ignore(resource as string, whatToIgnore as string): current = svn_pg(resource, "svn:ignore") print svn_ps(resource, "svn:ignore", current.Trim() + "\n" + whatToIgnore) def lines(s as string): return line.Trim() for line in /(\r?\n)+/.Split(s) if len(line.Trim()) def confirm(message as string): return "y" == prompt("${message} (y/n): ") nant-0.9.5~git20110729.r1.202a430/examples/svnutils/src/svn_changed.boo000077500000000000000000000034271161462365500247260ustar00rootroot00000000000000#region license // Copyright (c) 2003, 2004, 2005 Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. 
// * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion """ Print all modified resources in the current repository. """ for status in svn_status("."): print status.resource if status.code in ("M", "A", "C", "D") nant-0.9.5~git20110729.r1.202a430/examples/svnutils/src/svn_ignore.boo000077500000000000000000000033341161462365500246150ustar00rootroot00000000000000#region license // Copyright (c) 2003, 2004, 2005 Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. 
// * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion """ Adds a line to svn:ignore. """ resource, whatToIgnore = argv svn_ignore(resource, whatToIgnore) nant-0.9.5~git20110729.r1.202a430/examples/svnutils/src/svn_ignore_all.boo000077500000000000000000000036761161462365500254560ustar00rootroot00000000000000#region license // Copyright (c) 2003, 2004, 2005 Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. 
// * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion """ Iterate through every non svn controlled resource asking if it should be added to svn:ignore. """ import System.IO for resource in svn_locals("."): path = Path.GetDirectoryName(resource).Replace("\\", "/") what = Path.GetFileName(resource) if confirm("ignore '${what}' in path '${path}'?"): svn_ignore(path, what) nant-0.9.5~git20110729.r1.202a430/examples/svnutils/src/svn_locals.boo000077500000000000000000000033301161462365500246030ustar00rootroot00000000000000#region license // Copyright (c) 2003, 2004, 2005 Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. 
// // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endregion """ Print all non svn controlled resources. 
""" for resource in svn_locals("."): print resource nant-0.9.5~git20110729.r1.202a430/extras/000077500000000000000000000000001161462365500167475ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/extras/BooTemplate/000077500000000000000000000000001161462365500211625ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/extras/BooTemplate/default.build000077500000000000000000000035001161462365500236300ustar00rootroot00000000000000 nant-0.9.5~git20110729.r1.202a430/extras/DEBIAN/000077500000000000000000000000001161462365500176715ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/extras/DEBIAN/control000066400000000000000000000014761161462365500213040ustar00rootroot00000000000000Package: boo Priority: extra Section: devel Installed-Size: 3800 Maintainer: Cedric Vivier Architecture: all Version: @VERSION@-1 Depends: mono-runtime,libmono-system2.0-cil,libmono-corlib2.0-cil,libmono-peapi2.0-cil,libmono2.0-cil Suggests: monodevelop-boo Description: Boo Programming Language Boo is an object-oriented statically-typed programming language for the Common Language Infrastructure (CLI) with a Python inspired syntax and a special focus on language and compiler extensibility. This package includes the Boo libraries as well as a compiler (booc), an interpreter (booi) and an interactive shell (booish). The boo compiler and the programs it produces are 100% CIL and can be run on any compliant CLI virtual machine (MS.NET, Mono, ...). 
For more information: http://boo.codehaus.org/ nant-0.9.5~git20110729.r1.202a430/extras/DEBIAN/postinst000077500000000000000000000025121161462365500215020ustar00rootroot00000000000000#!/bin/sh # postinst script for boo # # see: dh_installdeb(1) set -e # summary of how this script can be called: # * `configure' # * `abort-upgrade' # * `abort-remove' `in-favour' # # * `abort-remove' # * `abort-deconfigure' `in-favour' # `removing' # # for details, see http://www.debian.org/doc/debian-policy/ or # the debian-policy package if [ -x /usr/bin/cli-gacutil ]; then GACUTIL="/usr/bin/cli-gacutil" else GACUTIL="/usr/bin/gacutil" fi case "$1" in configure) $GACUTIL -i /usr/lib/boo/Boo.Lang.dll $GACUTIL -i /usr/lib/boo/Boo.Lang.CodeDom.dll $GACUTIL -i /usr/lib/boo/Boo.Lang.Compiler.dll $GACUTIL -i /usr/lib/boo/Boo.Lang.Interpreter.dll $GACUTIL -i /usr/lib/boo/Boo.Lang.Parser.dll $GACUTIL -i /usr/lib/boo/Boo.Lang.Useful.dll $GACUTIL -i /usr/lib/boo/Boo.Lang.Extensions.dll $GACUTIL -i /usr/lib/boo/Boo.Lang.PatternMatching.dll ;; abort-upgrade|abort-remove|abort-deconfigure) ;; *) echo "postinst called with unknown argument \`$1'" >&2 exit 1 ;; esac exit 0 nant-0.9.5~git20110729.r1.202a430/extras/DEBIAN/prerm000077500000000000000000000023431161462365500207460ustar00rootroot00000000000000#!/bin/sh # prerm script for boo # # see: dh_installdeb(1) #set -e #we want to ignore gacutil errors if user manually removed the libs # summary of how this script can be called: # * `remove' # * `upgrade' # * `failed-upgrade' # * `remove' `in-favour' # * `deconfigure' `in-favour' # `removing' # # for details, see http://www.debian.org/doc/debian-policy/ or # the debian-policy package if [ -x /usr/bin/cli-gacutil ]; then GACUTIL="/usr/bin/cli-gacutil" else GACUTIL="/usr/bin/gacutil" fi case "$1" in remove|deconfigure) env $GACUTIL -u Boo.Lang env $GACUTIL -u Boo.Lang.CodeDom env $GACUTIL -u Boo.Lang.Compiler env $GACUTIL -u Boo.Lang.Interpreter env $GACUTIL -u Boo.Lang.Parser env $GACUTIL -u 
Boo.Lang.Useful env $GACUTIL -u Boo.Lang.Extensions env $GACUTIL -u Boo.Lang.PatternMatching ;; upgrade) ;; failed-upgrade) ;; *) echo "prerm called with unknown argument \`$1'" >&2 exit 1 ;; esac exit 0 nant-0.9.5~git20110729.r1.202a430/extras/Gendarme.Rules.Abstract/000077500000000000000000000000001161462365500233245ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/extras/Gendarme.Rules.Abstract/DependenciesMacro.boo000066400000000000000000000110751161462365500274010ustar00rootroot00000000000000// // Gendarme.Rules.Abstract.DependenciesMacro DSL for DependencyCheckingRule // // Authors: // Cedric Vivier // // Copyright (C) 2009 Cedric Vivier // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. 
namespace Gendarme.Rules.Abstract import Boo.Lang.Compiler import Boo.Lang.Compiler.Ast import Boo.Lang.Compiler.TypeSystem import Boo.Lang.Compiler.TypeSystem.Services import Boo.Lang.PatternMatching macro dependencies(name as ReferenceExpression): """ This macro/DSL generates a Gendarme rule that checks if there is disallowed internal dependencies in a project. Example below denies using any BCL type from within type/namespace Foo: dependencies NameOfTheDependencySet: within Foo: deny System Example below denies using classes from namespace Bar within namespace Foo (and its inner namespaces), this means usage of Bar interfaces is still allowed: dependencies NameOfTheDependencySet: within Foo: deny Bar, Class Example below denies using interfaces of Bar namespace within visible members of type/namespace Foo: dependencies NameOfTheDependencySet: within Foo: deny Bar, Interface|Visible """ raise dependencies.Documentation if not len(dependencies.Body.Statements) macro within(reference): """ Macro `within' must contain at least one `deny' or `allow' macro. 
""" raise within.Documentation if not len(within.Body.Statements) macro deny(reference, options as ReferenceExpression*): yield [| yield $(BuildDependencyPermission(NameResolutionService, reference, true, options)) |] macro allow(reference, options as ReferenceExpression*): yield [| yield $(BuildDependencyPermission(NameResolutionService, reference, false, options)) |] nref = GetNormalizedReference(reference) entity = NameResolutionService.ResolveQualifiedName(nref, EntityType.Namespace|EntityType.Type) if not entity or entity.EntityType != EntityType.Namespace: matcher = [| DoesTargetMatchNamespace(CurrentType, typeof($nref).FullName) |] #TODO: MatchType else: #namespace matcher = [| DoesTargetMatchNamespace(CurrentType.FullName, $(nref)) |] yield [| if $matcher: $(within.Body) |] yield [| public class $(name + "DependencyCheckingRule") (DependencyCheckingRule): protected Permissions as DependencyPermission*: override get: $(dependencies.Body) |] internal def GetNormalizedReference(reference as Expression): nref = reference.ToCodeString() //nref = nref.Substring(0, len(nref)-2) if nref.EndsWith('_') return nref internal def BuildDependencyPermission(nre as NameResolutionService, reference as ReferenceExpression, deny as bool, options as ReferenceExpression*): nref = GetNormalizedReference(reference) entity = nre.ResolveQualifiedName(nref, EntityType.Namespace|EntityType.Type) perm = [| DependencyPermission(self) |] if not entity or entity.EntityType != EntityType.Namespace: perm.NamedArguments.Add([| Namespace: typeof($nref).FullName |]) #TODO: Type: else: #namespace perm.NamedArguments.Add([| Namespace: $nref |]) if not deny: perm.NamedArguments.Add([| Deny: false |]) for option in options: if option.Name == "Class": perm.NamedArguments.Add([| Relation: DependencyRelation.Static |]) elif option.Name == "Interface": perm.NamedArguments.Add([| Relation: DependencyRelation.Dynamic |]) elif option.Name == "Visible": perm.NamedArguments.Add([| Visibility: 
DependencyVisibility.Visible |]) elif option.Name == "NonVisible": perm.NamedArguments.Add([| Visibility: DependencyVisibility.NonVisible |]) else: raise "Invalid option: `${option.Name}'" return perm nant-0.9.5~git20110729.r1.202a430/extras/Gendarme.Rules.Abstract/DependencyCheckingRule.boo000066400000000000000000000210341161462365500303670ustar00rootroot00000000000000// // Gendarme.Rules.Abstract.DependencyCheckingRule // // Authors: // Cedric Vivier // // Copyright (C) 2009 Cedric Vivier // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. 
namespace Gendarme.Rules.Abstract import System import Mono.Cecil import Mono.Cecil.Cil import Gendarme.Framework import Gendarme.Framework.Rocks import Gendarme.Framework.Engines import Gendarme.Framework.Helpers [Problem("This type member depends from a type that it is not allowed to depend from.")] [Solution("Refactor method or type so that this dependency is removed.")] [EngineDependency(typeof(OpCodeEngine))] abstract public class DependencyCheckingRule (Rule, ITypeRule): """ This rule checks if a type contains members that are depending on types they are, by design, not allowed to depend from. This rules is abstract and is intended to be inherited by a project-specific rule. Bad example (with static/class dependency to namespace Foo not allowed): public class Doer { public void Do (Foo.Class klass) { } } Good example (with static/class dependency to namespace Foo not allowed): public class Doer { public void Do (Foo.IClass klass) //use interface IClass instead of Class { } } """ protected Permissions as DependencyPermission*: """ Permissions get method is the *only* method inheritors are required to implement. Implementors should use CurrentType or CurrentMember property to return context-dependent permissions. """ abstract get: pass protected HasPermissions as bool: virtual get: return false if not Permissions return Permissions.GetEnumerator().MoveNext() protected virtual def ReportDefect(type as TypeReference, culprit as object) as void: if CurrentMember isa IMethodSignature: msg = "Method signature `${CurrentMember}' is not allowed to reference type `${type}'." elif CurrentMember isa FieldDefinition: msg = "Field `${CurrentMember}` of type ${CurrentMethod}' is not allowed to reference type `${type}'." elif culprit and culprit isa VariableDefinition: msg = "Variable `${culprit}' of method `${CurrentMember}' is not allowed to reference type `${type}'." 
elif culprit and culprit isa Instruction: msg = "Method `${CurrentMember}' is not allowed to call type `${type}'." else: msg = "Type `${CurrentMember}' is not allowed to reference type `${type}'." Runner.Report(Defect(self, CurrentType, culprit or CurrentMember, Severity.High, Confidence.Total, msg)); protected virtual def NeedsChecking(type as TypeDefinition) as bool: return false if type.IsEnum or TypeRocks.IsGeneratedCode(type) #FIXME:resolution bug!? return HasPermissions protected enum DependencyRelation: Any Static //static dependency (class/struct) Dynamic //dynamic dependency (interface) protected enum DependencyVisibility: Any Visible NonVisible protected class DependencyPermission: """ You can inherit this class if you need different or additional behavior. """ [property(Namespace)] #namespace in a general sense (a type is also a namespace) _namespace as string Allow: get: return not _deny set: _deny = false [property(Deny)] #default permission is Deny _deny as bool = true [property(Relation)] _rel as DependencyRelation [property(Visibility)] _vis as DependencyVisibility _rule as DependencyCheckingRule public def constructor(rule as DependencyCheckingRule): _rule = rule public virtual def Match(target as TypeReference, member as IMemberReference) as bool: return false if not DoesTargetMatchNamespace(target.FullName, _namespace) t = target.Resolve() return false if not t //resolution failed return false if _rel == DependencyRelation.Dynamic and not IsDynamic(t) return false if _rel == DependencyRelation.Static and IsDynamic(t) return false if _vis == DependencyVisibility.Visible and not IsVisible(member) return false if _vis == DependencyVisibility.NonVisible and IsVisible(member) return true protected virtual def IsDynamic(type as TypeDefinition) as bool: return type.IsInterface protected virtual def IsVisible(member as IMemberReference) as bool: type = member as TypeReference if type: return TypeRocks.IsVisible(type) #FIXME: NRE FAIL!? 
elif (method = member as MethodReference): return MethodRocks.IsVisible(method) elif (field = member as FieldReference): return FieldRocks.IsVisible(field) return false #variable or call protected virtual def DoesTargetMatchNamespace(target as string, ns as string) as bool: return _rule.DoesTargetMatchNamespace(target, ns) protected virtual def DoesTargetMatchNamespace(target as string, ns as string) as bool: return target.StartsWith(ns) [property(CurrentMember, Protected: true)] _member as IMemberReference _type as TypeDefinition protected CurrentType as TypeDefinition: get: return _type private set: _member = value _type = value _method as MethodDefinition protected CurrentMethod as MethodDefinition: get: return _method private set: _member = value _method = value public def CheckType(type as TypeDefinition) as RuleResult: CurrentType = type if not NeedsChecking(type): return RuleResult.DoesNotApply CheckBaseTypes() CheckFields() if type.HasFields CheckMethods() if type.HasMethods return Runner.CurrentRuleResult def CheckPermissionsChain(type as TypeReference) as bool: return CheckPermissionsChain[of IMetadataTokenProvider](type, null) def CheckPermissionsChain[of T(class)](type as TypeReference, culprit as T) as bool: return if not type for perm in Permissions: if perm.Match(type, CurrentMember): if perm.Deny: ReportDefect(type, culprit) return false break return true def CheckBaseTypes(): for iface as TypeReference in _type.Interfaces: CheckPermissionsChain(iface) CheckPermissionsChain(_type.BaseType) def CheckFields(): for field as FieldDefinition in _type.Fields: CurrentMember = field CheckPermissionsChain(field.FieldType) def CheckMethods(): for method as MethodDefinition in _type.Methods: CheckMethod(method) def CheckMethod(method as MethodDefinition): CurrentMethod = method if CheckMethodSignature() and method.HasBody: #only run Variables checks if method not FAIL already if CheckMethodVariables(method.Body): #only run IL-level checks if method not FAIL 
already CheckMethodBody(method.Body) def CheckMethodSignature() as bool: valid = true valid &= CheckPermissionsChain(_method.ReturnType.ReturnType, _method.ReturnType) for p as ParameterDefinition in _method.Parameters: valid &= CheckPermissionsChain(p.ParameterType, p) return valid def CheckMethodVariables(body as MethodBody) as bool: return if not body.HasVariables valid = true for variable as VariableReference in body.Variables: valid &= CheckPermissionsChain(variable.VariableType, variable) return valid def CheckMethodBody(body as MethodBody): #TODO: FIXME: option to ignore body? return if not callsAndNewobjOpCodeBitmask.Intersect(OpCodeEngine.GetBitmask(CurrentMethod)) valid = true for ins as Instruction in body.Instructions: continue if not callsAndNewobjOpCodeBitmask.Get(ins.OpCode.Code) valid &= CheckPermissionsChain(cast(MethodReference, ins.Operand).DeclaringType, ins) return valid static final callsAndNewobjOpCodeBitmask = OpCodeBitmask(0x8000000000, 0x4400000000000, 0x0, 0x0) """ OpCodeBitmask mask = new OpCodeBitmask (); mask.UnionWith (OpCodeBitmask.Calls); mask.Set (Code.Newobj); return mask; """ nant-0.9.5~git20110729.r1.202a430/extras/Gendarme.Rules.Boo/000077500000000000000000000000001161462365500223005ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/extras/Gendarme.Rules.Boo/BooDependenciesCheckingRule.boo000066400000000000000000000005671161462365500303230ustar00rootroot00000000000000""" DSL-representation of disallowed dependencies within Boo project. Checking is done by running permissions chains in order (iptables-like). 
""" namespace Gendarme.Rules.Boo import Gendarme.Rules.Abstract dependencies Boo: within Boo.Lang.Compiler.Ast: allow Boo.Lang.Compiler.TypeSystem, Interface, NonVisible deny Boo.Lang.Compiler.TypeSystem #deny anything else nant-0.9.5~git20110729.r1.202a430/extras/Makefile.am000066400000000000000000000006701161462365500210060ustar00rootroot00000000000000pkgconfigdir = $(libdir)/pkgconfig pkgconfig_DATA = boo.pc gtksourceviewdir = $(GTKSOURCEVIEW_PREFIX)/share/gtksourceview-1.0/language-specs/ gtksourceview_DATA = boo.lang mimepackagesdir = $(MIME_PREFIX)/share/mime/packages/ mimepackages_DATA = boo-mime-info.xml bin_SCRIPTS = booc booi booish DISTCLEANFILES = booc booi booish boo.pc EXTRA_DIST = $(pkgconfig_DATA) $(gtksourceview_DATA) $(mimeinfo_DATA) \ $(mimepackages_DATA) nant-0.9.5~git20110729.r1.202a430/extras/SharpDevelop/000077500000000000000000000000001161462365500213435ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/extras/SharpDevelop/BooBinding/000077500000000000000000000000001161462365500233555ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/extras/SharpDevelop/BooBinding/Boo.Empty.xft000066400000000000000000000007241161462365500257170ustar00rootroot00000000000000 nant-0.9.5~git20110729.r1.202a430/extras/SharpDevelop/BooBinding/Boo.EmptyClass.xft000066400000000000000000000022231161462365500267010ustar00rootroot00000000000000 nant-0.9.5~git20110729.r1.202a430/extras/SharpDevelop/BooBinding/Boo.Form.xft000066400000000000000000000027471161462365500255330ustar00rootroot00000000000000 nant-0.9.5~git20110729.r1.202a430/extras/SharpDevelop/BooBinding/Boo.xshd000066400000000000000000000214201161462365500247630ustar00rootroot00000000000000 &<>~!@$%^*()-+=|\#/{}[]:;"' , .? """ """ # // /* */ " " """ """ ' ' @@/ / /@! @@!/@ / ( &<>~!@%^*()-+=|\#/{}[]:;"' , .? /* */ &<>~!@%^*()-+=|\#/{}[]:;"' , .? /* */ &<>~!@%^*()-+=|\#/{}[]:;"' , .? 
${ } nant-0.9.5~git20110729.r1.202a430/extras/SharpDevelop/BooBinding/BooBinding.addin000066400000000000000000000062451161462365500263770ustar00rootroot00000000000000 nant-0.9.5~git20110729.r1.202a430/extras/SharpDevelop/BooBinding/BooBinding.cmbx000066400000000000000000000011121161462365500262350ustar00rootroot00000000000000 nant-0.9.5~git20110729.r1.202a430/extras/SharpDevelop/BooBinding/ConsoleProject.xpt000066400000000000000000000016231161462365500270450ustar00rootroot00000000000000 nant-0.9.5~git20110729.r1.202a430/extras/SharpDevelop/BooBinding/FormsProject.xpt000066400000000000000000000026051161462365500265320ustar00rootroot00000000000000 nant-0.9.5~git20110729.r1.202a430/extras/SharpDevelop/BooBinding/Library.xpt000066400000000000000000000020341161462365500255150ustar00rootroot00000000000000 nant-0.9.5~git20110729.r1.202a430/extras/SharpDevelop/BooBinding/default.build000066400000000000000000000111431161462365500260220ustar00rootroot00000000000000 nant-0.9.5~git20110729.r1.202a430/extras/SharpDevelop/BooBinding/icons/000077500000000000000000000000001161462365500244705ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/extras/SharpDevelop/BooBinding/icons/BitmapResources.txtres000066400000000000000000000004521161462365500310530ustar00rootroot00000000000000Boo.File.EmptyFile = Boo.File.EmptyFile.png Boo.File.Form = Boo.File.Form.png Boo.FileIcon = Boo.FileIcon.png Boo.Project.EmptyProject = Boo.Project.EmptyProject.png Boo.Project.Form = Boo.Project.Form.png Boo.ProjectIcon = Boo.ProjectIcon.png nant-0.9.5~git20110729.r1.202a430/extras/SharpDevelop/BooBinding/icons/Boo.File.EmptyFile.png000066400000000000000000000031501161462365500304670ustar00rootroot00000000000000PNG  IHDR szzbKGD pHYsnn [tIME "۝IDATXý[lUg֪RCp@-BS᥀& x_D x|0F1EKjIe!la̞]L}QUΜ9X,}@WW[ly ٶmXl6 cc0 U>UQU$+( sNRԆz{{2\TUb1b |\vsGBOdeS[yvq"D' ǏgڵcGΝ;78<<3\U` 4+vӑ(KmG_}}/LVOUUd`f "?rEloA *>DI477S__O*5H__?==$Ib!96,9@ߊ$oܵqX?g=lU{W.T kDI7o---^%KSSS뿘h"ib~:ӝ$ZxZqe-̍n`4?Je`8J 
1K@J}ga13&`ӉM4TMg^͝xx;n=ߜ=`CASՏ+'I# 2QT6N3;F7cHHYWN*MGn~t$2Q6z9Pf,S52]1~y"ס j)Be4@}dA’Շ?rEN-_H?,g˻ {W/yEx=ɴ[ȬI0Pq>׹\>,C H} ;fj1}ƭշ' fE+dA\ И YQ6~v9 &Dy(n)=aE.~v"6tgw Se((ׯ; uFگ/4xe5aa_r7͂RԂyƳ7j)+ nR%,? V`@D­ԋ[{[4rπg|J\HJɏْ7H9Ci/Ǜ?s6m6IENDB`nant-0.9.5~git20110729.r1.202a430/extras/SharpDevelop/BooBinding/icons/Boo.File.Form.png000066400000000000000000000032401161462365500274740ustar00rootroot00000000000000PNG  IHDR szzbKGD pHYsnn [tIME $qy-IDATXý]pT6I,8 .03L$D /38^TX ިw:L~ a(P>lCNBnt?ߧcfNs}9#J0ԥK_zb/{*@r%"" ,j \PxsXCwBGFF@   "rY l3?%/nŕ/n5JyۋeׯfffHsUP(LG然~śɢqc#WU$ lݺ]V$yq-7s|-sԉPUl~†T)R)UUUo N[X췑H22le]:w?`P.^H__LBpm,X` c?칥cڱƆ90mmm4662<>WU2rOUy|I.;}</wx[ދZ[I$&mBb$i+: "dT.M~s^&K:3BOU/;:f ;ىZ2eiDH24k ?ټzkZb,#nEsm̑t:[履|Vqcó?Ҽc玊Xu*m`Ly wc}=,ohܚƭ2F'#k=TGѼ3bgyMS)_\T#~ن ЅuO)uuLM%r<ֹe7{DF]`h*i(gA]]-M>\PCg&:;sݰz*::6 97c\UKR0E/4>kxv4"jF~5,ܯfU2+bK,g^eU"ۍR^S "J8lۗbcc6ƫwƔDj[ӞD0HFS@M4WF˫ \s\ _QOIENDB`nant-0.9.5~git20110729.r1.202a430/extras/SharpDevelop/BooBinding/icons/Boo.FileIcon.png000066400000000000000000000012571161462365500274110ustar00rootroot00000000000000PNG  IHDRabKGD pHYsss"tIME , ZO/{~JrD5tHkЛDD@lkƣkh[5*;+GP<u|xAY @܌SWCX7ZB<3w-E!r| p,&][*>oC ?+ݕZ2Mx ^Ç r`nPq\(nTLqct?TYbT q78*0- +l)lK<ʝ^bx&ZcIL.4l2΅w x#c`9|V'3@[[4;&%P[e'2~I?B@D#GEKs51 bdvle*vn 7+bd.d;z#ō13A}  ,,kOb;%~? 57&XIENDB`nant-0.9.5~git20110729.r1.202a430/extras/SharpDevelop/BooBinding/icons/Boo.Project.EmptyProject.png000066400000000000000000000021511161462365500317450ustar00rootroot00000000000000PNG  IHDR szzbKGD pHYsnnhtIME  C IDATXOlTUo:%eSjm A:D#I@Mj!e&$+ Ic]VqIա$Ct2u:sq޼yә xܗ{sše53Y]:^ʿz3#79"*:+ lYky%pW6}((ˁs 8f8U`)"j R7T"7RQQ]V 惫'qy>S7 ^OUJ 5ԘNAx/t?7qUELyv4~v' 1o.b>"6[`ESp]!ett. 
~'g$ũpR FHi3/N|݂L ^q,z 9\6Ñxf<7 Q03 hm8~0|g6u+뙓4s,lIe~x/#iD!zrY'39s̱*oNP(EWWfoELHU$3I*շ>j&~/ΥK͎jZ;Nn憚xaJ ڣҗ:۩..޺@d HA**P~+N1EE3;[5652!e1Θok#}&RD䵓Ē1\ΔwL37^Es;[ =/׽kB5ٕn3pxiLD9| Uc=~/["Ltr4W5s"qѿFt gJĄQx`/]%;e~̷/L&W} [/*©dk#)e1 {GN8ZrXf߸T2IENDB`nant-0.9.5~git20110729.r1.202a430/extras/SharpDevelop/BooBinding/icons/Boo.Project.Form.png000066400000000000000000000021561161462365500302300ustar00rootroot00000000000000PNG  IHDR szzbKGD pHYsnnhtIME #IDATXQh[e7F4ղ3v \fѡsnc(+RK0U EºFغ4m6t1&iҦm2q}ǽs|g*jIZ()[%e^UI ;E`b=K 拏öD"%0OP``\fO%`*eJb*DɚWĈ31bu5n,sA$uc׏q{vZV-<K+5u4{>rsܿS?;VFtdw,f^ 8צ}zE@갼ڢ,)TWV8ڷJ[wlnڌ9HE&+;>\t#f^pjP#T"G7|k|\ġC}Fa7ȨTL"d*)'UED6 vba5L %<lC#?mPݦ>ީ_O(ܮ1zC[t g:x[1# `x{f_ag8 4[]KHtP/ U llVbKosE\|xlDh4h2Fv=AbTcP"{LD͝<ĉIl)޽ڢ[sj@3|VUԘETQ6s-k&1j&Iㅼt;r&`0_6w4?4jً n[6nJGi>f>R$њ'jI tOw;!6@LtTr=g$E``Ũb_xqNORYT׾_}}x\NTEe jĠJ0R3UaL*"XhuuuwDx2Ԍ-Xjc>;9I*\|r0#˶ `*H]pJkIENDB`nant-0.9.5~git20110729.r1.202a430/extras/SharpDevelop/BooBinding/icons/Boo.ProjectIcon.png000066400000000000000000000011741161462365500301360ustar00rootroot00000000000000PNG  IHDRh6 pHYs  tIME P(IDATxڍKa}|{ b%u]"V!a!RSϝ:th:I]\`+.ホvx[qYa 3# !UKjūBd3jSS taiRHd T_X "l'zջcȳ/f/sO*'VaznH' ?;,;*Hns[|s$ n׎5 ̈́$Y-ron1f5X CP_="Uj+tpHG Iu UK}I HH 1od{r0 9ӜVЦF+љ36- X ?nS}P~.*G"6Ž^8` \d" kqܓibOzc_͓B^T_*1{a=I"aD,J Մ5joZf _رIENDB`nant-0.9.5~git20110729.r1.202a430/extras/SharpDevelop/BooBinding/icons/ResAsm.cs000066400000000000000000000246131161462365500262170ustar00rootroot00000000000000// ResAsm.cs // Copyright (c) 2001 Mike Krueger // // This program is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA using System; using System.Collections; using System.Drawing; using System.Resources; using System.IO; using System.Text; using System.Drawing.Imaging; using System.Windows.Forms; using System.Runtime.Serialization.Formatters.Binary; /// /// This tool is written for SharpDevelop to have a runtime independend /// format for Resource Files. Microsoft did break the resx and resource /// file format during the beta, this tool helped a lot to translate the /// resource files from version to version. /// Could be used in any .NET project to provide a runtime independend /// format (useful for translating resources between mono and ms .net) /// public class ResAsm { /// /// Builds ResAsm files out of resource files /// static void Disassemble(string pattern) { string[] files = Directory.GetFiles(Directory.GetCurrentDirectory(), pattern); foreach (string file in files) { Hashtable resources = new Hashtable(); int length = 0; // read resource files into the hashtable switch (Path.GetExtension(file).ToUpper()) { case ".RESX": ResXResourceReader rx = new ResXResourceReader(file); IDictionaryEnumerator n = rx.GetEnumerator(); while (n.MoveNext()) if (!resources.ContainsKey(n.Key)) { length = Math.Max(length, n.Key.ToString().Length); resources.Add(n.Key, n.Value); } rx.Close(); break; case ".RESOURCES": ResourceReader rr = new ResourceReader(file); foreach (DictionaryEntry entry in rr) { if (!resources.ContainsKey(entry.Key)) { length = Math.Max(length, entry.Key.ToString().Length); resources.Add(entry.Key, entry.Value); } } rr.Close(); break; } // write the hashtable to the resource file string fname = Path.GetFileNameWithoutExtension(file); string path = fname + "-data"; StreamWriter writer = File.CreateText(fname 
+ ".res"); writer.Write("# this file was automatically generated by ResAsm\r\n\r\n"); foreach (DictionaryEntry entry in resources) { // strings are put directly into the resasm format if (entry.Value is string) { writer.Write(entry.Key.ToString() + "=\"" + ConvertIllegalChars(entry.Value.ToString()) + "\"\r\n"); } else { // all other files are referenced as a file and the filename // is saved in the resasm format, the files need to be generated. string extension = ""; string outputname = path + '\\' + entry.Key.ToString(); if (entry.Value is Icon) { extension = ".ico"; if (!Directory.Exists(path)) Directory.CreateDirectory(path); ((Icon)entry.Value).Save(File.Create(outputname + extension)); } else if (entry.Value is Image) { // all bitmaps are saved in the png format extension = ".png"; if (!Directory.Exists(path)) Directory.CreateDirectory(path); ((Image)entry.Value).Save(outputname + extension, ImageFormat.Png); } else { Console.WriteLine("can't save " + entry.Key + " unknown format."); continue; } writer.Write(entry.Key.ToString().PadRight(length) + " = " + outputname + extension + "\r\n"); } } writer.Close(); } } static string ConvertIllegalChars(string str) { StringBuilder newString = new StringBuilder(); for (int i = 0; i < str.Length; ++i) { switch (str[i]) { case '\r': break; case '\n': newString.Append("\\n"); break; case '"': newString.Append("\\\""); break; case '\\': newString.Append("\\\\"); break; default: newString.Append(str[i]); break; } } return newString.ToString(); } /// /// Builds resource files out of the ResAsm format /// static int Assemble(string pattern) { string[] files = Directory.GetFiles(Directory.GetCurrentDirectory(), pattern); if (files.Length == 0) { Console.WriteLine("'" + pattern + "' not found."); return 1; } int linenr = 0; foreach (string file in files) { try { StreamReader reader = new StreamReader(file, new UTF8Encoding()); string resfilename = Path.GetFileNameWithoutExtension(file) + ".resources"; ResourceWriter rw = new 
ResourceWriter(resfilename); linenr = 0; while (true) { string line = reader.ReadLine(); linenr++; if (line == null) { break; } line = line.Trim(); // skip empty or comment lines if (line.Length == 0 || line[0] == '#') { continue; } // search for a = char int idx = line.IndexOf('='); if (idx < 0) { Console.WriteLine("error in file " + file + " at line " + linenr); return 1; } string key = line.Substring(0, idx).Trim(); string val = line.Substring(idx + 1).Trim(); object entryval = null; if (val[0] == '"') { // case 1 : string value val = val.Trim(new char[] {'"'}); StringBuilder tmp = new StringBuilder(); for (int i = 0; i < val.Length; ++i) { switch (val[i]) { // handle the \ char case '\\': ++i; if (i < val.Length) switch (val[i]) { case '\\': tmp.Append('\\'); break; case 'n': tmp.Append('\n'); break; case '\"': tmp.Append('\"'); break; } break; default: tmp.Append(val[i]); break; } } entryval = tmp.ToString(); } else { // case 2 : no string value -> load resource entryval = LoadResource(val); } rw.AddResource(key, entryval); } rw.Generate(); rw.Close(); reader.Close(); } catch (Exception e) { Console.WriteLine("Error in line " + linenr); Console.WriteLine("Error while processing " + file + " :"); Console.WriteLine(e.ToString()); return 1; } } return 0; } /// /// Loads a file. 
/// /// /// An object representation of the file (for a bitmap a Bitmap, /// for a Icon an Icon and so on), the fall back is a byte array /// static object LoadResource(string name) { switch (Path.GetExtension(name).ToUpper()) { case ".CUR": return new Cursor(name); case ".ICO": return new Icon(name); default: // try to read a bitmap try { return new Bitmap(name); } catch {} // try to read a serialized object try { Stream r = File.Open(name, FileMode.Open); try { BinaryFormatter c = new BinaryFormatter(); object o = c.Deserialize(r); r.Close(); return o; } catch { r.Close(); } } catch { } // finally try to read a byte array try { FileStream s = new FileStream(name, FileMode.Open); BinaryReader r = new BinaryReader(s); Byte[] d = new Byte[(int) s.Length]; d = r.ReadBytes((int) s.Length); s.Close(); return d; } catch (Exception e) { MessageBox.Show(e.Message, "Can't load resource", MessageBoxButtons.OK); } break; } return null; } /// /// Prints help about the ResAsm format. /// static void ShowInfo() { Console.WriteLine("This tool converts .resource or .resx files to an own format, it helps to"); Console.WriteLine("port these files to a new .NET version."); Console.WriteLine("It can also be used as a resource assembler.\n"); Console.WriteLine("INPUT FORMAT OF THE TEXT FILE :\n"); Console.WriteLine(" #"); Console.WriteLine(" =\n"); Console.WriteLine(" can be :"); Console.WriteLine(" = \"\""); Console.WriteLine(" OR"); Console.WriteLine(" = \n"); Console.WriteLine("The file can be : A Bitmap, Icon, Cursor or serialized object, otherwise"); Console.WriteLine("the file will be interpreted as an byte array"); } /// /// Prints help about the command line arguments. /// static void ShowHelp() { Console.WriteLine(".NET Resource Assembler Version 0.1"); Console.WriteLine("Copyright (C) Mike Krueger 2001. 
Released under GPL.\n"); Console.WriteLine(" Resource Assembler Options\n"); Console.WriteLine(" - INPUT FILES -"); Console.WriteLine("/d: Disassembles the .resource or .resx files and generates"); Console.WriteLine(" .res files."); Console.WriteLine(" - HELP -"); Console.WriteLine("/? Displays this help."); Console.WriteLine("/INFO Gives additional information about ResAsm."); } /// /// The main function. /// public static int Main(string[] args) { if (args.Length == 0) { ShowHelp(); } // try to find help or info params foreach (string param in args) { string par = param.ToUpper(); if (par == "/INFO") { ShowInfo(); return 0; } if (par == "/?" || par == "/H" || par== "-?" || par == "-H" || par == "?") { ShowHelp(); return 0; } } // no help or info param found, goto the action foreach (string param in args) { if (param.StartsWith("/d:")) { Disassemble(param.Substring(3)); } else { int res = Assemble(param); if (res != 0) return res; } } return 0; } } nant-0.9.5~git20110729.r1.202a430/extras/SharpDevelop/BooBinding/src/000077500000000000000000000000001161462365500241445ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/extras/SharpDevelop/BooBinding/src/AssemblyInfo.boo000066400000000000000000000024771161462365500272520ustar00rootroot00000000000000#region license // Copyright (c) 2004, Daniel Grunwald (daniel@danielgrunwald.de) // All rights reserved. // // BooBinding is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. // // BooBinding is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. 
// // You should have received a copy of the GNU General Public License // along with BooBinding; if not, write to the Free Software // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA #endregion namespace BooBinding import System import System.Reflection import System.Runtime.CompilerServices [assembly: AssemblyTitle("BooBinding")] [assembly: AssemblyDescription("Boo language binding for #develop")] [assembly: AssemblyCompany("www.danielgrunwald.de")] [assembly: AssemblyProduct("SharpDevelop")] [assembly: AssemblyCopyright("(c) 2004 Daniel Grunwald")] [assembly: AssemblyVersion("2.0.9.5")] [assembly: AssemblyDelaySign(false)] [assembly: AssemblyKeyFile("")] nant-0.9.5~git20110729.r1.202a430/extras/SharpDevelop/BooBinding/src/BooAmbience.boo000066400000000000000000000314311161462365500270120ustar00rootroot00000000000000#region license // Copyright (c) 2004, Daniel Grunwald (daniel@danielgrunwald.de) // All rights reserved. // // BooBinding is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. // // BooBinding is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. 
// // You should have received a copy of the GNU General Public License // along with BooBinding; if not, write to the Free Software // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA #endregion namespace BooBinding import System import System.Collections import System.Text import SharpDevelop.Internal.Parser import ICSharpCode.SharpDevelop.Services import ICSharpCode.Core.Properties import ICSharpCode.Core.Services class BooAmbience(AbstractAmbience): [Getter(TypeConversionTable)] static _typeConversionTable = { 'System.Void' : 'void', 'System.Object' : 'object', 'System.Boolean' : 'bool', 'System.Byte' : 'byte', 'System.SByte' : 'sbyte', 'System.Char' : 'char', //'System.Enum' : 'enum', 'System.Int16' : 'short', 'System.Int32' : 'int', 'System.Int64' : 'long', 'System.UInt16' : 'ushort', 'System.UInt32' : 'uint', 'System.UInt64' : 'ulong', 'System.Single' : 'single', 'System.Double' : 'double', 'System.Decimal' : 'decimal', 'System.String' : 'string', 'System.DateTime' : 'date', 'System.TimeSpan' : 'timespan', 'System.Type' : 'type', 'System.Array' : 'array', 'System.Text.RegularExpressions.Regex' : 'regex' } static _reverseTypeConversionTable as Hashtable static ReverseTypeConversionTable: get: if _reverseTypeConversionTable == null: _reverseTypeConversionTable = Hashtable() for e as DictionaryEntry in _typeConversionTable: _reverseTypeConversionTable.Add(e.Value, e.Key) return _reverseTypeConversionTable private def ModifierIsSet(modifier as ModifierEnum, query as ModifierEnum) as bool: return (modifier & query) == query override def Convert(modifier as ModifierEnum) as string: if ShowAccessibility: if ModifierIsSet(modifier, ModifierEnum.Public): return 'public ' elif ModifierIsSet(modifier, ModifierEnum.Private): return 'private ' elif ModifierIsSet(modifier, ModifierEnum.ProtectedAndInternal): return 'protected internal ' elif ModifierIsSet(modifier, ModifierEnum.ProtectedOrInternal): return 'internal protected ' elif 
ModifierIsSet(modifier, ModifierEnum.Internal): return 'internal ' elif ModifierIsSet(modifier, ModifierEnum.Protected): return 'protected ' return '' private def GetModifier(decoration as IDecoration) as string: ret as string = '' if IncludeHTMLMarkup: ret += '' if decoration.IsStatic: ret += 'static ' elif decoration.IsFinal: ret += 'final ' elif decoration.IsVirtual: ret += 'virtual ' elif decoration.IsOverride: ret += 'override ' elif decoration.IsNew: ret += 'new ' if IncludeHTMLMarkup: ret += '' return ret override def Convert(c as IClass) as string: builder as StringBuilder = StringBuilder() builder.Append(Convert(c.Modifiers)) if IncludeHTMLMarkup: builder.Append('') cType = c.ClassType if ShowModifiers: if c.IsSealed: if cType == ClassType.Delegate or cType == ClassType.Enum: pass else: builder.Append('final ') elif c.IsAbstract and cType != ClassType.Interface: builder.Append('abstract ') if IncludeHTMLMarkup: builder.Append('') if ShowModifiers: if cType == ClassType.Delegate: builder.Append('callable ') elif cType == ClassType.Class: builder.Append('class ') elif cType == ClassType.Struct: builder.Append('struct ') elif cType == ClassType.Interface: builder.Append('interface ') elif cType == ClassType.Enum: builder.Append('enum ') if cType == ClassType.Delegate and c.Methods.Count > 0: for m as IMethod in c.Methods: if m.Name == 'Invoke': builder.Append(Convert(m.ReturnType)) builder.Append(' ') if IncludeHTMLMarkup: builder.Append('') if UseFullyQualifiedMemberNames: builder.Append(c.FullyQualifiedName) else: builder.Append(c.Name) if IncludeHTMLMarkup: builder.Append('') if c.ClassType == ClassType.Delegate: builder.Append(' (') if IncludeHTMLMarkup: builder.Append('
') for m as IMethod in c.Methods: if m.Name == 'Invoke': for i in range(m.Parameters.Count): if IncludeHTMLMarkup: builder.Append('   ') builder.Append(Convert(m.Parameters[i])) if i + 1 < m.Parameters.Count: builder.Append(', ') if IncludeHTMLMarkup: builder.Append('
') builder.Append(Char.Parse(')')) elif ShowInheritanceList: if c.BaseTypes.Count > 0: builder.Append('(') for i in range(c.BaseTypes.Count): builder.Append(c.BaseTypes[i]) if i + 1 < c.BaseTypes.Count: builder.Append(', ') builder.Append(')') if IncludeBodies: builder.Append(':\n') return builder.ToString() override def ConvertEnd(c as IClass) as string: return '' override def Convert(field as IField) as string: builder as StringBuilder = StringBuilder() builder.Append(Convert(field.Modifiers)) if IncludeHTMLMarkup: builder.Append('') if ShowModifiers: if field.IsStatic and field.IsLiteral: builder.Append('const ') elif field.IsStatic: builder.Append('static ') if field.IsReadonly: builder.Append('readonly ') if IncludeHTMLMarkup: builder.Append('') if IncludeHTMLMarkup: builder.Append('') if UseFullyQualifiedMemberNames: builder.Append(field.FullyQualifiedName) else: builder.Append(field.Name) if field.ReturnType != null and ShowReturnType: builder.Append(' as ') builder.Append(Convert(field.ReturnType)) if IncludeHTMLMarkup: builder.Append('') return builder.ToString() override def Convert(property as IProperty) as string: builder as StringBuilder = StringBuilder() builder.Append(Convert(property.Modifiers)) if ShowModifiers: builder.Append(GetModifier(property)) if IncludeHTMLMarkup: builder.Append('') if UseFullyQualifiedMemberNames: builder.Append(property.FullyQualifiedName) else: builder.Append(property.Name) if IncludeHTMLMarkup: builder.Append('') if property.Parameters.Count > 0: builder.Append('(') if IncludeHTMLMarkup: builder.Append('
') for i in range(property.Parameters.Count): if IncludeHTMLMarkup: builder.Append('   ') builder.Append(Convert(property.Parameters[i])) if i + 1 < property.Parameters.Count: builder.Append(', ') if IncludeHTMLMarkup: builder.Append('
') builder.Append(')') if property.ReturnType != null and ShowReturnType: builder.Append(' as ') builder.Append(Convert(property.ReturnType)) if IncludeBodies: builder.Append(': ') if property.CanGet: builder.Append('get ') if property.CanSet: builder.Append('set ') return builder.ToString() override def Convert(e as IEvent) as string: builder as StringBuilder = StringBuilder() builder.Append(Convert(e.Modifiers)) if ShowModifiers: builder.Append(GetModifier(e)) if IncludeHTMLMarkup: builder.Append('') if UseFullyQualifiedMemberNames: builder.Append(e.FullyQualifiedName) else: builder.Append(e.Name) if IncludeHTMLMarkup: builder.Append('') if e.ReturnType != null and ShowReturnType: builder.Append(' as ') builder.Append(Convert(e.ReturnType)) return builder.ToString() override def Convert(m as IIndexer) as string: builder as StringBuilder = StringBuilder() builder.Append(Convert(m.Modifiers)) if IncludeHTMLMarkup: builder.Append('') if ShowModifiers and m.IsStatic: builder.Append('static ') if IncludeHTMLMarkup: builder.Append('') if m.ReturnType != null and ShowReturnType: builder.Append(Convert(m.ReturnType)) builder.Append(' ') if IncludeHTMLMarkup: builder.Append('') if UseFullyQualifiedMemberNames: builder.Append(m.FullyQualifiedName) else: builder.Append(m.Name) if IncludeHTMLMarkup: builder.Append('') builder.Append('Indexer(') if IncludeHTMLMarkup: builder.Append('
') for i in range(m.Parameters.Count): if IncludeHTMLMarkup: builder.Append('   ') builder.Append(Convert(m.Parameters[i])) if i + 1 < m.Parameters.Count: builder.Append(', ') if IncludeHTMLMarkup: builder.Append('
') builder.Append(')') return builder.ToString() override def Convert(m as IMethod) as string: builder as StringBuilder = StringBuilder() builder.Append(Convert(m.Modifiers)) if ShowModifiers: builder.Append(GetModifier(m)) builder.Append('def ') if ShowReturnType if IncludeHTMLMarkup: builder.Append('') if m.IsConstructor: builder.Append('constructor') else: if UseFullyQualifiedMemberNames: builder.Append(m.FullyQualifiedName) else: builder.Append(m.Name) if IncludeHTMLMarkup: builder.Append('') builder.Append('(') if IncludeHTMLMarkup: builder.Append('
') for i in range(m.Parameters.Count): if IncludeHTMLMarkup: builder.Append('   ') builder.Append(Convert(m.Parameters[i])) if i + 1 < m.Parameters.Count: builder.Append(', ') if IncludeHTMLMarkup: builder.Append('
') builder.Append(')') if m.ReturnType != null and ShowReturnType and not m.IsConstructor: builder.Append(' as ') builder.Append(Convert(m.ReturnType)) if IncludeBodies: if m.DeclaringType != null: if m.DeclaringType.ClassType != ClassType.Interface: builder.Append(': ') else: builder.Append(': ') return builder.ToString() override def ConvertEnd(m as IMethod) as string: return '' override def Convert(returnType as IReturnType) as string: if returnType == null: return '' builder as StringBuilder = StringBuilder() /* linkSet as bool = false if UseLinkArrayList: ret as SharpAssemblyReturnType = returnType as SharpAssemblyReturnType if ret != null: if ret.UnderlyingClass != null: builder.Append('') linkSet = true */ for i in range(returnType.ArrayCount): builder.Append('(') if returnType.FullyQualifiedName != null and _typeConversionTable[returnType.FullyQualifiedName] != null: builder.Append(_typeConversionTable[returnType.FullyQualifiedName]) else: if UseFullyQualifiedNames: builder.Append(returnType.FullyQualifiedName) else: builder.Append(returnType.Name) //if linkSet: // builder.Append('') if returnType.PointerNestingLevel > 0: // Sometimes there are negative pointer nesting levels // (especially in exception constructors in the BCL for i in range(returnType.PointerNestingLevel): builder.Append('*') for i in range(returnType.ArrayCount): if returnType.ArrayDimensions[i] > 1: builder.Append(',') builder.Append(returnType.ArrayDimensions[i]) builder.Append(')') return builder.ToString() override def Convert(param as IParameter) as string: builder as StringBuilder = StringBuilder() if IncludeHTMLMarkup: builder.Append('') if param.IsRef: builder.Append('ref ') elif param.IsOut: builder.Append('out ') elif param.IsParams: builder.Append('params ') if IncludeHTMLMarkup: builder.Append('') if ShowParameterNames: builder.Append(param.Name) builder.Append(' as ') builder.Append(Convert(param.ReturnType)) return builder.ToString() override def WrapAttribute(attribute as 
string) as string: return '[' + attribute + ']' override def WrapComment(comment as string) as string: return '// ' + comment override def GetIntrinsicTypeName(dotNetTypeName as string) as string: if _typeConversionTable[dotNetTypeName] != null: return _typeConversionTable[dotNetTypeName] return dotNetTypeName nant-0.9.5~git20110729.r1.202a430/extras/SharpDevelop/BooBinding/src/BooBinding.prjx000066400000000000000000000132371161462365500270710ustar00rootroot00000000000000 nant-0.9.5~git20110729.r1.202a430/extras/SharpDevelop/BooBinding/src/BooCompiler.boo000066400000000000000000000051451161462365500270640ustar00rootroot00000000000000#region license // Copyright (c) 2004, Daniel Grunwald (daniel@danielgrunwald.de) // All rights reserved. // // BooBinding is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. // // BooBinding is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. 
// // You should have received a copy of the GNU General Public License // along with BooBinding; if not, write to the Free Software // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA #endregion namespace BooBinding import System import System.Collections import System.Diagnostics import System.IO import System.Globalization import System.Text import System.Threading import System.Reflection.Assembly as Assembly import Boo.Lang.Compiler import Boo.Lang.Compiler.IO import Boo.Lang.Compiler.Pipelines import Boo.Lang.Compiler.Resources class BooCompilerWrapper: [property(OutputFile)] _outputFile as string _references = [] _resources = [] _inputFiles = [] _options as BooBinding.CompilerOptions def SetOptions(o as BooBinding.CompilerOptions): _options = o def AddInputFile(fileName as string): _inputFiles.Add(fileName) def AddReference(assemblyName as string): _references.Add(assemblyName) def AddResource(fileName as string): _resources.Add(fileName) def Run(): args = [] if _options.CompileTarget == CompileTarget.WinExe: args.Add("-t:winexe") elif _options.CompileTarget == CompileTarget.Library: args.Add("-t:library") if _options.DuckTypingByDefault: args.Add("-ducky") args.Add("-o:${OutputFile}") for fname in _references: args.Add("-r:${fname}") for fname in _resources: args.Add("-resource:${fname}") for fname in _inputFiles: args.Add(fname) // shellm executes the compiler inprocess in a new AppDomain // for some reason, the compiler output sometimes contains // spurious messages from the main AppDomain //return shellm(GetBoocLocation(), args.ToArray(string)) // Switching to shell instead of shellm because of BOO-243 return shell(GetBoocLocation(), join("\"${arg}\"" for arg in args)) def GetBoocLocation(): return Path.Combine( Path.GetDirectoryName(Assembly.GetExecutingAssembly().Location), "booc.exe") 
nant-0.9.5~git20110729.r1.202a430/extras/SharpDevelop/BooBinding/src/BooDesigner.boo000066400000000000000000000463201161462365500270520ustar00rootroot00000000000000#region license // Copyright (c) 2004, Daniel Grunwald (daniel@danielgrunwald.de) // All rights reserved. // // BooBinding is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. // // BooBinding is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with BooBinding; if not, write to the Free Software // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA #endregion // The boo forms designer is written by Doug Holton and Daniel Grunwald. 
namespace BooBinding /* import Boo.Lang.Compiler import Boo.Lang.Compiler.IO import Boo.Lang.Compiler.Pipelines import Boo.Lang.Compiler.Steps */ import System import System.Text import System.Text.RegularExpressions import System.IO import System.Collections import System.Diagnostics import System.Drawing import System.Drawing.Design import System.Reflection import System.Windows.Forms import System.Drawing.Printing import System.ComponentModel import System.ComponentModel.Design import System.ComponentModel.Design.Serialization import System.Xml import ICSharpCode.SharpDevelop.Gui import ICSharpCode.SharpDevelop.Internal.Project import ICSharpCode.SharpDevelop.Internal.Undo import ICSharpCode.SharpDevelop.Gui.Components import ICSharpCode.SharpDevelop.DefaultEditor.Gui.Editor import ICSharpCode.Core.Properties import ICSharpCode.Core.AddIns import ICSharpCode.Core.Services import ICSharpCode.SharpDevelop.Services import SharpDevelop.Internal.Parser import ICSharpCode.SharpDevelop.FormDesigner.Services import ICSharpCode.SharpDevelop.FormDesigner.Hosts import ICSharpCode.SharpDevelop.FormDesigner.Util import ICSharpCode.Core.AddIns.Codons import ICSharpCode.TextEditor import ICSharpCode.TextEditor.Document import System.CodeDom import System.CodeDom.Compiler import Boo.Lang.CodeDom import ICSharpCode.SharpDevelop.FormDesigner class BooDesignerDisplayBinding(AbstractFormDesignerSecondaryDisplayBinding): protected override Extension as string: get: return ".boo" public override def CreateSecondaryViewContent(viewContent as IViewContent) as (ISecondaryViewContent): return (BooDesignerDisplayBindingWrapper(viewContent),) class BooDesignerDisplayBindingWrapper(FormDesignerDisplayBindingBase, ISecondaryViewContent): failedDesignerInitialize as bool c as IClass initializeComponents as IMethod viewContent as IViewContent textAreaControlProvider as ITextEditorControlProvider compilationErrors as string override FileName as string: get: fname = 
textAreaControlProvider.TextEditorControl.FileName if fname is null: return viewContent.UntitledName return fname override ClipboardHandler as IClipboardHandler: get: return self override Control as Control: get: return super.designPanel override IsDirty as bool: get: if viewContent is null: return false return viewContent.IsDirty set: if not viewContent is null: viewContent.IsDirty = value Document as IDocument: get: return textAreaControlProvider.TextEditorControl.Document def constructor(view as IViewContent): self(view, true) def constructor(view as IViewContent, secondary as bool): self.viewContent = view self.textAreaControlProvider = view as ITextEditorControlProvider InitializeComponents(secondary) private def InitializeComponents(secondary as bool): failedDesignerInitialize = false undoHandler.Reset() Reload() UpdateSelectableObjects() if designPanel != null and secondary == true: designPanel.Disable() protected override def CreateDesignerHost(): super.CreateDesignerHost() host.AddService(typeof(CodeDomProvider), Boo.Lang.CodeDom.BooCodeProvider()) //Boo.CodeDom _parseErrors as string = null private def OnParserError(e as antlr.RecognitionException): _parseErrors += "Line ${e.getLine()}: ${e.getErrorMessage()}\n" override def Reload(): try: Initialize() except ex as Exception: Console.WriteLine('Initialization exception : ' + ex) dirty as bool = viewContent.IsDirty if host != null and c != null: super.host.SetRootFullName(c.FullyQualifiedName) try: compileUnit = Boo.Lang.Compiler.Ast.CompileUnit() _parseErrors = null Boo.Lang.Parser.BooParser.ParseModule(1, compileUnit, "designerIntegrityCheck", StringReader(Document.TextContent), OnParserError) failedDesignerInitialize = _parseErrors != null if failedDesignerInitialize: compilationErrors = _parseErrors return classString = GenerateClassString(Document) /* TODO: use this block of code when ICodeParser has been implemented. 
parser = BooCodeProvider().CreateParser() if parser == null: failedDesignerInitialize = true compilationErrors = 'Boo.CodeDom.BooCodeProvider.CreateParser() returned null!!!\nBoo.CodeDom.dll needs to implement ICodeParser!' return codeCompileUnit = parser.Parse(StringReader(classString)) */ compileUnit = Boo.Lang.Compiler.Ast.CompileUnit() Boo.Lang.Parser.BooParser.ParseModule(1, compileUnit, "designerLoadClass", StringReader(classString), null) visitor = CodeDomVisitor() compileUnit.Accept(visitor) codeCompileUnit = visitor.OutputCompileUnit Microsoft.CSharp.CSharpCodeProvider().CreateGenerator().GenerateCodeFromCompileUnit(codeCompileUnit, Console.Out, null); if host != null and c != null: super.host.SetRootFullName(c.FullyQualifiedName) serializationManager as CodeDomDesignerSerializetionManager = host.GetService(typeof(IDesignerSerializationManager)) serializationManager.Initialize() baseType as Type = typeof(System.Windows.Forms.Form) for codeNamespace as CodeNamespace in codeCompileUnit.Namespaces: if codeNamespace.Types.Count > 0: baseType = host.GetType(codeNamespace.Types[0].BaseTypes[0].BaseType) break rootSerializer as CodeDomSerializer = serializationManager.GetRootSerializer(baseType) if rootSerializer == null: raise Exception('No root serializer found') for codeNamespace as CodeNamespace in codeCompileUnit.Namespaces: if codeNamespace.Types.Count > 0: designerResourceService as DesignerResourceService = host.GetService(typeof(System.ComponentModel.Design.IResourceService)) if designerResourceService != null: designerResourceService.SerializationStarted(false) try: rootSerializer.Deserialize(serializationManager, codeNamespace.Types[0]) except e as Exception: Console.WriteLine(e) compilationErrors = "Can't deserialize form. 
Possible reason: Initialize component method was changed manually.\n${e}" failedDesignerInitialize = true return serializationManager.OnSerializationComplete() if designerResourceService != null: designerResourceService.SerializationEnded(false) designPanel.SetRootDesigner() designPanel.Enable() break failedDesignerInitialize = false undoHandler.Reset() except ex as Exception: Console.WriteLine("Got exception : ${ex.Message}\n${ex.StackTrace}") compilationErrors = ex.ToString() failedDesignerInitialize = true viewContent.IsDirty = dirty protected virtual def AppendUsings(builder as StringBuilder, usings as IUsingCollection): for u as IUsing in usings: for usingString as string in u.Usings: if usingString.StartsWith('System'): builder.Append('import ' + usingString + '\n') private def GenerateClassString(document as IDocument) as string: Reparse(document.TextContent) builder as StringBuilder = StringBuilder() //if c.Namespace != null and c.Namespace.Length > 0: // builder.Append('namespace ') // builder.Append(c.Namespace) // builder.Append('\n') AppendUsings(builder, c.CompilationUnit.Usings) className as string = c.Name builder.Append('class ') builder.Append(className) builder.Append('(') builder.Append(ExtractBaseClass(c)) builder.Append('):\n') fields as ArrayList = GetUsedFields(document, c, initializeComponents) for field as IField in fields: fieldLine as LineSegment = document.GetLineSegment(field.Region.BeginLine - 1) builder.Append(document.GetText(fieldLine.Offset, fieldLine.Length)) builder.Append('\n') builder.Append('\tdef constructor():\n') builder.Append('\t\tpass\n') //builder.Append('\t\tself.') //builder.Append(initializeComponents.Name) //builder.Append('()\n') builder.Append('\t\n') initializeComponentsString as string = GetInitializeComponentsString(document, initializeComponents) builder.Append(initializeComponentsString) return builder.ToString() private def GetInitializeComponentsString(doc as IDocument, initializeComponents as IMethod) as 
string: beginLine as LineSegment = doc.GetLineSegment(initializeComponents.Region.BeginLine - 1) endLine as LineSegment = doc.GetLineSegment(initializeComponents.BodyRegion.EndLine - 1) startOffset as int = beginLine.Offset + initializeComponents.Region.BeginColumn - 1 endOffset as int = endLine.Offset + initializeComponents.BodyRegion.EndColumn - 1 return doc.GetText(startOffset, endOffset - startOffset) private def GetUsedFields(doc as IDocument, c as IClass, initializeComponents as IMethod) as ArrayList: InitializeComponentsString as string = GetInitializeComponentsString(doc, initializeComponents) fields as ArrayList = ArrayList() for field as IField in c.Fields: if InitializeComponentsString.IndexOf('self.' + field.Name + ' ') >= 0: fields.Add(field) return fields private def DeleteFormFields(doc as IDocument): fields as ArrayList = GetUsedFields(doc, c, initializeComponents) i as int = fields.Count - 1 while i >= 0: field as IField = fields[i] fieldLine as LineSegment = doc.GetLineSegment(field.Region.BeginLine - 1) doc.Remove(fieldLine.Offset, fieldLine.TotalLength) --i protected virtual def MergeFormChanges(): if self.failedDesignerInitialize: return writer = StringWriter() CodeDOMGenerator(self.host, BooCodeProvider()).ConvertContentDefinition(writer); currentForm as string = writer.ToString() designerResourceService as DesignerResourceService = host.GetService(typeof(System.ComponentModel.Design.IResourceService)) if designerResourceService != null: self.resources = Hashtable() if designerResourceService.Resources != null and designerResourceService.Resources.Count != 0: for entry as DictionaryEntry in designerResourceService.Resources: self.resources[entry.Key] = DesignerResourceService.ResourceStorage(cast(DesignerResourceService.ResourceStorage, entry.Value)) MessageBox.Show(currentForm) /* generatedInfo as IParseInformation = parserService.ParseFile(FileName, currentForm, false) cu as ICompilationUnit = generatedInfo.BestCompilationUnit if cu.Classes 
== null or cu.Classes.Count == 0: return generatedClass as IClass = cu.Classes[0] generatedInitializeComponents as IMethod = GetInitializeComponents(cu.Classes[0]) newDoc as IDocument = DocumentFactory().CreateDocument() newDoc.TextContent = currentForm newInitializeComponents as string = GetInitializeComponentsString(newDoc, generatedInitializeComponents) textArea as TextEditorControl = textAreaControlProvider.TextEditorControl textArea.BeginUpdate() marker as (FoldMarker) = textArea.Document.FoldingManager.FoldMarker.ToArray(typeof(FoldMarker)) textArea.Document.FoldingManager.FoldMarker.Clear() oldDoc as IDocument = DocumentFactory().CreateDocument() oldDoc.TextContent = textArea.Document.TextContent Reparse(oldDoc.TextContent) DeleteFormFields(oldDoc) Reparse(oldDoc.TextContent) beginLine as LineSegment = oldDoc.GetLineSegment(initializeComponents.Region.BeginLine - 1) startOffset as int = beginLine.Offset + initializeComponents.Region.BeginColumn - 1 oldDoc.Replace(startOffset, GetInitializeComponentsString(oldDoc, initializeComponents).Length, newInitializeComponents) Reparse(oldDoc.TextContent) lineNr as int = c.Region.BeginLine - 1 while true: if lineNr >= textArea.Document.TotalNumberOfLines - 2: break curLine as LineSegment = oldDoc.GetLineSegment(lineNr) if oldDoc.GetText(curLine.Offset, curLine.Length).Trim().EndsWith('{'): break ++lineNr beginLine = oldDoc.GetLineSegment(lineNr + 1) insertOffset as int = beginLine.Offset for field as IField in generatedClass.Fields: fieldLine as LineSegment = newDoc.GetLineSegment(field.Region.BeginLine - 1) oldDoc.Insert(insertOffset, newDoc.GetText(fieldLine.Offset, fieldLine.TotalLength)) oldCaretPos as Point = textArea.ActiveTextAreaControl.Caret.Position textArea.Document.TextContent = oldDoc.TextContent textArea.ActiveTextAreaControl.Caret.Position = oldCaretPos parseInfo as IParseInformation = parserService.ParseFile(FileName, textArea.Document.TextContent, false) 
textArea.Document.FoldingManager.UpdateFoldings(FileName, parseInfo) parseInfo = null i as int = 0 while i < marker.Length and i < textArea.Document.FoldingManager.FoldMarker.Count: cast(FoldMarker, textArea.Document.FoldingManager.FoldMarker[i]).IsFolded = marker[i].IsFolded ++i viewContent.IsDirty = dirty textArea.Document.UndoStack.ClearAll() textArea.EndUpdate() textArea.OptionsChanged() */ protected def Reparse(content as string): parserService as IParserService = ICSharpCode.Core.Services.ServiceManager.Services.GetService(typeof(IParserService)) info as IParseInformation = parserService.ParseFile(self.FileName, content, false) cu as ICompilationUnit = info.BestCompilationUnit for c as IClass in cu.Classes: if IsBaseClassDesignable(c): initializeComponents = GetInitializeComponents(c) if initializeComponents != null: self.c = c break private def GetInitializeComponents(c as IClass) as IMethod: for method as IMethod in c.Methods: if (method.Name == 'InitializeComponents' or method.Name == 'InitializeComponent') and method.Parameters.Count == 0: return method return null override def ShowSourceCode(): self.WorkbenchWindow.SwitchView(0) override def ShowSourceCode(lineNumber as int): ShowSourceCode() textAreaControlProvider.TextEditorControl.ActiveTextAreaControl.JumpTo(lineNumber, 255) protected static def GenerateParams(edesc as EventDescriptor, paramNames as bool) as string: t as System.Type = edesc.EventType mInfo as MethodInfo = t.GetMethod('Invoke') param as string = '' csa as IAmbience = null try: csa = cast(IAmbience, AddInTreeSingleton.AddInTree.GetTreeNode('/SharpDevelop/Workbench/Ambiences').BuildChildItem('Boo', typeof(BooDesignerDisplayBindingWrapper))) except: pass i as int = 0 while i < mInfo.GetParameters().Length: pInfo as ParameterInfo = mInfo.GetParameters()[i] typeStr as string = pInfo.ParameterType.ToString() if csa != null: typeStr = csa.GetIntrinsicTypeName(typeStr) param += typeStr if paramNames == true: param += ' ' param += pInfo.Name 
if i + 1 < mInfo.GetParameters().Length: param += ', ' ++i return param protected def InsertComponentEvent(component as IComponent, edesc as EventDescriptor, eventMethodName as string, body as string, position as int) as bool: if self.failedDesignerInitialize: position = 0 return false Reparse(Document.TextContent) for method as IMethod in c.Methods: if method.Name == eventMethodName: position = method.Region.BeginLine + 1 return true Deselected() MergeFormChanges() Reparse(Document.TextContent) position = c.Region.EndLine + 1 offset as int = Document.GetLineSegment(c.Region.EndLine - 1).Offset text as string = "\tprivate def ${eventMethodName}(${GenerateParams(edesc, true)}):\n\t\t${body}\n\t\n" Document.Insert(offset, text) //Document.FormattingStrategy.IndentLines(self.textAreaControlProvider.TextEditorControl.ActiveTextAreaControl.TextArea, c.Region.EndLine - 1, c.Region.EndLine + 3) return false override def ShowSourceCode(component as IComponent, edesc as EventDescriptor, eventMethodName as string): position as int InsertComponentEvent(component, edesc, eventMethodName, 'pass', position) ShowSourceCode(position) override def GetCompatibleMethods(edesc as EventDescriptor) as ICollection: Reparse(Document.TextContent) compatibleMethods as ArrayList = ArrayList() methodInfo as MethodInfo = edesc.EventType.GetMethod('Invoke') for method as IMethod in c.Methods: if method.Parameters.Count == methodInfo.GetParameters().Length: found as bool = true i as int = 0 while i < methodInfo.GetParameters().Length: pInfo as ParameterInfo = methodInfo.GetParameters()[i] p as IParameter = method.Parameters[i] if p.ReturnType.FullyQualifiedName != pInfo.ParameterType.ToString(): found = false break ++i if found: compatibleMethods.Add(method.Name) return compatibleMethods override def GetCompatibleMethods(edesc as EventInfo) as ICollection: //Reparse(Document.TextContent) compatibleMethods as ArrayList = ArrayList() methodInfo as MethodInfo = edesc.GetAddMethod() pInfo as 
ParameterInfo = methodInfo.GetParameters()[0] eventName as string = pInfo.ParameterType.ToString().Replace('EventHandler', 'EventArgs') for method as IMethod in c.Methods: if method.Parameters.Count == 2: found as bool = true p as IParameter = method.Parameters[1] if p.ReturnType.FullyQualifiedName != eventName: found = false if found: compatibleMethods.Add(method.Name) return compatibleMethods override def Selected(): isFormDesignerVisible = true Reload() if not failedDesignerInitialize: pass else: if super.designPanel != null: super.designPanel.SetErrorState(compilationErrors) override def Deselected(): isFormDesignerVisible = false super.designPanel.Disable() if not failedDesignerInitialize: MergeFormChanges() textAreaControlProvider.TextEditorControl.Refresh() DeselectAllComponents() def NotifyAfterSave(successful as bool): if successful: designerResourceService as DesignerResourceService = host.GetService(typeof(System.ComponentModel.Design.IResourceService)) if designerResourceService != null: designerResourceService.Save() def NotifyBeforeSave(): MergeFormChanges() //boo bug? compiler thinks these methods are not implemented. override def Dispose(): super() override def RedrawContent(): super() override def SwitchedTo(): super() override WorkbenchWindow as IWorkbenchWindow: set: pass override TabPageText as string: get: return super.TabPageText nant-0.9.5~git20110729.r1.202a430/extras/SharpDevelop/BooBinding/src/BooProject.boo000066400000000000000000000056221161462365500267200ustar00rootroot00000000000000#region license // Copyright (c) 2004, Daniel Grunwald (daniel@danielgrunwald.de) // All rights reserved. // // BooBinding is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. 
// // BooBinding is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with BooBinding; if not, write to the Free Software // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA #endregion namespace BooBinding import System import System.IO import System.Collections import System.Diagnostics import System.Reflection import System.Xml import ICSharpCode.Core.Services import ICSharpCode.SharpDevelop.Services import ICSharpCode.SharpDevelop.Internal.Project import ICSharpCode.SharpDevelop.Internal.Templates // Describes a Boo Project and its compilation options. class BooProject(AbstractProject): override ProjectType: get: return BooLanguageBinding.LanguageName override def CreateConfiguration() as IConfiguration: return BooCompilerParameters() def constructor(info as ProjectCreateInformation, projectOptions as XmlElement): parserService as IParserService = ServiceManager.Services.GetService(typeof(IParserService)) booDir = Path.GetDirectoryName(Assembly.GetExecutingAssembly().Location) parserService.AddReferenceToCompletionLookup(self, ProjectReference(ReferenceType.Assembly, Path.Combine(booDir, "Boo.Lang.dll"))) if info != null: Name = info.ProjectName; debugConf as BooCompilerParameters = CreateConfiguration("Debug") Configurations.Add(debugConf) debugConf.IncludeDebugInformation = true Configurations.Add(CreateConfiguration("Release")) for parameter as BooCompilerParameters in Configurations: parameter.OutputDirectory = info.BinPath + Path.DirectorySeparatorChar + parameter.Name parameter.OutputAssembly = info.ProjectName if projectOptions != null: target = projectOptions.GetAttribute("Target") pauseConsoleOutput = projectOptions.GetAttribute("PauseConsoleOutput") duckTypingByDefault = 
projectOptions.GetAttribute("duckTypingByDefault") if target != null and target != "": parameter.CompileTarget = Enum.Parse(typeof(CompileTarget), target) if pauseConsoleOutput != null and pauseConsoleOutput != "": parameter.PauseConsoleOutput = Boolean.Parse(pauseConsoleOutput) if duckTypingByDefault != null and duckTypingByDefault != "": parameter.DuckTypingByDefault = Boolean.Parse(duckTypingByDefault) nant-0.9.5~git20110729.r1.202a430/extras/SharpDevelop/BooBinding/src/BooRefactory.boo000066400000000000000000000266421161462365500272550ustar00rootroot00000000000000#region license // Copyright (c) 2004, Daniel Grunwald (daniel@danielgrunwald.de) // All rights reserved. // // BooBinding is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. // // BooBinding is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with BooBinding; if not, write to the Free Software // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA #endregion namespace BooBinding import System import System.Reflection import System.CodeDom import System.Text import System.Collections import ICSharpCode.SharpRefactory.Parser import ICSharpCode.SharpRefactory.Parser.AST class BooRefactory: // This class can refactor the c# parse tree before converting to boo // to simplify some things // NOTE: While writing this, I discovered many bugs in SharpRefactory I worked around // by overriding the buggy methods. 
// Since SharpRefactory will be replaced by NRefactory soon, the bugs won't be fixed // in SharpRefactory but only in NRefactory (but many bugs don't exist in NRefactory anyhow) def Refactor(compilationUnit as CompilationUnit): RefactorNamespaces(compilationUnit) compilationUnit.AcceptChildren(RefactoryVisitor(), null); def RefactorNamespaces(compilationUnit as CompilationUnit): namespacecount = CountNamespaces(compilationUnit.Children) if namespacecount == 1: // put the namespace definition to the beginning for o in compilationUnit.Children: n = o as NamespaceDeclaration if n != null: // get using definitions into the namespace num = 0 i = 0 while i < compilationUnit.Children.Count: child = compilationUnit.Children[i] print("i = " + i.ToString() + "; child =" + child.ToString()) if child isa UsingDeclaration or child isa UsingAliasDeclaration: compilationUnit.Children.RemoveAt(i) i -= 1 n.Children.Insert(num, child) num += 1 i += 1 return def CountNamespaces(l as ArrayList) as int: count as int = 0 for o in l: n = o as NamespaceDeclaration if n != null: count += 1 + CountNamespaces(n.Children) return count class RefactoryVisitor(AbstractASTVisitor): override def Visit(typeDeclaration as TypeDeclaration, data): // create DefaultMemberAttribute for indexers if typeDeclaration.Type == Types.Class and HasIndexer(typeDeclaration.Children): expression = PrimitiveExpression('Indexer', 'Indexer') attribute = ICSharpCode.SharpRefactory.Parser.AST.Attribute("DefaultMember", MakeArray(expression), ArrayList()) typeDeclaration.Attributes.Add(AttributeSection(null, MakeArray(attribute))) field as FieldDeclaration = null removed = ArrayList() // list of all elements that should be removed from typeDeclaration // prefix fields with underscore fields = ArrayList() for o in typeDeclaration.Children: field = o as FieldDeclaration if field != null: for var as VariableDeclaration in field.Fields: if var.Name.Length < 2 or not var.Name[1:2] == "_": if Char.IsLower(var.Name, 0) and 
IsPrivate(field.Modifier): fields.Add(var) typeDeclaration.AcceptChildren(RenameFieldVisitor(fields), data) for var as VariableDeclaration in fields: var.Name = "_" + var.Name // convert Getter/Setter into attributes if possible // -> therefore create a new field list (containing the fields, not VarDecs) fieldHash = Hashtable() for o in typeDeclaration.Children: field = o as FieldDeclaration if field != null: if field.Fields.Count == 1: fieldHash[cast(VariableDeclaration, field.Fields[0]).Name] = field // now look for properties for o in typeDeclaration.Children: property = o as PropertyDeclaration if property != null and property.Modifier == Modifier.Public: ok = true field = null // check if the property has correct getters/setters if property.HasGetRegion: block = property.GetRegion.Block if block != null and block.Children.Count == 1: child = block.Children[0] as ReturnStatement if child != null: retExpr = child.ReturnExpression as IdentifierExpression if retExpr != null: field = fieldHash[retExpr.Identifier] ok = false if field == null if ok and property.HasSetRegion: ok = false setterField as FieldDeclaration = null block = property.SetRegion.Block if block != null and block.Children.Count == 1: childStatement = block.Children[0] as StatementExpression if child != null: expr = childStatement.Expression as AssignmentExpression if expr != null and expr.Op == AssignmentOperatorType.Assign: leftExpr = expr.Left as IdentifierExpression rightExpr = expr.Right as IdentifierExpression if leftExpr != null and rightExpr != null and rightExpr.Identifier == "value": setterField = fieldHash[leftExpr.Identifier] ok = false if setterField == null or setterField != field if ok and property.HasGetRegion: removed.Add(o) print("Replacing property ${property.Name}") identifier = IdentifierExpression(property.Name) attributes = ArrayList() if property.HasSetRegion: attributes.Add(ICSharpCode.SharpRefactory.Parser.AST.Attribute("Property", MakeArray(identifier), ArrayList())) 
else: attributes.Add(ICSharpCode.SharpRefactory.Parser.AST.Attribute("Getter", MakeArray(identifier), ArrayList())) field.Attributes.Add(AttributeSection(null, attributes)) for o in removed: typeDeclaration.Children.Remove(o) return super(typeDeclaration, data) def IsPrivate(m as Modifier) as bool: // fields without any modifier are also private return false if (m & Modifier.Public) == Modifier.Public return false if (m & Modifier.Protected) == Modifier.Protected return false if (m & Modifier.Internal) == Modifier.Internal return true override def Visit(statementExpression as StatementExpression, data): expr = statementExpression.Expression as UnaryOperatorExpression if expr != null: // found unary operator as single statement expr.Op = UnaryOperatorType.Increment if expr.Op == UnaryOperatorType.PostIncrement expr.Op = UnaryOperatorType.Decrement if expr.Op == UnaryOperatorType.PostDecrement return super(statementExpression, data) override def Visit(localVariableDeclaration as LocalVariableDeclaration, data): /* Check for this structure: [LocalVariableDeclaration: Type=[TypeReference: Type=EmptyClass, PointerNestingLevel=0, RankSpecifier=System.Int32[]], Modifier =None Variables={[VariableDeclaration: Name=c, Initializer=[CastExpression: CastTo=[TypeReference: Type=EmptyClass, PointerNestingLevel=0, RankSpecifier=System.Int32[]], Expression=***]]}] */ if localVariableDeclaration.Variables.Count == 1: var as VariableDeclaration = localVariableDeclaration.Variables[0] castExpr = var.Initializer as CastExpression if castExpr != null: if castExpr.CastTo.Type == localVariableDeclaration.Type.Type: // remove redundant cast var.Initializer = castExpr.Expression return super(localVariableDeclaration, data) override def Visit(blockStatement as BlockStatement, data): // work around SharpRefactory bug (that won't be fixed before NRefactory) switchSection = blockStatement as SwitchSection return Visit(switchSection, data) if switchSection != null return super(blockStatement, 
data) override def Visit(switchSection as SwitchSection, data): if switchSection.Children != null and switchSection.Children.Count > 0: lastNum = switchSection.Children.Count - 1 lastChild = switchSection.Children[lastNum] if lastChild isa BreakStatement: switchSection.Children.RemoveAt(lastNum) // can't use super because of another SharpRefactory bug for label as Expression in switchSection.SwitchLabels: label.AcceptVisitor(self, data) if label != null return switchSection.AcceptChildren(self, data) def HasIndexer(l as ArrayList) as bool: for o in l: return true if o isa IndexerDeclaration return false def MakeArray(val as object) as ArrayList: a = ArrayList() a.Add(val) return a class RenameFieldVisitor(AbstractASTVisitor): _fields as ArrayList _curBlock = ArrayList() _blocks = Stack() def constructor(fields as ArrayList): _fields = fields override def Visit(typeDeclaration as TypeDeclaration, data): // ignore sub-types return null override def Visit(blockStatement as BlockStatement, data): Push() result = super(blockStatement, data) Pop() return result override def Visit(methodDeclaration as MethodDeclaration, data): Push() result = super(methodDeclaration, data) Pop() return result override def Visit(constructorDeclaration as ConstructorDeclaration, data): Push() result = super(constructorDeclaration, data) Pop() return result private def Push(): //print("PUSH block") _blocks.Push(_curBlock) _curBlock = ArrayList() private def Pop(): _curBlock = _blocks.Pop() //print("POP block") override def Visit(localVariableDeclaration as LocalVariableDeclaration, data): for decl as VariableDeclaration in localVariableDeclaration.Variables: //print("add variable ${decl.Name} to block") _curBlock.Add(decl.Name) return super(localVariableDeclaration, data) override def Visit(parameterDeclarationExpression as ParameterDeclarationExpression, data): _curBlock.Add(parameterDeclarationExpression.ParameterName) //print("add parameter ${parameterDeclarationExpression.ParameterName} 
to block") return super(parameterDeclarationExpression, data) override def Visit(identifierExpression as IdentifierExpression, data): name = identifierExpression.Identifier for var as VariableDeclaration in _fields: if var.Name == name and not IsLocal(name): identifierExpression.Identifier = "_" + name return null return super(identifierExpression, data) override def Visit(fieldReferenceExpression as FieldReferenceExpression, data): if fieldReferenceExpression.TargetObject isa ThisReferenceExpression: name = fieldReferenceExpression.FieldName for var as VariableDeclaration in _fields: if var.Name == fieldReferenceExpression.FieldName: fieldReferenceExpression.FieldName = "_" + name return null return super(fieldReferenceExpression, data) def IsLocal(name as string) as bool: for block as ArrayList in _blocks: for n as string in block: return true if name == n for n as string in _curBlock: return true if name == n return false override def Visit(invocationExpression as InvocationExpression, data): // this method is a workaround for a bug in SharpRefactory result = data if invocationExpression.TargetObject != null: result = invocationExpression.TargetObject.AcceptVisitor(self, data) if invocationExpression.Parameters != null: for n as INode in invocationExpression.Parameters: n.AcceptVisitor(self, data) return result override def Visit(indexerExpression as IndexerExpression, data): // this method is a workaround for a bug in SharpRefactory result = indexerExpression.TargetObject.AcceptVisitor(self, data) for n as INode in indexerExpression.Indices: n.AcceptVisitor(self, data) return result nant-0.9.5~git20110729.r1.202a430/extras/SharpDevelop/BooBinding/src/BooVisitor.boo000066400000000000000000001365511161462365500267570ustar00rootroot00000000000000#region license // Copyright (c) 2004, Daniel Grunwald (daniel@danielgrunwald.de) // All rights reserved. 
// // BooBinding is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. // // BooBinding is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with BooBinding; if not, write to the Free Software // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA #endregion namespace BooBinding import System import System.Reflection import System.CodeDom import System.Text import System.Collections import ICSharpCode.SharpRefactory.Parser import ICSharpCode.SharpRefactory.Parser.AST import ICSharpCode.SharpRefactory.PrettyPrinter class BooVisitor(AbstractASTVisitor): _newLineSep = Environment.NewLine [Getter(SourceText)] _sourceText = StringBuilder() _indentLevel = 0 _indentOpenPosition = 0 _errors = Errors() _currentType as TypeDeclaration = null _debugOutput = false _inmacro = false #region IASTVisitor interface implementation override def Visit(node as INode, data as object): _errors.Error(-1, -1, "Visited INode (should NEVER HAPPEN)") Console.WriteLine("Visitor was: " + self.GetType()) Console.WriteLine("Node was : " + node.GetType()) return node.AcceptChildren(self, data) def AppendIndentation(): for i in range(_indentLevel): _sourceText.Append("\t") def AppendNewLine(): _sourceText.Append(_newLineSep) def DebugOutput(o as INode): Console.WriteLine(o.ToString()) if _debugOutput return def AddIndentLevel(): _indentLevel += 1 _indentOpenPosition = _sourceText.Length def RemoveIndentLevel(): if _indentOpenPosition == _sourceText.Length: // nothing was inserted in this block -> insert pass _indentOpenPosition = 0 
AppendIndentation() _sourceText.Append("pass") AppendNewLine() _indentLevel -= 1 override def Visit(compilationUnit as CompilationUnit, data): DebugOutput(compilationUnit) BooRefactory().Refactor(compilationUnit) compilationUnit.AcceptChildren(self, data) return null override def Visit(namespaceDeclaration as NamespaceDeclaration, data): DebugOutput(namespaceDeclaration) AppendIndentation() _sourceText.Append("namespace ") _sourceText.Append(namespaceDeclaration.NameSpace) AppendNewLine() namespaceDeclaration.AcceptChildren(self, data) AppendNewLine() return null override def Visit(usingDeclaration as UsingDeclaration, data): DebugOutput(usingDeclaration) AppendIndentation() _sourceText.Append("import ") _sourceText.Append(usingDeclaration.Namespace) AppendNewLine() return null override def Visit(usingAliasDeclaration as UsingAliasDeclaration, data): DebugOutput(usingAliasDeclaration) AppendIndentation() _sourceText.Append("import ") _sourceText.Append(usingAliasDeclaration.Namespace) _sourceText.Append(" as ") _sourceText.Append(usingAliasDeclaration.Alias) AppendNewLine() return null override def Visit(attributeSection as AttributeSection, data): DebugOutput(attributeSection) AppendIndentation() _sourceText.Append("[") if (attributeSection.AttributeTarget != null and attributeSection.AttributeTarget.Length > 0): _sourceText.Append(attributeSection.AttributeTarget) _sourceText.Append(": ") for j in range(attributeSection.Attributes.Count): attr as ICSharpCode.SharpRefactory.Parser.AST.Attribute = attributeSection.Attributes[j] _sourceText.Append(attr.Name) _sourceText.Append("(") for i in range(attr.PositionalArguments.Count): expr as Expression = attr.PositionalArguments[i] _sourceText.Append(expr.AcceptVisitor(self, data).ToString()) if (i + 1 < attr.PositionalArguments.Count): _sourceText.Append(", ") for i in range(attr.NamedArguments.Count): if (i > 0 or attr.PositionalArguments.Count > 0): _sourceText.Append(", ") named as NamedArgument = 
attr.NamedArguments[i] _sourceText.Append(named.Name) _sourceText.Append(": ") _sourceText.Append(named.Expr.AcceptVisitor(self, data).ToString()) _sourceText.Append(")") if (j + 1 < attributeSection.Attributes.Count): _sourceText.Append(", ") _sourceText.Append("]") AppendNewLine() return null override def Visit(typeDeclaration as TypeDeclaration, data): DebugOutput(typeDeclaration) AppendIndentation() AppendNewLine() AppendAttributes(typeDeclaration.Attributes) //Add a [Module] attribute if this is a class with a Main method if typeDeclaration.Type == Types.Class: for child in typeDeclaration.Children: method = child as MethodDeclaration if method is not null and method.Name == "Main": AppendIndentation() _sourceText.Append("[System.Runtime.CompilerServices.CompilerGlobalScope]") AppendNewLine() modifier = GetModifier(typeDeclaration.Modifier, Modifier.Public) typeString = "class " typeString = "enum " if typeDeclaration.Type == Types.Enum typeString = "interface " if typeDeclaration.Type == Types.Interface AppendIndentation() _sourceText.Append(modifier) _sourceText.Append(typeString) _sourceText.Append(typeDeclaration.Name) if typeDeclaration.BaseTypes == null: _sourceText.Append("(System.ValueType)") if typeDeclaration.Type == Types.Struct else: _sourceText.Append("(") first = true for baseType as string in typeDeclaration.BaseTypes: if first: first = false else: _sourceText.Append(", ") _sourceText.Append(baseType); _sourceText.Append(")") _sourceText.Append(":") AppendNewLine() AddIndentLevel() oldType as TypeDeclaration = _currentType _currentType = typeDeclaration typeDeclaration.AcceptChildren(self, data) _currentType = oldType RemoveIndentLevel() AppendNewLine() return null override def Visit(delegateDeclaration as DelegateDeclaration, data): DebugOutput(delegateDeclaration) AppendNewLine() AppendAttributes(delegateDeclaration.Attributes) AppendIndentation() _sourceText.Append(GetModifier(delegateDeclaration.Modifier, Modifier.Public)) 
_sourceText.Append("callable ") _sourceText.Append(delegateDeclaration.Name) _sourceText.Append("(") AppendParameters(delegateDeclaration.Parameters) _sourceText.Append(")") if delegateDeclaration.ReturnType.Type != "void": _sourceText.Append(" as ") _sourceText.Append(GetTypeString(delegateDeclaration.ReturnType)) AppendNewLine() return null override def Visit(variableDeclaration as VariableDeclaration, data): AppendIndentation() _sourceText.Append(variableDeclaration.Name) if (variableDeclaration.Initializer != null): _sourceText.Append(" = ") _sourceText.Append(variableDeclaration.Initializer.AcceptVisitor(self, data)) AppendNewLine() return null override def Visit(fieldDeclaration as FieldDeclaration, data): DebugOutput(fieldDeclaration) for field as VariableDeclaration in fieldDeclaration.Fields: AppendAttributes(fieldDeclaration.Attributes) AppendIndentation() // enum fields don't have a type or modifier if fieldDeclaration.TypeReference != null: _sourceText.Append(GetModifier(fieldDeclaration.Modifier, Modifier.Protected)) _sourceText.Append(field.Name) if fieldDeclaration.TypeReference != null: _sourceText.Append(" as ") _sourceText.Append(GetTypeString(fieldDeclaration.TypeReference)) if (field.Initializer != null): _sourceText.Append(" = ") _sourceText.Append(field.Initializer.AcceptVisitor(self, data).ToString()) AppendNewLine() if fieldDeclaration.TypeReference != null: AppendIndentation() AppendNewLine() return null override def Visit(methodDeclaration as MethodDeclaration, data): DebugOutput(methodDeclaration) AppendAttributes(methodDeclaration.Attributes) AppendIndentation() isFunction as bool = methodDeclaration.TypeReference.Type != "void" _sourceText.Append(GetModifier(methodDeclaration.Modifier, Modifier.Public)) _sourceText.Append("def ") _sourceText.Append(methodDeclaration.Name) _sourceText.Append("(") AppendParameters(methodDeclaration.Parameters) _sourceText.Append(")") if (isFunction): _sourceText.Append(" as ") 
_sourceText.Append(GetTypeString(methodDeclaration.TypeReference)) if (_currentType.Type != Types.Interface): if (methodDeclaration.Body != null): _sourceText.Append(":") AppendNewLine() AddIndentLevel() methodDeclaration.Body.AcceptVisitor(self, data) RemoveIndentLevel() AppendIndentation() else: _sourceText.Append(":") AppendNewLine() AddIndentLevel() AppendIndentation() _sourceText.Append("pass") RemoveIndentLevel() AppendNewLine() AppendNewLine() return null override def Visit(propertyDeclaration as PropertyDeclaration, data): DebugOutput(propertyDeclaration) AppendAttributes(propertyDeclaration.Attributes) AppendIndentation() _sourceText.Append(GetModifier(propertyDeclaration.Modifier, Modifier.Public)) _sourceText.Append(propertyDeclaration.Name) _sourceText.Append(" as ") _sourceText.Append(GetTypeString(propertyDeclaration.TypeReference)) _sourceText.Append(":") AppendNewLine() AddIndentLevel() if (propertyDeclaration.GetRegion != null): propertyDeclaration.GetRegion.AcceptVisitor(self, data) if (propertyDeclaration.SetRegion != null): propertyDeclaration.SetRegion.AcceptVisitor(self, data) RemoveIndentLevel() AppendIndentation() AppendNewLine() return null override def Visit(propertyGetRegion as PropertyGetRegion, data): DebugOutput(propertyGetRegion) AppendAttributes(propertyGetRegion.Attributes) AppendIndentation() if propertyGetRegion.Block == null: _sourceText.Append("get") if (_currentType.Type != Types.Interface): _sourceText.Append(":") AppendNewLine() AddIndentLevel() AppendIndentation() _sourceText.Append("pass") RemoveIndentLevel() AppendNewLine() else: _sourceText.Append("get:") AppendNewLine() AddIndentLevel() propertyGetRegion.Block.AcceptVisitor(self, data) RemoveIndentLevel() return null override def Visit(propertySetRegion as PropertySetRegion, data): DebugOutput(propertySetRegion) AppendAttributes(propertySetRegion.Attributes) AppendIndentation() if propertySetRegion.Block == null: _sourceText.Append("set") if (_currentType.Type != 
Types.Interface): _sourceText.Append(":") AppendNewLine() AddIndentLevel() AppendIndentation() _sourceText.Append("pass") RemoveIndentLevel() AppendNewLine() else: _sourceText.Append("set:") AppendNewLine() AddIndentLevel() propertySetRegion.Block.AcceptVisitor(self, data) RemoveIndentLevel() return null override def Visit(eventDeclaration as EventDeclaration, data): DebugOutput(eventDeclaration) AppendNewLine() if (eventDeclaration.Name == null): for field as VariableDeclaration in eventDeclaration.VariableDeclarators: AppendAttributes(eventDeclaration.Attributes) AppendIndentation() _sourceText.Append(GetModifier(eventDeclaration.Modifier, Modifier.Public)) _sourceText.Append("event ") _sourceText.Append(field.Name) _sourceText.Append(" as ") _sourceText.Append(GetTypeString(eventDeclaration.TypeReference)) AppendNewLine() else: AppendAttributes(eventDeclaration.Attributes) AppendIndentation() _sourceText.Append(GetModifier(eventDeclaration.Modifier, Modifier.Public)) _sourceText.Append("event ") _sourceText.Append(eventDeclaration.Name) _sourceText.Append(" as ") _sourceText.Append(GetTypeString(eventDeclaration.TypeReference)) _sourceText.Append(":") AppendNewLine() AddIndentLevel() if (eventDeclaration.HasAddRegion): eventDeclaration.AddRegion.AcceptVisitor(self, data) if (eventDeclaration.HasRemoveRegion): eventDeclaration.RemoveRegion.AcceptVisitor(self, data) RemoveIndentLevel() AppendIndentation() AppendNewLine() return data override def Visit(eventAddRegion as EventAddRegion, data): AddIndentLevel() _sourceText.Append("add:") AppendNewLine() AddIndentLevel() eventAddRegion.Block.AcceptVisitor(self, data) if eventAddRegion.Block != null RemoveIndentLevel() _errors.Error(-1, -1, "Event add region can't be converted") return null override def Visit(eventRemoveRegion as EventRemoveRegion, data): AddIndentLevel() _sourceText.Append("remove:") AppendNewLine() AddIndentLevel() eventRemoveRegion.Block.AcceptVisitor(self, data) if eventRemoveRegion.Block != null 
RemoveIndentLevel() _errors.Error(-1, -1, "Event remove region can't be converted") return null override def Visit(constructorDeclaration as ConstructorDeclaration, data): DebugOutput(constructorDeclaration) AppendIndentation() _sourceText.Append(GetModifier(constructorDeclaration.Modifier, Modifier.Public)) _sourceText.Append("def constructor") _sourceText.Append("(") AppendParameters(constructorDeclaration.Parameters) _sourceText.Append("):") AppendNewLine() AddIndentLevel() ci = constructorDeclaration.ConstructorInitializer if (ci != null): AppendIndentation() if (ci.ConstructorInitializerType == ConstructorInitializerType.Base): _sourceText.Append("super") else: _sourceText.Append("self") _sourceText.Append(GetParameters(ci.Arguments)) AppendNewLine() DebugOutput(constructorDeclaration.Body) constructorDeclaration.Body.AcceptChildren(self, data) RemoveIndentLevel() AppendIndentation() AppendNewLine() return null override def Visit(destructorDeclaration as DestructorDeclaration, data): DebugOutput(destructorDeclaration) AppendNewLine() AppendIndentation() _sourceText.Append("def destructor():") AppendNewLine() AddIndentLevel() destructorDeclaration.Body.AcceptChildren(self, data) RemoveIndentLevel() return null def GetOperatorName(token as int, opType as OperatorType): if opType == OperatorType.Binary: return "op_Addition" if token == Tokens.Plus return "op_Subtraction" if token == Tokens.Minus return "op_Multiply" if token == Tokens.Times return "op_Division" if token == Tokens.Div return "op_Modulus" if token == Tokens.Mod return "op_Equality" if token == Tokens.Equal return "op_Inequality" if token == Tokens.NotEqual return "op_LessThan" if token == Tokens.LessThan return "op_LessThanOrEqual" if token == Tokens.LessEqual return "op_GreaterThan" if token == Tokens.GreaterThan return "op_GreaterThanOrEqual" if token == Tokens.GreaterEqual return "op_BitwiseOr" if token == Tokens.BitwiseOr return "op_BitwiseAnd" if token == Tokens.BitwiseAnd return "op_" 
override def Visit(operatorDeclaration as OperatorDeclaration, data): declarator = operatorDeclaration.OpratorDeclarator DebugOutput(operatorDeclaration) AppendAttributes(operatorDeclaration.Attributes) AppendIndentation() _sourceText.Append(GetModifier(operatorDeclaration.Modifier, Modifier.Public)) _sourceText.Append("def ") _sourceText.Append(GetOperatorName(declarator.OverloadOperatorToken, declarator.OperatorType)) _sourceText.Append("(") _sourceText.Append(declarator.FirstParameterName) _sourceText.Append(" as ") _sourceText.Append(GetTypeString(declarator.FirstParameterType)) if (declarator.OperatorType == OperatorType.Binary): _sourceText.Append(", ") _sourceText.Append(declarator.FirstParameterName) _sourceText.Append(" as ") _sourceText.Append(GetTypeString(declarator.FirstParameterType)) _sourceText.Append(") as ") _sourceText.Append(GetTypeString(declarator.TypeReference)) if (operatorDeclaration.Body != null): _sourceText.Append(":") AppendNewLine() AddIndentLevel() operatorDeclaration.Body.AcceptChildren(self, data) RemoveIndentLevel() AppendIndentation() AppendNewLine() return null override def Visit(indexerDeclaration as IndexerDeclaration, data): DebugOutput(indexerDeclaration) AppendAttributes(indexerDeclaration.Attributes) AppendIndentation() _sourceText.Append(GetModifier(indexerDeclaration.Modifier, Modifier.Public)) _sourceText.Append("Indexer(") AppendParameters(indexerDeclaration.Parameters) _sourceText.Append(") as ") _sourceText.Append(GetTypeString(indexerDeclaration.TypeReference)) _sourceText.Append(":") AppendNewLine() AddIndentLevel() if (indexerDeclaration.GetRegion != null): indexerDeclaration.GetRegion.AcceptVisitor(self, data) if (indexerDeclaration.SetRegion != null): indexerDeclaration.SetRegion.AcceptVisitor(self, data) RemoveIndentLevel() AppendIndentation() AppendNewLine() return null override def Visit(blockStatement as BlockStatement, data): DebugOutput(blockStatement) blockStatement.AcceptChildren(self, data) return null 
override def Visit(statementExpression as StatementExpression, data): DebugOutput(statementExpression) AppendIndentation() _sourceText.Append(statementExpression.Expression.AcceptVisitor(self, statementExpression).ToString()) AppendNewLine() return null override def Visit(localVariableDeclaration as LocalVariableDeclaration, data): DebugOutput(localVariableDeclaration) for localVar as VariableDeclaration in localVariableDeclaration.Variables: if not _inmacro: AppendIndentation() _sourceText.Append(GetModifier(localVariableDeclaration.Modifier, Modifier.Private)) _sourceText.Append(localVar.Name) if not _inmacro: _sourceText.Append(" as ") _sourceText.Append(GetTypeString(localVariableDeclaration.Type)) if (localVar.Initializer != null): _sourceText.Append(" = ") _sourceText.Append(localVar.Initializer.AcceptVisitor(self, data).ToString()) AppendNewLine() return null override def Visit(emptyStatement as EmptyStatement, data): DebugOutput(emptyStatement) AppendNewLine() return null override def Visit(returnStatement as ReturnStatement, data): DebugOutput(returnStatement) AppendIndentation() _sourceText.Append("return") if (returnStatement.ReturnExpression != null): _sourceText.Append(" ") _sourceText.Append(returnStatement.ReturnExpression.AcceptVisitor(self, data).ToString()) AppendNewLine() return null override def Visit(ifStatement as IfStatement, data): DebugOutput(ifStatement) AppendIndentation() unless data isa IfElseStatement ie as InvocationExpression = GetEventHandlerRaise(ifStatement) if ie == null or data isa IfElseStatement: _sourceText.Append("if ") _sourceText.Append(ifStatement.Condition.AcceptVisitor(self, null).ToString()) _sourceText.Append(":") AppendNewLine() AddIndentLevel() ifStatement.EmbeddedStatement.AcceptVisitor(self, null) RemoveIndentLevel() AppendIndentation() AppendNewLine() else: _sourceText.Append(ie.AcceptVisitor(self, null)) AppendNewLine() return null override def Visit(ifElseStatement as IfElseStatement, data): 
DebugOutput(ifElseStatement) AppendIndentation() unless data isa IfElseStatement _sourceText.Append("if ") _sourceText.Append(ifElseStatement.Condition.AcceptVisitor(self, null).ToString()) _sourceText.Append(":") AppendNewLine() AddIndentLevel() ifElseStatement.EmbeddedStatement.AcceptVisitor(self, null) RemoveIndentLevel() elseStatement = ifElseStatement.EmbeddedElseStatement if elseStatement isa IfStatement or elseStatement isa IfElseStatement: // convert to elif AppendIndentation() _sourceText.Append("el") elseStatement.AcceptVisitor(self, ifElseStatement) else: AppendIndentation() _sourceText.Append("else:") AppendNewLine() AddIndentLevel() elseStatement.AcceptVisitor(self, null) RemoveIndentLevel() AppendIndentation() AppendNewLine() return null override def Visit(whileStatement as WhileStatement, data): DebugOutput(whileStatement) AppendIndentation() _sourceText.Append("while ") _sourceText.Append(whileStatement.Condition.AcceptVisitor(self, data).ToString()) _sourceText.Append(":") AppendNewLine() AddIndentLevel() whileStatement.EmbeddedStatement.AcceptVisitor(self, data) RemoveIndentLevel() AppendIndentation() AppendNewLine() return null override def Visit(doWhileStatement as DoWhileStatement, data): DebugOutput(doWhileStatement) AppendIndentation() _sourceText.Append("while true:") AppendNewLine() AddIndentLevel() doWhileStatement.EmbeddedStatement.AcceptVisitor(self, data) AppendIndentation() _sourceText.Append("break unless ") _sourceText.Append(doWhileStatement.Condition.AcceptVisitor(self, data).ToString()) AppendNewLine() RemoveIndentLevel() AppendIndentation() AppendNewLine() return null override def Visit(forStatement as ForStatement, data): DebugOutput(forStatement) // TODO: Simplify simple for(int * = *; * < *, *++) statements // if you do so, do it also in the C#->VB.NET converter if forStatement.Initializers != null: for o in forStatement.Initializers: if (o isa Expression): expr as Expression = o AppendIndentation() 
_sourceText.Append(expr.AcceptVisitor(self, data).ToString()) AppendNewLine() if (o isa Statement): cast(Statement, o).AcceptVisitor(self, data) AppendIndentation() _sourceText.Append("while ") if (forStatement.Condition == null): _sourceText.Append("true") else: _sourceText.Append(forStatement.Condition.AcceptVisitor(self, data).ToString()) _sourceText.Append(":") AppendNewLine() AddIndentLevel() forStatement.EmbeddedStatement.AcceptVisitor(self, data) if (forStatement.Iterator != null): for stmt as Statement in forStatement.Iterator: stmt.AcceptVisitor(self, data) RemoveIndentLevel() AppendIndentation() AppendNewLine() return null override def Visit(labelStatement as LabelStatement, data): DebugOutput(labelStatement) AppendIndentation() _sourceText.Append(":") _sourceText.Append(labelStatement.Label) AppendNewLine() return null override def Visit(gotoStatement as GotoStatement, data): DebugOutput(gotoStatement) AppendIndentation() _sourceText.Append("goto ") _sourceText.Append(gotoStatement.Label) AppendNewLine() return null override def Visit(switchStatement as SwitchStatement, data): DebugOutput(switchStatement) /* AppendIndentation() _sourceText.Append("given ") _sourceText.Append(switchStatement.SwitchExpression.AcceptVisitor(self, data).ToString()) _sourceText.Append(":") AppendNewLine() AddIndentLevel() for section as SwitchSection in switchStatement.SwitchSections: AppendIndentation() _sourceText.Append("when ") for i in range(section.SwitchLabels.Count): label as Expression = section.SwitchLabels[i] if (label == null): _sourceText.Append("default") else: _sourceText.Append(label.AcceptVisitor(self, data)) if (i + 1 < section.SwitchLabels.Count): _sourceText.Append(", ") _sourceText.Append(":") AppendNewLine() AddIndentLevel() section.AcceptVisitor(self, data) RemoveIndentLevel() RemoveIndentLevel() AppendIndentation() AppendNewLine() */ AppendIndentation() _sourceText.Append("selector = ") 
_sourceText.Append(switchStatement.SwitchExpression.AcceptVisitor(self, data).ToString()) AppendNewLine() first = true for section as SwitchSection in switchStatement.SwitchSections: AppendIndentation() if first: first = false else: _sourceText.Append("el") if section.SwitchLabels.Count == 1 and section.SwitchLabels[0] == null: _sourceText.Append("se") else: _sourceText.Append("if ") for i in range(section.SwitchLabels.Count): label as Expression = section.SwitchLabels[i] if (label == null): _sourceText.Append("true") else: _sourceText.Append("selector == ") _sourceText.Append(label.AcceptVisitor(self, data)) if i + 1 < section.SwitchLabels.Count: _sourceText.Append(" or ") _sourceText.Append(":") AppendNewLine() AddIndentLevel() section.AcceptVisitor(self, data) RemoveIndentLevel() AppendIndentation() AppendNewLine() return null override def Visit(breakStatement as BreakStatement, data): DebugOutput(breakStatement) AppendIndentation() _sourceText.Append("break") AppendNewLine() return null override def Visit(continueStatement as ContinueStatement, data): DebugOutput(continueStatement) AppendIndentation() _sourceText.Append("continue // WARNING !!! 
Please check if the converter made an endless loop") AppendNewLine() return null override def Visit(gotoCaseStatement as GotoCaseStatement, data): DebugOutput(gotoCaseStatement) AppendIndentation() _sourceText.Append("goto case ") if (gotoCaseStatement.CaseExpression == null): _sourceText.Append("default") else: _sourceText.Append(gotoCaseStatement.CaseExpression.AcceptVisitor(self, data)) AppendNewLine() return null override def Visit(foreachStatement as ForeachStatement, data): DebugOutput(foreachStatement) AppendIndentation() _sourceText.Append("for ") _sourceText.Append(foreachStatement.VariableName) _sourceText.Append(" as ") _sourceText.Append(self.GetTypeString(foreachStatement.TypeReference)) _sourceText.Append(" in ") _sourceText.Append(foreachStatement.Expression.AcceptVisitor(self, data)) _sourceText.Append(":") AppendNewLine() AddIndentLevel() foreachStatement.EmbeddedStatement.AcceptVisitor(self, data) RemoveIndentLevel() AppendIndentation() AppendNewLine() return null override def Visit(lockStatement as LockStatement, data): DebugOutput(lockStatement) AppendIndentation() _sourceText.Append("lock ") _sourceText.Append(lockStatement.LockExpression.AcceptVisitor(self, data)) _sourceText.Append(":") AppendNewLine() AddIndentLevel() lockStatement.EmbeddedStatement.AcceptVisitor(self, data) RemoveIndentLevel() AppendIndentation() AppendNewLine() return null override def Visit(usingStatement as UsingStatement, data): DebugOutput(usingStatement) AppendIndentation() _sourceText.Append("using ") _inmacro = true usingStatement.UsingStmnt.AcceptVisitor(self, data) _inmacro = false //HACK: [DH] chopped off trailing newline (crlf) here: _sourceText.Remove(_sourceText.Length - 2,2) _sourceText.Append(":") AppendNewLine() AddIndentLevel() usingStatement.EmbeddedStatement.AcceptVisitor(self, data) RemoveIndentLevel() AppendIndentation() AppendNewLine() return null override def Visit(tryCatchStatement as TryCatchStatement, data): DebugOutput(tryCatchStatement) 
AppendIndentation() _sourceText.Append("try:") AppendNewLine() AddIndentLevel() tryCatchStatement.StatementBlock.AcceptVisitor(self, data) RemoveIndentLevel() if (tryCatchStatement.CatchClauses != null): generated = 0 for catchClause as CatchClause in tryCatchStatement.CatchClauses: AppendIndentation() _sourceText.Append("except") if (catchClause.Type != null): _sourceText.Append(" ") if (catchClause.VariableName == null): _sourceText.Append("exception") if (tryCatchStatement.CatchClauses.Count > 1): _sourceText.Append(generated.ToString()) generated += 1 else: _sourceText.Append(catchClause.VariableName) _sourceText.Append(" as ") _sourceText.Append(catchClause.Type) _sourceText.Append(":") AppendNewLine() AddIndentLevel() catchClause.StatementBlock.AcceptVisitor(self, data) RemoveIndentLevel() if (tryCatchStatement.FinallyBlock != null): AppendIndentation() _sourceText.Append("ensure:") AppendNewLine() AddIndentLevel() tryCatchStatement.FinallyBlock.AcceptVisitor(self, data) RemoveIndentLevel() AppendIndentation() AppendNewLine() return null override def Visit(throwStatement as ThrowStatement, data): DebugOutput(throwStatement) AppendIndentation() _sourceText.Append("raise") if throwStatement.ThrowExpression != null: _sourceText.Append(" ") _sourceText.Append(throwStatement.ThrowExpression.AcceptVisitor(self, data).ToString()) AppendNewLine() return null override def Visit(fixedStatement as FixedStatement, data): DebugOutput(fixedStatement) _errors.Error(-1, -1, "fixed statement not supported by Boo") return null override def Visit(checkedStatement as CheckedStatement, data): DebugOutput(checkedStatement) AppendIndentation() _sourceText.Append("checked:") AppendNewLine() AddIndentLevel() checkedStatement.Block.AcceptVisitor(self, data) RemoveIndentLevel() return null override def Visit(uncheckedStatement as UncheckedStatement, data): DebugOutput(uncheckedStatement) AppendIndentation() _sourceText.Append("unchecked:") AppendNewLine() AddIndentLevel() 
uncheckedStatement.Block.AcceptVisitor(self, data) RemoveIndentLevel() return null def ConvertCharLiteral(ch as Char): b = StringBuilder("Char.Parse('") ConvertChar(ch, b) b.Append("')") return b.ToString() def ConvertChar(ch as Char, b as StringBuilder): // TODO: Are there any more char literals in Boo? if ch == char('\n'): b.Append("\\n") elif ch == char('\r'): b.Append("\\r") elif ch == char('\0'): b.Append("\\0") elif ch == char('\a'): b.Append("\\a") elif ch == char('\b'): b.Append("\\b") elif ch == char('\f'): b.Append("\\f") elif ch == char('\''): b.Append("\\'") elif ch == char('\\'): b.Append("\\\\") elif char.IsControl(ch): // TODO: Is this possible in boo? b.Append("\\u") b.Append(cast(int, ch)) else: b.Append(ch) def ConvertString(str as string): b = StringBuilder() for c as Char in str: ConvertChar(c, b) return b.ToString() override def Visit(primitiveExpression as PrimitiveExpression, data): DebugOutput(primitiveExpression) if (primitiveExpression.Value == null): return "null" if (primitiveExpression.Value isa bool): if cast(bool, primitiveExpression.Value): return "true" else: return "false" val = primitiveExpression.Value if val isa string: return "'" + ConvertString(val) + "'" if val isa Char: return ConvertCharLiteral(cast(Char, val)) if val isa double: return cast(double, val).ToString(System.Globalization.CultureInfo.InvariantCulture) if val isa single: return cast(single, val).ToString(System.Globalization.CultureInfo.InvariantCulture) // TODO: How to express decimals in Boo? 
/*if (primitiveExpression.Value isa decimal) { return String.Concat(primitiveExpression.Value.ToString(), "D") } */ return primitiveExpression.Value override def Visit(binaryOperatorExpression as BinaryOperatorExpression, data): DebugOutput(binaryOperatorExpression) left = binaryOperatorExpression.Left.AcceptVisitor(self, data).ToString() right = binaryOperatorExpression.Right.AcceptVisitor(self, data).ToString() opType = binaryOperatorExpression.Op op = " " + opType.ToString() + " " op = " + " if opType == BinaryOperatorType.Add op = " - " if opType == BinaryOperatorType.Subtract op = " * " if opType == BinaryOperatorType.Multiply op = " / " if opType == BinaryOperatorType.Divide op = " % " if opType == BinaryOperatorType.Modulus // TODO: Bitshift and XOR doesn't work op = " << " if opType == BinaryOperatorType.ShiftLeft op = " >> " if opType == BinaryOperatorType.ShiftRight op = " & " if opType == BinaryOperatorType.BitwiseAnd op = " | " if opType == BinaryOperatorType.BitwiseOr op = " ^ " if opType == BinaryOperatorType.ExclusiveOr op = " and " if opType == BinaryOperatorType.LogicalAnd op = " or " if opType == BinaryOperatorType.LogicalOr op = " as " if opType == BinaryOperatorType.AS op = " isa " if opType == BinaryOperatorType.IS op = " != " if opType == BinaryOperatorType.InEquality op = " == " if opType == BinaryOperatorType.Equality op = " > " if opType == BinaryOperatorType.GreaterThan op = " >= " if opType == BinaryOperatorType.GreaterThanOrEqual op = " < " if opType == BinaryOperatorType.LessThan op = " <= " if opType == BinaryOperatorType.LessThanOrEqual return left + op + right override def Visit(parenthesizedExpression as ParenthesizedExpression, data): DebugOutput(parenthesizedExpression) innerExpr = parenthesizedExpression.Expression.AcceptVisitor(self, data).ToString() // parenthesized cast expressions evaluate to a single 'method call' and don't need // to be parenthesized anymore like in C#. 
// C# "((Control)sender).Visible = false;" -> "cast(Control, sender).Visible = false" if (parenthesizedExpression.Expression isa CastExpression): return innerExpr else: return "(" + innerExpr + ")" override def Visit(invocationExpression as InvocationExpression, data): DebugOutput(invocationExpression) target = invocationExpression.TargetObject.AcceptVisitor(self, data) return target + GetParameters(invocationExpression.Parameters) override def Visit(identifierExpression as IdentifierExpression, data): DebugOutput(identifierExpression) return identifierExpression.Identifier override def Visit(typeReferenceExpression as TypeReferenceExpression, data): DebugOutput(typeReferenceExpression) return GetTypeString(typeReferenceExpression.TypeReference) override def Visit(unaryOperatorExpression as UnaryOperatorExpression, data): DebugOutput(unaryOperatorExpression) expr = unaryOperatorExpression.Expression.AcceptVisitor(self, data).ToString() opType = unaryOperatorExpression.Op op = opType.ToString() + " " // TODO: Bitwise not operator op = "~" if opType == UnaryOperatorType.BitNot op = "--" if opType == UnaryOperatorType.Decrement op = "++" if opType == UnaryOperatorType.Increment op = "-" if opType == UnaryOperatorType.Minus op = "not " if opType == UnaryOperatorType.Not op = "" if opType == UnaryOperatorType.Plus // include these though they are not supported by boo op = "*" if opType == UnaryOperatorType.Star op = "&" if opType == UnaryOperatorType.BitWiseAnd if opType == UnaryOperatorType.PostDecrement: return "Math.Max(${expr}, --${expr})" if opType == UnaryOperatorType.PostIncrement: return "Math.Min(${expr}, ++${expr})" return op + expr override def Visit(assignmentExpression as AssignmentExpression, data): DebugOutput(assignmentExpression) left = assignmentExpression.Left.AcceptVisitor(self, data).ToString() right = assignmentExpression.Right.AcceptVisitor(self, data).ToString() op as string = null opType = assignmentExpression.Op op = " = " if opType == 
AssignmentOperatorType.Assign op = " += " if opType == AssignmentOperatorType.Add op = " -= " if opType == AssignmentOperatorType.Subtract op = " *= " if opType == AssignmentOperatorType.Multiply op = " /= " if opType == AssignmentOperatorType.Divide return left + op + right if op != null // TODO: Bitshift operators don't work op = " << " if opType == AssignmentOperatorType.ShiftLeft op = " >> " if opType == AssignmentOperatorType.ShiftRight op = " % " if opType == AssignmentOperatorType.Modulus op = " ^ " if opType == AssignmentOperatorType.ExclusiveOr op = " & " if opType == AssignmentOperatorType.BitwiseAnd op = " | " if opType == AssignmentOperatorType.BitwiseOr return left + " = " + left + op + right if op != null return left + " " + opType.ToString() + " " + right override def Visit(sizeOfExpression as SizeOfExpression, data): DebugOutput(sizeOfExpression) _errors.Error(-1, -1, "sizeof expression not supported by Boo") return null override def Visit(typeOfExpression as TypeOfExpression, data): DebugOutput(typeOfExpression) return "typeof(" + GetTypeString(typeOfExpression.TypeReference) + ")" override def Visit(checkedExpression as CheckedExpression, data): DebugOutput(checkedExpression) _errors.Error(-1, -1, "checked expression not supported by Boo") return null override def Visit(uncheckedExpression as UncheckedExpression, data): DebugOutput(uncheckedExpression) _errors.Error(-1, -1, "unchecked expression not supported by Boo") return null override def Visit(pointerReferenceExpression as PointerReferenceExpression, data): _errors.Error(-1, -1, "pointer reference (->) not supported by Boo") return "" override def Visit(castExpression as CastExpression, data): DebugOutput(castExpression) expression = castExpression.Expression.AcceptVisitor(self, data).ToString() targetType = GetTypeString(castExpression.CastTo) return "cast(${targetType}, ${expression})" override def Visit(stackAllocExpression as StackAllocExpression, data): _errors.Error(-1, -1, "stack alloc 
expression not supported by Boo") return "" override def Visit(indexerExpression as IndexerExpression, data): DebugOutput(indexerExpression) target = indexerExpression.TargetObject.AcceptVisitor(self, data) parameters = GetExpressionList(indexerExpression.Indices) return "${target}[${parameters}]" override def Visit(thisReferenceExpression as ThisReferenceExpression, data): DebugOutput(thisReferenceExpression) return "self" override def Visit(baseReferenceExpression as BaseReferenceExpression, data): DebugOutput(baseReferenceExpression) return "super" override def Visit(objectCreateExpression as ObjectCreateExpression, data): DebugOutput(objectCreateExpression) if (IsEventHandlerCreation(objectCreateExpression)): expr as Expression = objectCreateExpression.Parameters[0] if expr isa FieldReferenceExpression: return cast(FieldReferenceExpression, expr).FieldName else: return expr.AcceptVisitor(self, data).ToString() else: targetType = GetTypeString(objectCreateExpression.CreateType) parameters = GetParameters(objectCreateExpression.Parameters) return targetType + parameters override def Visit(ace as ArrayCreateExpression, data): DebugOutput(ace) if (ace.ArrayInitializer != null and ace.ArrayInitializer.CreateExpressions != null): return ace.ArrayInitializer.AcceptVisitor(self, data) if (ace.Parameters != null and ace.Parameters.Count > 0): b = StringBuilder("array(") for i in range(ace.Parameters.Count - 1): b.Append("(") b.Append(GetTypeString(ace.CreateType)) for i in range(ace.Parameters.Count - 1): b.Append(")") b.Append(", ") b.Append(GetExpressionList(cast(ArrayCreationParameter, ace.Parameters[0]).Expressions)) b.Append(")") return b.ToString() else: return "(,)" override def Visit(parameterDeclarationExpression as ParameterDeclarationExpression, data): // should never be called: raise NotImplementedException() override def Visit(fieldReferenceExpression as FieldReferenceExpression, data): DebugOutput(fieldReferenceExpression) target = 
fieldReferenceExpression.TargetObject.AcceptVisitor(self, data) return "${target}.${fieldReferenceExpression.FieldName}" override def Visit(directionExpression as DirectionExpression, data): DebugOutput(directionExpression) // there is nothing in a Boo method call for out & ref return directionExpression.Expression.AcceptVisitor(self, data) override def Visit(arrayInitializerExpression as ArrayInitializerExpression, data): b = StringBuilder("(") b.Append(GetExpressionList(arrayInitializerExpression.CreateExpressions)) b.Append(",") if arrayInitializerExpression.CreateExpressions.Count < 2 b.Append(")") return b.ToString() override def Visit(conditionalExpression as ConditionalExpression, data): // TODO: Implement IIF for Boo condition = conditionalExpression.TestCondition.AcceptVisitor(self, data).ToString() trueExpression = conditionalExpression.TrueExpression.AcceptVisitor(self, data).ToString() falseExpression = conditionalExpression.FalseExpression.AcceptVisitor(self, data).ToString() return "iif(${condition}, ${trueExpression}, ${falseExpression})" #endregion def ConvertTypeString(typeString as string): return "single" if typeString == "float" if typeString.StartsWith("System."): convertedType = BooAmbience.TypeConversionTable[typeString] else: convertedType = BooAmbience.TypeConversionTable["System." + typeString] return convertedType if convertedType != null return typeString def GetTypeString(typeRef as TypeReference): if (typeRef == null): _errors.Error(-1, -1, "Got empty type string (internal error, check generated source code for empty types") return "!Got empty type string!" 
b = StringBuilder() if (typeRef.RankSpecifier != null): for i in range(typeRef.RankSpecifier.Length): // b.Append("(") // Emulate multidimensional arrays as jagged arrays for j in range(typeRef.RankSpecifier[i]): b.Append("(") b.Append(ConvertTypeString(typeRef.Type)) if (typeRef.RankSpecifier != null): for i in range(typeRef.RankSpecifier.Length): for j in range(typeRef.RankSpecifier[i]): b.Append(")") if (typeRef.PointerNestingLevel > 0): // append stars so the problem is visible in the generated source code for i in range(typeRef.PointerNestingLevel): b.Append("*") _errors.Error(-1, -1, "Pointer types are not supported by Boo") return b.ToString() def GetModifier(modifier as Modifier, default as Modifier): builder = StringBuilder() // TODO: Check if modifiers are called like this in Boo if ((modifier & Modifier.Public) == Modifier.Public): builder.Append("public ") if default != Modifier.Public elif ((modifier & (Modifier.Protected | Modifier.Internal)) == (Modifier.Protected | Modifier.Internal)): builder.Append("protected internal ") elif ((modifier & Modifier.Internal) == Modifier.Internal): builder.Append("internal ") if default != Modifier.Internal elif ((modifier & Modifier.Protected) == Modifier.Protected): builder.Append("protected ") if default != Modifier.Protected elif ((modifier & Modifier.Private) == Modifier.Private): builder.Append("private ") if default != Modifier.Private builder.Append("static ") if (modifier & Modifier.Static) == Modifier.Static builder.Append("virtual ") if (modifier & Modifier.Virtual) == Modifier.Virtual builder.Append("abstract ") if (modifier & Modifier.Abstract) == Modifier.Abstract builder.Append("override ") if (modifier & Modifier.Override) == Modifier.Override //builder.Append("") if (modifier & Modifier.New) == Modifier.New builder.Append("final ") if (modifier & Modifier.Sealed) == Modifier.Sealed builder.Append("final ") if (modifier & Modifier.Readonly) == Modifier.Readonly builder.Append("final ") if (modifier & 
Modifier.Const) == Modifier.Const builder.Append("extern ") if (modifier & Modifier.Extern) == Modifier.Extern builder.Append("volatile ") if (modifier & Modifier.Volatile) == Modifier.Volatile builder.Append("unsafe ") if (modifier & Modifier.Unsafe) == Modifier.Unsafe if ((modifier & Modifier.Volatile) == Modifier.Volatile): _errors.Error(-1, -1, "'volatile' modifier not convertable") if ((modifier & Modifier.Unsafe) == Modifier.Unsafe): _errors.Error(-1, -1, "'unsafe' modifier not convertable") return builder.ToString() def GetParameters(l as ArrayList): return "(" + GetExpressionList(l) + ")" def GetExpressionList(l as ArrayList): if (l == null): return "" sb = StringBuilder() for exp as Expression in l: if sb.Length > 0: sb.Append(", ") sb.Append(exp.AcceptVisitor(self, null)) return sb.ToString() def AppendParameters(parameters as ArrayList): return if parameters == null first = true for pde as ParameterDeclarationExpression in parameters: if first: first = false else: _sourceText.Append(", ") AppendAttributes(pde.Attributes) _sourceText.Append("ref ") if pde.ParamModifiers == ParamModifiers.Ref _sourceText.Append("out ") if pde.ParamModifiers == ParamModifiers.Out _sourceText.Append("*") if pde.ParamModifiers == ParamModifiers.Params _sourceText.Append(pde.ParameterName) _sourceText.Append(" as ") _sourceText.Append(GetTypeString(pde.TypeReference)) def AppendAttributes(attr as ArrayList): return if attr == null for section as AttributeSection in attr: section.AcceptVisitor(self, null) def GetEventHandlerRaise(ifStatement as IfStatement) as InvocationExpression: op = ifStatement.Condition as BinaryOperatorExpression if (op != null and op.Op == BinaryOperatorType.InEquality): if op.Left isa IdentifierExpression and op.Right isa PrimitiveExpression and (cast(PrimitiveExpression,op.Right).Value == null): identifier as string = cast(IdentifierExpression,op.Left).Identifier se as StatementExpression = null if (ifStatement.EmbeddedStatement isa 
StatementExpression): se = ifStatement.EmbeddedStatement elif (ifStatement.EmbeddedStatement.Children.Count == 1): se = ifStatement.EmbeddedStatement.Children[0] as StatementExpression if se != null: ie = se.Expression as InvocationExpression if ie != null: ex as Expression = ie.TargetObject methodName as string = null if (ex isa IdentifierExpression): methodName = cast(IdentifierExpression,ex).Identifier elif (ex isa FieldReferenceExpression): fre as FieldReferenceExpression = ex if (fre.TargetObject isa ThisReferenceExpression): methodName = fre.FieldName if methodName != null and methodName == identifier: for o in _currentType.Children: ed = o as EventDeclaration if ed != null: if (ed.Name == methodName): return ie for field as VariableDeclaration in ed.VariableDeclarators: if (field.Name == methodName): return ie return null def IsEventHandlerCreation(expr as Expression): if (expr isa ObjectCreateExpression): oce as ObjectCreateExpression = expr if (oce.Parameters.Count == 1): expr = oce.Parameters[0] methodName as string = null if (expr isa IdentifierExpression): methodName = cast(IdentifierExpression,expr).Identifier elif (expr isa FieldReferenceExpression): methodName = cast(FieldReferenceExpression,expr).FieldName if (methodName != null): for o in _currentType.Children: if (o isa MethodDeclaration and cast(MethodDeclaration,o).Name == methodName): return true return false; nant-0.9.5~git20110729.r1.202a430/extras/SharpDevelop/BooBinding/src/BooishView.boo000066400000000000000000000051441161462365500267270ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rodrigobamboo@gmail.com) // All rights reserved. // // BooBinding is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. 
// // BooBinding is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with BooBinding; if not, write to the Free Software // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA #endregion namespace BooBinding import System import System.Drawing import System.Windows.Forms import ICSharpCode.SharpDevelop.Gui import ICSharpCode.Core.Services import ICSharpCode.SharpDevelop.Services import booish.gui class CompletionWindowImageProvider(booish.gui.ICompletionWindowImageProvider): _classBrowserIconService as ClassBrowserIconsService def constructor(): self._classBrowserIconService = ServiceManager.Services.GetService(ClassBrowserIconsService) ImageList as ImageList: get: return _classBrowserIconService.ImageList NamespaceIndex as int: get: return _classBrowserIconService.NamespaceIndex ClassIndex as int: get: return _classBrowserIconService.ClassIndex InterfaceIndex as int: get: return _classBrowserIconService.InterfaceIndex EnumIndex as int: get: return _classBrowserIconService.EnumIndex StructIndex as int: get: return _classBrowserIconService.StructIndex CallableIndex as int: get: return _classBrowserIconService.DelegateIndex MethodIndex as int: get: return _classBrowserIconService.MethodIndex FieldIndex as int: get: return _classBrowserIconService.FieldIndex LiteralIndex as int: get: return _classBrowserIconService.LiteralIndex PropertyIndex as int: get: return _classBrowserIconService.PropertyIndex EventIndex as int: get: return _classBrowserIconService.EventIndex class BooishView(AbstractPadContent): _booish as InteractiveInterpreterControl def constructor(): super("booish", "Boo.ProjectIcon") _booish = InteractiveInterpreterControl(Font: System.Drawing.Font("Lucida Console", 10)) 
_booish.CompletionWindowImageProvider = CompletionWindowImageProvider() _booish.Interpreter.SetValue("Workbench", WorkbenchSingleton.Workbench) override Control as Control: get: return _booish nant-0.9.5~git20110729.r1.202a430/extras/SharpDevelop/BooBinding/src/CodeCompletion/000077500000000000000000000000001161462365500270505ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/extras/SharpDevelop/BooBinding/src/CodeCompletion/BooParser.boo000077500000000000000000000111711161462365500314510ustar00rootroot00000000000000#region license // Copyright (c) 2004, Daniel Grunwald (daniel@danielgrunwald.de) // All rights reserved. // // BooBinding is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. // // BooBinding is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. 
// // You should have received a copy of the GNU General Public License // along with BooBinding; if not, write to the Free Software // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA #endregion namespace BooBinding.CodeCompletion import System import System.Collections import System.Diagnostics import System.IO import ICSharpCode.Core.Services import ICSharpCode.SharpDevelop.Services import ICSharpCode.SharpDevelop.Internal.Project import SharpDevelop.Internal.Parser import Boo.Lang.Compiler import Boo.Lang.Compiler.IO import Boo.Lang.Compiler.Pipelines import Boo.Lang.Compiler.Steps class BooParser(IParser): private _lexerTags as (string) LexerTags as (string): get: return _lexerTags set: _lexerTags = value ExpressionFinder as IExpressionFinder: get: return BooBinding.CodeCompletion.ExpressionFinder() def CanParse(fileName as string): return Path.GetExtension(fileName).ToLower() == ".boo" def CanParse(project as IProject): return project.ProjectType == BooBinding.BooLanguageBinding.LanguageName def Parse(fileName as string) as ICompilationUnitBase: /* compiler = BooCompiler() compiler.Parameters.Input.Add(FileInput(fileName)) return Parse(fileName, compiler) */ content as string using r = StreamReader(fileName): content = r.ReadToEnd() return Parse(fileName, content) def Parse(fileName as string, fileContent as string) as ICompilationUnitBase: print "Parse ${fileName} with content" cr = '\r'[0] ln = '\n'[0] linecount = 1 for c as Char in fileContent: linecount += 1 if c == ln lineLength = array(int, linecount) length = 0 i = 0 for c as Char in fileContent: if c == ln: lineLength[i] = length i += 1 length = 0 elif c != cr: length += 1 lineLength[i] = length compiler = BooCompiler() compiler.Parameters.Input.Add(StringInput(fileName, fileContent)) return Parse(fileName, lineLength, compiler) private def Parse(fileName as string, lineLength as (int), compiler as BooCompiler): compiler.Parameters.OutputWriter = StringWriter() 
compiler.Parameters.TraceSwitch.Level = TraceLevel.Warning; compiler.Parameters.Environment = ClosedEnvironment(Boo.Lang.Parser.ParserSettings(TabSize: 1)) compilePipe = Compile() num = compilePipe.Find(typeof(ProcessMethodBodiesWithDuckTyping)) visitor = Visitor(LineLength:lineLength) visitor.Cu.FileName = fileName compilePipe[num] = visitor // Remove unneccessary compiler steps while compilePipe.Count > num + 1: compilePipe.RemoveAt(compilePipe.Count - 1) num = compilePipe.Find(typeof(TransformCallableDefinitions)) compilePipe.RemoveAt(num) //for i in range(compilePipe.Count): // print compilePipe[i].ToString() compilePipe.BreakOnErrors = false compiler.Parameters.Pipeline = compilePipe try: compiler.Run() // somehow the SD parser thread goes into an endless loop if this flag is not set visitor.Cu.ErrorsDuringCompile = true //context.Errors.Count > 0 except e: ShowException(e) return visitor.Cu def CtrlSpace(parserService as IParserService, caretLine as int, caretColumn as int, fileName as string) as ArrayList: print "Ctrl-Space (${caretLine}/${caretColumn})" try: return Resolver().CtrlSpace(parserService, caretLine, caretColumn, fileName) except e: ShowException(e) return null def Resolve(parserService as IParserService, expression as string, caretLineNumber as int, caretColumn as int, fileName as string, fileContent as string) as ResolveResult: print "Resolve ${expression} (${caretLineNumber}/${caretColumn})" try: return Resolver().Resolve(parserService, expression, caretLineNumber, caretColumn, fileName, fileContent) except e: ShowException(e) return null static def ShowException(e as Exception): messageService as IMessageService = ServiceManager.Services.GetService(typeof(IMessageService)) messageService.ShowError(e.ToString()) ExpressionFinder.boo000066400000000000000000000210141161462365500327570ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/extras/SharpDevelop/BooBinding/src/CodeCompletion#region license // Copyright (c) 2004, Daniel 
Grunwald (daniel@danielgrunwald.de) // All rights reserved. // // BooBinding is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. // // BooBinding is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with BooBinding; if not, write to the Free Software // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA #endregion namespace BooBinding.CodeCompletion import System import System.Text import SharpDevelop.Internal.Parser class ExpressionFinder(IExpressionFinder): // The expression finder can find an expression in a text // inText is the full source code, offset the cursor position // example: "_var = 'bla'\n_var^\nprint _var" // where ^ is the cursor position // in that simple case the expression finder should return 'n_var'. // but also complex expressions like // 'filename.Substring(filename.IndexOf("var="))' // should be returned if the cursor is after the last ). 
// implementation note: the text after offset is irrelevant, so // every operation on the string aborts after reaching offset final static _closingBrackets = '}])' final static _openingBrackets = '{[(' def FindExpression(inText as string, offset as int) as string: return null if inText == null print "Trying quickfind for ${offset}" // OK, first try a kind of "quick find" i = offset + 1 forbidden = '"\'/#)]}' finish = '([{=+*<,:' start = -1 while i > 0: i -= 1 c = inText[i] if finish.IndexOf(c) >= 0: start = i + 1 break if forbidden.IndexOf(c) >= 0: print "Quickfind failed: got ${c}" break if Char.IsWhiteSpace(c): if i > 6 and inText.Substring(i - 6, 6) == "import": i -= 7 // include 'import' in the expression start = i + 1 break if start >= 0: if CheckString(inText, start, '/#"\'', '\r\n'): return GetExpression(inText, start, offset + 1) inText = SimplifyCode(inText, offset) if inText == null: print 'SimplifyCode returned null (cursor is in comment/string???)' return null // inText now has no comments or string literals, but the same meaning in // terms of the type system // Now go back until a finish-character or a whitespace character bracketStack = StringBuilder() // use Stack instead in .NET 2.0 i = inText.Length while i > 0: i -= 1 c = inText[i] if bracketStack.Length == 0 and (finish.IndexOf(c) >= 0 or Char.IsWhiteSpace(c)): return GetExpression(inText, i + 1, inText.Length) if _closingBrackets.IndexOf(c) >= 0: bracketStack.Append(c) bracket = _openingBrackets.IndexOf(c) if bracket >= 0: while Pop(bracketStack) > bracket: pass return null private def CheckString(text as string, offset as int, forbidden as string, finish as string): i = offset while i > 0: i -= 1 c = text[i] return false if forbidden.IndexOf(c) >= 0 return true if finish.IndexOf(c) >= 0 return true private def Pop(bracketStack as StringBuilder): return -1 if bracketStack.Length == 0 c = bracketStack[bracketStack.Length - 1] bracketStack.Length -= 1 return _closingBrackets.IndexOf(c) private 
def GetExpression(inText as string, start as int, end as int): b = StringBuilder() wasSpace = true i = start while i < end: c = inText[i] if Char.IsWhiteSpace(c): b.Append(' ') unless wasSpace wasSpace = true else: wasSpace = false b.Append(c) i += 1 print "Expression is '${b}'" return b.ToString().Trim() // TODO: We could need some unit tests for this. final static _elseIndex = 10 static _stateTable = ( // " ' \ \n $ { } # / * else /* 0: in Code */ ( 1 , 7 , 0 , 0 , 0 , 0 , 0 , 13 , 12 , 0 , 0 ), /* 1: after " */ ( 2 , 6 , 10 , 0 , 8 , 6 , 6 , 6 , 6 , 6 , 6 ), /* 2: after "" */ ( 3 , 7 , 0 , 0 , 0 , 0 , 0 , 13 , 12 , 0 , 0 ), /* 3: in """ */ ( 4 , 3 , 3 , 3 , 3 , 3 , 3 , 3 , 3 , 3 , 3 ), /* 4: in """, " */ ( 5 , 3 , 3 , 3 , 3 , 3 , 3 , 3 , 3 , 3 , 3 ), /* 5: in """, "" */ ( 0 , 3 , 3 , 3 , 3 , 3 , 3 , 3 , 3 , 3 , 3 ), /* 6: in "-string */ ( 0 , 6 , 10 , 0 , 8 , 6 , 6 , 6 , 6 , 6 , 6 ), /* 7: in '-string */ ( 7 , 0 , 11 , 0 , 7 , 7 , 7 , 7 , 7 , 7 , 7 ), /* 8: after $ in " */ ( 0 , 6 , 10 , 0 , 8 , 9 , 6 , 6 , 6 , 6 , 6 ), /* 9: in "{ */ ( 9 , 9 , 9 , 9 , 9 , 9 , 6 , 9 , 9 , 9 , 9 ), /* 10: after \ in " */ ( 6 , 6 , 6 , 0 , 6 , 6 , 6 , 6 , 6 , 6 , 6 ), /* 11: after \ in ' */ ( 7 , 7 , 7 , 0 , 7 , 7 , 7 , 7 , 7 , 7 , 7 ), /* 12: after / */ ( 1 , 7 , 0 , 0 , 0 , 0 , 0 , 0 , 13 ,-14 , 0 ), /* 13: line comment */ ( 13 , 13 , 13 , 0 , 13 , 13 , 13 , 13 , 13 , 13 , 13 ), /* 14: block comment*/ ( 14 , 14 , 14 , 14 , 14 , 14 , 14 , 14 , 14 , 15 , 14 ), /* 15: after * in bc*/ ( 14 , 14 , 14 , 14 , 14 , 14 , 14 , 14 ,-15 , 15 , 14 ) ) def SimplifyCode(inText as string, offset as int): """This method makes boo source code "simpler" by removing all comments and replacing all string litarals through string.Empty. 
Regular expressions literals are replaced with the simple regex /a/""" result = StringBuilder() inStringResult = StringBuilder(' ') state = 0 commentblocks = 0 inputTable = array(int, 128) for i in range(128): inputTable[i] = _elseIndex inputTable[ 34] = 0 // " inputTable[ 39] = 1 // ' inputTable[ 92] = 2 // \ inputTable[ 10] = 3 // \n inputTable[ 13] = 3 // \r inputTable[ 36] = 4 // $ inputTable[123] = 5 // { inputTable[125] = 6 // } inputTable[ 35] = 7 // # inputTable[ 47] = 8 // / inputTable[ 42] = 9 // * i = -1 while i < offset: i += 1 c as char = inText[i] charNum as int = cast(int, c) if charNum > 127: input = _elseIndex else: input = inputTable[charNum] action = _stateTable[state][input] if action == -14: // enter block comment commentblocks += 1 state = 14 elif action == -15: // leave block comment commentblocks -= 1 if commentblocks == 0: state = 0 else: state = 14 elif action == 9: // enter inner string expression (${...}) if state == 9: inStringResult.Append(c) else: inStringResult.Length = 1 state = action elif action == 0 or action == 12: // go to normal code if action == 12: // after / could be a regular expression, do a special check for that regexEnd = SkipRegularExpression(inText, i, offset) if regexEnd > 0: i = regexEnd result.Append('/a') elif regexEnd == -1: // cursor is in regex return null if state == 2 or (state >= 6 and state <= 11): result.Append("string.Empty") if state == 0 or state == 2 or state == 12: result.Append(c) state = action else: state = action if state == 0 or state == 2 or state == 12: // cursor is in normal code return result.ToString() elif state == 9: // cursor is in inner string expression (${...}) return inStringResult.ToString() else: // cursor is in comment/string return null final static slashChar = '/'[0] def SkipRegularExpression(inText as string, pos as int, maxOffset as int): """Skips the regular expression in inText at position pos. 
Returns end position of the ending / if successful or 0 is no regular expression was found at the location. Return -1 if maxOffset is inside the regular expression.""" if pos > 0: containsWhitespace = (inText[pos - 1] == '@'[0]) else: containsWhitespace = false return -1 if pos == maxOffset // cursor is after / -> cursor inside regex return 0 if inText[pos + 1] == slashChar // double // is comment, no regex i = pos while i < maxOffset: i += 1 if (not containsWhitespace) and Char.IsWhiteSpace(inText, i): return 0; // this is no regex return i if inText[i] == slashChar return -1 // maxOffset inside regex ExpressionTypeVisitor.boo000066400000000000000000000222531161462365500340570ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/extras/SharpDevelop/BooBinding/src/CodeCompletion#region license // Copyright (c) 2004, Daniel Grunwald (daniel@danielgrunwald.de) // All rights reserved. // // BooBinding is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. // // BooBinding is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. 
// // You should have received a copy of the GNU General Public License // along with BooBinding; if not, write to the Free Software // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA #endregion namespace BooBinding.CodeCompletion import System import System.Collections import SharpDevelop.Internal.Parser import Boo.Lang.Compiler.Ast class ExpressionTypeVisitor(DepthFirstVisitor): protected override def OnError(node as Node, error as Exception): BooParser.ShowException(error) super(node, error) [Property(ReturnType)] _returnType as IReturnType [Property(ReturnClass)] _returnClass as IClass [Property(Resolver)] _resolver as Resolver private def CreateReturnType(fullClassName as string): _returnClass = null if fullClassName == null: _returnType = null else: _returnType = BooBinding.CodeCompletion.ReturnType(fullClassName) private def CreateReturnType(reference as TypeReference): _returnClass = null if reference == null: _returnType = null else: _returnType = BooBinding.CodeCompletion.ReturnType(reference) private def CreateReturnType(c as IClass): _returnClass = c if c == null: _returnType = null else: _returnType = BooBinding.CodeCompletion.ReturnType(c) private def SetReturnType(r as IReturnType): _returnClass = null _returnType = r private def Debug(node): if node == null: print "-- null --" else: print "${node.ToString()} - ${node.GetType().FullName}" override def OnCallableBlockExpression(node as CallableBlockExpression): Debug(node) CreateReturnType("System.Delegate") override def OnMethodInvocationExpression(node as MethodInvocationExpression): Debug(node) Debug(node.Target) if node.Target isa MemberReferenceExpression: // call a method on another object mre as MemberReferenceExpression = node.Target Visit(mre.Target) if _returnClass == null and _returnType != null: _returnClass = _resolver.SearchType(_returnType.FullyQualifiedName) return if ProcessMethod(node, mre.Name, _returnClass) // try if the MemberReferenceExpression is a fully 
qualified class name (constructor call) ProcessMemberReferenceExpression(mre.Name) CreateReturnType(_returnClass) elif node.Target isa ReferenceExpression: re as ReferenceExpression = node.Target // try if it is a method on the current object return if ProcessMethod(node, re.Name, _resolver.CallingClass) // try if it is a builtin method return if ProcessMethod(node, re.Name, _resolver.BuiltinClass) // try if it is a class name -> constructor CreateReturnType(_resolver.SearchType(re.Name)) else: SetReturnType(null) private def ProcessMethod(node as MethodInvocationExpression, name as string, c as IClass) as bool: return false if c == null possibleOverloads = FindMethods(c, name, node.Arguments.Count) print "found ${possibleOverloads.Count} overloads (multiple overloads not supported yet)" if possibleOverloads.Count >= 1: SetReturnType(cast(IMethod, possibleOverloads[0]).ReturnType) return true /*// find best overload argumentTypes = array(IReturnType, node.Arguments.Count) for i as int in range(argumentTypes.Length): Visit(node.Arguments[i]) argumentTypes[i] = _returnType ... 
*/ return false private def FindMethods(c as IClass, name as string, arguments as int): possibleOverloads = ArrayList() for cl as IClass in c.ClassInheritanceTree: for m as IMethod in cl.Methods: if m.Parameters.Count == arguments and name == m.Name: possibleOverloads.Add(m) return possibleOverloads override def OnSlicingExpression(node as SlicingExpression): Debug(node) Visit(node.Target) slice as Slice = node.Indices[0] if (slice.End != null): // Boo slice, returns a part of the source -> same type as source return if _returnType != null and _returnType.ArrayDimensions != null and _returnType.ArrayDimensions.Length > 0: SetReturnType(BooBinding.CodeCompletion.ReturnType(_returnType.FullyQualifiedName, _returnType.ArrayDimensions[0 : _returnType.ArrayDimensions.Length - 1], 0)) return if _returnClass == null and _returnType != null: _returnClass = _resolver.SearchType(_returnType.FullyQualifiedName) if _returnClass != null: indexers = FindIndexer(_returnClass, 1) if indexers.Count > 0: SetReturnType(cast(IIndexer, indexers[0]).ReturnType) return SetReturnType(null) private def FindIndexer(c as IClass, arguments as int): possibleOverloads = ArrayList() for cl as IClass in c.ClassInheritanceTree: for m as IIndexer in cl.Indexer: if m.Parameters.Count == arguments: possibleOverloads.Add(m) return possibleOverloads override def OnBinaryExpression(node as BinaryExpression): Debug(node) CombineTypes(node.Left, node.Right) override def OnConditionalExpression(node as ConditionalExpression): Debug(node) CombineTypes(node.TrueValue, node.FalseValue) private def CombineTypes(a as Expression, b as Expression): Visit(a) override def OnReferenceExpression(node as ReferenceExpression): // Resolve reference (to a variable, field, parameter or type) return if ProcessMember(node.Name, _resolver.CallingClass) rt = _resolver.GetTypeFromLocal(node.Name) if rt != null: SetReturnType(rt) return if _resolver.IsNamespace(node.Name): SetReturnType(NamespaceReturnType(node.Name)) else: 
CreateReturnType(_resolver.SearchType(node.Name)) override def OnMemberReferenceExpression(node as MemberReferenceExpression): Debug(node) Visit(node.Target) ProcessMemberReferenceExpression(node.Name) private def ProcessMemberReferenceExpression(name as string): """Gets the return type of the MemberReferenceExpression with the specified name on the current return type.""" if _returnType isa NamespaceReturnType: name = _returnType.FullyQualifiedName + '.' + name if _resolver.IsNamespace(name): SetReturnType(NamespaceReturnType(name)) else: CreateReturnType(_resolver.SearchType(name)) return if _returnClass == null and _returnType != null: _returnClass = _resolver.SearchType(_returnType.FullyQualifiedName) return if ProcessMember(name, _returnClass) SetReturnType(null) private def ProcessMember(name as string, parentClass as IClass): return false if parentClass == null for cl as IClass in parentClass.ClassInheritanceTree: for c as IClass in cl.InnerClasses: if c.Name == name: CreateReturnType(c) return true for f as IField in cl.Fields: if f.Name == name: SetReturnType(f.ReturnType) return true for p as IProperty in cl.Properties: if p.Name == name: SetReturnType(p.ReturnType) return true for m as IMethod in cl.Methods: if m.Name == name: CreateReturnType("System.Delegate") return true return false override def OnTimeSpanLiteralExpression(node as TimeSpanLiteralExpression): CreateReturnType("System.TimeSpan") override def OnIntegerLiteralExpression(node as IntegerLiteralExpression): CreateReturnType("System.Int32") override def OnDoubleLiteralExpression(node as DoubleLiteralExpression): CreateReturnType("System.Double") override def OnNullLiteralExpression(node as NullLiteralExpression): CreateReturnType("System.Object") override def OnStringLiteralExpression(node as StringLiteralExpression): CreateReturnType("System.String") override def OnSelfLiteralExpression(node as SelfLiteralExpression): CreateReturnType(_resolver.CallingClass) override def 
OnSuperLiteralExpression(node as SuperLiteralExpression): CreateReturnType(_resolver.ParentClass) override def OnBoolLiteralExpression(node as BoolLiteralExpression): CreateReturnType("System.Boolean") override def OnRELiteralExpression(node as RELiteralExpression): CreateReturnType("System.Text.RegularExpressions.Regex") override def OnHashLiteralExpression(node as HashLiteralExpression): CreateReturnType("System.Collections.Hashtable") override def OnListLiteralExpression(node as ListLiteralExpression): CreateReturnType("System.Collections.ArrayList") override def OnArrayLiteralExpression(node as ArrayLiteralExpression): CreateReturnType("System.Array") override def OnTryCastExpression(node as TryCastExpression): CreateReturnType(node.Type) override def OnCastExpression(node as CastExpression): CreateReturnType(node.Type) override def OnTypeofExpression(node as TypeofExpression): CreateReturnType("System.Type") nant-0.9.5~git20110729.r1.202a430/extras/SharpDevelop/BooBinding/src/CodeCompletion/Resolver.boo000066400000000000000000000263241161462365500313610ustar00rootroot00000000000000#region license // Copyright (c) 2004, Daniel Grunwald (daniel@danielgrunwald.de) // All rights reserved. // // BooBinding is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. // // BooBinding is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. 
// // You should have received a copy of the GNU General Public License // along with BooBinding; if not, write to the Free Software // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA #endregion namespace BooBinding.CodeCompletion import BooBinding import System import System.Collections import System.Diagnostics import System.IO import ICSharpCode.SharpDevelop.Services import SharpDevelop.Internal.Parser import Boo.Lang.Compiler import Boo.Lang.Compiler.Ast as AST import Boo.Lang.Compiler.IO import Boo.Lang.Compiler.Steps class Resolver: _parserService as IParserService _caretLine as int _caretColumn as int [Getter(CallingClass)] _callingClass as IClass _compilationUnit as ICompilationUnit _parentClass as IClass ParentClass as IClass: get: _parentClass = _parserService.BaseClass(_callingClass) if _parentClass == null return _parentClass _resolvedMember = false _currentMember as IMember CurrentMember as IMember: get: if not _resolvedMember: _resolvedMember = true _currentMember = ResolveCurrentMember() return _currentMember #region Helper methods private def ResolveCurrentMember() as IMember: print "Getting current method... caretLine = ${_caretLine}, caretColumn = ${_caretColumn}" return null if _callingClass == null best as IMember = null line = 0 for m as IMember in _callingClass.Methods: if m.Region != null: if m.Region.BeginLine <= _caretLine and m.Region.BeginLine > line: line = m.Region.BeginLine best = m for m as IMember in _callingClass.Properties: if m.Region != null: if m.Region.BeginLine <= _caretLine and m.Region.BeginLine > line: line = m.Region.BeginLine best = m if _callingClass.Region == null: for m as IMember in _callingClass.Methods: if m.Region == null: if best == null or best.Region.EndLine < _caretLine: return m return best _localTypes as Hashtable = {} def GetTypeFromLocal(name as string) as IReturnType: // gets the type of a local variable or method parameter print "Trying to get local variable ${name}..." 
return _localTypes[name] if _localTypes.ContainsKey(name) _localTypes[name] = null // prevent stack overflow by caching null first rt = InnerGetTypeFromLocal(name) _localTypes[name] = rt return rt def InnerGetTypeFromLocal(name as string) as IReturnType: member = self.CurrentMember Print("member", member) if member isa BooAbstractMethod: method as BooAbstractMethod = member for para as IParameter in method.Parameters: return para.ReturnType if para.Name == name if method.Node != null and method.Node.Body != null: varLookup = VariableLookupVisitor(Resolver: self, LookFor: name) print "Visiting method body..." varLookup.Visit(method.Node.Body) print "Finished visiting method body!" return varLookup.ReturnType elif member isa Property: property as Property = member return property.ReturnType if name == "value" for para as IParameter in property.Parameters: return para.ReturnType if para.Name == name if property.Node != null: varLookup = VariableLookupVisitor(Resolver: self, LookFor: name) print "Visiting property body..." varLookup.Visit(GetPropertyMethod(property)) print "Finished visiting property body!" 
return varLookup.ReturnType return null def GetPropertyMethod(property as Property): return property.Node.Getter if property.Node.Setter == null return property.Node.Setter if property.Node.Getter == null first = property.Node.Getter last = property.Node.Setter if first.LexicalInfo.Line > last.LexicalInfo.Line: first, last = last, first return last if _caretLine > last.LexicalInfo.Line return first def SearchType(name as string) as IClass: expandedName = BooAmbience.ReverseTypeConversionTable[name] return _parserService.GetClass(expandedName) if expandedName != null return _parserService.SearchType(name, _callingClass, _caretLine, _caretColumn) builtinClass as IClass BuiltinClass as IClass: get: builtinClass = _parserService.GetClass("Boo.Lang.Builtins") if builtinClass == null return builtinClass def IsNamespace(name as string) as bool: return _parserService.NamespaceExists(name) #endregion #region CtrlSpace-Completion def CtrlSpace(parserService as IParserService, caretLine as int, caretColumn as int, fileName as string) as ArrayList: _parserService = parserService _caretLine = caretLine _caretColumn = caretColumn result = ArrayList(BooAmbience.TypeConversionTable.Values) result.Add("System") // system namespace can be used everywhere builtinClass = self.BuiltinClass if builtinClass != null: for method as IMethod in builtinClass.Methods: result.Add(method) parseInfo = parserService.GetParseInformation(fileName) cu = parseInfo.MostRecentCompilationUnit as CompilationUnit _compilationUnit = cu if cu != null: curClass = parserService.GetInnermostClass(cu, caretLine, caretColumn) as IClass _callingClass = curClass if curClass != null: result = AddCurrentClassMembers(result, curClass) result.AddRange(parserService.GetNamespaceContents(curClass.Namespace)) for u as IUsing in cu.Usings: if u != null and (u.Region == null or u.Region.IsInside(caretLine, caretColumn)): for name as string in u.Usings: result.AddRange(parserService.GetNamespaceContents(name)) for alias as 
string in u.Aliases.Keys: result.Add(alias) member = self.CurrentMember Print("member", member) if member != null: varList as Hashtable = null if member isa BooAbstractMethod: method as BooAbstractMethod = member for para as IParameter in method.Parameters: result.Add(Field(para.ReturnType, para.Name, ModifierEnum.Private, null)) if method.Node != null: varLookup = VariableListLookupVisitor(Resolver: self) print "Visiting method body..." varLookup.Visit(cast(BooAbstractMethod, member).Node.Body) print "Finished visiting method body!" varList = varLookup.Results elif member isa Property: property as Property = member if property.Node != null: varLookup = VariableListLookupVisitor(Resolver: self) print "Visiting property body..." varLookup.Visit(GetPropertyMethod(property)) print "Finished visiting property body!" varList = varLookup.Results if varList != null: for e as DictionaryEntry in varList: result.Add(Field(e.Value, e.Key, ModifierEnum.Private, null)) result.AddRange(parserService.GetNamespaceContents("")) return result def AddCurrentClassMembers(result as ArrayList, curClass as IClass) as ArrayList: if self.CurrentMember != null and self.CurrentMember.IsStatic == false: result = _parserService.ListMembers(result, curClass, curClass, false) // Add static members, but only from this class (not from base classes) for method as IMethod in curClass.Methods: result.Add(method) if (method.Modifiers & ModifierEnum.Static) == ModifierEnum.Static for field as IField in curClass.Fields: result.Add(field) if (field.Modifiers & ModifierEnum.Static) == ModifierEnum.Static for property as IProperty in curClass.Properties: result.Add(property) if (property.Modifiers & ModifierEnum.Static) == ModifierEnum.Static for e as Event in curClass.Events: result.Add(e) if (e.Modifiers & ModifierEnum.Static) == ModifierEnum.Static return result #endregion #region Resolve CC def Initialize(parserService as IParserService, caretLine as int, caretColumn as int, fileName as string): 
_parserService = parserService _caretLine = caretLine _caretColumn = caretColumn parseInfo = parserService.GetParseInformation(fileName) cu = parseInfo.MostRecentCompilationUnit as CompilationUnit _compilationUnit = cu if _compilationUnit == null: print "BooResolver: No parse information!" return false _callingClass = parserService.GetInnermostClass(cu, caretLine, caretColumn) if _callingClass == null: return false if cu.Classes.Count == 0 _callingClass = cu.Classes[cu.Classes.Count - 1] if _callingClass.Region != null: return false if _callingClass.Region.BeginLine > caretLine return true def Resolve(parserService as IParserService, expression as string, caretLine as int, caretColumn as int, fileName as string, fileContent as string) as ResolveResult: if expression == null or expression == '': return null if expression.StartsWith("import "): expression = expression.Substring(7).Trim() if parserService.NamespaceExists(expression): return ResolveResult(parserService.GetNamespaceList(expression)) return null if not Initialize(parserService, caretLine, caretColumn, fileName): return null callingClass = _callingClass returnClass as IClass = null if expression == "self": returnClass = callingClass elif expression == "this": // SharpDevelop uses "this" as expression when requesting method insight information // for a method on the current class returnClass = callingClass elif expression == "super": returnClass = self.ParentClass else: // try looking if the expression is the name of a class expressionClass = self.SearchType(expression) if expressionClass != null: return ResolveResult(expressionClass, parserService.ListMembers(ArrayList(), expressionClass, callingClass, true)) // try if it is the name of a namespace if parserService.NamespaceExists(expression): return ResolveResult(array(string, 0), parserService.GetNamespaceContents(expression)) expr = Boo.Lang.Parser.BooParser.ParseExpression("expression", expression) return null if expr isa AST.IntegerLiteralExpression 
visitor = ExpressionTypeVisitor(Resolver : self) visitor.Visit(expr) retType = visitor.ReturnType Print ("result", retType) if visitor.ReturnClass != null: returnClass = visitor.ReturnClass elif retType != null: if retType.ArrayDimensions != null and retType.ArrayDimensions.Length > 0: returnClass = self.SearchType("System.Array") else: returnClass = self.SearchType(retType.FullyQualifiedName) return null if returnClass == null return ResolveResult(returnClass, parserService.ListMembers(ArrayList(), returnClass, callingClass, false)) private def Print(name as string, obj): Console.Write(name); Console.Write(' = '); if obj == null: Console.WriteLine('null') else: Console.WriteLine('{0} ({1})', obj, obj.GetType().FullName) #endregion nant-0.9.5~git20110729.r1.202a430/extras/SharpDevelop/BooBinding/src/CodeCompletion/ReturnType.boo000066400000000000000000000142431161462365500316760ustar00rootroot00000000000000#region license // Copyright (c) 2004, Daniel Grunwald (daniel@danielgrunwald.de) // All rights reserved. // // BooBinding is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. // // BooBinding is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. 
// // You should have received a copy of the GNU General Public License // along with BooBinding; if not, write to the Free Software // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA #endregion namespace BooBinding.CodeCompletion import System import System.Collections import System.Diagnostics import SharpDevelop.Internal.Parser import ICSharpCode.SharpDevelop.Services import Boo.Lang.Compiler.Ast as AST ///////////////////////////////////// /// Return Type /// ///////////////////////////////////// class ReturnType(AbstractReturnType): def constructor(fullyQualifiedName as string): self(fullyQualifiedName, array(int, 0), 0) def constructor(fullyQualifiedName as string, arrayDimensions as (int), pointerNestingLevel as int): self.FullyQualifiedName = fullyQualifiedName self.arrayDimensions = arrayDimensions self.pointerNestingLevel = pointerNestingLevel def constructor(t as AST.TypeReference): super.pointerNestingLevel = 0 if t isa AST.SimpleTypeReference: super.arrayDimensions = array(int, 0) name = cast(AST.SimpleTypeReference, t).Name expandedName = BooBinding.BooAmbience.ReverseTypeConversionTable[name] name = expandedName if expandedName != null super.FullyQualifiedName = name elif t isa AST.ArrayTypeReference: ar as AST.ArrayTypeReference = t depth = 1 while ar.ElementType isa AST.ArrayTypeReference: depth += 1 ar = ar.ElementType dimensions = array(int, depth) for i as int in range(depth): dimensions[i] = 1 self.arrayDimensions = dimensions if ar.ElementType isa AST.SimpleTypeReference: super.FullyQualifiedName = cast(AST.SimpleTypeReference, ar.ElementType).Name else: print ("Got unknown TypeReference in Array: ${t}") super.FullyQualifiedName = "" else: super.arrayDimensions = array(int, 0) super.FullyQualifiedName = "" print ("Got unknown TypeReference ${t}") static def CreateReturnType(node as AST.Node) as IReturnType: if node isa AST.Field: t = (node as AST.Field).Type elif node isa AST.Property: t = (node as AST.Property).Type 
elif node isa AST.Method: t = (node as AST.Method).ReturnType elif node isa AST.Event: t = (node as AST.Event).Type else: raise "Unknown node ${node.GetType().FullName}" str = t as AST.SimpleTypeReference if (str != null and str.Name != "unknown") or t isa AST.ArrayTypeReference: return ReturnType(t) else: if node isa AST.Field: return InferredReturnType((node as AST.Field).Initializer, node.LexicalInfo) elif node isa AST.Property: prop as AST.Property = node return InferredReturnType(GetReturnExpression(prop.Getter), node.LexicalInfo) elif node isa AST.Method: return InferredReturnType(GetReturnExpression(node), node.LexicalInfo) private static def GetReturnExpression(method as AST.Method): return null if method == null return null if method.Body == null visitor = FindReturnExpressionVisitor() method.Body.Accept(visitor) return visitor.Expression private class FindReturnExpressionVisitor(AST.DepthFirstVisitor): [Getter(Expression)] _expression as AST.Expression override def OnReturnStatement(node as AST.ReturnStatement): if _expression isa AST.NullLiteralExpression or not (node.Expression isa AST.NullLiteralExpression): _expression = node.Expression def constructor(t as AST.TypeDefinition): self(t.FullName) def constructor(c as IClass): self(c.FullyQualifiedName) def Clone() as ReturnType: return ReturnType(FullyQualifiedName, arrayDimensions, pointerNestingLevel) override def ToString(): return "[${GetType().Name} Name=${FullyQualifiedName}]" ///////////////////////////////////// /// Namespace Return Type /// ///////////////////////////////////// class NamespaceReturnType(AbstractReturnType): def constructor(fullyQualifiedName as string): self.FullyQualifiedName = fullyQualifiedName self.arrayDimensions = array(int, 0) self.pointerNestingLevel = 0 override def ToString(): return "[${GetType().Name} Name=${FullyQualifiedName}]" ///////////////////////////////////// /// Inferred Return Type /// ///////////////////////////////////// class 
InferredReturnType(AbstractReturnType): _expression as AST.Expression _filename as string _caretLine as int _caretColumn as int def constructor(expression as AST.Expression, info as AST.LexicalInfo): _expression = expression if info == null or expression == null: _resolved = true // don't resolve but return error else: _filename = info.FileName _caretLine = info.Line _caretColumn = info.Column _baseType as IReturnType _resolved as bool = false override FullyQualifiedName as string: get: r = self.BaseType if r == null: return "" else: return r.FullyQualifiedName set: raise NotSupportedException() override PointerNestingLevel as int: get: r = self.BaseType if r == null: return 0 else: return r.PointerNestingLevel override ArrayDimensions as (int): get: r = self.BaseType if r == null: return array(int, 0) else: return r.ArrayDimensions BaseType as IReturnType: get: if not _resolved: _resolved = true _baseType = Resolve() return _baseType def Resolve() as IReturnType: resolver = Resolver() parserService = ICSharpCode.Core.Services.ServiceManager.Services.GetService(typeof(IParserService)) if resolver.Initialize(parserService, _caretLine, _caretColumn, _filename): visitor = ExpressionTypeVisitor(Resolver : resolver) visitor.Visit(_expression) return visitor.ReturnType else: return null nant-0.9.5~git20110729.r1.202a430/extras/SharpDevelop/BooBinding/src/CodeCompletion/Tree.boo000066400000000000000000000067511161462365500304610ustar00rootroot00000000000000#region license // Copyright (c) 2004, Daniel Grunwald (daniel@danielgrunwald.de) // All rights reserved. // // BooBinding is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. 
// // BooBinding is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with BooBinding; if not, write to the Free Software // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA #endregion namespace BooBinding.CodeCompletion import System import System.Collections import System.Diagnostics import SharpDevelop.Internal.Parser import Boo.Lang.Compiler.Ast as AST ///////////////////////////////////// /// Compilation Unit /// ///////////////////////////////////// class CompilationUnit(AbstractCompilationUnit): override MiscComments as CommentCollection: get: return null override DokuComments as CommentCollection: get: return null ///////////////////////////////////// /// Class /// ///////////////////////////////////// class Class(AbstractClass): _cu as ICompilationUnit override CompilationUnit as ICompilationUnit: get: return _cu def constructor(cu as CompilationUnit, t as ClassType, m as ModifierEnum, region as IRegion): _cu = cu classType = t self.region = region modifiers = m def UpdateModifier(): if classType == ClassType.Enum: for f as Field in Fields: f.AddModifier(ModifierEnum.Public) return for f as Field in Fields: if f.Modifiers == ModifierEnum.None: f.AddModifier(ModifierEnum.Protected) if classType != ClassType.Interface: return for c as Class in InnerClasses: c.modifiers = c.modifiers | ModifierEnum.Public for m as IMethod in Methods: if m isa BooAbstractMethod: cast(BooAbstractMethod, m).AddModifier(ModifierEnum.Public) else: Debug.Assert(false, 'Unexpected type in method of interface. 
Can not set modifier to public!') for e as Event in Events: e.AddModifier(ModifierEnum.Public) for f as Field in Fields: f.AddModifier(ModifierEnum.Public) for i as Indexer in Indexer: i.AddModifier(ModifierEnum.Public) for p as Property in Properties: p.AddModifier(ModifierEnum.Public) ///////////////////////////////////// /// Parameter /// ///////////////////////////////////// class Parameter(AbstractParameter): def constructor(name as string, rtype as ReturnType): Name = name returnType = rtype ///////////////////////////////////// /// Attributes /// ///////////////////////////////////// class AttributeSection(AbstractAttributeSection): def constructor(attributeTarget as AttributeTarget, attributes as AttributeCollection): self.attributeTarget = attributeTarget self.Attributes = attributes class ASTAttribute(AbstractAttribute): def constructor(name as string, positionalArguments as ArrayList, namedArguments as SortedList): self.name = name self.positionalArguments = positionalArguments self.namedArguments = namedArguments nant-0.9.5~git20110729.r1.202a430/extras/SharpDevelop/BooBinding/src/CodeCompletion/TypeMembers.boo000066400000000000000000000102361161462365500320070ustar00rootroot00000000000000#region license // Copyright (c) 2004, Daniel Grunwald (daniel@danielgrunwald.de) // All rights reserved. // // BooBinding is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. // // BooBinding is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. 
// // You should have received a copy of the GNU General Public License // along with BooBinding; if not, write to the Free Software // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA #endregion namespace BooBinding.CodeCompletion import System import SharpDevelop.Internal.Parser import Boo.Lang.Compiler.Ast as AST ///////////////////////////////////// /// Constructor /// ///////////////////////////////////// class Constructor(BooAbstractMethod): def constructor(m as ModifierEnum, region as IRegion, bodyRegion as IRegion): FullyQualifiedName = '#ctor' self.region = region self.bodyRegion = bodyRegion modifiers = m ///////////////////////////////////// /// Destructor /// ///////////////////////////////////// class Destructor(BooAbstractMethod): def constructor(className as string, m as ModifierEnum, region as IRegion, bodyRegion as IRegion): FullyQualifiedName = '~' + className self.region = region self.bodyRegion = bodyRegion modifiers = m class BooAbstractMethod(AbstractMethod): [Property(Node)] _node as AST.Method def AddModifier(m as ModifierEnum): modifiers = modifiers | m ///////////////////////////////////// /// Event /// ///////////////////////////////////// class Event(AbstractEvent): def AddModifier(m as ModifierEnum): modifiers = modifiers | m def constructor(name as string, rtype as IReturnType, m as ModifierEnum, region as IRegion, bodyRegion as IRegion): FullyQualifiedName = name returnType = rtype self.region = region self.bodyRegion = bodyRegion modifiers = m ///////////////////////////////////// /// Field /// ///////////////////////////////////// class Field(AbstractField): def AddModifier(m as ModifierEnum): modifiers = modifiers | m def constructor(rtype as IReturnType, fullyQualifiedName as string, m as ModifierEnum, region as IRegion): self.returnType = rtype self.FullyQualifiedName = fullyQualifiedName self.region = region modifiers = m def SetModifiers(m as ModifierEnum): modifiers = m 
///////////////////////////////////// /// Indexer /// ///////////////////////////////////// class Indexer(AbstractIndexer): def AddModifier(m as ModifierEnum): modifiers = modifiers | m def constructor(rtype as IReturnType, parameters as ParameterCollection, m as ModifierEnum, region as IRegion, bodyRegion as IRegion): returnType = rtype self.Parameters = parameters self.region = region self.bodyRegion = bodyRegion modifiers = m ///////////////////////////////////// /// Method /// ///////////////////////////////////// class Method(BooAbstractMethod): def constructor(name as string, rtype as IReturnType, m as ModifierEnum, region as IRegion, bodyRegion as IRegion): FullyQualifiedName = name self.returnType = rtype self.region = region self.bodyRegion = bodyRegion modifiers = m ///////////////////////////////////// /// Property /// ///////////////////////////////////// class Property(AbstractProperty): [Property(Node)] _node as AST.Property def AddModifier(m as ModifierEnum): modifiers = modifiers | m def constructor(fullyQualifiedName as string, rtype as IReturnType, m as ModifierEnum, region as IRegion, bodyRegion as IRegion): self.FullyQualifiedName = fullyQualifiedName self.returnType = rtype self.region = region self.bodyRegion = bodyRegion modifiers = m VariableLookupVisitor.boo000066400000000000000000000064531161462365500340010ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/extras/SharpDevelop/BooBinding/src/CodeCompletion#region license // Copyright (c) 2004, Daniel Grunwald (daniel@danielgrunwald.de) // All rights reserved. // // BooBinding is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. 
// // BooBinding is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with BooBinding; if not, write to the Free Software // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA #endregion namespace BooBinding.CodeCompletion import System import System.Collections import SharpDevelop.Internal.Parser import Boo.Lang.Compiler.Ast class VariableLookupVisitor(DepthFirstVisitor): [Property(Resolver)] _resolver as Resolver [Property(LookFor)] _lookFor as string [Getter(ReturnType)] _returnType as IReturnType private def Finish(expr as Expression): return if expr == null return if _returnType != null visitor = ExpressionTypeVisitor(Resolver: _resolver) visitor.Visit(expr) _returnType = visitor.ReturnType private def Finish(reference as TypeReference): return if _returnType != null return if reference == null _returnType = BooBinding.CodeCompletion.ReturnType(reference) override def OnDeclaration(node as Declaration): return if node.Name != _lookFor Finish(node.Type) override def OnDeclarationStatement(node as DeclarationStatement): return if node.Declaration.Name != _lookFor Visit(node.Declaration) Finish(node.Initializer) override def OnBinaryExpression(node as BinaryExpression): if node.Operator == BinaryOperatorType.Assign and node.Left isa ReferenceExpression: reference as ReferenceExpression = node.Left if reference.Name == _lookFor: Finish(node.Right) unless reference isa MemberReferenceExpression super(node) class VariableListLookupVisitor(DepthFirstVisitor): [Property(Resolver)] _resolver as Resolver [Getter(Results)] _results as Hashtable = {} private def Add(name as string, expr as Expression): return if name == null or expr == null return if _results.ContainsKey(name) visitor = 
ExpressionTypeVisitor(Resolver: _resolver) visitor.Visit(expr) _results.Add(name, visitor.ReturnType) private def Add(name as string, reference as TypeReference): return if reference == null or name == null return if _results.ContainsKey(name) _results.Add(name, BooBinding.CodeCompletion.ReturnType(reference)) override def OnDeclaration(node as Declaration): Add(node.Name, node.Type) override def OnDeclarationStatement(node as DeclarationStatement): Visit(node.Declaration) Add(node.Declaration.Name, node.Initializer) override def OnBinaryExpression(node as BinaryExpression): if node.Operator == BinaryOperatorType.Assign and node.Left isa ReferenceExpression: reference as ReferenceExpression = node.Left Add(reference.Name, node.Right) unless reference isa MemberReferenceExpression super(node) nant-0.9.5~git20110729.r1.202a430/extras/SharpDevelop/BooBinding/src/CodeCompletion/Visitor.boo000066400000000000000000000223541161462365500312160ustar00rootroot00000000000000#region license // Copyright (c) 2004, Daniel Grunwald (daniel@danielgrunwald.de) // All rights reserved. // // BooBinding is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. // // BooBinding is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. 
// // You should have received a copy of the GNU General Public License // along with BooBinding; if not, write to the Free Software // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA #endregion namespace BooBinding.CodeCompletion import System import System.Collections import ICSharpCode.Core.Services import SharpDevelop.Internal.Parser import Boo.Lang.Compiler import Boo.Lang.Compiler.Ast as AST import Boo.Lang.Compiler.IO import Boo.Lang.Compiler.Steps class Using(AbstractUsing): pass class Visitor(AbstractVisitorCompilerStep): [Getter(Cu)] _cu as CompilationUnit = CompilationUnit() _currentClass as Stack = Stack() _firstModule = true override def Run(): print "RUN" try: Visit(CompileUnit) except e: print e.ToString() //msg as IMessageService = ServiceManager.Services.GetService(typeof(IMessageService)) //msg.ShowError(e) private def GetModifier(m as AST.TypeMember) as ModifierEnum: r = ModifierEnum.None r = r | ModifierEnum.Public if m.IsPublic r = r | ModifierEnum.Protected if m.IsProtected r = r | ModifierEnum.Private if m.IsPrivate r = r | ModifierEnum.Internal if m.IsInternal r = r | ModifierEnum.Static if m.IsStatic r = r | ModifierEnum.Virtual if m.IsModifierSet(AST.TypeMemberModifiers.Virtual) r = r | ModifierEnum.Abstract if m.IsModifierSet(AST.TypeMemberModifiers.Abstract) r = r | ModifierEnum.Override if m.IsModifierSet(AST.TypeMemberModifiers.Override) r = r | ModifierEnum.Final if m.IsFinal return r [Property(LineLength)] _lineLength as (int) private def GetLineEnd(line as int) as int: return 0 if _lineLength == null or line < 1 or line > _lineLength.Length return _lineLength[line - 1] + 1 private def GetRegion(m as AST.Node): l = m.LexicalInfo return null if (l.Line < 0) return DefaultRegion(l.Line, 0 /*l.Column*/, l.Line, GetLineEnd(l.Line)) private def GetClientRegion(m as AST.Node) as DefaultRegion: l = m.LexicalInfo return null if l.Line < 0 l2 as AST.SourceLocation = null if m isa AST.Method: l2 = cast(AST.Method, 
m).Body.EndSourceLocation elif m isa AST.Property: p as AST.Property = m if p.Getter != null and p.Getter.Body != null: l2 = cast(AST.Property, m).Getter.Body.EndSourceLocation if p.Setter != null and p.Setter.Body != null: l3 = cast(AST.Property, m).Setter.Body.EndSourceLocation l2 = l3 if l3.Line > l2.Line elif p.Setter != null and p.Setter.Body != null: l2 = cast(AST.Property, m).Setter.Body.EndSourceLocation else: l2 = m.EndSourceLocation return null if l2 == null or l2.Line < 0 or l.Line == l2.Line // TODO: use l.Column / l2.Column when the tab-bug has been fixed return DefaultRegion(l.Line, GetLineEnd(l.Line), l2.Line, GetLineEnd(l2.Line)) override def OnImport(p as AST.Import): u = Using() if p.Alias == null: u.Usings.Add(p.Namespace) else: u.Aliases[p.Alias.Name] = p.Namespace _cu.Usings.Add(u) override def OnCallableDefinition(node as AST.CallableDefinition): print "OnCallableDefinition: ${node.FullName}" region = GetRegion(node) modifier = GetModifier(node) c = Class(_cu, ClassType.Delegate, modifier, region) c.BaseTypes.Add('System.Delegate') c.FullyQualifiedName = node.FullName if _currentClass.Count > 0: cast(Class, _currentClass.Peek()).InnerClasses.Add(c) else: _cu.Classes.Add(c) invokeMethod = Method('Invoke', ReturnType(node.ReturnType), modifier, region, region) invokeMethod.Parameters = GetParameters(node.Parameters) c.Methods.Add(invokeMethod) override def EnterClassDefinition(node as AST.ClassDefinition): EnterTypeDefinition(node, ClassType.Class) return super(node) override def EnterInterfaceDefinition(node as AST.InterfaceDefinition): EnterTypeDefinition(node, ClassType.Interface) return super(node) override def EnterEnumDefinition(node as AST.EnumDefinition): EnterTypeDefinition(node, ClassType.Enum) return super(node) override def EnterModule(node as AST.Module): EnterTypeDefinition(node, ClassType.Class) unless _firstModule _firstModule = false return super(node) private def EnterTypeDefinition(node as AST.TypeDefinition, classType as 
ClassType): try: print "Enter ${node.GetType().Name} (${node.FullName})" region = GetClientRegion(node) modifier = GetModifier(node) c = Class(_cu, classType, modifier, region) c.FullyQualifiedName = node.FullName c.Documentation = node.Documentation if _currentClass.Count > 0: cast(Class, _currentClass.Peek()).InnerClasses.Add(c) else: _cu.Classes.Add(c) if node.BaseTypes != null: for r as AST.SimpleTypeReference in node.BaseTypes: c.BaseTypes.Add(r.Name) _currentClass.Push(c) except ex: print ex.ToString() raise override def LeaveClassDefinition(node as AST.ClassDefinition): LeaveTypeDefinition(node) super(node) override def LeaveInterfaceDefinition(node as AST.InterfaceDefinition): LeaveTypeDefinition(node) super(node) override def LeaveEnumDefinition(node as AST.EnumDefinition): LeaveTypeDefinition(node) super(node) override def LeaveModule(node as AST.Module): LeaveTypeDefinition(node) unless _currentClass.Count == 0 super(node) private def LeaveTypeDefinition(node as AST.TypeDefinition): c as Class = _currentClass.Pop() print "Leave ${node.GetType().Name} ${node.FullName} (Class = ${c.FullyQualifiedName})" c.UpdateModifier() override def OnMethod(node as AST.Method): try: print "Method: ${node.FullName}" method = Method(node.Name, ReturnType.CreateReturnType(node), GetModifier(node), GetRegion(node), GetClientRegion(node)) method.Parameters = GetParameters(node.Parameters) method.Node = node method.Documentation = node.Documentation cast(Class, _currentClass.Peek()).Methods.Add(method) except ex: print ex.ToString() raise private def GetParameters(params as AST.ParameterDeclarationCollection): parameters = ParameterCollection() return parameters if params == null for par as AST.ParameterDeclaration in params: parameters.Add(Parameter(par.Name, ReturnType(par.Type))) return parameters override def OnConstructor(node as AST.Constructor): return if node.Body.Statements.Count == 0 ctor = Constructor(GetModifier(node), GetRegion(node), GetClientRegion(node)) 
ctor.Parameters = GetParameters(node.Parameters) ctor.Node = node ctor.Documentation = node.Documentation cast(Class, _currentClass.Peek()).Methods.Add(ctor) override def OnEnumMember(node as AST.EnumMember): try: c as Class = _currentClass.Peek() field = Field(ReturnType(c), node.Name, GetModifier(node), GetRegion(node)) field.Documentation = node.Documentation field.SetModifiers(ModifierEnum.Const | ModifierEnum.SpecialName) c.Fields.Add(field) except x: print x raise override def OnField(node as AST.Field): try: print "Field ${node.Name}" c as Class = _currentClass.Peek() field = Field(ReturnType.CreateReturnType(node), node.Name, GetModifier(node), GetRegion(node)) field.Documentation = node.Documentation c.Fields.Add(field) except ex: print ex.ToString() raise override def OnEvent(node as AST.Event): try: print "event ${node.Name}" c as Class = _currentClass.Peek() region = GetRegion(node) e = Event(node.Name, ReturnType.CreateReturnType(node), GetModifier(node), region, region) e.Documentation = node.Documentation c.Events.Add(e) except ex: print ex.ToString() raise override def OnProperty(node as AST.Property): try: print "Property ${node.Name}" property = Property(node.Name, ReturnType.CreateReturnType(node), GetModifier(node), GetRegion(node), GetClientRegion(node)) property.Documentation = node.Documentation property.Node = node cast(Class, _currentClass.Peek()).Properties.Add(property) except ex: print ex.ToString() raise /* // TODO: Detect indexer method and add it as Indexer override def Visit(indexerDeclaration as AST.IndexerDeclaration, data as object) as object: region as DefaultRegion = GetRegion(indexerDeclaration.StartLocation, indexerDeclaration.EndLocation) bodyRegion as DefaultRegion = GetRegion(indexerDeclaration.BodyStart, indexerDeclaration.BodyEnd) parameters as ParameterCollection = ParameterCollection() i as Indexer = Indexer(ReturnType(indexerDeclaration.TypeReference), parameters, indexerDeclaration.Modifier, region, bodyRegion) if 
indexerDeclaration.Parameters != null: for par as AST.ParameterDeclarationExpression in indexerDeclaration.Parameters: parType as ReturnType = ReturnType(par.TypeReference) p as Parameter = Parameter(par.ParameterName, parType) parameters.Add(p) c as Class = _currentClass.Peek() c.Indexer.Add(i) return null */ nant-0.9.5~git20110729.r1.202a430/extras/SharpDevelop/BooBinding/src/CodeDomVisitor.boo000066400000000000000000000341761161462365500275520ustar00rootroot00000000000000namespace BooBinding import System import System.Collections import System.CodeDom import System.Text import Boo.Lang.Compiler import Boo.Lang.Compiler.Ast import Boo.Lang.Compiler.Ast.Visitors import Boo.Lang.Parser import SharpDevelop.Internal.Parser class CodeDomVisitor(IAstVisitor): """The CodeDomVisitor is able to convert from the Boo AST to System.CodeDom It makes use of the SharpDevelop parser service to get necessary additional information about the types.""" [Getter(OutputCompileUnit)] _compileUnit = CodeCompileUnit() _namespace as CodeNamespace _class as CodeTypeDeclaration _statements as CodeStatementCollection _expression as CodeExpression def ConvModifiers(member as TypeMember) as MemberAttributes: if member isa Field: return ConvModifiers(member.Modifiers, MemberAttributes.Family) else: return ConvModifiers(member.Modifiers, MemberAttributes.Public) def ConvModifiers(modifier as TypeMemberModifiers, defaultAttr as MemberAttributes) as MemberAttributes: // Boo is not able to convert 0 to MemberAttributes, therefore we need to use // a trick to get the default value noAttr = MemberAttributes.Abstract & MemberAttributes.Final attr = noAttr if (modifier & TypeMemberModifiers.Abstract) == TypeMemberModifiers.Abstract: attr = attr | MemberAttributes.Abstract if (modifier & TypeMemberModifiers.Final) == TypeMemberModifiers.Final: attr = attr | MemberAttributes.Const if (modifier & TypeMemberModifiers.Internal) == TypeMemberModifiers.Internal: attr = attr | MemberAttributes.Assembly if 
(modifier & TypeMemberModifiers.Override) == TypeMemberModifiers.Override: attr = attr | MemberAttributes.Override if (modifier & TypeMemberModifiers.Private) == TypeMemberModifiers.Private: attr = attr | MemberAttributes.Private if (modifier & TypeMemberModifiers.Protected) == TypeMemberModifiers.Protected: attr = attr | MemberAttributes.Family if (modifier & TypeMemberModifiers.Public) == TypeMemberModifiers.Public: attr = attr | MemberAttributes.Public if (modifier & TypeMemberModifiers.Static) == TypeMemberModifiers.Static: attr = attr | MemberAttributes.Static if (modifier & TypeMemberModifiers.Virtual) != TypeMemberModifiers.Virtual: attr = attr | MemberAttributes.Final if attr == noAttr: return defaultAttr else: return attr def ConvTypeRef(tr as TypeReference): return null if tr == null name = tr.ToString() expandedName = BooAmbience.ReverseTypeConversionTable[name] name = expandedName if expandedName != null return CodeTypeReference(name) def OnCompileUnit(node as CompileUnit): for m as Module in node.Modules: m.Accept(self) def OnModule(node as Module): if node.Namespace == null: _namespace = CodeNamespace("Global") _compileUnit.Namespaces.Add(_namespace) else: node.Namespace.Accept(self) for i as Import in node.Imports: i.Accept(self) for m as TypeMember in node.Members: m.Accept(self) def OnNamespaceDeclaration(node as NamespaceDeclaration): _namespace = CodeNamespace(node.Name) _compileUnit.Namespaces.Add(_namespace) def OnImport(node as Import): _namespace.Imports.Add(CodeNamespaceImport(node.Namespace)) def OnClassDefinition(node as ClassDefinition): oldClass = _class _class = CodeTypeDeclaration(node.Name) _class.IsClass = true for b as TypeReference in node.BaseTypes: _class.BaseTypes.Add(ConvTypeRef(b)) for member as TypeMember in node.Members: member.Accept(self) if oldClass == null: _namespace.Types.Add(_class) else: oldClass.Members.Add(_class) _class = oldClass def OnStructDefinition(node as StructDefinition): oldClass = _class _class = 
CodeTypeDeclaration(node.Name) _class.IsStruct = true for b as TypeReference in node.BaseTypes: _class.BaseTypes.Add(ConvTypeRef(b)) for member as TypeMember in node.Members: member.Accept(self) if oldClass == null: _namespace.Types.Add(_class) else: oldClass.Members.Add(_class) _class = oldClass def OnInterfaceDefinition(node as InterfaceDefinition): oldClass = _class _class = CodeTypeDeclaration(node.Name) _class.IsInterface = true for b as TypeReference in node.BaseTypes: _class.BaseTypes.Add(ConvTypeRef(b)) for member as TypeMember in node.Members: member.Accept(self) if oldClass == null: _namespace.Types.Add(_class) else: oldClass.Members.Add(_class) _class = oldClass def OnField(node as Field): field = CodeMemberField(ConvTypeRef(node.Type), node.Name) field.Attributes = ConvModifiers(node) if node.Initializer != null: _expression = null //Visit(node.Initializer) field.InitExpression = _expression _class.Members.Add(field) def OnConstructor(node as Constructor): ConvertMethod(node, CodeConstructor()) def OnMethod(node as Method): ConvertMethod(node, CodeMemberMethod(Name: node.Name)) def OnDestructor(node as Destructor): ConvertMethod(node, CodeMemberMethod(Name: "Finalize")) def ConvertMethod(node as Method, method as CodeMemberMethod): method.Attributes = ConvModifiers(node) method.ReturnType = ConvTypeRef(node.ReturnType) if node.Parameters != null: for p as ParameterDeclaration in node.Parameters: method.Parameters.Add(CodeParameterDeclarationExpression(ConvTypeRef(p.Type), p.Name)) _statements = method.Statements if node.Body != null: node.Body.Accept(self) _class.Members.Add(method) def OnArrayLiteralExpression(node as ArrayLiteralExpression): pass def OnArrayTypeReference(node as ArrayTypeReference): pass def OnTryCastExpression(node as TryCastExpression): pass def OnAttribute(node as Boo.Lang.Compiler.Ast.Attribute): pass def OnBinaryExpression(node as BinaryExpression): op = node.Operator if op == BinaryOperatorType.Assign: _expression = null 
node.Left.Accept(self) left = _expression _expression = null node.Right.Accept(self) if left != null and _expression != null: _statements.Add(CodeAssignStatement(left, _expression)) _expression = null return def OnBlock(node as Block): for n as Statement in node.Statements: n.Accept(self) def OnBreakStatement(node as BreakStatement): pass def OnCallableBlockExpression(node as CallableBlockExpression): pass def OnCallableDefinition(node as CallableDefinition): pass def OnCallableTypeReference(node as CallableTypeReference): pass def OnCastExpression(node as CastExpression): pass def OnContinueStatement(node as ContinueStatement): pass def OnDeclaration(node as Declaration): pass def OnDeclarationStatement(node as DeclarationStatement): pass def OnEnumDefinition(node as EnumDefinition): pass def OnEnumMember(node as EnumMember): pass def OnParameterDeclaration(node as ParameterDeclaration): pass def OnEvent(node as Event): pass def OnExceptionHandler(node as ExceptionHandler): pass def OnExpressionInterpolationExpression(node as ExpressionInterpolationExpression): pass def OnExpressionPair(node as ExpressionPair): pass def OnExpressionStatement(node as ExpressionStatement): _expression = null node.Expression.Accept(self) if _expression != null: _statements.Add(CodeExpressionStatement(_expression)) def OnForStatement(node as ForStatement): pass def OnExtendedGeneratorExpression(node as ExtendedGeneratorExpression): pass def OnGeneratorExpression(node as GeneratorExpression): pass def OnGivenStatement(node as GivenStatement): pass def OnGotoStatement(node as GotoStatement): _statements.Add(CodeGotoStatement(node.Label.Name)) def OnNullLiteralExpression(node as NullLiteralExpression): _expression = CodePrimitiveExpression(null) def OnBoolLiteralExpression(node as BoolLiteralExpression): _expression = CodePrimitiveExpression(node.Value) def OnStringLiteralExpression(node as StringLiteralExpression): _expression = CodePrimitiveExpression(node.Value) def 
OnCharLiteralExpression(node as CharLiteralExpression): _expression = CodePrimitiveExpression(node.Value) def OnHashLiteralExpression(node as HashLiteralExpression): pass def OnIntegerLiteralExpression(node as IntegerLiteralExpression): _expression = CodePrimitiveExpression(node.Value) def OnDoubleLiteralExpression(node as DoubleLiteralExpression): _expression = CodePrimitiveExpression(node.Value) def OnListLiteralExpression(node as ListLiteralExpression): pass def OnIfStatement(node as IfStatement): pass def OnLabelStatement(node as LabelStatement): pass def OnLocal(node as Local): pass def OnMacroStatement(node as MacroStatement): pass def OnMemberReferenceExpression(node as MemberReferenceExpression): _expression = null node.Target.Accept(self) if _expression != null: if _expression isa CodeTypeReferenceExpression: // TODO: lookup if expression is static member or subtype _expression = CodeTypeReferenceExpression("${cast(CodeTypeReferenceExpression, _expression).Type.BaseType}.${node.Name}") else: _expression = CreateMemberExpression(_expression, node.Name) def OnReferenceExpression(node as ReferenceExpression): p = GetParserService() if p.GetClass(node.Name) != null: _expression = CodeTypeReferenceExpression(node.Name) elif p.NamespaceExists(node.Name): _expression = CodeTypeReferenceExpression(node.Name) else: _expression = CreateMemberExpression(CodeThisReferenceExpression(), node.Name) def CreateMemberExpression(expr as CodeExpression, name as string): if expr isa CodeTypeReferenceExpression: typeRef = cast(CodeTypeReferenceExpression, _expression).Type.BaseType return CreateMemberExpression(expr, typeRef, name, true) elif expr isa CodeThisReferenceExpression: typeRef = "${_namespace.Name}.${_class.Name}" return CreateMemberExpression(expr, typeRef, name, false) return CodeFieldReferenceExpression(expr, name) def CreateMemberExpression(target as CodeExpression, parentName as string, name as string, isStatic as bool): combinedName = "${parentName}.${name}" p 
= GetParserService() parentClass = p.GetClass(parentName) if parentClass == null: if p.GetClass(combinedName) != null: return CodeTypeReferenceExpression(combinedName) elif p.NamespaceExists(combinedName): return CodeTypeReferenceExpression(combinedName) else: if isStatic: for innerClass as IClass in parentClass.InnerClasses: if innerClass.Name == name: return CodeTypeReferenceExpression(combinedName) for c as IClass in parentClass.ClassInheritanceTree: for ev as IEvent in c.Events: if ev.IsStatic == isStatic: return CodeEventReferenceExpression(target, name) for me as IMethod in c.Methods: if me.IsStatic == isStatic: return CodeMethodReferenceExpression(target, name) for prop as IProperty in c.Properties: if prop.IsStatic == isStatic: return CodePropertyReferenceExpression(target, name) for field as IField in c.Fields: if field.IsStatic == isStatic: return CodeFieldReferenceExpression(target, name) return CodeFieldReferenceExpression(target, name) def GetParserService() as ICSharpCode.SharpDevelop.Services.IParserService: return ICSharpCode.Core.Services.ServiceManager.Services.GetService(typeof(ICSharpCode.SharpDevelop.Services.IParserService)) def OnAstLiteralExpression(node as AstLiteralExpression): _expression = CodeObjectCreateExpression(node.Node.GetType()) def OnMethodInvocationExpression(node as MethodInvocationExpression): _expression = null node.Target.Accept(self) if _expression != null: if _expression isa CodeTypeReferenceExpression: coce = CodeObjectCreateExpression(cast(CodeTypeReferenceExpression, _expression).Type) ConvertExpressions(coce.Parameters, node.Arguments) _expression = coce elif _expression isa CodeMethodReferenceExpression: cmie = CodeMethodInvokeExpression(_expression) ConvertExpressions(cmie.Parameters, node.Arguments) _expression = cmie elif _expression isa CodeFieldReferenceExpression: // when a type is unknown, a MemberReferenceExpression is translated into a CodeFieldReferenceExpression cfre as CodeFieldReferenceExpression = 
_expression cmie = CodeMethodInvokeExpression(cfre.TargetObject, cfre.FieldName) ConvertExpressions(cmie.Parameters, node.Arguments) _expression = cmie else: _expression = null def ConvertExpressions(args as CodeExpressionCollection, expressions as ExpressionCollection): """Converts a list of expressions to CodeDom expressions.""" for e in expressions: _expression = null e.Accept(self) args.Add(_expression) def OnOmittedExpression(node as OmittedExpression): pass def OnProperty(node as Property): pass def OnRaiseStatement(node as RaiseStatement): pass def OnRELiteralExpression(node as RELiteralExpression): pass def OnRetryStatement(node as RetryStatement): pass def OnReturnStatement(node as ReturnStatement): _expression = null if node.Expression != null: node.Expression.Accept(self) _statements.Add(CodeMethodReturnStatement(_expression)) def OnSelfLiteralExpression(node as SelfLiteralExpression): _expression = CodeThisReferenceExpression() def OnSimpleTypeReference(node as SimpleTypeReference): pass def OnSlice(node as Slice): pass def OnSlicingExpression(node as SlicingExpression): pass def OnStatementModifier(node as StatementModifier): pass def OnSuperLiteralExpression(node as SuperLiteralExpression): _expression = CodeBaseReferenceExpression() def OnConditionalExpression(node as ConditionalExpression): pass def OnTimeSpanLiteralExpression(node as TimeSpanLiteralExpression): pass def OnTryStatement(node as TryStatement): pass def OnTypeofExpression(node as TypeofExpression): pass def OnUnaryExpression(node as UnaryExpression): pass def OnUnlessStatement(node as UnlessStatement): pass def OnUnpackStatement(node as UnpackStatement): pass def OnWhenClause(node as WhenClause): pass def OnWhileStatement(node as WhileStatement): pass def OnExplicitMemberInfo(node as ExplicitMemberInfo): pass def OnYieldStatement(node as YieldStatement): pass def OnGenericReferenceExpression(node as GenericReferenceExpression): pass def OnGenericTypeReference(node as 
GenericTypeReference): pass nant-0.9.5~git20110729.r1.202a430/extras/SharpDevelop/BooBinding/src/CompilerManager.boo000066400000000000000000000156201161462365500277160ustar00rootroot00000000000000#region license // Copyright (c) 2004, Daniel Grunwald (daniel@danielgrunwald.de) // All rights reserved. // // BooBinding is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. // // BooBinding is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with BooBinding; if not, write to the Free Software // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA #endregion namespace BooBinding import System import System.Collections import System.IO import System.Diagnostics import System.Text import System.Reflection import System.CodeDom.Compiler import Boo.Lang.Compiler.CompilerWarning as BooWarning import Boo.Lang.Compiler.CompilerError as BooError import ICSharpCode.Core.Services import ICSharpCode.SharpDevelop.Internal.Project import ICSharpCode.SharpDevelop.Gui import ICSharpCode.SharpDevelop.Services class BooBindingCompilerManager: _booLibNotFound = "Boo.Dll was not found in the boo addin directory" def GetCompiledOutputName(fileName as string) as string: return Path.ChangeExtension(fileName, ".exe") def GetCompiledOutputName(p as BooProject) as string: compilerparameters as BooCompilerParameters = p.ActiveConfiguration fileUtilityService as FileUtilityService = ServiceManager.Services.GetService(typeof(FileUtilityService)) exe = fileUtilityService.GetDirectoryNameWithSeparator(compilerparameters.OutputDirectory) + 
compilerparameters.OutputAssembly if (compilerparameters.CompileTarget == CompileTarget.Library): return exe + ".dll" else: return exe + ".exe" def CanCompile(fileName as string) as bool: return Path.GetExtension(fileName).ToLower() == ".boo" private def MakeError(text as string): cr = CompilerResults(TempFileCollection()) cr.Errors.Add(CompilerError(ErrorText: text)) return DefaultCompilerResult(cr, text) private def Compile(compilerparameters as BooCompilerParameters, fileNames as (string), outputFile as string, p as IProject) as ICompilerResult: messageService as IMessageService = ServiceManager.Services.GetService(typeof(IMessageService)) cr as CompilerResults = null booDir = Path.GetDirectoryName(Assembly.GetExecutingAssembly().Location) booLib = Path.Combine(booDir, "Boo.Lang.dll") if not File.Exists(booLib): messageService.ShowError(_booLibNotFound) return MakeError(_booLibNotFound) outputDirectory = Path.GetDirectoryName(outputFile) try: File.Copy(booLib, Path.Combine(outputDirectory, Path.GetFileName(booLib)), true) except: pass if compilerparameters.CompileTarget == CompileTarget.WinExe: WriteManifestFile(outputFile) compiler as BooCompilerWrapper = BooCompilerWrapper() compiler.SetOptions(compilerparameters.CurrentCompilerOptions) compiler.OutputFile = outputFile for fileName as string in fileNames: compiler.AddInputFile(Path.GetFullPath(fileName)) if p != null: // write references for lib as ProjectReference in p.ProjectReferences: compiler.AddReference(lib.GetReferencedFileName(p)) // write embedded resources for finfo as ProjectFile in p.ProjectFiles: if finfo.Subtype != Subtype.Directory and finfo.BuildAction == BuildAction.EmbedAsResource: compiler.AddResource(finfo.Name) p.CopyReferencesToOutputPath(true) result = compiler.Run() cr = CompilerResults(TempFileCollection()) compilerOutput = StringBuilder() for line in StringReader(result): print line compilerOutput.Append(line) compilerOutput.Append(Environment.NewLine) error = CompilerError() match = 
/^(.+)\((\d+),(\d+)\):\s([\w\d]+):\s(.+)$/.Match(line) if match.Success: groups = match.Groups filename = groups[1].Value lineNumber = int.Parse(groups[2].Value) column = int.Parse(groups[3].Value) code = groups[4].Value message = groups[5].Value error.ErrorNumber = code error.ErrorText = message error.IsWarning = code.StartsWith("BCW") if lineNumber >= 0: error.Column = column error.Line = lineNumber error.FileName = filename else: match = /^([\w\d]+):\s(.+)$/.Match(line) if match.Success: error.ErrorNumber = match.Groups[1].Value error.ErrorText = match.Groups[2].Value else: match = /^(.+):\s(.+)$/.Match(line) if match.Success: error.ErrorText = line else: continue cr.Errors.Add(error) compiler = null return DefaultCompilerResult(cr, compilerOutput.ToString()) static def MyResolveEventHandler(sender, e as ResolveEventArgs) as Assembly: Console.WriteLine("Resolving ${e.Name}") return null def CompileFile(fileName as string, compilerparameters as BooCompilerParameters) as ICompilerResult: compilerparameters.OutputDirectory = Path.GetDirectoryName(fileName) compilerparameters.OutputAssembly = Path.GetFileNameWithoutExtension(fileName) return Compile(compilerparameters, (fileName,), GetCompiledOutputName(fileName), null) def CompileProject(project as BooProject) as ICompilerResult: compilerparameters as BooCompilerParameters = project.ActiveConfiguration fileNames = ArrayList() for finfo as ProjectFile in project.ProjectFiles: if finfo.Subtype != Subtype.Directory: if finfo.BuildAction == BuildAction.Compile: fileNames.Add(finfo.Name) exe = GetCompiledOutputName(project) if compilerparameters.CompileTarget == CompileTarget.WinExe: WriteManifestFile(exe) return Compile(compilerparameters, fileNames.ToArray(typeof(string)), exe, project) def WriteManifestFile(fileName as string): manifestFile = fileName + ".manifest" return if File.Exists(manifestFile) using sw = StreamWriter(manifestFile): sw.WriteLine("") sw.WriteLine("") sw.WriteLine("") sw.WriteLine(" ") 
sw.WriteLine(" ") sw.WriteLine(" ") sw.WriteLine(" ") sw.WriteLine(" ") sw.WriteLine("") nant-0.9.5~git20110729.r1.202a430/extras/SharpDevelop/BooBinding/src/CompilerParameters.boo000066400000000000000000000102071161462365500304430ustar00rootroot00000000000000#region license // Copyright (c) 2004, Daniel Grunwald (daniel@danielgrunwald.de) // All rights reserved. // // BooBinding is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. // // BooBinding is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with BooBinding; if not, write to the Free Software // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA #endregion namespace BooBinding import System import System.IO import System.Text import System.Xml import System.Diagnostics import System.ComponentModel import ICSharpCode.SharpDevelop.Gui.Components import ICSharpCode.SharpDevelop.Internal.Project import ICSharpCode.Core.Services enum CompileTarget: WinExe Exe Library enum NetRuntime: Mono MonoInterpreter MsNet class BooCompilerParameters(AbstractProjectConfiguration): _compilerOptions = CompilerOptions() [Browsable(false)] CurrentCompilerOptions: get: return _compilerOptions [LocalizedProperty("Output path", Description : "The path where the assembly is created.")] OutputPath as string: get: return OutputDirectory set: OutputDirectory = value [LocalizedProperty("Output assembly", Description : "The assembly name.")] AssemblyName as string: get: return OutputAssembly set: OutputAssembly = value [LocalizedProperty("Parameters", Description : "Command line 
parameters passed to the executed application.")] CommandLineParameters as string: get: return _compilerOptions.CommandLineParameters set: _compilerOptions.CommandLineParameters = value [DefaultValue(BooBinding.CompileTarget.Exe)] [LocalizedProperty("Compile Target", Description : "The compilation target of the source code.")] CompileTarget as BooBinding.CompileTarget: get: return _compilerOptions.CompileTarget set: _compilerOptions.CompileTarget = value [DefaultValue(false)] [LocalizedProperty("Include debug information", Description : "Specifies if debug information should be omited.")] IncludeDebugInformation as bool: get: return _compilerOptions.IncludeDebugInformation set: _compilerOptions.IncludeDebugInformation = value [DefaultValue(true)] [LocalizedProperty("Pause console", Description : "Specifies if after the executing the program in the console the window should wait for any key before closing.")] PauseConsoleOutput as bool: get: return _compilerOptions.PauseConsoleOutput set: _compilerOptions.PauseConsoleOutput = value [DefaultValue(NetRuntime.MsNet)] [LocalizedProperty("Runtime", Description : "Specifies the runtime for executing the program.")] Runtime as NetRuntime: get: return _compilerOptions.Runtime set: _compilerOptions.Runtime = value [DefaultValue(false)] [LocalizedProperty("Duck typing by default", Description : "A slower but more flexible python-like mode in which types that cannot be inferred are resolved at runtime (duck typed).")] DuckTypingByDefault as bool: get: return _compilerOptions.DuckTypingByDefault set: _compilerOptions.DuckTypingByDefault = value def constructor(name as string): self.name = name def constructor(): pass [XmlNodeName("CompilerOptions")] class CompilerOptions: [XmlAttribute("runtime")] public Runtime = NetRuntime.MsNet [XmlAttribute("compileTarget")] public CompileTarget = BooBinding.CompileTarget.Exe [XmlAttribute("includeDebugInformation")] public IncludeDebugInformation = false 
[XmlAttribute("commandLineParameters")] public CommandLineParameters = "" [XmlAttribute("pauseConsoleOutput")] public PauseConsoleOutput = true [XmlAttribute("duckTypingByDefault")] public DuckTypingByDefault = false nant-0.9.5~git20110729.r1.202a430/extras/SharpDevelop/BooBinding/src/ConvertCommand.boo000066400000000000000000000056031161462365500275700ustar00rootroot00000000000000#region license // Copyright (c) 2004, Daniel Grunwald (daniel@danielgrunwald.de) // All rights reserved. // // BooBinding is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. // // BooBinding is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with BooBinding; if not, write to the Free Software // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA #endregion namespace BooBinding import System import System.IO import System.Collections import System.ComponentModel import System.Windows.Forms import ICSharpCode.Core.Services import ICSharpCode.Core.AddIns import ICSharpCode.Core.Properties import ICSharpCode.Core.AddIns.Codons import System.CodeDom.Compiler import ICSharpCode.SharpDevelop.Gui import ICSharpCode.SharpDevelop.Internal.Project import ICSharpCode.SharpDevelop.Gui.Dialogs import ICSharpCode.SharpDevelop.Services import ICSharpCode.SharpDevelop.Commands import ICSharpCode.SharpRefactory.PrettyPrinter import ICSharpCode.SharpRefactory.Parser class ConvertBufferCommand(AbstractMenuCommand): override def Run(): window = WorkbenchSingleton.Workbench.ActiveWorkbenchWindow if window != null and window.ViewContent isa IEditable: 
viewContent as IEditable = window.ViewContent p = Parser() p.Parse(Lexer(ICSharpCode.SharpRefactory.Parser.StringReader(viewContent.Text))) if p.Errors.count > 0: Console.WriteLine(p.Errors.ErrorOutput) messageService as IMessageService = ServiceManager.Services.GetService(typeof(IMessageService)) messageService.ShowError("Correct source code errors first (only correct source code would convert).") return bv = BooVisitor(); bv.Visit(p.compilationUnit, null) fileService as IFileService = ServiceManager.Services.GetService(typeof(IFileService)) fileService.NewFile("Generated.boo", "Boo", bv.SourceText.ToString()) class CSharpConvertProjectToBoo(AbstractProjectConverter): protected override Extension as string: get: return '.boo' // specifying the correct targetLanguage needs at least SharpDevelop 1.0.2a protected override def CreateProject(outputPath as string, originalProject as IProject) as IProject: return CreateProject(outputPath, originalProject, BooLanguageBinding.LanguageName) protected override def ConvertFile(fileName as string) as string: p as Parser = Parser() p.Parse(Lexer(ICSharpCode.SharpRefactory.Parser.FileReader(fileName))) bv = BooVisitor() bv.Visit(p.compilationUnit, null) return bv.SourceText.ToString() nant-0.9.5~git20110729.r1.202a430/extras/SharpDevelop/BooBinding/src/ExecutionManager.boo000066400000000000000000000065501161462365500301110ustar00rootroot00000000000000#region license // Copyright (c) 2004, Daniel Grunwald (daniel@danielgrunwald.de) // All rights reserved. // // BooBinding is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. // // BooBinding is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with BooBinding; if not, write to the Free Software // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA #endregion namespace BooBinding import System import System.IO import System.Diagnostics import System.Collections import System.Reflection import System.Resources import System.Windows.Forms import System.Xml import System.CodeDom.Compiler import System.Threading import ICSharpCode.SharpDevelop.Internal.Project import ICSharpCode.SharpDevelop.Gui import ICSharpCode.SharpDevelop.Services import ICSharpCode.Core.Services class BooBindingExecutionManager: def Execute(filename as string, debug as bool): exe = Path.ChangeExtension(filename, ".exe") debuggerService as DebuggerService = ServiceManager.Services.GetService(typeof(DebuggerService)) if debug: debuggerService.Start(exe, Path.GetDirectoryName(exe), "") else: psi = ProcessStartInfo(Environment.GetEnvironmentVariable("ComSpec"), "/c \"${exe}\" & pause") psi.WorkingDirectory = Path.GetDirectoryName(exe) psi.UseShellExecute = false debuggerService.StartWithoutDebugging(psi) def Execute(project as BooProject, debug as bool): parameters as BooCompilerParameters = project.ActiveConfiguration fileUtilityService as FileUtilityService = ServiceManager.Services.GetService(typeof(FileUtilityService)) directory = fileUtilityService.GetDirectoryNameWithSeparator(parameters.OutputDirectory) exe = parameters.OutputAssembly + ".exe" args = parameters.CommandLineParameters if parameters.CompileTarget == CompileTarget.Library: messageService as IMessageService = ServiceManager.Services.GetService(typeof(IMessageService)) messageService.ShowError('${res:BackendBindings.ExecutionManager.CantExecuteDLLError}') return debuggerService as DebuggerService = ServiceManager.Services.GetService(typeof(DebuggerService)) if debug: debuggerService.Start(Path.Combine(directory, exe), 
directory, args) return runtimeStarter = String.Empty; if parameters.Runtime == NetRuntime.Mono: runtimeStarter = "mono " if parameters.Runtime == NetRuntime.MonoInterpreter: runtimeStarter = "mint " psi as ProcessStartInfo = null; if parameters.CompileTarget != CompileTarget.WinExe and parameters.PauseConsoleOutput: psi = ProcessStartInfo(Environment.GetEnvironmentVariable("ComSpec"), "/c ${runtimeStarter}\"${directory}${exe}\" ${args} & pause") else: psi = ProcessStartInfo(runtimeStarter + "\"" + directory + exe + "\"") psi.Arguments = args psi.WorkingDirectory = Path.GetDirectoryName(directory) psi.UseShellExecute = false debuggerService.StartWithoutDebugging(psi) nant-0.9.5~git20110729.r1.202a430/extras/SharpDevelop/BooBinding/src/FormattingStrategy.boo000066400000000000000000000033641161462365500305100ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // This file is part of Boo Explorer. // // Boo Explorer is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. // // Boo Explorer is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. 
// // You should have received a copy of the GNU General Public License // along with Foobar; if not, write to the Free Software // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA #endregion namespace BooBinding import ICSharpCode.TextEditor import ICSharpCode.TextEditor.Document import ICSharpCode.TextEditor.Actions class BooFormattingStrategy(DefaultFormattingStrategy): override def SmartIndentLine(area as TextArea, line as int) as int: document = area.Document previousLine = document.GetLineSegment(line-1) if document.GetText(previousLine).EndsWith(":"): currentLine = document.GetLineSegment(line) indentation = GetIndentation(area, line-1) indentation += Tab.GetIndentationString(document) document.Replace(currentLine.Offset, currentLine.Length, indentation + document.GetText(currentLine)) return len(indentation) return super(area, line) // Deactivate indenting multiple lines with Ctrl-I override def IndentLines(textArea as TextArea, begin as int, end as int): pass nant-0.9.5~git20110729.r1.202a430/extras/SharpDevelop/BooBinding/src/LanguageBinding.boo000066400000000000000000000051001161462365500276570ustar00rootroot00000000000000#region license // Copyright (c) 2004, Daniel Grunwald (daniel@danielgrunwald.de) // All rights reserved. // // BooBinding is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. // // BooBinding is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. 
// // You should have received a copy of the GNU General Public License // along with BooBinding; if not, write to the Free Software // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA #endregion namespace BooBinding import System import System.IO import System.Diagnostics import System.Collections import System.Reflection import System.Resources import System.Windows.Forms import System.Xml import System.CodeDom.Compiler import System.Threading import ICSharpCode.SharpDevelop.Internal.Project; import ICSharpCode.SharpDevelop.Internal.Templates; import ICSharpCode.SharpDevelop.Gui; class BooLanguageBinding(ILanguageBinding): public static LanguageName = "Boo" Language as string: get: return LanguageName _compilerManager = BooBindingCompilerManager(); _executionManager = BooBindingExecutionManager(); def Execute(filename as string, debug as bool): _executionManager.Execute(filename, debug) def Execute(project as IProject, debug as bool): _executionManager.Execute(project, debug) def GetCompiledOutputName(fileName as string) as string: return _compilerManager.GetCompiledOutputName(fileName) def GetCompiledOutputName(project as IProject) as string: return _compilerManager.GetCompiledOutputName(project); def CanCompile(fileName as string) as bool: return _compilerManager.CanCompile(fileName); def CompileFile(fileName as string) as ICompilerResult: param = BooCompilerParameters(); param.OutputAssembly = Path.ChangeExtension(fileName, ".exe"); return _compilerManager.CompileFile(fileName, param) def CompileProject(project as IProject) as ICompilerResult: return _compilerManager.CompileProject(project) def RecompileProject(project as IProject) as ICompilerResult: return CompileProject(project) def CreateProject(info as ProjectCreateInformation, projectOptions as XmlElement) as IProject: return BooProject(info, projectOptions) 
nant-0.9.5~git20110729.r1.202a430/extras/SharpDevelop/BooBinding/src/OptionsPanel.boo000066400000000000000000000031051161462365500272570ustar00rootroot00000000000000#region license // Copyright (c) 2004, Daniel Grunwald (daniel@danielgrunwald.de) // All rights reserved. // // BooBinding is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. // // BooBinding is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with BooBinding; if not, write to the Free Software // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA #endregion namespace BooBinding import System import System.IO import System.Drawing import System.Windows.Forms import ICSharpCode.SharpDevelop.Internal.Project import ICSharpCode.SharpDevelop.Internal.ExternalTool import ICSharpCode.SharpDevelop.Gui.Dialogs import ICSharpCode.Core.Services import ICSharpCode.Core.Properties import ICSharpCode.Core.AddIns.Codons class BooOptionsPanel(AbstractOptionPanel): _compilerParameters as BooCompilerParameters = null override def LoadPanelContents(): _compilerParameters = cast(IProperties, CustomizationObject).GetProperty("Config"); Controls.Add(PropertyGrid(Dock : DockStyle.Fill, SelectedObject : _compilerParameters)) override def StorePanelContents() as bool: return true; nant-0.9.5~git20110729.r1.202a430/extras/bisect.sh000077500000000000000000000020731161462365500205610ustar00rootroot00000000000000#!/bin/sh #Boo Bisecting Tool # #Usage: # $ git bisect start BAD_REV GOOD_REV (eg. 
HEAD v0.8.1) # # [to test against one testcase/source] # $ export TESTCASE=testcase.boo (eg. tests/testcases/regression/BOO-1008-1.boo) # [to test against one testfixture] # $ export TESTFIXTURE=fixture (eg. BooCompiler.Semantics [no .Tests.dll]) # # with no TESTCASE/TESTFIXTURE the whole testsuite is run # # $ git bisect run extras/bisect.sh # #Enjoy! # if [ ! -z "$TESTCASE" ] && [ ! -z "$TESTFIXTURE" ]; then echo "!!! Both TESTCASE and TESTFIXTURE environment variable are set. Please make your mind!" exit 255 fi #compile nant if [ "$?" -ne "0" ]; then echo "!!! SKIP (cannot build)" exit 125 fi #test if [ ! -z "$TESTCASE" ]; then build/booc.exe $BOOC_OPTIONS $TESTCASE BOOC_EXITCODE="$?" fi if [ ! -z "$TESTFIXTURE" ]; then nant -D:fixture="$TESTFIXTURE" test BOOC_EXITCODE="$?" fi if [ -z "$BOOC_EXITCODE" ]; then nant test BOOC_EXITCODE="$?" fi #return result to git if [ "$BOOC_EXITCODE" != "0" ]; then echo "!!! BAD" exit 1 fi echo "!!! good" exit 0 nant-0.9.5~git20110729.r1.202a430/extras/boo-completion.bash000077500000000000000000000025221161462365500225400ustar00rootroot00000000000000# boo(c|i|ish) completion # put this file in /etc/bash_completion.d/ have booc && _booc() { local cur COMP_WORDBREAKS=${COMP_WORDBREAKS//:} COMPREPLY=() cur=${COMP_WORDS[COMP_CWORD]} if [[ "$cur" == -* ]]; then COMPREPLY=( $( compgen -W '-help -v -vv -vvv \ -debug- -define: -delaysign -ducky -checked- -embedres: \ -lib: -noconfig -nostdlib -nologo -p:boo -p:ast -p:dump -p:verify \ -target:exe -target:library -target:winexe -o: \ -reference: -srcdir: -resource: -pkg: -utf8 -wsa' -- $cur ) ) else _filedir '@(boo)' fi } [ "${have:-}" ] && complete -F _booc $filenames booc have booi && _booi() { local cur COMP_WORDBREAKS=${COMP_WORDBREAKS//:} COMPREPLY=() cur=${COMP_WORDS[COMP_CWORD]} if [[ "$cur" == -* ]]; then COMPREPLY=( $( compgen -W '\ -debug- -define: -ducky -checked- \ -noconfig -nostdlib -nologo \ -reference: -utf8 -wsa' -- $cur ) ) else _filedir '@(boo)' fi } [ "${have:-}" ] && 
complete -F _booi $filenames booi have booish && _booish() { local cur COMP_WORDBREAKS=${COMP_WORDBREAKS//:} COMPREPLY=() cur=${COMP_WORDS[COMP_CWORD]} if [[ "$cur" == -* ]]; then COMPREPLY=( $( compgen -W '\ -debug- -define: -ducky -checked- \ -noconfig -nostdlib -nologo \ -reference: -utf8 -wsa' -- $cur ) ) else _filedir '@(boo)' fi } [ "${have:-}" ] && complete -F _booish $filenames booish nant-0.9.5~git20110729.r1.202a430/extras/boo-jedit41.xml000077500000000000000000000150331161462365500215170ustar00rootroot00000000000000 # // /* */ """ """ ''' ''' " " ' ' /[^[:blank:]]*?[^\\]/ =~ = != ! >= <= ++ -- + - += -= / % * /= %= *= > < % & | ^ ( abstract and as break callable cast class const constructor destructor continue def do elif else end enum ensure event except failure final for from given get if interface in include import is isa mixin partial namespace not of or otherwise override new pass raise ref retry self struct return set success try transient virtual while when unless yield ref public protected private internal static bool regex string object duck byte sbyte short ushort int char uint long ulong single double decimal date timespan void iif len __addressof__ __eval__ __switch__ typeof assert array matrix print gets prompt enumerate zip filter map join cat iterator shell abs using lock required getter setter property checked unchecked rawArrayIndexing normalArrayIndexing yieldAll false null self super true ${ } nant-0.9.5~git20110729.r1.202a430/extras/boo-jedit42.xml000077500000000000000000000151661161462365500215270ustar00rootroot00000000000000 # // /* */ """ """ ''' ''' " " ' ' @/ / /[^[:blank:]]*?/ : [ ] { } ( ) =~ = != ! 
>= <= ++ -- + - += -= / % * /= %= *= > < % & | ^ ( ast abstract and as break callable cast class const constructor destructor continue def do elif else end enum ensure event except failure final for from macro get goto if interface in include import is isa mixin namespace not of or match otherwise override new partial pass raise ref retry self struct return set success try transient virtual while case unless yield ref public protected private internal static bool regex string object duck byte sbyte short ushort char int uint long ulong single double decimal date timespan void len __addressof__ __eval__ __switch__ my array matrix typeof assert print gets prompt enumerate zip filter map join cat iterator shell abs using lock required getter setter property checked unchecked rawArrayIndexing normalArrayIndexing yieldAll false null self super true ${ } nant-0.9.5~git20110729.r1.202a430/extras/boo-mime-info.xml000066400000000000000000000024101161462365500221230ustar00rootroot00000000000000 ]> boo source code fonte boo nant-0.9.5~git20110729.r1.202a430/extras/boo.keys000066400000000000000000000003201161462365500204160ustar00rootroot00000000000000text/x-boo category=Software Development/Source Code can_be_executable=true default_action_type=application description=Boo source code short_list_application_ids_for_novice_user_level=monodevelop,gedit nant-0.9.5~git20110729.r1.202a430/extras/boo.lang000077500000000000000000000127161161462365500204030ustar00rootroot00000000000000 \ """ """ " " ' ' # // /[^/] / /\* \*/ import from as namespace bool byte sbyte double decimal single short ushort int char uint long ulong object duck string regex date timespan abstract virtual override new static final transient protected private public internal partial class struct interface enum callable of def constructor destructor do get set event return yield \b[0-9][0-9\.]*(m|ms|d|h|s|f|F|l|L)?\b true false null self super and break cast continue elif else except ensure for given goto if in is 
isa not or otherwise pass raise try unless when while ref assert __eval__ __switch__ enumerate filter len typeof map max min property using getter required lock range zip checked unchecked rawArrayIndexing normalArrayIndexing print array matrix yieldAll nant-0.9.5~git20110729.r1.202a430/extras/boo.mime000066400000000000000000000000251161462365500203740ustar00rootroot00000000000000text/x-boo ext: boo nant-0.9.5~git20110729.r1.202a430/extras/boo.nanorc000066400000000000000000000017731161462365500207400ustar00rootroot00000000000000## Boo syntax file ## syntax "boo" "\.boo$" color brightblue "import" color magenta "\<(char|sbyte|byte|bool|int|uint|short|ushort|long|ulong|single|double|decimal|date|timespan)\>[\*\?]?" color magenta "\<(string|object|duck|regex)\>\*?" color brightblue "def [0-9a-zA-Z_]*" color brightwhite "\<(and|as|assert|break|class|continue|def|elif|else|except|enum|ensure|event|for|from|if|import|in|interface|is|isa|not|of|or|pass|raise|return|struct|try|while|yield|public|protected|private|internal|final|static|abstract|override|virtual|transient|partial|array|enumerate|gets|iterator|join|map|matrix|print|range|reversed|shell|shellm|shellp|zip|typeof|macro)\>" color green "['][^']*[^\\][']" "[']{3}.*[^\\][']{3}" color green "["][^"]*[^\\]["]" "["]{3}.*[^\\]["]{3}" color green start=""""[^"]" end=""""" start="'''[^']" end="'''" color green "\<[0-9\.]*\>" color red "\<(true|false|null)\>" color yellow "#.*$|//.*$" color brightgreen "(\[\|)|(]|)" ## trailing-whitespace warning ala git color ,red "[[:space:]]+$" nant-0.9.5~git20110729.r1.202a430/extras/boo.pc.in000066400000000000000000000005621161462365500204620ustar00rootroot00000000000000prefix=@prefix@ exec_prefix=${prefix} libdir=${prefix}/lib Name: Boo Description: A wrist friendly language for the CLI Version: @VERSION@ Libs: -r:${libdir}/boo/Boo.Lang.dll -r:${libdir}/boo/Boo.Lang.Useful.dll -r:${libdir}/boo/Boo.Lang.CodeDom.dll -r:${libdir}/boo/Boo.Lang.Compiler.dll 
-r:${libdir}/boo/Boo.Lang.Parser.dll -r:${libdir}/boo/Boo.Lang.Interpreter.dll nant-0.9.5~git20110729.r1.202a430/extras/boo.vim000077500000000000000000000223501161462365500202500ustar00rootroot00000000000000" Vim syntax file " Language: boo " Maintainer: nsf " Latest Revision: 22 July 2011 " " Based on boo.vim by Rodrigo B. de Oliveira which is based on python syntax " file by Neil Schemenauer. " " Options to control syntax highlighting: " " For highlighted numbers: " " let boo_highlight_numbers = 1 " " For highlighted builtin functions: " " let boo_highlight_builtins = 1 " " For highlighted standard exceptions: " " let boo_highlight_exceptions = 1 " " Highlight erroneous whitespace: " " let boo_highlight_space_errors = 1 " " If you want all possible highlighting (the same as setting the " preceding options): " let boo_highlight_all = 1 " " For version 5.x: Clear all syntax items " For version 6.x: Quit when a syntax file was already loaded if version < 600 syntax clear elseif exists("b:current_syntax") finish endif syn keyword booConstant true false null syn keyword booAccess public protected private syn keyword booModifier abstract final internal override ref new syn keyword booModifier partial static transient virtual event syn region booImportRegion start="^import" end="$" contains=booImport syn keyword booImport import as from contained syn keyword booRepeat for while then syn keyword booConditional if elif else unless syn keyword booStatement break continue return pass yield goto syn keyword booStatement get set syn keyword booStatement constructor destructor typeof super syn keyword booOperator and in is isa in not or of cast as syn keyword booExceptionKWs try except raise ensure failure syn keyword booStorage callable class def enum do syn keyword booStorage interface namespace struct syn keyword booTodo WARNING TODO FIXME XXX contained syn match booComment "#.*$" contains=booTodo syn match booComment2 "//.*$" contains=booTodo syn region booRegionComment 
start="/\*" end="\*/" contains=booTodo " strings syn region booString matchgroup=Normal start=+[uU]\='+ end=+'+ skip=+\\\\\|\\'+ contains=booEscape syn region booString matchgroup=Normal start=+[uU]\="+ end=+"+ skip=+\\\\\|\\"+ contains=booEscape syn region booString matchgroup=Normal start=+[uU]\="""+ end=+"""+ contains=booEscape syn region booString matchgroup=Normal start=+[uU]\='''+ end=+'''+ contains=booEscape syn region booRawString matchgroup=Normal start=+[uU]\=[rR]'+ end=+'+ skip=+\\\\\|\\'+ syn region booRawString matchgroup=Normal start=+[uU]\=[rR]"+ end=+"+ skip=+\\\\\|\\"+ syn region booRawString matchgroup=Normal start=+[uU]\=[rR]"""+ end=+"""+ syn region booRawString matchgroup=Normal start=+[uU]\=[rR]'''+ end=+'''+ syn match booEscape +\\[abfnrtv'"\\]+ contained syn match booEscape "\\\o\{1,3}" contained syn match booEscape "\\x\x\{2}" contained syn match booEscape "\(\\u\x\{4}\|\\U\x\{8}\)" contained syn match booEscape "\\$" " TODO: regexp? if exists("boo_highlight_all") let boo_highlight_numbers = 1 let boo_highlight_builtins = 1 let boo_highlight_exceptions = 1 let boo_highlight_space_errors = 1 endif "------------------------------------------------------------------------------ " Built-ins "------------------------------------------------------------------------------ if exists("boo_highlight_builtins") " built-in macros from: " grep "^macro" Boo.Lang.Extensions/Macros/*.boo syn keyword booBuiltin assert unchecked checked debug lock preserving print syn keyword booBuiltin property normalArrayIndexing rawArrayIndexing using syn keyword booBuiltin yieldAll " built-in functions from booish: " dir(Boo.Lang.Builtins) syn keyword booBuiltin print gets prompt join map array matrix iterator syn keyword booBuiltin shellp shell shellm enumerate range reversed zip cat " built-in types from: " Boo.Lang.Compiler/TypeSystem/Services/TypeSystemServices.cs:997 syn keyword booType duck void object callable decimal date syn keyword booType bool sbyte byte short 
ushort int uint long ulong syn keyword booType single double char string regex timespan " self syn keyword booBuiltin self endif "------------------------------------------------------------------------------ " Numbers from: " Boo.Lang.Parser/booel.g:67 "------------------------------------------------------------------------------ " holy shit, if anyone wants to edit this, good luck :D " digits ([eE][+-]? digits)? ([lL] | [fF] | (('.' digits ([eE][+-]? digits)? [fF]? )? ([smhd] | ms)?)) "syn match booNumber "\< \d\%(\%(_\d\)\|\d\)* \%([eE][+-]\=\d\%(\%(_\d\)\|\d\)*\)\= \%([lL] \| [fF] \| \%(\%(\.\d\%(\%(_\d\)\|\d\)* \%([eE][+-]\=\d\%(\%(_\d\)\|\d\)*\)\= [fF]\= \)\= \%([smhd]\|ms\)\=\) \)\>" " if exists("boo_highlight_numbers") syn match booNumber "\<0x\x\+[lL]\=\>" syn match booNumber "\<\d\%(\%(_\d\)\|\d\)*\%([eE][+-]\=\d\%(\%(_\d\)\|\d\)*\)\=\%([lL]\|[fF]\|\%(\%(\.\d\%(\%(_\d\)\|\d\)*\%([eE][+-]\=\d\%(\%(_\d\)\|\d\)*\)\=[fF]\=\)\=\%([smhd]\|ms\)\=\)\)\>" syn match booNumber "\.\d\%(\%(_\d\)\|\d\)*\%([eE][+-]\=\d\%(\%(_\d\)\|\d\)*\)\=\%([fF]\|\%([smhd]\|ms\)\)\=\>" endif if exists("boo_highlight_exceptions") " common .NET exceptions syn keyword booException Exception SystemException ArgumentException syn keyword booException ArgumentNullException ArgumentOutOfRangeException syn keyword booException DuplicateWaitObjectException ArithmeticException syn keyword booException DivideByZeroException OverflowException syn keyword booException NotFiniteNumberException ArrayTypeMismatchException syn keyword booException ExecutionEngineException FormatException syn keyword booException IndexOutOfRangeException InvalidCastException syn keyword booException InvalidOperationException ObjectDisposedException syn keyword booException InvalidProgramException IOException syn keyword booException DirectoryNotFoundException EndOfStreamException syn keyword booException FileLoadException FileNotFoundException syn keyword booException PathTooLongException NotImplementedException syn 
keyword booException NotSupportedException NullReferenceException syn keyword booException OutOfMemoryException RankException syn keyword booException SecurityException VerificationException syn keyword booException StackOverflowException SynchronizationLockException syn keyword booException ThreadAbortException ThreadStateException syn keyword booException TypeInitializationException UnauthorizedAccessException endif if exists("boo_highlight_space_errors") " trailing whitespace syn match booSpaceError display excludenl "\S\s\+$"ms=s+1 " mixed tabs and spaces syn match booSpaceError display " \+\t" syn match booSpaceError display "\t\+ " endif " This is fast but code inside triple quoted strings screws it up. It " is impossible to fix because the only way to know if you are inside a " triple quoted string is to start from the beginning of the file. If " you have a fast machine you can try uncommenting the "sync minlines" " and commenting out the rest. syn sync match booSync grouphere NONE "):$" syn sync maxlines=200 "syn sync minlines=2000 if version >= 508 || !exists("did_boo_syn_inits") if version <= 508 let did_boo_syn_inits = 1 command -nargs=+ HiLink hi link else command -nargs=+ HiLink hi def link endif " The default methods for highlighting. 
Can be overridden later HiLink booConstant Constant HiLink booAccess StorageClass HiLink booModifier StorageClass HiLink booImport Include HiLink booRepeat Repeat HiLink booConditional Conditional HiLink booStatement Statement HiLink booOperator Operator HiLink booExceptionKWs Exception if exists("boo_highlight_exceptions") HiLink booException Exception endif HiLink booStorage StorageClass HiLink booTodo Todo HiLink booComment Comment HiLink booComment2 Comment HiLink booRegionComment Comment HiLink booString String HiLink booRawString String HiLink booRegex String HiLink booEscape Special if exists("boo_highlight_builtins") HiLink booBuiltin Function HiLink booType Type endif if exists("boo_highlight_numbers") HiLink booNumber Number endif if exists("boo_highlight_space_errors") HiLink booSpaceError Error endif delcommand HiLink endif let b:current_syntax = "boo" " vim: ts=8 nant-0.9.5~git20110729.r1.202a430/extras/boo.xml000077500000000000000000000304621161462365500202600ustar00rootroot00000000000000 import from as namespace and assert in is not or bool byte sbyte double decimal single short ushort int char uint long ulong object duck string regex date timespan abstract virtual override new static final transient macro protected private public internal partial class struct interface enum callable of def constructor destructor do get set event return yield true false null self super and break cast continue elif else except ensure for given goto if in is isa not or otherwise pass raise try unless when while ref assert __eval__ __switch__ enumerate filter len typeof map max min property using getter required lock range zip checked unchecked rawArrayIndexing normalArrayIndexing print array matrix yieldAll nant-0.9.5~git20110729.r1.202a430/extras/booc.in000066400000000000000000000003151161462365500202200ustar00rootroot00000000000000#!/bin/sh if [ -x @prefix@/bin/cli ]; then env @prefix@/bin/cli $MONO_OPTIONS @libdir@/boo/booc.exe $BOOC_OPTIONS "$@" else env @RUNTIME@ 
$MONO_OPTIONS @libdir@/boo/booc.exe $BOOC_OPTIONS "$@" fi nant-0.9.5~git20110729.r1.202a430/extras/booi.in000066400000000000000000000003151161462365500202260ustar00rootroot00000000000000#!/bin/sh if [ -x @prefix@/bin/cli ]; then env @prefix@/bin/cli $MONO_OPTIONS @libdir@/boo/booi.exe $BOOI_OPTIONS "$@" else env @RUNTIME@ $MONO_OPTIONS @libdir@/boo/booi.exe $BOOI_OPTIONS "$@" fi nant-0.9.5~git20110729.r1.202a430/extras/booish.gui/000077500000000000000000000000001161462365500210155ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/extras/booish.gui/default.build000066400000000000000000000027551161462365500234730ustar00rootroot00000000000000 nant-0.9.5~git20110729.r1.202a430/extras/booish.gui/src/000077500000000000000000000000001161462365500216045ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/extras/booish.gui/src/CodeCompletionData.boo000066400000000000000000000131611161462365500260050ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // This file is part of Boo Explorer. // // Boo Explorer is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. // // Boo Explorer is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. 
// // You should have received a copy of the GNU General Public License // along with Foobar; if not, write to the Free Software // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA #endregion namespace booish.gui import System import ICSharpCode.TextEditor import ICSharpCode.TextEditor.Document import ICSharpCode.TextEditor.Actions import ICSharpCode.TextEditor.Gui.CompletionWindow import Boo.Lang.Interpreter import Boo.Lang.Compiler.TypeSystem internal class AbstractCompletionData(ICompletionData, IComparable): _name as string def constructor(name): _name = name Text as (string): get: return (_name,) abstract ImageIndex as int: get: pass abstract Description as string: get: pass def InsertAction(control as TextEditorControl): control.ActiveTextAreaControl.TextArea.InsertString(_name) public def CompareTo(obj) as int: if obj is null or not obj isa CodeCompletionData: return -1 other = obj as CodeCompletionData return _name.CompareTo(other._name) internal class CodeCompletionData(AbstractCompletionData): _imageProvider as ICompletionWindowImageProvider _entities as List = [] def constructor(imageProvider, name): super(name) _imageProvider = imageProvider Description: get: description = InteractiveInterpreter.DescribeEntity(_entities[0]) return description if 1 == len(_entities) return "${description} (+${len(_entities)-1} overloads)" ImageIndex as int: get: entity = _entities[0] as IEntity entityType = entity.EntityType if EntityType.Type == entityType: type as IType = entity if type.IsInterface: return _imageProvider.InterfaceIndex elif type.IsEnum: return _imageProvider.EnumIndex elif type.IsValueType: return _imageProvider.StructIndex elif type isa ICallableType: return _imageProvider.CallableIndex else: return _imageProvider.ClassIndex elif EntityType.Method == entityType: return _imageProvider.MethodIndex elif EntityType.Field == entityType: if (entity as IField).IsLiteral: return _imageProvider.LiteralIndex else: return 
_imageProvider.FieldIndex elif EntityType.Property == entityType: return _imageProvider.PropertyIndex elif EntityType.Event == entityType: return _imageProvider.EventIndex return _imageProvider.NamespaceIndex def AddEntity(entity as IEntity): _entities.Add(entity) abstract internal class AbstractCompletionDataProvider(ICompletionDataProvider): _imageProvider as ICompletionWindowImageProvider def constructor([required] imageProvider): _imageProvider = imageProvider ImageList as System.Windows.Forms.ImageList: get: return _imageProvider.ImageList PreSelection as string: get: return null abstract def GenerateCompletionData(fileName as string, textArea as TextArea, charTyped as System.Char) as (ICompletionData): pass internal class GlobalsCompletionDataProvider(AbstractCompletionDataProvider): _interpreter as InteractiveInterpreter class GlobalCompletionData(AbstractCompletionData): [getter(ImageIndex)] _imageIndex as int [getter(Description)] _description as string def constructor(name, imageIndex, description): super(name) _imageIndex = imageIndex _description = description def constructor(imageProvider, interpreter): super(imageProvider) _interpreter = interpreter override def GenerateCompletionData(fileName as string, textArea as TextArea, charTyped as System.Char) as (ICompletionData): globals = _interpreter.globals() data = array(ICompletionData, len(globals)) for index, key in enumerate(globals): value = _interpreter.GetValue(key) delegate = value as System.Delegate if delegate is null: if value is not null: description = "${key} as ${InteractiveInterpreter.GetBooTypeName(value.GetType())}" else: description = "null" item = GlobalCompletionData(key, _imageProvider.FieldIndex, description) else: item = GlobalCompletionData(key, _imageProvider.MethodIndex, InteractiveInterpreter.DescribeMethod(delegate.Method)) data[index] = item return data internal class CodeCompletionDataProvider(AbstractCompletionDataProvider): _codeCompletion as (IEntity) def 
constructor(imageProvider, [required] codeCompletion): super(imageProvider) _codeCompletion = codeCompletion override def GenerateCompletionData(fileName as string, textArea as TextArea, charTyped as System.Char) as (ICompletionData): values = {} for item in _codeCompletion: data as CodeCompletionData data = values[item.Name] if data is null: name = item.Name if "." in name: name = /\./.Split(name)[-1] data = CodeCompletionData(_imageProvider, name) values[item.Name] = data data.AddEntity(item) return array(ICompletionData, values.Values) nant-0.9.5~git20110729.r1.202a430/extras/booish.gui/src/InteractiveInterpreterControl.boo000066400000000000000000000211221161462365500303450ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // This file is part of Boo Explorer. // // Boo Explorer is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. // // Boo Explorer is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. 
// // You should have received a copy of the GNU General Public License // along with Foobar; if not, write to the Free Software // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA #endregion """ Interactive Forms-based Console """ namespace booish.gui import System import System.Drawing import System.IO import System.Windows.Forms import ICSharpCode.TextEditor import ICSharpCode.TextEditor.Document import ICSharpCode.TextEditor.Actions import ICSharpCode.TextEditor.Gui.CompletionWindow import Boo.Lang.Compiler.TypeSystem interface ICompletionWindowImageProvider: ImageList as ImageList: get NamespaceIndex as int: get ClassIndex as int: get InterfaceIndex as int: get EnumIndex as int: get StructIndex as int: get CallableIndex as int: get MethodIndex as int: get FieldIndex as int: get LiteralIndex as int: get PropertyIndex as int: get EventIndex as int: get class InteractiveInterpreterControl(TextEditorControl): enum InputState: SingleLine = 0 Block = 1 class NullCompletionWindowImageProvider(ICompletionWindowImageProvider): public static final Instance = NullCompletionWindowImageProvider() [getter(ImageList)] _imageList = System.Windows.Forms.ImageList() NamespaceIndex as int: get: return 0 ClassIndex as int: get: return 0 InterfaceIndex as int: get: return 0 EnumIndex as int: get: return 0 StructIndex as int: get: return 0 CallableIndex as int: get: return 0 MethodIndex as int: get: return 0 FieldIndex as int: get: return 0 LiteralIndex as int: get: return 0 PropertyIndex as int: get: return 0 EventIndex as int: get: return 0 class LineHistory: _lines = [] _current = 0 event CurrentLineChanged as EventHandler def Add([required] line as string): if len(line) > 0 and line != LastLine: _lines.Add(line) _current = len(_lines) LastLine as string: get: return null if 0 == len(_lines) return _lines[-1] CurrentLine as string: get: return null if 0 == len(_lines) return _lines[_current] def Up(): MoveTo(_current - 1) def Down(): MoveTo(_current + 1) 
def MoveTo(index as int): return if 0 == len(_lines) old = _current _current = index % len(_lines) if old != _current: CurrentLineChanged(self, EventArgs.Empty) _state = InputState.SingleLine _block = System.IO.StringWriter() [getter(Interpreter)] _interpreter as Boo.Lang.Interpreter.InteractiveInterpreter _codeCompletionWindow as CodeCompletionWindow [property(CompletionWindowImageProvider, value is not null)] _imageProvider as ICompletionWindowImageProvider = NullCompletionWindowImageProvider() _lineHistory as LineHistory // HACK: when the completion window is shown through CTRL+SPACE // it behaves very strangely, the best we can do right now is // to prevent the user from typing _blockKeys = false def constructor(): self._interpreter = Boo.Lang.Interpreter.InteractiveInterpreter( RememberLastValue: true, Print: self.print) self._interpreter.SetValue("cls", cls) self._lineHistory = LineHistory(CurrentLineChanged: _lineHistory_CurrentLineChanged) self.Document.HighlightingStrategy = GetBooHighlighting() self.EnableFolding = false self.ShowLineNumbers = false self.ShowSpaces = false self.ShowTabs = true self.ShowEOLMarkers = false self.AllowCaretBeyondEOL = false self.ShowInvalidLines = false self.Dock = DockStyle.Fill CaretColumn: get: return self.ActiveTextAreaControl.Caret.Column CurrentLineText: get: segment = GetLastLineSegment() return self.Document.GetText(segment)[4:] override def OnLoad(args as EventArgs): super(args) prompt() def Eval(code as string): try: _interpreter.LoopEval(code) ensure: _state = InputState.SingleLine private def ConsumeCurrentLine(): text as string = CurrentLineText # was accessing Control.text member _lineHistory.Add(text) print("") return text private def GetLastLineSegment(): return self.Document.GetLineSegment(self.Document.LineSegmentCollection.Count) private def SingleLineInputState(): code = ConsumeCurrentLine() if code[-1:] in (":", "\\"): _state = InputState.Block _block.GetStringBuilder().Length = 0 _block.WriteLine(code) 
else: Eval(code) private def BlockInputState(): code = ConsumeCurrentLine() if 0 == len(code): Eval(_block.ToString()) else: _block.WriteLine(code) def print(msg): AppendText("${msg}\r\n") def prompt(): AppendText((">>> ", "... ")[_state]) def ClearLine(): segment = GetLastLineSegment() self.Document.Replace(segment.Offset + 4, self.CurrentLineText.Length, "") def AppendText(text as string): segment = GetLastLineSegment() self.Document.Insert(segment.Offset + segment.TotalLength, text) MoveCaretToEnd() def MoveCaretToEnd(): segment = GetLastLineSegment() newOffset = segment.Offset + segment.TotalLength MoveCaretToOffset(newOffset) def MoveCaretToOffset(offset as int): self.ActiveTextAreaControl.Caret.Position = self.Document.OffsetToPosition(offset) override def InitializeTextAreaControl(newControl as TextAreaControl): super(newControl) newControl.TextArea.DoProcessDialogKey += HandleDialogKey newControl.TextArea.KeyEventHandler += HandleKeyPress InCodeCompletion: get: return _codeCompletionWindow is not null and not _codeCompletionWindow.IsDisposed private def DotComplete(ch as System.Char): ShowCompletionWindow( CodeCompletionDataProvider(_imageProvider, GetSuggestions()), ch) private def ShowCompletionWindow(completionDataProvider, ch as System.Char): _codeCompletionWindow = CodeCompletionWindow.ShowCompletionWindow( self.ParentForm, self, "", completionDataProvider, ch) if _codeCompletionWindow is not null: _codeCompletionWindow.Closed += def(): _blockKeys = false private def CtrlSpaceComplete(): _blockKeys = true ShowCompletionWindow( GlobalsCompletionDataProvider(_imageProvider, self._interpreter), Char.MinValue) private def GetSuggestions(): code = CurrentLineText.Insert(self.CaretColumn-4, ".__codecomplete__") code = code.Insert(0, _block.ToString()) if InputState.Block == _state return _interpreter.SuggestCodeCompletion(code) private def HandleDialogKey(key as Keys): return false if InCodeCompletion if key == Keys.Enter: try: (SingleLineInputState, 
BlockInputState)[_state]() except x as System.Reflection.TargetInvocationException: print(x.InnerException) except x: print(x) prompt() return true if key == Keys.Up: _lineHistory.Up() return true if key == Keys.Down: _lineHistory.Down() return true if key == (Keys.Control | Keys.Space): CtrlSpaceComplete() return true if key in (Keys.Home, Keys.Shift|Keys.Home, Keys.Control|Keys.Home): MoveCaretToOffset(GetLastLineSegment().Offset + 4) return true if key == Keys.Escape: ClearLine() return true if key in (Keys.Back, Keys.Left): if self.CaretColumn < 5: return true else: if self.CaretColumn < 4: MoveCaretToEnd() return false private def HandleKeyPress(ch as System.Char) as bool: return true if _blockKeys if InCodeCompletion: _codeCompletionWindow.ProcessKeyEvent(ch) if ch == "."[0]: DotComplete(ch) return false private def cls(): self.Document.TextContent = "" self.ActiveTextAreaControl.Refresh() private def _lineHistory_CurrentLineChanged(): segment = GetLastLineSegment() self.Document.Replace(segment.Offset + 4, self.CurrentLineText.Length, _lineHistory.CurrentLine) def GetBooHighlighting(): return HighlightingManager.Manager.FindHighlighter("Boo") static def InstallDefaultSyntaxModeProvider(): HighlightingManager.Manager.AddSyntaxModeFileProvider( FileSyntaxModeProvider(Path.GetDirectoryName(Application.ExecutablePath))) nant-0.9.5~git20110729.r1.202a430/extras/booish.gui/src/MainForm.boo000066400000000000000000000026271161462365500240240ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // This file is part of Boo Explorer. // // Boo Explorer is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. 
// // Boo Explorer is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with Foobar; if not, write to the Free Software // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA #endregion namespace booish.gui import System import System.Windows.Forms import ICSharpCode.TextEditor import ICSharpCode.TextEditor.Document partial class MainForm(Form): def constructor(): self.Text = "booish" self.Size = System.Drawing.Size(640, 480) self.Controls.Add(InteractiveInterpreterControl( Dock: DockStyle.Fill, Font: System.Drawing.Font("Lucida Console", 11))) [STAThread] def Main(argv as (string)): InteractiveInterpreterControl.InstallDefaultSyntaxModeProvider() Application.Run(MainForm()) nant-0.9.5~git20110729.r1.202a430/extras/booish.gui/src/booish.gui.cmbx000066400000000000000000000011101161462365500245160ustar00rootroot00000000000000 nant-0.9.5~git20110729.r1.202a430/extras/booish.gui/src/booish.gui.prjx000066400000000000000000000045331161462365500245640ustar00rootroot00000000000000 nant-0.9.5~git20110729.r1.202a430/extras/booish.in000066400000000000000000000003251161462365500205620ustar00rootroot00000000000000#!/bin/sh if [ -x @prefix@/bin/cli ]; then env @prefix@/bin/cli $MONO_OPTIONS @libdir@/boo/booish.exe $BOOISH_OPTIONS "$@" else env @RUNTIME@ $MONO_OPTIONS @libdir@/boo/booish.exe $BOOISH_OPTIONS "$@" fi 
nant-0.9.5~git20110729.r1.202a430/extras/boox/000077500000000000000000000000001161462365500177165ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/extras/boox/BooExplorer.Common/000077500000000000000000000000001161462365500234055ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/extras/boox/BooExplorer.Common/BooExplorer.Common.cmbx000066400000000000000000000011701161462365500277460ustar00rootroot00000000000000 nant-0.9.5~git20110729.r1.202a430/extras/boox/BooExplorer.Common/BooExplorer.Common.prjx000066400000000000000000000033731161462365500300070ustar00rootroot00000000000000 nant-0.9.5~git20110729.r1.202a430/extras/boox/BooExplorer.Common/CodeCompletion.boo000077500000000000000000000046571161462365500270310ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // This file is part of Boo Explorer. // // Boo Explorer is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. // // Boo Explorer is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. 
// // You should have received a copy of the GNU General Public License // along with Foobar; if not, write to the Free Software // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA #endregion namespace BooExplorer.Common import Boo.Lang.Compiler import Boo.Lang.Compiler.Pipelines import Boo.Lang.Compiler.Ast import Boo.Lang.Compiler.IO import Boo.Lang.Compiler.Steps import Boo.Lang.Compiler.TypeSystem import System.IO class CodeCompletionHunter(ProcessMethodBodiesWithDuckTyping): static def GetCompletion(source as string): hunter = CodeCompletionHunter() compiler = BooCompiler() compiler.Parameters.OutputWriter = StringWriter() compiler.Parameters.Pipeline = MakePipeline(hunter) compiler.Parameters.Input.Add(StringInput("none", source)) result = compiler.Run() print(result.Errors.ToString(true)) return hunter.Members [getter(Members)] _members = array(IEntity, 0) override protected def ProcessMemberReferenceExpression(node as MemberReferenceExpression): if node.Name == '__codecomplete__': _members = TypeSystemServices.GetAllMembers(MyGetReferenceNamespace(node)) else: super(node) protected def MyGetReferenceNamespace(expression as MemberReferenceExpression) as INamespace: target as Expression = expression.Target if target.ExpressionType is not null: if target.ExpressionType.EntityType != EntityType.Error: return cast(INamespace, target.ExpressionType) return cast(INamespace, TypeSystemServices.GetOptionalEntity(target)) protected static def MakePipeline(hunter): pipeline = ResolveExpressions(BreakOnErrors: false) index = pipeline.Find(Boo.Lang.Compiler.Steps.ProcessMethodBodiesWithDuckTyping) pipeline[index] = hunter return pipeline nant-0.9.5~git20110729.r1.202a430/extras/boox/COPYING000066400000000000000000000431311161462365500207530ustar00rootroot00000000000000 GNU GENERAL PUBLIC LICENSE Version 2, June 1991 Copyright (C) 1989, 1991 Free Software Foundation, Inc. 
59 Temple Place, Suite 330, Boston, MA 02111-1307 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Library General Public License instead.) You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. 
Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. The precise terms and conditions for copying, distribution and modification follow. GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. 
You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. 
If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. 
(This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. 
Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. 
Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. 
Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. 
Copyright (C) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA Also add information on how to contact you by electronic and paper mail. If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Gnomovision version 69, Copyright (C) year name of author Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. , 1 April 1989 Ty Coon, President of Vice This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. 
If this is what you want to do, use the GNU Library General Public License instead of this License. nant-0.9.5~git20110729.r1.202a430/extras/boox/README.TXT000066400000000000000000000023311161462365500212530ustar00rootroot00000000000000Boo Explorer is a simple development environment for boo. More information on boo can be found at http://boo.codehaus.org/. Acknowlegments ============== #develop team for providing us with a great GPL'd text editor component. http://www.icsharpcode.net/OpenSource/SD/ Weifen Luo for the great DockManager/DockPanel control suite. http://www.freewebs.com/weifenluo/ Getting the Sources =================== Boo Explorer ------------ svn co svn://svn.boo.codehaus.org/boo/scm/trunk/extras/boox boox -r The number is the numeric part of the downloaded zip file name. Boo --- http://boo.codehaus.org/Subversion ICSharpCode.TextEditor.dll WinfenLuo.WinformsUI.dll -------------------------- Information at their respective websites. Licenses ======== Boo Explorer is licensed under the terms of the GPL, check COPYING for details. Licensing information for the current boo language and compiler components can be found here: http://svn.boo.codehaus.org/trunk/license #develop's ICSharpCode.TextEditor.dll is licensed under the terms of GPL. WeinfenLuo.WinformsUI.dll is licensed under specific terms and conditions which can be found at this address: http://www.freewebs.com/weifenluo/DockPanel.htm nant-0.9.5~git20110729.r1.202a430/extras/boox/booxg/000077500000000000000000000000001161462365500210345ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/extras/boox/booxg/ApplicationResources.boo000066400000000000000000000025441161462365500257000ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // This file is part of Boo Explorer. 
// // Boo Explorer is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. // // Boo Explorer is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with Foobar; if not, write to the Free Software // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA #endregion namespace BooExplorer class ApplicationResources: static _manager = System.Resources.ResourceManager("BooExplorer", typeof(MainWindow).Assembly) class Icons: public static final Class = LoadIcon("class") public static final Method = LoadIcon("method") public static final Field = LoadIcon("field") public static final Event = LoadIcon("event") public static final Property = LoadIcon("property") static def LoadIcon(name as string): return Gdk.Pixbuf(cast((byte), _manager.GetObject(name))) nant-0.9.5~git20110729.r1.202a430/extras/boox/booxg/BooEditor.boo000066400000000000000000000037441161462365500234330ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // This file is part of Boo Explorer. // // Boo Explorer is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. // // Boo Explorer is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. 
// // You should have received a copy of the GNU General Public License // along with Foobar; if not, write to the Free Software // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA #endregion namespace BooExplorer import System import System.Resources import System.IO import Boo.Lang.Useful.IO import Boo.Lang.Compiler import Boo.Lang.Compiler.IO import Boo.Lang.Compiler.Ast import Gtk import GtkSourceView class BooEditor(ScrolledWindow): [getter(FileName)] _fname as string _view = BooSourceView() [getter(Buffer)] _buffer = _view.SourceBuffer event LabelChanged as EventHandler Label: get: suffix = " *" if _buffer.Modified return System.IO.Path.GetFileName(_fname) + suffix if _fname return "unnamed.boo" + suffix def constructor(): self.SetPolicy(PolicyType.Automatic, PolicyType.Automatic) self.Add(_view) _buffer.ModifiedChanged += { LabelChanged(self, EventArgs.Empty) } def constructor(ptr as System.IntPtr): super(ptr) def Open([required] fname as string): fname = System.IO.Path.GetFullPath(fname) _buffer.Text = File.ReadAllText(fname) _buffer.Modified = false _fname = fname def SaveAs([required] fname as string): File.WriteAllText(fname, _buffer.Text) _fname = fname _buffer.Modified = false def Redo(): _buffer.Redo() def Undo(): _buffer.Undo() nant-0.9.5~git20110729.r1.202a430/extras/boox/booxg/BooSourceView.boo000066400000000000000000000041221161462365500242670ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // This file is part of Boo Explorer. // // Boo Explorer is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. 
// // Boo Explorer is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with Foobar; if not, write to the Free Software // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA #endregion namespace BooExplorer import System import System.Resources import System.IO import Boo.Lang.Useful.IO import Boo.Lang.Compiler import Boo.Lang.Compiler.IO import Boo.Lang.Compiler.Ast import Gtk import GtkSourceView class BooSourceView(SourceView): static _booSourceLanguage = SourceLanguagesManager().GetLanguageFromMimeType("text/x-boo") [getter(SourceBuffer)] _buffer as SourceBuffer def constructor(): super(_buffer = GtkSourceView.SourceBuffer( _booSourceLanguage, Highlight: true)) self.ShowLineNumbers = true self.AutoIndent = false self.TabsWidth = 4 font = ( Pango.FontDescription.FromString("Lucida Console, 12") or Pango.FontDescription.FromString("monospaced, 12")) self.ModifyFont(font) override def OnKeyPressEvent(ev as Gdk.EventKey): if Gdk.Key.Return == ev.Key: iter = _buffer.GetIterAtMark(_buffer.InsertMark) if iter.BackwardChar(): line = GetLine(iter.Line) indent = /^(\s*)/.Match(line).Groups[0].Value if iter.Char == ":": indent += "\t" _buffer.InsertAtCursor("\n${indent}") return true return super(ev) def GetLine(line as int): start = _buffer.GetIterAtLine(line) end = start.Copy() end.ForwardLine() return _buffer.GetText(start, end, false) nant-0.9.5~git20110729.r1.202a430/extras/boox/booxg/DocumentOutline.boo000066400000000000000000000052411161462365500246550ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // This file is part of Boo Explorer. 
// // Boo Explorer is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. // // Boo Explorer is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with Foobar; if not, write to the Free Software // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA #endregion namespace BooExplorer import System import Boo.Lang.Compiler import Boo.Lang.Compiler.IO import Boo.Lang.Compiler.Ast import Gtk class DocumentOutlineProcessor: _store = TreeStore(Gdk.Pixbuf, string) _documentOutline as TreeView _module as Boo.Lang.Compiler.Ast.Module static def SetUp(tree as TreeView): nameColumn = TreeViewColumn() pixbufRender = CellRendererPixbuf() nameColumn.PackStart(pixbufRender, false); nameColumn.AddAttribute(pixbufRender, "pixbuf", 0) labelRender = Gtk.CellRendererText() nameColumn.PackStart(labelRender, false); nameColumn.AddAttribute(labelRender, "text", 1) tree.AppendColumn(nameColumn) def constructor(documentOutline, editor as BooEditor): _module = Parse(editor.Label, editor.Buffer.Text) _documentOutline = documentOutline def Parse(fname, text): compiler = BooCompiler() compiler.Parameters.Input.Add(StringInput(fname, text)) compiler.Parameters.Pipeline = Pipelines.Parse() return compiler.Run().CompileUnit.Modules[0] def Update(): for type in _module.Members: iter = _store.AppendValues((GetIcon(type), type.Name)) if type isa TypeDefinition: UpdateType(iter, type) _documentOutline.Model = _store _documentOutline.ExpandAll() def UpdateType(parent, type as TypeDefinition): for member in type.Members: iter = _store.AppendValues(parent, 
(GetIcon(member), member.Name)) if member isa TypeDefinition: UpdateType(iter, member) def GetIcon(member as TypeMember): type = member.NodeType return ApplicationResources.Icons.Class if type == NodeType.ClassDefinition return ApplicationResources.Icons.Field if type == NodeType.Field return ApplicationResources.Icons.Event if type == NodeType.Event return ApplicationResources.Icons.Property if type == NodeType.Property return ApplicationResources.Icons.Method nant-0.9.5~git20110729.r1.202a430/extras/boox/booxg/MainWindow.boo000066400000000000000000000220441161462365500236130ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // This file is part of Boo Explorer. // // Boo Explorer is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. // // Boo Explorer is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. 
// // You should have received a copy of the GNU General Public License // along with Foobar; if not, write to the Free Software // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA #endregion namespace BooExplorer import System import System.Resources import System.IO import Boo.Lang.Useful.IO import Boo.Lang.Compiler import Boo.Lang.Compiler.IO import Boo.Lang.Compiler.Ast import Gtk import GtkSourceView class MainWindow(Window): _status = Statusbar(HasResizeGrip: false) _notebookEditors = Notebook(TabPos: PositionType.Top, Scrollable: true) _notebookHelpers = Notebook(TabPos: PositionType.Bottom, Scrollable: true) _notebookOutline = Notebook(TabPos: PositionType.Bottom, Scrollable: true) _documentOutline = TreeView() _output = TextView(Editable: false) _outputBuffer = _output.Buffer _accelGroup = AccelGroup() _editors = [] # workaround for gtk# bug #61703 def constructor(): super("Boo Explorer") self.AddAccelGroup(_accelGroup) self.Maximize() self.DeleteEvent += OnDelete DocumentOutlineProcessor.SetUp(_documentOutline) _notebookOutline.AppendPage(CreateScrolled(_documentOutline), Label("Document Outline")) _notebookOutline.AppendPage(CreateScrolled(CreateFileChooser()), Label("File System")) _notebookHelpers.AppendPage(CreateScrolled(_output), Label("Output")) vbox = VBox(false, 2) vbox.PackStart(CreateMenuBar(), false, false, 0) editPanel = VPaned() editPanel.Pack1(_notebookEditors, true, true) editPanel.Pack2(_notebookHelpers, false, true) mainPanel = HPaned() mainPanel.Pack2(_notebookOutline, false, false) mainPanel.Pack1(editPanel, true, true) vbox.PackStart(mainPanel, true, true, 0) vbox.PackStart(_status, false, false, 0) self.Add(vbox) self.NewDocument() Timeout.Add(3s.TotalMilliseconds, self.UpdateDocumentOutline) private def CreateScrolled(widget): sw = ScrolledWindow() sw.Add(widget) return sw private def AppendEditor(editor as BooEditor): pageIndex = _notebookEditors.AppendPage(editor, Label(editor.Label)) page = 
_notebookEditors.GetNthPage(pageIndex) editor.LabelChanged += def(): _notebookEditors.SetTabLabelText(page, editor.Label) _editors.Add(editor) editor.ShowAll() _notebookEditors.CurrentPage = _notebookEditors.NPages-1 def NewDocument(): self.AppendEditor(editor=BooEditor()) return editor def OpenDocument(fname as string): fname = System.IO.Path.GetFullPath(fname) i = 0 for editor as BooEditor in _editors: if fname == editor.FileName: _notebookEditors.CurrentPage = i return ++i editor = BooEditor() editor.Open(fname) self.AppendEditor(editor) return editor private def CreateMenuBar(): mb = MenuBar() file = Menu() file.Append(ImageMenuItem(Stock.New, _accelGroup, Activated: _menuItemNew_Activated)) file.Append(ImageMenuItem(Stock.Open, _accelGroup, Activated: _menuItemOpen_Activated)) file.Append(ImageMenuItem(Stock.Save, _accelGroup, Activated: _menuItemSave_Activated)) file.Append(SeparatorMenuItem()) file.Append(ImageMenuItem(Stock.Quit, _accelGroup, Activated: _menuItemExit_Activated)) edit = Menu() edit.Append(ImageMenuItem(Stock.Undo, _accelGroup, Activated: _menuItemUndo_Activated)) edit.Append(ImageMenuItem(Stock.Redo, _accelGroup, Activated: _menuItemRedo_Activated)) edit.Append(SeparatorMenuItem()) edit.Append(ImageMenuItem(Stock.Cut, _accelGroup, Activated: _menuItemCut_Activated)) edit.Append(ImageMenuItem(Stock.Copy, _accelGroup, Activated: _menuItemCopy_Activated)) edit.Append(ImageMenuItem(Stock.Paste, _accelGroup, Activated: _menuItemPaste_Activated)) edit.Append(ImageMenuItem(Stock.Delete, _accelGroup, Activated: _menuItemDelete_Activated)) edit.Append(SeparatorMenuItem()) edit.Append(ImageMenuItem(Stock.Preferences, _accelGroup)) tools = Menu() tools.Append(mi=ImageMenuItem(Stock.Execute, _accelGroup, Activated: _menuItemExecute_Activated)) mi.AddAccelerator("activate", _accelGroup, AccelKey(Gdk.Key.F5, Enum.ToObject(Gdk.ModifierType, 0), AccelFlags.Visible)) tools.Append(miExpand=MenuItem("Expand", Activated: _menuItemExpand_Activated)) 
miExpand.AddAccelerator("activate", _accelGroup, AccelKey(Gdk.Key.E, Gdk.ModifierType.ControlMask, AccelFlags.Visible)) documents = Menu() documents.Append(ImageMenuItem(Stock.Close, _accelGroup, Activated: _menuItemClose_Activated)) mb.Append(MenuItem("_File", Submenu: file)) mb.Append(MenuItem("_Edit", Submenu: edit)) mb.Append(MenuItem("_Tools", Submenu: tools)) mb.Append(MenuItem("_Documents", Submenu: documents)) return mb CurrentEditor as BooEditor: get: // can't do the simpler: // editor as BooEditor = _notebookEditors.CurrentPageWidget // because of gtk# bug #61703 return _editors[_notebookEditors.CurrentPage] if len(_editors) def AppendOutput(text as string): target = _outputBuffer.EndIter _outputBuffer.Insert(target, text) def DisplayErrors(errors as CompilerErrorCollection): self.AppendOutput(errors.ToString(true)) if (len(errors)) private def GetClipboard(): return Clipboard.Get(Gdk.Selection.Clipboard) CurrentBuffer: get: return null if CurrentEditor is null return CurrentEditor.Buffer private def _menuItemClose_Activated(): page = _notebookEditors.CurrentPage _notebookEditors.RemovePage(page) _editors.RemoveAt(page) private def _menuItemCut_Activated(): CurrentBuffer.CutClipboard(GetClipboard(), true) private def _menuItemCopy_Activated(): CurrentBuffer.CopyClipboard(GetClipboard()) private def _menuItemPaste_Activated(): CurrentBuffer.PasteClipboard(GetClipboard()) private def _menuItemDelete_Activated(): pass private def _menuItemExecute_Activated(): _outputBuffer.Clear() self.AppendOutput("${_outputBuffer.Text}****** Compiling ${CurrentEditor.Label} *******\n") compiler = CreateCompiler(Boo.Lang.Compiler.Pipelines.Run()) compiler.Parameters.Input.Add(StringInput(CurrentEditor.Label, CurrentEditor.Buffer.Text)) start = date.Now try: using console=Boo.Lang.Interpreter.ConsoleCapture(): result = compiler.Run() self.DisplayErrors(result.Errors) self.AppendOutput(console.ToString()) except x: self.AppendOutput(x.ToString()) self.AppendOutput("Complete 
in ${date.Now-start}.") private def _menuItemNew_Activated(): self.NewDocument() private def _menuItemOpen_Activated(): fs = FileChooserDialog("Open file", self, FileChooserAction.Open, (,)) SetUpFileChooser(fs) fs.Run() fs.Hide() private def CreateFileChooser(): fs = FileChooserWidget(FileChooserAction.Open) SetUpFileChooser(fs) return fs private def SetUpFileChooser(fs as FileChooser): filter = FileFilter(Name: "Boo Files (*.boo)") filter.AddPattern("*.boo") fs.AddFilter(filter) filter = FileFilter(Name: "All Files (*.*)") filter.AddPattern("*.*") fs.AddFilter(filter) fs.FileActivated += def(): self.OpenDocument(fs.Filename) self.UpdateDocumentOutline() private def UpdateDocumentOutline(): try: DocumentOutlineProcessor(_documentOutline, CurrentEditor).Update() except ignored: pass return true // to match Gdk.Function signature private def _menuItemExpand_Activated(): editor = CurrentEditor compiler = CreateCompiler(Boo.Lang.Compiler.Pipelines.CompileToBoo()) compiler.Parameters.OutputWriter = StringWriter() compiler.Parameters.Input.Add(StringInput(editor.Label, editor.Buffer.Text)) result = compiler.Run() self.DisplayErrors(result.Errors) unless len(result.Errors): NewDocument().Buffer.Text = compiler.Parameters.OutputWriter.ToString() private def CreateCompiler(pipeline): compiler = BooCompiler() compiler.Parameters.Pipeline = pipeline compiler.Parameters.References.Add(System.Reflection.Assembly.LoadWithPartialName("nunit.framework")) return compiler private def _menuItemSave_Activated(): editor = CurrentEditor fname = editor.FileName if fname is null: fs = FileSelection("Save As", SelectMultiple: false) if cast(int, ResponseType.Ok) != fs.Run(): return fs.Hide() fname = fs.Selections[0] editor.SaveAs(fname) _notebookEditors.SetTabLabelText(editor, editor.Label) private def _menuItemUndo_Activated(): CurrentEditor.Undo() private def _menuItemRedo_Activated(): CurrentEditor.Redo() private def _menuItemExit_Activated(): Application.Quit() def OnDelete(sender, 
args as DeleteEventArgs): Application.Quit() args.RetVal = true nant-0.9.5~git20110729.r1.202a430/extras/boox/booxg/booxg.boo000066400000000000000000000021021161462365500226460ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // This file is part of Boo Explorer. // // Boo Explorer is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. // // Boo Explorer is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with Foobar; if not, write to the Free Software // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA #endregion namespace BooExplorer import Gtk from "gtk-sharp" import GtkSourceView from "gtksourceview-sharp" import Gdk from "gdk-sharp" import Pango from "pango-sharp" Application.Init() MainWindow().ShowAll() Application.Run() nant-0.9.5~git20110729.r1.202a430/extras/boox/booxw/000077500000000000000000000000001161462365500210545ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/extras/boox/booxw/default.build000066400000000000000000000107631161462365500235300ustar00rootroot00000000000000 import System.IO import System.Resources import System.Windows.Forms from System.Windows.Forms import System.Drawing from System.Drawing def MapPath(path): return Path.Combine(Project.BaseDirectory, path) def CreateImageList(): imageList = ImageList() images = ( "namespace.png", "class.png", "interface.png", "enum.png", "field.png", "property.png", "method.png", "internalclass.png", "internalinterface.png", "internalenum.png", 
"internalfield.png", "internalproperty.png", "internalmethod.png", "protectedclass.png", "protectedinterface.png", "protectedenum.png", "protectedfield.png", "protectedproperty.png", "protectedmethod.png", "privateclass.png", "privateinterface.png", "privateenum.png", "privatefield.png", "privateproperty.png", "privatemethod.png", "event.png" ) for image in images: fname = MapPath("../resources/ClassBrowserIcons/${image}") imageList.Images.Add(Image.FromFile(fname)) return imageList using writer=ResourceWriter(MapPath("../build/BooExplorer.DocumentOutline.resources")): using imageList=CreateImageList(): writer.AddResource("_imageList", imageList.ImageStream) using writer=ResourceWriter(MapPath("../build/BooExplorer.MainForm.resources")): writer.AddResource("_icon", Icon(MapPath("../resources/boo.ico"))) nant-0.9.5~git20110729.r1.202a430/extras/boox/booxw/scripts/000077500000000000000000000000001161462365500225435ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/extras/boox/booxw/scripts/AutomaticClose.int000077500000000000000000000021411161462365500261740ustar00rootroot00000000000000import BooExplorer class CompleteParentesis(ITextInterceptor): Name as string: get: return "Complete Parentesis" def Process(ch as System.Char, manipulator as TextManipulator) as bool: if ch == "("[0]: manipulator.Insert(")") return false return true class CompleteSquareBracket(ITextInterceptor): Name as string: get: return "Complete Bracket" def Process(ch as System.Char, manipulator as TextManipulator) as bool: if ch == "["[0]: manipulator.Insert("]") return false return true class CompleteBracket(ITextInterceptor): Name as string: get: return "Complete Bracket" def Process(ch as System.Char, manipulator as TextManipulator) as bool: if ch == "{"[0]: manipulator.Insert("}") return false return true class CompleteString(ITextInterceptor): Name as string: get: return "Complete String" def Process(ch as System.Char, manipulator as TextManipulator) as bool: if ch == "'"[0]: 
manipulator.Insert("'") return false if ch == '"'[0]: manipulator.Insert('"') return false return true nant-0.9.5~git20110729.r1.202a430/extras/boox/booxw/src/000077500000000000000000000000001161462365500216435ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/extras/boox/booxw/src/AssemblyInfo.boo000066400000000000000000000043501161462365500247410ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Rodrigo B. de Oliveira nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#endregion import System.Reflection import System.Runtime.CompilerServices import System.Security.Permissions [assembly: AssemblyTitle("Boo Explorer")] [assembly: AssemblyDescription("")] [assembly: AssemblyConfiguration("")] [assembly: AssemblyCompany("")] [assembly: AssemblyProduct("boo - an extensible programming language for the CLI")] [assembly: AssemblyCopyright("(C) 2003-2007 Rodrigo Barreto de Oliveira")] [assembly: AssemblyTrademark("")] [assembly: AssemblyCulture("")] [assembly: AssemblyVersion("2.0.9.5")] [assembly: ReflectionPermission(SecurityAction.RequestMinimum, ReflectionEmit: true, TypeInformation: true)] nant-0.9.5~git20110729.r1.202a430/extras/boox/booxw/src/BooEditor.boo000066400000000000000000000222721161462365500242370ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // This file is part of Boo Explorer. // // Boo Explorer is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. // // Boo Explorer is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. 
// // You should have received a copy of the GNU General Public License // along with Foobar; if not, write to the Free Software // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA #endregion namespace BooExplorer import ICSharpCode.TextEditor import ICSharpCode.TextEditor.Document import ICSharpCode.TextEditor.Actions import WeifenLuo.WinFormsUI import System import System.IO import System.ComponentModel import System.Windows.Forms import System.Drawing import Boo.Lang.Compiler import Boo.Lang.Compiler.IO import Boo.Lang.Compiler.Pipelines import Boo.Lang.Compiler.Ast import BooExplorer.Common class WaitCursor(IDisposable): _saved _form as Form def constructor([required] form as Form): _form = form _saved = form.Cursor form.Cursor = Cursors.WaitCursor def Dispose(): _form.Cursor = _saved class BooEditor(DockContent): _editor as BooxTextAreaControl [getter(Main)] _main as MainForm [getter(FileName)] _fname as string [getter(IsDirty)] _dirty = false _moduleDirty = true [getter(Module)] _module as Module _compiler as BooCompiler def constructor(main as MainForm): _main = main settings = main.Settings _editor = BooxTextAreaControl(Dock: DockStyle.Fill, Font: settings.TextFont, EnableFolding: settings.EnableFolding, ShowLineNumbers: settings.ShowLineNumbers, ShowSpaces: settings.ShowSpaces, ShowTabs: settings.ShowTabs, ShowEOLMarkers: settings.ShowEOLMarkers, IndentStyle: settings.IndentStyle) _editor.Editor = self _editor.Encoding = System.Text.Encoding.UTF8 _editor.Document.FormattingStrategy = BooFormattingStrategy() _editor.Document.HighlightingStrategy = GetBooHighlighting() _editor.Document.DocumentChanged += _editor_DocumentChanged _editor.Document.FoldingManager.FoldingStrategy = BooFoldingStrategy.Instance SuspendLayout() Controls.Add(_editor) self.HideOnClose = false self.DockableAreas = DockAreas.Document self.Text = GetSafeFileName() self.DockPadding.All = 1 self.Menu = CreateMenu() ResumeLayout(false) TextArea: get: return 
_editor.ActiveTextAreaControl.TextArea TextContent: get: return _editor.Document.TextContent set: _editor.Document.TextContent = value def GoTo(line as int): document = _editor.Document segment = document.GetLineSegment(line) wsLen = /\s*/.Match(document.GetText(segment)).Groups[0].Length _editor.ActiveTextAreaControl.JumpTo(line, wsLen) self.TextArea.Focus() self.TextArea.Select() def Save(): if _fname: _editor.SaveFile(_fname) ClearDirtyFlag() else: SaveAs() def SaveAs(): dlg = SaveFileDialog(AddExtension: true, DefaultExt: ".boo", OverwritePrompt: true, Filter: "boo files (*.boo)|*.boo") if DialogResult.OK == dlg.ShowDialog(): _editor.SaveFile(dlg.FileName) _fname = dlg.FileName ClearDirtyFlag() def Open([required] fname as string): _editor.LoadFile(fname) _fname = fname ClearDirtyFlag() def ClearDirtyFlag(): _dirty = false self.Text = _fname def _editor_DocumentChanged(sender, args as DocumentEventArgs): _moduleDirty = true if not _dirty: self.Text = "${GetSafeFileName()} (modified)" _dirty = true def _menuItemUndo_Click(sender, args as EventArgs): _editor.Undo() def _menuItemRedo_Click(sender, args as EventArgs): _editor.Redo() def _menuItemSplit_Click(sender, args as EventArgs): _editor.Split() def _menuItemRemoveTrailingWS_Click(sender, args as EventArgs): RemoveTrailingWS().Execute(TextArea) def _menuItemCut_Click(sender, args as EventArgs): Cut().Execute(TextArea) def _menuItemCopy_Click(sender, args as EventArgs): Copy().Execute(TextArea) def _menuItemPaste_Click(sender, args as EventArgs): Paste().Execute(TextArea) def _menuItemGoTo_Click(sender, args as EventArgs): dlg = PromptDialog(Text: GetSafeFileName(), Message: "Line number: ") if DialogResult.OK == dlg.ShowDialog(): GoTo(int.Parse(dlg.Value)-1) def _menuItemRun_Click(sender, args as EventArgs): using WaitCursor(self): Run() def _menuItemExpand_Click(sender, args as EventArgs): using WaitCursor(self): Expand() private def Expand(): _main.Expand(self.GetSafeFileName(), self.TextContent) private def 
Run(): if _compiler is null: _compiler = BooCompiler() // enable duck typing _compiler.Parameters.Ducky = _main.Settings.Ducky _compiler.Parameters.Pipeline = CompileToMemory() _compiler.Parameters.References.Add(typeof(Form).Assembly) _compiler.Parameters.References.Add(typeof(System.Drawing.Size).Assembly) _compiler.Parameters.References.Add(System.Reflection.Assembly.GetExecutingAssembly()) _compiler.Parameters.References.Add(typeof(Boo.Lang.Extensions.MacroMacro).Assembly) _compiler.Parameters.Input.Add(StringInput(GetSafeFileName(), self.TextContent)) try: using console=Boo.Lang.Interpreter.ConsoleCapture(): started = date.Now result = _compiler.Run() finished = date.Now _main.StatusText = "Compilation finished in ${finished-started} with ${len(result.Errors)} error(s)." _main.UpdateTaskList(result.Errors) if len(result.Errors): UpdateOutputPane(result.Errors.ToString(true)) else: AppDomain_AssemblyResolve = def (sender, args as ResolveEventArgs): name = GetSimpleName(args.Name) compiledName = GetSimpleName(result.GeneratedAssembly.FullName) return result.GeneratedAssembly if name == compiledName current = AppDomain.CurrentDomain try: current.AssemblyResolve += AppDomain_AssemblyResolve result.GeneratedAssembly.EntryPoint.Invoke(null, (null,)) except x: print(x) ensure: current.AssemblyResolve -= AppDomain_AssemblyResolve UpdateOutputPane(console.ToString()) ensure: _compiler.Parameters.Input.Clear() private def GetSimpleName(name as string): return /,\s*/.Split(name)[0] def UpdateOutputPane(text as string): _main.OutputPane.SetBuildText(text) _main.ShowOutputPane() if len(text) def UpdateFoldings(fname as string, parseInformation): _editor.Document.FoldingManager.UpdateFoldings(fname, parseInformation) def UpdateModule(): return unless _moduleDirty fname = GetSafeFileName() code = self.TextContent try: _module = _main.ParseString(fname, code).Modules[0] _moduleDirty = false UpdateFoldings(fname, _module) except x: print(x) override protected def 
OnClosing(args as CancelEventArgs): super(args) return if args.Cancel or (not _dirty) or _main.IsQuitting result = MessageBox.Show("Save changes to ${GetSafeFileName()}?", "File not saved", MessageBoxButtons.YesNoCancel) if DialogResult.Yes == result: Save() if result in (DialogResult.Yes, DialogResult.No): _dirty = false args.Cancel = false else: args.Cancel = true def GetSafeFileName(): return _fname if _fname return "untitled.boo" def CreateMenu(): menu = MainMenu() edit = MenuItem(Text: "&Edit", MergeOrder: 1) edit.MenuItems.AddRange( ( MenuItem("&Undo", Click: _menuItemUndo_Click, Shortcut: Shortcut.CtrlZ), MenuItem("&Redo", Click: _menuItemRedo_Click, Shortcut: Shortcut.CtrlY), MenuItem("-"), MenuItem("Cu&t", Click: _menuItemCut_Click, Shortcut: Shortcut.CtrlX), MenuItem("&Copy", Click: _menuItemCopy_Click, Shortcut: Shortcut.CtrlC), MenuItem("&Paste", Click: _menuItemPaste_Click, Shortcut: Shortcut.CtrlV), MenuItem("-"), MenuItem("&Go to line...", Shortcut: Shortcut.CtrlG, Click: _menuItemGoTo_Click), MenuItem("&Split", Click: _menuItemSplit_Click), MenuItem("Remove trailing whitespace", Click: _menuItemRemoveTrailingWS_Click) )) tools = MenuItem(Text: "&Tools", MergeOrder: 2, MergeType: MenuMerge.MergeItems) tools.MenuItems.AddRange( ( MenuItem(Text: "Run", Click: _menuItemRun_Click, Shortcut: Shortcut.F5), MenuItem(Text: "Expand", Click: _menuItemExpand_Click, Shortcut: Shortcut.CtrlE), MenuItem(Text: "-") )) menu.MenuItems.AddRange((edit, tools)) return menu def GetBooHighlighting(): return HighlightingManager.Manager.FindHighlighter("Boo") override protected def GetPersistString(): return "BooEditor|${GetSafeFileName()}" nant-0.9.5~git20110729.r1.202a430/extras/boox/booxw/src/BooFoldingStrategy.boo000066400000000000000000000057111161462365500261150ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // This file is part of Boo Explorer. 
// // Boo Explorer is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. // // Boo Explorer is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with Foobar; if not, write to the Free Software // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA #endregion namespace BooExplorer import ICSharpCode.TextEditor import ICSharpCode.TextEditor.Document import ICSharpCode.TextEditor.Actions import System.Collections import Boo.Lang.Compiler.Ast class BooFoldingStrategy(IFoldingStrategy): [getter(Instance)] static _instance = BooFoldingStrategy() class FoldingVisitor(DepthFirstVisitor): [getter(Markers)] _markers = ArrayList() _document as IDocument def constructor(document): _document = document override def LeaveConstructor(node as Constructor): LeaveMethod(node) override def LeaveMethod(node as Method): AddMarker(node.LexicalInfo, node.Body.EndSourceLocation, FoldType.MemberBody) override def LeaveProperty(node as Property): end as SourceLocation if node.Getter is not null: end = node.Getter.Body.EndSourceLocation if node.Setter is not null: if end is not null: candidate = node.Setter.Body.EndSourceLocation if candidate.Line > end.Line: end = candidate AddMarker(node.LexicalInfo, end, FoldType.MemberBody) override def LeaveClassDefinition(node as ClassDefinition): LeaveTypeDefinition(node) override def LeaveInterfaceDefinition(node as InterfaceDefinition): LeaveTypeDefinition(node) override def LeaveEnumDefinition(node as EnumDefinition): LeaveTypeDefinition(node) def LeaveTypeDefinition(node as Node): AddMarker(node, 
FoldType.TypeBody) def AddMarker(node as Node, type as FoldType): start = node.LexicalInfo end = node.EndSourceLocation AddMarker(start, end, type) def AddMarker(start as SourceLocation, end as SourceLocation, type as FoldType): return unless start.IsValid and end.IsValid _markers.Add( FoldMarker(_document, start.Line-1, _document.GetLineSegment(start.Line-1).Length + 1, end.Line-1, _document.GetLineSegment(end.Line-1).Length, type)) def GenerateFoldMarkers(document as IDocument, fileName as string, parseInformation as object) as ArrayList: module as Module = parseInformation visitor = FoldingVisitor(document) visitor.Visit(module) return visitor.Markers nant-0.9.5~git20110729.r1.202a430/extras/boox/booxw/src/BooFormattingStrategy.boo000066400000000000000000000030441161462365500266420ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // This file is part of Boo Explorer. // // Boo Explorer is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. // // Boo Explorer is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. 
// // You should have received a copy of the GNU General Public License // along with Foobar; if not, write to the Free Software // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA #endregion namespace BooExplorer import ICSharpCode.TextEditor import ICSharpCode.TextEditor.Document import ICSharpCode.TextEditor.Actions class BooFormattingStrategy(DefaultFormattingStrategy): override def SmartIndentLine(area as TextArea, line as int) as int: document = area.Document previousLine = document.GetLineSegment(line-1) if document.GetText(previousLine).EndsWith(":"): currentLine = document.GetLineSegment(line) indentation = GetIndentation(area, line-1) indentation += Tab.GetIndentationString(document) document.Replace(currentLine.Offset, currentLine.Length, indentation+document.GetText(currentLine)) return len(indentation) return super(area, line) nant-0.9.5~git20110729.r1.202a430/extras/boox/booxw/src/BooxSettings.boo000066400000000000000000000044161161462365500250010ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // This file is part of Boo Explorer. // // Boo Explorer is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. // // Boo Explorer is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. 
// // You should have received a copy of the GNU General Public License // along with Foobar; if not, write to the Free Software // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA #endregion namespace BooExplorer import System.Drawing import System.IO import System.ComponentModel import System.Xml.Serialization class BooxSettings: _textFont = System.Drawing.Font("Lucida Console", 11) [property(UseAntiAliasFont)] _useAntiAliasFont = true [property(ShowLineNumbers)] _showLineNumbers = true [property(ShowEOLMarkers)] _showEOLMarkers = true [property(ShowSpaces)] _showSpaces = true [property(ShowTabs)] _showTabs = true [property(EnableFolding)] _enableFolding = true [property(IndentStyle)] _indentStyle = ICSharpCode.TextEditor.Document.IndentStyle.Smart [property(LoadPlugins)] _loadPlugins = true [property(Ducky)] _ducky = false [XmlIgnore] TextFont: get: return _textFont set: assert value is not null _textFont = value [Browsable(false)] TextFontName: get: return FontConverter().ConvertToInvariantString(_textFont) set: _textFont = FontConverter().ConvertFromInvariantString(value) def Save(writer as TextWriter): XmlSerializer(BooxSettings).Serialize(writer, self) def Save(fname as string): using writer=StreamWriter(fname): Save(writer) static def Load(fname as string): using reader=File.OpenText(fname): return Load(reader) static def Load(reader as TextReader) as BooxSettings: return XmlSerializer(BooxSettings).Deserialize(reader) nant-0.9.5~git20110729.r1.202a430/extras/boox/booxw/src/BooxTextAreaControl.boo000077500000000000000000000042171161462365500262610ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // This file is part of Boo Explorer. 
// // Boo Explorer is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. // // Boo Explorer is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with Foobar; if not, write to the Free Software // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA #endregion namespace BooExplorer import ICSharpCode.TextEditor import ICSharpCode.TextEditor.Document import ICSharpCode.TextEditor.Actions import ICSharpCode.TextEditor.Gui.CompletionWindow import BooExplorer.Common class BooxTextAreaControl(TextEditorControl): [property(Editor)] _editor as BooEditor _codeCompletionWindow as CodeCompletionWindow override def InitializeTextAreaControl(newControl as TextAreaControl): super(newControl) newControl.TextArea.KeyEventHandler += HandleKeyPress def HandleKeyPress(ch as System.Char) as bool: if _codeCompletionWindow is not null and not _codeCompletionWindow.IsDisposed: _codeCompletionWindow.ProcessKeyEvent(ch) if ch == "."[0]: CodeComplete(ch) return false for interceptor in Editor.Main.TextInterceptors: ret = interceptor.Process(ch, TextManipulator(self)) break unless ret return false def CodeComplete(ch as System.Char): _codeCompletionWindow = CodeCompletionWindow.ShowCompletionWindow( Editor.Main, self, Editor.GetSafeFileName(), CodeCompletionDataProvider(CodeCompletionHunter.GetCompletion(GetCompletionSource())), ch) private def GetCompletionSource(): newCaretOffset = self.ActiveTextAreaControl.TextArea.Caret.Offset return self.Document.TextContent.Insert(newCaretOffset, ".__codecomplete__") 
nant-0.9.5~git20110729.r1.202a430/extras/boox/booxw/src/CodeCompletionData.boo000077500000000000000000000065451161462365500260570ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // This file is part of Boo Explorer. // // Boo Explorer is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. // // Boo Explorer is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with Foobar; if not, write to the Free Software // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA #endregion namespace BooExplorer import ICSharpCode.TextEditor import ICSharpCode.TextEditor.Document import ICSharpCode.TextEditor.Actions import ICSharpCode.TextEditor.Gui.CompletionWindow import System import BooExplorer.Common import Boo.Lang.Compiler.TypeSystem class CodeCompletionData(ICompletionData, IComparable): _description as string Description as string: get: if not _overloads: return _description else: return "${_description} (+${_overloads} overloads)" set: _description = value [getter(ImageIndex)] _imageIndex as int = 0 [getter(Text)] _text as (string) [property(Overloads)] _overloads as int def constructor(imageIndex as int, [required]text as string, [required]description as string): _imageIndex = imageIndex _text = (text,) _description = description def InsertAction(control as TextEditorControl): control.ActiveTextAreaControl.TextArea.InsertString(_text[0]) public def CompareTo(obj) as int: if obj is null or not obj isa CodeCompletionData: return -1 temp = obj 
as CodeCompletionData return _text[0].CompareTo(temp.Text[0]) class CodeCompletionDataProvider(ICompletionDataProvider): _codeCompletion as (IEntity) ImageList as System.Windows.Forms.ImageList: get: return BooxImageList.Instance PreSelection as string: get: return null def constructor(codeCompletion): _codeCompletion = codeCompletion def GenerateCompletionData(fileName as string, textArea as TextArea, charTyped as System.Char) as (ICompletionData): values = {} for item in _codeCompletion: continue if item.Name[:4] in ("add_", "remove_", "get_", "set_") if not "." in item.Name: if not values[item.Name]: values[item.Name] = CodeCompletionData(GetImageIndex(item), item.Name, item.ToString()) else: ++(values[item.Name] as CodeCompletionData).Overloads return array(ICompletionData, values.Values) def GetImageIndex(entity as IEntity): type = entity.EntityType if EntityType.Property == type: p as IProperty = entity return cast(int, TypeIcon.PublicProperty) if p.IsPublic return cast(int, TypeIcon.PrivateProperty) if EntityType.Field == type: f as IField = entity return cast(int, TypeIcon.PublicField) if f.IsPublic return cast(int, TypeIcon.PrivateField) if EntityType.Event == type: return cast(int, TypeIcon.PublicEvent) if type in (EntityType.Method, EntityType.Constructor): m as IMethod = entity return cast(int, TypeIcon.PublicMethod) if m.IsPublic return cast(int, TypeIcon.PrivateMethod) return 0 nant-0.9.5~git20110729.r1.202a430/extras/boox/booxw/src/CompileHelper.boo000077500000000000000000000041401161462365500250760ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // This file is part of Boo Explorer. // // Boo Explorer is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. 
// // Boo Explorer is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with Foobar; if not, write to the Free Software // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA #endregion namespace BooExplorer import System.Text.RegularExpressions import Boo.Lang.Compiler import Boo.Lang.Compiler.IO import Boo.Lang.Compiler.Pipelines class CompiledScript: _ctx as CompilerContext Errors as CompilerErrorCollection: get: return _ctx.Errors def constructor(ctx as CompilerContext): _ctx = ctx def Execute(): return if len(_ctx.Errors) _ctx.GeneratedAssembly.EntryPoint.Invoke(null, (null,)) def GetType(typeName as string): return null if len(_ctx.Errors) return _ctx.GeneratedAssembly.GetType(typeName) def GetTypes(match as string): return null if len(_ctx.Errors) return [t for t in _ctx.GeneratedAssembly.GetTypes() if t.Name =~ Regex(match)] def GetTypes(): return null if len(_ctx.Errors) return _ctx.GeneratedAssembly.GetTypes() class ScriptCompiler: static def CompileFile([required] fileName as string) as CompiledScript: compiler = BooCompiler() compiler.Parameters.Input.Add(FileInput(fileName)) compiler.Parameters.Pipeline = CompileToMemory() compiler.Parameters.References.Add(System.Reflection.Assembly.GetExecutingAssembly()) compiler.Parameters.OutputType = CompilerOutputType.Library return CompiledScript(compiler.Run()) nant-0.9.5~git20110729.r1.202a430/extras/boox/booxw/src/DocumentOutline.boo000066400000000000000000000171001161462365500254610ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // This file is part of Boo Explorer. 
// // Boo Explorer is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. // // Boo Explorer is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with Foobar; if not, write to the Free Software // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA #endregion namespace BooExplorer import WeifenLuo.WinFormsUI import System import System.IO import System.Windows.Forms import System.Drawing import Boo.Lang.Compiler.Ast enum TypeIcon: Namespace PublicClass PublicInterface PublicEnum PublicField PublicProperty PublicMethod InternalClass InternalInterface InternalEnum InternalField InternalProperty InternalMethod ProtectedClass ProtectedInterface ProtectedEnum ProtectedField ProtectedProperty ProtectedMethod PrivateClass PrivateInterface PrivateEnum PrivateField PrivateProperty PrivateMethod PublicEvent class TypeIconChooser: static def GetPropertyIcon(node as Property, inInterface as bool) as int: return cast(int, TypeIcon.PublicProperty) if inInterface if node.IsVisibilitySet: if node.IsInternal: return cast(int, TypeIcon.InternalProperty) if node.IsProtected: return cast(int, TypeIcon.ProtectedProperty) if node.IsPrivate: return cast(int, TypeIcon.PrivateProperty) return cast(int, TypeIcon.PublicProperty) static def GetFieldIcon(node as Field) as int: if node.IsVisibilitySet: if node.IsInternal: return cast(int, TypeIcon.InternalField) if node.IsPublic: return cast(int, TypeIcon.PublicField) if node.IsPrivate: return cast(int, TypeIcon.PrivateField) return cast(int, TypeIcon.ProtectedField) class BooxImageList: static 
_imageList as ImageList static Instance as ImageList: get: if _imageList is null: _imageList = ImageList() _imageList.ImageStream = System.Resources.ResourceManager(DocumentOutline).GetObject("_imageList") return _imageList class DocumentOutline(DockContent): _activeDocument as BooEditor _tree as TreeView _treeViewVisitor as TreeViewVisitor _timer = Timer(Tick: _timer_Tick, Interval: 3s.TotalMilliseconds) _module as Module def constructor(): InitTreeView() _treeViewVisitor = TreeViewVisitor(_tree) SuspendLayout() Controls.Add(_tree) self.DockableAreas = ( DockAreas.Float | DockAreas.DockLeft | DockAreas.DockRight) self.ClientSize = System.Drawing.Size(295, 347) self.DockPadding.Bottom = 2 self.DockPadding.Top = 26 self.ShowHint = DockState.DockRight; self.Text = "Document Outline" self.HideOnClose = true ResumeLayout(false) def InitTreeView(): _tree = TreeView(Dock: DockStyle.Fill, DoubleClick: _tree_DoubleClick, ImageIndex: cast(int, TypeIcon.Namespace), SelectedImageIndex: cast(int, TypeIcon.Namespace), ImageList: BooxImageList.Instance, Sorted: true) ActiveDocument as BooEditor: set: _activeDocument = value _timer.Enabled = value is not null Update() if self.Visible def Update(): if _activeDocument is null: _tree.Nodes.Clear() else: _activeDocument.UpdateModule() UpdateTree(_activeDocument.Module) def UpdateTree(module as Module): if module is not _module: _module = module _treeViewVisitor.Visit(_module) def GoToNode([required] treeNode as TreeNode): return unless _activeDocument node as Node = treeNode.Tag if node is not null: info = node.LexicalInfo _activeDocument.GoTo(info.Line-1) def _timer_Tick(sender, args as EventArgs): Update() def _tree_DoubleClick(sender, args as EventArgs): if (treeNode = _tree.SelectedNode) is not null: GoToNode(treeNode) override protected def GetPersistString(): return "DocumentOutline|" class TreeViewVisitor(DepthFirstVisitor): _tree as TreeView _current as TreeNode _inInterface as bool def constructor(tree): _tree = tree 
override def OnModule(node as Module): _current = TreeNode("root") VisitCollection(node.Members) _tree.BeginUpdate() //state = SaveTreeViewState() if len(_current.Nodes) _tree.Nodes.Clear() if len(_current.Nodes): _tree.Nodes.AddRange(array(TreeNode, _current.Nodes)) _tree.ExpandAll() //RestoreTreeViewState(state) if len(state) _tree.EndUpdate() override def OnProperty(node as Property): Add(node.Name, TypeIconChooser.GetPropertyIcon(node, _inInterface), node) override def OnField(node as Field): Add(node.Name, TypeIconChooser.GetFieldIcon(node), node) override def OnEvent(node as Event): Add(node.Name, cast(int, TypeIcon.PublicEvent), node) override def OnInterfaceDefinition(node as InterfaceDefinition): OnTypeDefinition(node, cast(int, TypeIcon.PublicInterface)) override def OnClassDefinition(node as ClassDefinition): OnTypeDefinition(node, cast(int, TypeIcon.PublicClass)) override def OnEnumDefinition(node as EnumDefinition): OnTypeDefinition(node, cast(int, TypeIcon.PublicEnum)) override def OnEnumMember(node as EnumMember): Add(node.Name, cast(int, TypeIcon.PublicField), node) def OnTypeDefinition(node as TypeDefinition, imageIndex as int): _inInterface = node isa InterfaceDefinition saved = _current _current = Add(node.Name, imageIndex, imageIndex, node) VisitCollection(node.Members) _current = saved override def OnConstructor(node as Constructor): OnMethod(node) override def OnMethod(node as Method): name = "${node.Name}(${join([p.Name for p as ParameterDeclaration in node.Parameters], ', ')})" Add(name, cast(int, TypeIcon.PublicMethod), node) def Add(text as string, data): node = _current.Nodes.Add(text) node.Tag = data return node def Add(text as string, imageIndex as int, data): Add(text, imageIndex, imageIndex, data) def Add(text as string, imageIndex as int, selectedImageIndex as int, data): node = _current.Nodes.Add(text) node.Tag = data node.ImageIndex = imageIndex node.SelectedImageIndex = selectedImageIndex return node def SaveTreeViewState(): 
return SaveTreeViewState([], _tree.Nodes) def SaveTreeViewState(state as List, nodes as TreeNodeCollection): for node as TreeNode in nodes: SaveTreeViewState(state, node) return state def SaveTreeViewState(state as List, node as TreeNode): if len(node.Nodes): state.Add((node.FullPath, node.IsExpanded)) SaveTreeViewState(state, node.Nodes) def RestoreTreeViewState(state): for fullpath as string, expanded as bool in state: if not expanded: node = SelectNode(fullpath) node.Collapse() if node def SelectNode(fullpath as string): parts = /\//.Split(fullpath) nodes = _tree.Nodes for part in parts: node = SelectNode(nodes, part) break if node is null nodes = node.Nodes return node def SelectNode(nodes as TreeNodeCollection, text as string): for node as TreeNode in nodes: if node.Text == text: return node return null nant-0.9.5~git20110729.r1.202a430/extras/boox/booxw/src/ITextInterceptor.boo000077500000000000000000000020241161462365500256210ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // This file is part of Boo Explorer. // // Boo Explorer is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. // // Boo Explorer is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. 
// // You should have received a copy of the GNU General Public License // along with Foobar; if not, write to the Free Software // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA #endregion namespace BooExplorer interface ITextInterceptor: Name as string: get def Process(ch as System.Char, manipulator as TextManipulator) as bool nant-0.9.5~git20110729.r1.202a430/extras/boox/booxw/src/InteractiveConsole.boo000066400000000000000000000026411161462365500261470ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // This file is part of Boo Explorer. // // Boo Explorer is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. // // Boo Explorer is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. 
// // You should have received a copy of the GNU General Public License // along with Foobar; if not, write to the Free Software // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA #endregion """ Interactive Forms-based Console """ namespace BooExplorer import System import System.Windows.Forms import System.Drawing import WeifenLuo.WinFormsUI class InteractiveConsole(DockContent): def constructor(mainForm as MainForm): self.Text = "Interactive Console" self.DockPadding.Top = 2 self.Controls.Add(console=booish.gui.InteractiveInterpreterControl(Font: mainForm.Settings.TextFont)) console.Interpreter.SetValue("MainForm", mainForm) override def GetPersistString(): return "InteractiveConsole|" nant-0.9.5~git20110729.r1.202a430/extras/boox/booxw/src/MainForm.boo000066400000000000000000000271251161462365500240630ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // This file is part of Boo Explorer. // // Boo Explorer is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. // // Boo Explorer is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. 
// // You should have received a copy of the GNU General Public License // along with Foobar; if not, write to the Free Software // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA #endregion namespace BooExplorer import System import System.Environment import System.IO import System.ComponentModel import System.Windows.Forms import System.Drawing import WeifenLuo.WinFormsUI import Boo.Lang.Compiler import Boo.Lang.Compiler.Pipelines import Boo.Lang.Compiler.Ast import Boo.Lang.Compiler.IO class MainForm(Form): _dockPanel as DockPanel _status as StatusBar _statusPanel1 as StatusBarPanel _timer as Timer [property(IsQuitting)] _isQuitting = false [getter(Settings)] _settings as BooxSettings = LoadSettings() [getter(DocumentOutline)] _documentOutline = BooExplorer.DocumentOutline() [getter(TaskList)] _taskList = BooExplorer.TaskList(self) [getter(OutputPane)] _outputPane = BooExplorer.OutputPane() [getter(InteractiveConsole)] _interactiveConsole = BooExplorer.InteractiveConsole(self) [getter(TextInterceptors)] _textInterceptors as (ITextInterceptor) _argv as (string) _menuItemClose as MenuItem _menuItemSave as MenuItem _menuItemSaveAs as MenuItem _parser = BooCompiler() _resourceManager = System.Resources.ResourceManager(MainForm) _container = System.ComponentModel.Container() def constructor(argv as (string)): _argv = argv _dockPanel = DockPanel(Dock: DockStyle.Fill, ActiveAutoHideContent: null, TabIndex: 1, ActiveDocumentChanged: _dockPanel_ActiveDocumentChanged) _statusPanel1 = StatusBarPanel(AutoSize: StatusBarPanelAutoSize.Contents) _status = StatusBar(ShowPanels: true, TabIndex: 2) _status.Panels.Add(_statusPanel1) _parser.Parameters.Pipeline = Boo.Lang.Compiler.Pipelines.Parse() (_parser.Parameters.Pipeline[0] as duck).TabSize = 1 SuspendLayout() self.Icon = _resourceManager.GetObject("_icon") self.Size = System.Drawing.Size(800, 600) self.Menu = CreateMainMenu() self.Text = "Boo Explorer" self.IsMdiContainer = true 
_container.Add(_interactiveConsole) _container.Add(_documentOutline) _container.Add(_taskList) _container.Add(_outputPane) _container.Add(_dockPanel) _container.Add(_status) Controls.AddRange(( _dockPanel, _status)) ResumeLayout(false) LoadInterceptors() _timer = Timer(Tick: _timer_Tick, Interval: 50ms.TotalMilliseconds) _timer.Enabled = true override def Dispose(flag as bool): SaveDockState() super(flag) private def GetSettingsFileName(): return Path.Combine(GetApplicationDataFolder(), "settings.xml") private def SaveSettings(): _settings.Save(GetSettingsFileName()) private def LoadSettings(): fname = GetSettingsFileName() if File.Exists(fname): return BooxSettings.Load(fname) return BooxSettings() private def CreateMainMenu(): menu = MainMenu() file = MenuItem(Text: "&File", MergeOrder: 0) file.MenuItems.Add(MenuItem(Text: "&Open...", Click: _menuItemOpen_Click, Shortcut: Shortcut.CtrlO)) file.MenuItems.Add(MenuItem(Text: "&New", Click: _menuItemNew_Click, Shortcut: Shortcut.CtrlN)) file.MenuItems.Add(MenuItem("-")) file.MenuItems.Add(_menuItemSave = MenuItem(Text: "&Save", Enabled: false, Click: _menuItemSave_Click, Shortcut: Shortcut.CtrlS)) file.MenuItems.Add(_menuItemSaveAs = MenuItem(Text: "S&ave as...", Enabled: false, Click: _menuItemSaveAs_Click)) file.MenuItems.Add(_menuItemClose = MenuItem(Text: "&Close", Click: _menuItemClose_Click, Shortcut: Shortcut.CtrlW, Enabled: false)) file.MenuItems.Add(MenuItem("-")) file.MenuItems.Add(MenuItem(Text: "E&xit", Shortcut: Shortcut.CtrlQ, Click: _menuItemExit_Click)) tools = MenuItem(Text: "&Tools", MergeOrder: 2, MergeType: MenuMerge.MergeItems) tools.MenuItems.Add(MenuItem(Text: "&Options", Shortcut: Shortcut.CtrlO, Click: _menuItemOptions_Click, MergeOrder: int.MaxValue)) view = MenuItem(Text: "&View", MergeOrder: 4) view.MenuItems.AddRange( ( MenuItem(Text: "Document Outline", Click: _menuItemDocumentOutline_Click, Shortcut: Shortcut.CtrlShiftD), MenuItem(Text: "Task List", Click: _menuItemTaskList_Click, 
Shortcut: Shortcut.CtrlShiftT), MenuItem(Text: "Output", Click: _menuItemOutputPane_Click, Shortcut: Shortcut.CtrlShiftO), MenuItem(Text: "Prompt", Click: ShowPrompt, Shortcut: Shortcut.CtrlShiftP) )) menu.MenuItems.AddRange((file, tools, view)) return menu def _timer_Tick(sender, args as EventArgs): _timer.Enabled = false if File.Exists(GetDockStateXmlFileName()): LoadDockState() OpenDocuments(_argv) else: if len(_argv): OpenDocuments(_argv) else: NewDocument() ShowDocumentOutline() StatusText as string: set: _statusPanel1.Text = value def ParseString(fname as string, code as string): try: _parser.Parameters.Input.Add(StringInput(fname, code)) return _parser.Run().CompileUnit ensure: _parser.Parameters.Input.Clear() private def LoadInterceptors(): tempInterceptors = [] if _settings.LoadPlugins: for file in Directory.GetFiles(MapPath("scripts"), "*.int"): interceptors = LoadInterceptorsFromFile(file) tempInterceptors = tempInterceptors + interceptors if interceptors _textInterceptors = array(ITextInterceptor, tempInterceptors) StatusText = "Loaded ${len(_textInterceptors)} TextInterceptor(s)" def MapPath(path as string): return Path.Combine(Path.GetDirectoryName(Application.ExecutablePath), path) def LoadInterceptorsFromFile(fileName as string): script = ScriptCompiler.CompileFile(fileName) if len(script.Errors): for error in script.Errors: print("Compiler error: ${error}") return null retTypes = script.GetTypes() return null unless retTypes return [cast(ITextInterceptor, retType()) for retType in retTypes if retType() isa ITextInterceptor] def Expand(fname as string, code as string): compiler = BooCompiler() compiler.Parameters.OutputWriter = StringWriter() compiler.Parameters.Pipeline = CompileToBoo() compiler.Parameters.Input.Add(StringInput(fname, code)) result = compiler.Run() self.UpdateTaskList(result.Errors) NewDocument().TextContent = compiler.Parameters.OutputWriter.ToString() def ShowDocumentOutline(): ShowContent(_documentOutline) private def 
ShowContent(content as DockContent): content.Show(_dockPanel) if DockState.Unknown == content.DockState: content.Pane.DockState = content.ShowHint def NewDocument(): editor = BooEditor(self) editor.Show(_dockPanel) editor.TextArea.Focus() return editor def OpenDocuments([required] fnames): for fname in fnames: try: OpenDocument(fname) except x: print(x) def OpenDocument([required] filename as string): filename = Path.GetFullPath(filename) content = FindEditor(filename) if content is null: editor = CreateEditor(filename) editor.Show(_dockPanel) editor.TextArea.Focus() return editor else: content.Show(_dockPanel) content.TextArea.Focus() return content private def CreateEditor([required] fname as string): editor = BooEditor(self) editor.Open(fname) return editor def _dockPanel_ActiveDocumentChanged(sender, args as EventArgs): document = _dockPanel.ActiveDocument editor = document as BooEditor _documentOutline.ActiveDocument = editor _menuItemClose.Enabled = document is not null _menuItemSaveAs.Enabled = _menuItemSave.Enabled = document is not null def _menuItemSaveAs_Click(sender, args as EventArgs): cast(BooEditor, _dockPanel.ActiveDocument).SaveAs() def _menuItemSave_Click(sender, args as EventArgs): cast(BooEditor, _dockPanel.ActiveDocument).Save() def _menuItemExit_Click(sender, args as EventArgs): self.Close() def _menuItemClose_Click(sender, args as EventArgs): _dockPanel.ActiveDocument.Close() def _menuItemDocumentOutline_Click(sender, args as EventArgs): ShowDocumentOutline() def _menuItemTaskList_Click(sender, args as EventArgs): ShowTaskList() def UpdateTaskList(errors as CompilerErrorCollection): _taskList.Update(errors) ShowTaskList() if len(errors) def ShowTaskList(): ShowContent(_taskList) def ShowOutputPane(): ShowContent(_outputPane) def ShowPrompt(): if _interactiveConsole.IsDisposed: _interactiveConsole = BooExplorer.InteractiveConsole(self) ShowContent(_interactiveConsole) def _menuItemOutputPane_Click(sender, args as EventArgs): ShowOutputPane() 
def _menuItemOptions_Click(): using dlg = Form(Text: "Options"): dlg.Controls.Add(PropertyGrid( Dock: DockStyle.Fill, SelectedObject: _settings, Font: Font, PropertySort: PropertySort.Alphabetical)) dlg.ShowDialog() SaveSettings() def _menuItemOpen_Click(sender, args as EventArgs): using dlg = OpenFileDialog( Filter: "boo files (*.boo)|*.boo|All files (*.*)|*.*", Multiselect: true): if DialogResult.OK == dlg.ShowDialog(self): for fname in dlg.FileNames: OpenDocument(fname) def _menuItemNew_Click(sender, args as EventArgs): NewDocument() def GetApplicationDataFolder(): folder = Application.UserAppDataPath Directory.CreateDirectory(folder) unless Directory.Exists(folder) return folder def GetDockStateXmlFileName(): return Path.Combine(GetApplicationDataFolder(), "dockstate.xml") def SaveDockState(): _dockPanel.SaveAsXml(GetDockStateXmlFileName()) def LoadDockState(): _dockPanel.LoadFromXml(GetDockStateXmlFileName(), OnDeserializeDockContent) def OnDeserializeDockContent(persistString as string) as DockContent: type, content = /\|/.Split(persistString) print("type: ${type}, content: ${content}") if "DocumentOutline" == type: return _documentOutline if "InteractiveConsole" == type: return _interactiveConsole if "TaskList" == type: return _taskList if "OutputPane" == type: return _outputPane if "BooEditor" == type: editor = BooEditor(self) editor.Open(content) if File.Exists(content) return editor raise ArgumentException("Invalid persistence string: ${persistString}") override protected def OnClosing(args as CancelEventArgs): super(args) if not _isQuitting and not args.Cancel: dirtyDocuments = [ editor.GetSafeFileName() for document in _dockPanel.Documents if (editor=(document as BooEditor)) and editor.IsDirty ] return unless len(dirtyDocuments) args.Cancel = ( DialogResult.Yes != MessageBox.Show("The following files were modified:\n\n\t" + join(dirtyDocuments, "\n\t") + "\n\nAre you sure you want to leave and lose all your changes?", "Boo Explorer", 
MessageBoxButtons.YesNo)) _isQuitting = not args.Cancel def FindEditor(fname as string): for document in _dockPanel.Documents: editor = document as BooEditor if editor and editor.FileName == fname: return editor return null nant-0.9.5~git20110729.r1.202a430/extras/boox/booxw/src/OutputPane.boo000066400000000000000000000033431161462365500244530ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // This file is part of Boo Explorer. // // Boo Explorer is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. // // Boo Explorer is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. 
// // You should have received a copy of the GNU General Public License // along with Foobar; if not, write to the Free Software // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA #endregion namespace BooExplorer import WeifenLuo.WinFormsUI import System import System.Windows.Forms import System.Drawing class OutputPane(DockContent): _richBox as RichTextBox def constructor(): _richBox = RichTextBox(Dock: DockStyle.Fill, Multiline: true, ReadOnly: true, Font: System.Drawing.Font("Lucida Console", 10)) SuspendLayout() Controls.Add(_richBox) self.HideOnClose = true self.DockableAreas = ( DockAreas.Float | DockAreas.DockBottom | DockAreas.DockTop | DockAreas.DockLeft | DockAreas.DockRight) self.ClientSize = System.Drawing.Size(295, 347) self.ShowHint = DockState.DockBottom self.Text = "Output" ResumeLayout(false) def SetBuildText(text as string): _richBox.Text = text override protected def GetPersistString(): return "OutputPane|" nant-0.9.5~git20110729.r1.202a430/extras/boox/booxw/src/PromptDialog.boo000066400000000000000000000037061161462365500247530ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // This file is part of Boo Explorer. // // Boo Explorer is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. // // Boo Explorer is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. 
// // You should have received a copy of the GNU General Public License // along with Foobar; if not, write to the Free Software // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA #endregion namespace BooExplorer import System.Windows.Forms import System.Drawing import System class PromptDialog(Form): _value as TextBox _message as Label def constructor(): _message = Label(Location: Point(2, 2), Size: System.Drawing.Size(200, 18)) _value = TextBox( Location: Point(2, 20), Size: System.Drawing.Size(290, 18)) ok = Button(Text: "OK", Location: Point(50, 45), DialogResult: DialogResult.OK) cancel = Button(Text: "Cancel", Location: Point(150, 45), DialogResult: DialogResult.Cancel) SuspendLayout() self.FormBorderStyle = System.Windows.Forms.FormBorderStyle.FixedDialog self.StartPosition = FormStartPosition.CenterParent self.Size = System.Drawing.Size(300, 100) self.AcceptButton = ok self.CancelButton = cancel Controls.Add(_message) Controls.Add(_value) Controls.Add(ok) Controls.Add(cancel) ResumeLayout(false) Message as string: set: _message.Text = value Value: get: return _value.Text set: _value.Text = value nant-0.9.5~git20110729.r1.202a430/extras/boox/booxw/src/TaskList.boo000066400000000000000000000056601161462365500241110ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // This file is part of Boo Explorer. // // Boo Explorer is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. // // Boo Explorer is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. 
// // You should have received a copy of the GNU General Public License // along with Foobar; if not, write to the Free Software // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA #endregion namespace BooExplorer import WeifenLuo.WinFormsUI import System import System.IO import System.Windows.Forms import System.Drawing import Boo.Lang.Compiler class TaskList(DockContent): _list as ListView _main as MainForm def constructor(main as MainForm): _main = main _list = ListView(Dock: DockStyle.Fill, View: View.Details, FullRowSelect: true, GridLines: true, Click: _list_Click) _list.Columns.Add("line" , 50, HorizontalAlignment.Left) _list.Columns.Add("column" , 50, HorizontalAlignment.Left) _list.Columns.Add("code" , 75, HorizontalAlignment.Left) _list.Columns.Add("description", 500, HorizontalAlignment.Left) _list.Columns.Add("module" , 150, HorizontalAlignment.Left) SuspendLayout() Controls.Add(_list) self.HideOnClose = true self.AllowRedocking = true self.DockableAreas = ( DockAreas.Float | DockAreas.DockBottom | DockAreas.DockTop | DockAreas.DockLeft | DockAreas.DockRight) self.ClientSize = System.Drawing.Size(295, 347) self.ShowHint = DockState.DockBottom self.Text = "Task List" ResumeLayout(false) def Clear(): _list.Items.Clear() def Update(errors as CompilerErrorCollection): _list.BeginUpdate() _list.Items.Clear() try: for error in errors: self.AddCompilerError(error) ensure: _list.EndUpdate() def AddCompilerError(error as CompilerError): item = _list.Items.Add(error.LexicalInfo.Line.ToString()) item.SubItems.AddRange(( error.LexicalInfo.Column.ToString(), error.Code, error.Message, error.LexicalInfo.FileName)) item.Tag = error def _list_Click(sender, args as EventArgs): selected = _list.SelectedItems return unless len(selected) > 0 error as CompilerError = selected[0].Tag fname = error.LexicalInfo.FileName if File.Exists(fname): document as BooEditor = _main.OpenDocument(fname) document.GoTo(error.LexicalInfo.Line-1) override protected def 
GetPersistString(): return "TaskList|" nant-0.9.5~git20110729.r1.202a430/extras/boox/booxw/src/TextManipulator.boo000077500000000000000000000030771161462365500255160ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. // // This file is part of Boo Explorer. // // Boo Explorer is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. // // Boo Explorer is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with Foobar; if not, write to the Free Software // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA #endregion namespace BooExplorer import ICSharpCode.TextEditor import ICSharpCode.TextEditor.Document import ICSharpCode.TextEditor.Actions import System.Windows.Forms class TextManipulator: _editor as TextEditorControl def constructor(editor as TextEditorControl): _editor = editor def Insert(nextText as string): newCaretOffset = _editor.ActiveTextAreaControl.TextArea.Caret.Offset _editor.Document.Insert(newCaretOffset, nextText) def GetWordBeforeCaret() as string: start = TextUtilities.FindPrevWordStart(_editor.Document, _editor.ActiveTextAreaControl.TextArea.Caret.Offset); return _editor.Document.GetText(start, _editor.ActiveTextAreaControl.TextArea.Caret.Offset - start); nant-0.9.5~git20110729.r1.202a430/extras/boox/booxw/src/boox.boo000066400000000000000000000025541161462365500233210ustar00rootroot00000000000000#region license // Copyright (c) 2004, Rodrigo B. de Oliveira (rbo@acm.org) // All rights reserved. 
// // This file is part of Boo Explorer. // // Boo Explorer is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. // // Boo Explorer is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with Foobar; if not, write to the Free Software // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA #endregion namespace BooExplorer import System import System.Reflection import System.IO import System.Windows.Forms import ICSharpCode.TextEditor import ICSharpCode.TextEditor.Document def GetAssemblyFolder(): return Path.GetDirectoryName(Assembly.GetExecutingAssembly().Location) [STAThread] def Main(argv as (string)): HighlightingManager.Manager.AddSyntaxModeFileProvider( FileSyntaxModeProvider(GetAssemblyFolder())) try: Application.Run(MainForm(argv)) except x: print x nant-0.9.5~git20110729.r1.202a430/extras/boox/booxw/src/booxw.cmbx000066400000000000000000000025401161462365500236550ustar00rootroot00000000000000 nant-0.9.5~git20110729.r1.202a430/extras/boox/booxw/src/booxw.prjx000066400000000000000000000076301161462365500237140ustar00rootroot00000000000000 nant-0.9.5~git20110729.r1.202a430/extras/boox/booxw/todo.txt000066400000000000000000000024461161462365500225700ustar00rootroot00000000000000done * treeview para navegação do fonte done * sincronizar editor com a árvore automaticamente done * save done * save as... 
done * F5 executa o código done * avisar sobre arquivo modificado ao fechar janela e permitir salvá-lo done * avisar sobre arquivos modificados na saída e permitir salvá-los done * go to line/undo/redo/cut/copy/paste/etc done * imagens para as classes, campos e propriedades do document outline done * melhorar posicionamento do Document Outline: vai para o primeiro caracter não branco da linha done * saída do programa em um console done * lembrar a última configuração do editor (janelas abertas e posições) * projeto * arquivos * referências * recursos * compilar projeto (sem geração de codigo) em background cada vez que um arquivo for salvo para exibir as mensagens de erro adequadas (estilo eclipse) * listview para erros (automaticamente mostra os erros cada vez que o arquivo é salvo) * ícones nas tabs "Document Outline", "Task List" e "Output Pane" * opções (lembradas entre execuções) * executar código em um outro AppDomain para permitir descarga de código * mostrar tabs e espaços * mostrar caracter fim de linha * fonte * toolbar * class viewer do projeto inteiro * habilitar menus de edição somente quando for possível executar cada ação nant-0.9.5~git20110729.r1.202a430/extras/boox/default.build000066400000000000000000000053601161462365500223670ustar00rootroot00000000000000 import System.IO import System.Resources import Gdk from "gdk-sharp" import Gtk from "gtk-sharp" def MapPath(path): return Path.Combine(Project.BaseDirectory, path) def GetBytes(fname as string): using stream=File.OpenRead(fname): buffer = array(byte, stream.Length) stream.Read(buffer, 0, stream.Length) return buffer using writer=ResourceWriter(MapPath("build/BooExplorer.resources")): for fname in Directory.GetFiles(MapPath("resources/ClassBrowserIcons"), "*.png"): print Path.GetFileNameWithoutExtension(fname) writer.AddResource(Path.GetFileNameWithoutExtension(fname), GetBytes(fname)) 
nant-0.9.5~git20110729.r1.202a430/extras/boox/resources/000077500000000000000000000000001161462365500217305ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/extras/boox/resources/ClassBrowserIcons/000077500000000000000000000000001161462365500253355ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/extras/boox/resources/ClassBrowserIcons/InternalClass.png000066400000000000000000000015611161462365500306100ustar00rootroot00000000000000PNG  IHDRa pHYsss"gAMA|Q cHRMz%u0`:o_FIDATxb?2^?> @(5k\5  $6b;wte@kmo0[ 9Eg.\ -'ϟ|}/3/(y*6R`CqtLL 0]p!ׯ ,@ ^>^YI)^!!Ά a >1ŋiw QQA|b<Ό L\B۷Y]!", j&fw ou@ 15 c8|4 f49H{z`JcZ0 з GL`0qePyϋ' cx' wtfΘҒiJOgϚ@,:x3öc}9kw53l=Y7ofX3`be*`5HIENDB`nant-0.9.5~git20110729.r1.202a430/extras/boox/resources/ClassBrowserIcons/InternalEnum.png000066400000000000000000000014651161462365500304520ustar00rootroot00000000000000PNG  IHDRa pHYsss"gAMA|Q cHRMz%u0`:o_FIDATxb,ӓ @``p@ç`7^2h8LeWb 1<*d+e*k/n-X`Yd𝡱Cߢdv!  cô ?~ t 7 B`o?m P30@!w3ȩ2L>;7ϟ 8~g3X-@/n؏ 'w -g~cƿ ( XA5hO_3ιӀ iU 086l$ 7Lc@ @`4 Z, m ~=W_K!5 z~ ) o"t1yqiU dG RcPni?~<pnZK30 kUւ5-WĦMhoTTw|4a{B b`[A. П?p‘%L^P'?/0߷ @,9x3öc灩0ë}x@12"|^0 $;IENDB`nant-0.9.5~git20110729.r1.202a430/extras/boox/resources/ClassBrowserIcons/InternalField.png000066400000000000000000000014251161462365500305650ustar00rootroot00000000000000PNG  IHDRa pHYsss"gAMA|Q cHRMz%u0`:o_FIDATxb??@l(kN_p5+Å[l ^ @yLK OAr 2s`xw^130UB)O  gxۯ 30|+~l)?~F ,,zA L /]AO/e`f`xȾ@L L fz  %3ja\a@e`z:3 g`O &3*cd@/ B i?kO6@1;`LN )>4 lkˉ~S;`01?w.#cRf`x_gB3HW tt2{0x(3893°p&`L?o>#7'(A hWpnZϟ2b%_p_`D -0nDA.bȒ &Ρ Ҳ 3yu ^$h@,9x3öc}9kw5b<()w0dPfIENDB`nant-0.9.5~git20110729.r1.202a430/extras/boox/resources/ClassBrowserIcons/InternalInterface.png000066400000000000000000000014071161462365500314420ustar00rootroot00000000000000PNG  IHDRa pHYsss"gAMA|Q cHRMz%u0`:o_F}IDATxb?% ({_3xAJ\a˞@{GO1wK[S-^FMe)bb`c psWn3031;swb* ` q݇O3ć1'z1X233,^e*@1AIN{π\d+P34^N^^^9)A>^irB ?[@, Σ ?efxgvw1H0<~Ygß?;Oo{Ǐ .V &zJӐCgYO] #!W53d0)O  rSa-ޑ}S/14DB@0ͦ.! o?"@[DA.) 
)AJ8GL`0zA]Zxp~:g4 X@d Ҳ`/yu ێg0ë}L !XXXS/ ;fa$IENDB`nant-0.9.5~git20110729.r1.202a430/extras/boox/resources/ClassBrowserIcons/InternalProperty.png000066400000000000000000000016751161462365500313750ustar00rootroot00000000000000PNG  IHDRa pHYs  gAMA|Q cHRMz%u0`:o_F3IDATxb,?# c #> $ @9FF7ހ(6 ’\ bS 5bu 3`d WvY @1uC P_s6Ex-7 R\,62lɻ xdžY !@e&ā 7 o=& @,@0H`A 1՘0䶞gȍadx fe*3/@t0Wgehlma(7dhx;aSH 23'2b4xI{ _9vy?vSaש?@|P|a0I )cNqCR)K=pmX3;0ϟX!÷/_^/f WTw ' F`a`aaʀ k@<RIENDB`nant-0.9.5~git20110729.r1.202a430/extras/boox/resources/ClassBrowserIcons/PrivateClass.png000066400000000000000000000016301161462365500304430ustar00rootroot00000000000000PNG  IHDRa pHYsss"gAMA|Q cHRMz%u0`:o_FIDATxb?2^?> @(5k\5  $6b;wte@kmo0[ 9Eg.\ -'ϟ|}/3/(y*6R`CqtLL 0]p!ׯ ,@ ^>^YI)^!!Ά a >1~ CӬ  ӗ_N0Xh8 b{5Eq(FFFF3gax#쥫^]\.3XͶn@1HS_T 뷟 ~OGL˛V=01]l30_ 1ן ~_?~ ODg``xiJ qirZvT`j 4~O+ zz3c Ӧl @6+*3 0)r ;7ϟ 8~g3X-@/n؏ 'w -g~cƿ ( X5_xf-~૳Я 0w=>@_?fHe6 ,.pk_3(ɋ1.VghvUai h߿0l{a akw3 ,`% ٌ =?bǯ?@`*,U ;v1|`gCl 30s) j@+J00` ts}O?2ZȐvnZe-XI { X>} !F3gz/ß? 
5tE bz y`@o?~ 5e3 W>5vL`'$*&_ @1];ϟ?Q`k޻>A a2B'MMIENDB`nant-0.9.5~git20110729.r1.202a430/extras/boox/resources/ClassBrowserIcons/PrivateField.png000066400000000000000000000014771161462365500304320ustar00rootroot00000000000000PNG  IHDRa pHYsss"gAMA|Q cHRMz%u0`:o_FIDATxb??@l(kN_p5+Å[l ^ @yLK OAr 2s`xw^130UB)O  gxۯ 30|+~l)?~F ,,zA L /]AO/e`f`xȾ@L L fz YLUl%Vl9p#G@^X3 c;_Dy 48YN $pa`{?d3aؼ CLio}a;ïi /10<}߿20>~q6f 0|!1=̹Yvb#s22<{t:0^c{={<߸7G wfff >} rF( fb/6ce*Ȣ/f0 h$?l'/` 3XskO_2&2Gv?t*0ھ HޕӰ9h2 /50_IENDB`nant-0.9.5~git20110729.r1.202a430/extras/boox/resources/ClassBrowserIcons/PrivateInterface.png000066400000000000000000000014151161462365500312770ustar00rootroot00000000000000PNG  IHDRa pHYsss"gAMA|Q cHRMz%u0`:o_FIDATxb?% ({_3xAJ\a˞@{GO1wK[S-^FMe)bb`c psWn3031;swb* ` q݇O3ć1'z1X233,^e*@1AIN{π\d+P34^N^^^9)A>^irB ?[@, A[U_  _>g "" `k֣$)&,.ab1㛦OswgN]zp3k3T&  w$+@ee}f:g/ư}3߿3T$2)M" @ iѫ;gl<ďi0>&@/r>($| L f-YðtR5#@,`gGHYsN (PT`4O]o?~L}knOpTǏ_  FP@dNOp|`{WO| FXvBtc4Qq;IENDB`nant-0.9.5~git20110729.r1.202a430/extras/boox/resources/ClassBrowserIcons/PrivateMethod.png000066400000000000000000000014641161462365500306230ustar00rootroot00000000000000PNG  IHDRa pHYsss"gAMA|Q cHRMz%u0`:o_FIDATxb?18!?}}'?Df/;w3n_O4@4É7n?t=K=ؐge̐A C,cIfIbiS7122 廬 L 27cgca0`g`a`cx 9? 
~Zpf6f`xv%X, |!"fd92.g`gfo`pBA O3/@0` $ @9FF7ހ(6 ’\ bS 5bu 3`d WvY @1uC P_s6Ex-7 R\,62lɻ xdžY !@e&ā 7 o=& @,@0H`A 1՘0䶞gȍadx fe*3/@t0Wgehlma(7dhx;aSH 23'2b48ym/_Or77Wi°Sb7éE=~w%y1{1{2LYRp'0<~ '~ev X~{0<7 f^qg?jӉg @8d_~\0yd ;v1| V0|_~<4@Y(o{?2Z_+a.`H O_~C3<* ""ih<P9##"y /x>1ӛ_}S Ao?~ Ҳ O'F  (_(1ן ?_?~1 @(5k\5  $6b;wte@kmo0[ 9Eg.\ -'ϟ|}/3/(y*6R`CqtLL 0]p!ׯ ,@ ^>^YI)^!!Ά a >1fH cf 4DPP lܐ24k0\@Lb g`~!:؝ç`1tttL_Xx1DQ]a{T Auck >=°j/؟}E`,1]f00m:-L?B #A| w3??S^Bx?1(b@ At GVd`N@(|zwGϊr_2nYX 4^|tT^ >g%IENDB`nant-0.9.5~git20110729.r1.202a430/extras/boox/resources/ClassBrowserIcons/ProtectedEnum.png000066400000000000000000000014201161462365500306160ustar00rootroot00000000000000PNG  IHDRa pHYsss"gAMA|Q cHRMz%u0`:o_FIDATxb,ӓ @``p@ç`7^2h8LeWb 1<*d+e*k/n-X`Yd𝡱Cߢdv!  cô ?~ t 7 B`o?m P30@!w3ȩ2L>;7ϟ 8~g3X-@/n؏ 'w -g~cƿ ( XA5hO_3ιӀ iU 08AO;{p 7Lc@ @`:|ٌV-c8r3W+ԯ% ٌ =?bzрsR˜lվmnf` & a1_mE[Z0;!*p#sck >=°j/W` ̀4%)gW(gff '(0ucrY@h 5C' ݏ1 9 $9 C+@s 5IENDB`nant-0.9.5~git20110729.r1.202a430/extras/boox/resources/ClassBrowserIcons/ProtectedField.png000066400000000000000000000014041161462365500307370ustar00rootroot00000000000000PNG  IHDRa pHYsss"gAMA|Q cHRMz%u0`:o_FzIDATxb??@l(kN_p5+Å[l ^ @yLK OAr 2s`xw^130UB)O  gxۯ 30|+~l)?~F ,,zA L /]AO/e`f`xȾ@L L fz  %3ja\a@e`z:3 g`O &3aم_]}gFVgz{9YN $pa`{?d 3Gwolb+ 1DM*`dLN )߿20>~q6fڪ??I9fHLϐ?33Tt@Lb ھ!:؝ç,`YXXa|FnNm[6C@M%sLq@m…θ%h@JDkabANH 岀SqBI޽f]!4"@86$0@q!IENDB`nant-0.9.5~git20110729.r1.202a430/extras/boox/resources/ClassBrowserIcons/ProtectedInterface.png000066400000000000000000000013611161462365500316160ustar00rootroot00000000000000PNG  IHDRa pHYsss"gAMA|Q cHRMz%u0`:o_FgIDATxb?% ({_3xAJ\a˞@{GO1wK[S-^FMe)bb`c psWn3031;swb* ` q݇O3ć1'z1X233,^e*@1AIN{π\d+P34^N^^^9)A>^irB ?[@, Σ ?efxgvw1H0<~Ygß?;Oo{J$ٯ^cɜ9 >:| 0N !w2ټ$#TOy@J)K_ 3@AajkpMK2X9lb8mǻ=?NP"aff&@,b gu!:؝ ç2 \9nOlȶg\ 4 %50@ C؀( Ąlwz?a~4@(ΩM& 
`3V]&IENDB`nant-0.9.5~git20110729.r1.202a430/extras/boox/resources/ClassBrowserIcons/ProtectedMethod.png000066400000000000000000000013751161462365500311430ustar00rootroot00000000000000PNG  IHDRa pHYsss"gAMA|Q cHRMz%u0`:o_FsIDATxb?18!?}}'?Df/;w3n_O4@4É7n?t=K=ؐge̐A C,cIfIbiS7122 廬 L 27cgca0`g`a`cx 9? ~Zpf6f`xv%X, |!" dc¬]("ߘ6ʼBA O3/@0`_O/0^iRs nx8n2kO6@y`/##cÒKقO/6Nf6H/@1 d sR˜ o1?'?0/Vmf 3@190Ὸ?_  wdHzQ81\9nOJSIg\ 4 %50@ C݀A. Hw:޽f]!4"@86$0@IENDB`nant-0.9.5~git20110729.r1.202a430/extras/boox/resources/ClassBrowserIcons/ProtectedProperty.png000066400000000000000000000017161161462365500315460ustar00rootroot00000000000000PNG  IHDRa pHYs  gAMA|Q cHRMz%u0`:o_FDIDATxb,?# c #> $ @9FF7ހ(6 ’\ bS 5bu 3`d WvY @1uC P_s6Ex-7 R\,62lɻ xdžY !@e&ā 7 o=& @,@0H`A 1՘0䶞gȍadx fe*3/@t0Wgehlma(7dhx;aSH 23'2b4xI{ _9vy?vSaש?@|POf063e8נSгaʒz+>18g`'~ev X~pt}5?ߑgOɓ|C~f3<6x CI6@19H_b0u2ܹ )a a蘰+0~~xdIBX@۷0D3`b ) >e  4i?;ÓdPQgaծ Nj2;0,L@匌1@, I 71k>́a.`\35?} h& @,ǰCvgH): lp?@(g`aazec@oY};Ā' 0\ N(b0@W_zuIENDB`nant-0.9.5~git20110729.r1.202a430/extras/boox/resources/ClassBrowserIcons/class.png000066400000000000000000000013351161462365500271520ustar00rootroot00000000000000PNG  IHDRa pHYsss"gAMA|Q cHRMz%u0`:o_FSIDATxb?2^?> @(5k\5  $6b;wte@kmo0[ 9Eg.\ -'ϟ|}/3/(y*6R`CqtLL 0]p!ׯ ,@ ^>^YI)^!!Ά a >1;7ϟ 8~g3X-@/n؏ 'w -g~cƿ ( XA5hO_3ιӀ iU 086l$ 7Lc@ @`4 Z, m ~=W_K!5 z~ [뽯h!j#  ?f` f,sc'>#XSZ5įi`MK'3bqXXX2&@ȦB@@`!',IENDB`nant-0.9.5~git20110729.r1.202a430/extras/boox/resources/ClassBrowserIcons/event.png000066400000000000000000000012001161462365500271550ustar00rootroot00000000000000PNG  IHDRa pHYsss"gAMA|Q cHRMz%u0`:o_FIDATxb?% X~~{)p\Fǿ^~ c`l &t}``w}1HJBn11\dfh[2bL=@a[ 6c5FGwLO6L=@aDO3:_5?1twpa "f6#O>4?P o> Fli  1>{7CޞY dDV @av{w04 lkk9,`X}oXP*Ƥ`ڼm+\3"XYA_Fx0̛Ɂ?zēB@@@`v>_IENDB`nant-0.9.5~git20110729.r1.202a430/extras/boox/resources/ClassBrowserIcons/interface.png000066400000000000000000000011421161462365500300010ustar00rootroot00000000000000PNG  IHDRa pHYsss"gAMA|Q cHRMz%u0`:o_FIDATxb?% 
({_3xAJ\a˞@{GO1wK[S-^FMe)bb`c psWn3031;swb* ` q݇O3ć1'z1X233,^e*@1AIN{π\d+P34^N^^^9)A>^irB ?[@, Σ ?efxgvw1H0<~Ygß?;Oo{Ǐ .V &zJӐCgYO] #!W53d0)O  rSa-ޑ}S/1XEIENDB`nant-0.9.5~git20110729.r1.202a430/extras/boox/resources/ClassBrowserIcons/method.png000066400000000000000000000011551161462365500273250ustar00rootroot00000000000000PNG  IHDRa pHYsss"gAMA|Q cHRMz%u0`:o_FIDATxb?18!?}}'?Df/;w3n_O4@4É7n?t=K=ؐge̐A C,cIfIbiS7122 廬 L 27cgca0`g`a`cx 9? ~Zpf6f`xv%X, |!" dc¬]("ߘ6ʼBA O3/@.`fp ӗB0gz?A!G 62oϟl"Wax ÒK?~dxs曟of4E%10 $ @9FF7ހ(6 ’\ bS 5bu 3`d WvY @1uC P_s6Ex-7 R\,62lɻ xdžY !@e&ā 7 o=& @,@0H`A 1՘0䶞gȍadx fe*3/@t0Wgehlma(7dhx;aSH 23'2b4xI{ _9vy?vSaש?@|P|a0I )cNqCR)K2H~g _!Td]U7P  wXM@E?ʳCf.ځ ~aa <<*no  z8"@,w>ĝ0 ̂?#;\;r $?d6`-+ؙIENDB`nant-0.9.5~git20110729.r1.202a430/extras/boox/resources/boo.ico000066400000000000000000000042761161462365500232140ustar00rootroot00000000000000 ( @aa߰--ׅJJؚrr<< $$}}PPގhhҒۨYYjj@@33++QQ͆CCuuJJ٢xx 88eeޟ77Ԗ00 ց׉11@@ޤ[[''ݭkk!!MM^^uu;;ڥffˀ ))یll{{..--1155::@@՘Ӕcchh>>٠ &&22BBυyyݰee  ""%%66ޮ99;;==??vv##&&((**,,22ۧAAڦ٣ZZdd !!""$$%%&&''߱ްޭxx4m.僺ԯcQY0mAW"䧸xjuۍUPP-qVPPbP̴~˵rPPPP!PPP!PPsPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP,PPPPPPPPPPPPPPPPPPPPPPPPPfHVPPPPPPPPPPPPPPPPPPPPPPPѢVPPPPPPPPPPPPP] \ĻPPPPPPPPPPPPPPPPPPn|˸PPPPPPPPPPPPPPPPPPPPVȻPPPPPPPPPPPPPPPP޾PPPPPPPPPPPPPPV#TwrPPPPPP֬๹N"/`Jnant-0.9.5~git20110729.r1.202a430/extras/common-properties.build000066400000000000000000000026561161462365500234630ustar00rootroot00000000000000 Invalid SharpDevelop application directory! 
${sharpdevelop.dir} Please, create or edit your extras/build.properties file and include the full path to the SharpDevelop base directory in the property sharpdevelop.dir: <property name="sharpdevelop.dir" value="full-here" /> nant-0.9.5~git20110729.r1.202a430/extras/genchangelog.boo000066400000000000000000000052001161462365500220660ustar00rootroot00000000000000""" Usage: booi genchangelog.boo previousVersion nextVersion """ import System import System.Web from "System.Web.dll" import System.Collections.Generic import System.Text.RegularExpressions import Boo.Lang.PatternMatching highlights as (string) contributors = Dictionary[of string,string]() def GetName(ckey as string): match ckey: case "avish": return "Avishay Lavie" case "bamboo": return "Rodrigo B. De Oliveira" case "cedricv": return "Cedric Vivier" case "grunwald": return "Daniel Grunwald" case "neoeinstein": return "Marcus Griep" otherwise: return ckey def AuthorLink(ckey as string, name as string): return "${name}" def IssueLink(match as Match): return "${match}" def Item(match as Match): s = match.ToString() return if not s if s[0] != char(' '): #contributor ckey = s.Substring(0, s.IndexOf(char(' '))) name = s.Replace(ckey,GetName(ckey))[0:-1] contributors.Add(ckey, name) dl = ("" if match.Index else "") return "${dl}
${AuthorLink(ckey, name)}
" for h in highlights: if s.IndexOf(h) != -1: return "
${s}
" return "
${System.Web.HttpUtility.HtmlEncode(s)}
" args = Environment.GetCommandLineArgs() previous = (args[2] if len(args) > 2 else "") next = (args[3] if len(args) > 3 else "") highlights = (args[4:] if len(args) > 4 else (,)) shortlog = shell("git", "shortlog ${previous}..${next}") issue = /BOO-[0-9]+/ item = /.*/ print "" print "BOO: Changelog between ${previous} and ${next}" print "" print "" print "

Changelog between ${previous} and ${next}

" print "
" print "

Contributors to this release:

" print "

" shortlog = item.Replace(shortlog, Item) for c in contributors: print "${c.Value}" print "

" print "
" print issue.Replace(shortlog, IssueLink) print "
" nant-0.9.5~git20110729.r1.202a430/extras/gendarme.ignore000066400000000000000000000035711161462365500217440ustar00rootroot00000000000000# Gendarme ignore file # R: Gendarme.Rules.Performance.AvoidRepetitiveCastsRule M: System.Boolean Boo.Lang.Runtime.RuntimeServices::ToBool(System.Object) M: Boo.Lang.Compiler.TypeSystem.IEntity Boo.Lang.Compiler.Steps.MacroProcessing.MacroExpander::ResolveMacroName(Boo.Lang.Compiler.Ast.MacroStatement) M: System.Type Boo.Lang.Compiler.Steps.EmitAssembly::GetSystemType(Boo.Lang.Compiler.TypeSystem.IType) M: Boo.Lang.Compiler.Ast.Expression Boo.Lang.Compiler.Steps.InjectCallableConversions::Convert(Boo.Lang.Compiler.TypeSystem.IType,Boo.Lang.Compiler.Ast.Expression) M: System.Void Boo.Lang.Compiler.Steps.OptimizeIterationStatements::CheckForItemInRangeLoop(Boo.Lang.Compiler.Ast.ForStatement) M: System.Void Boo.Lang.Compiler.Steps.ProcessMethodBodies::PostProcessReferenceExpression(Boo.Lang.Compiler.Ast.ReferenceExpression) M: System.Boolean Boo.Lang.Compiler.Steps.ProcessMethodBodies::IsBeingAssignedTo(Boo.Lang.Compiler.Ast.MemberReferenceExpression) M: System.Int32 Boo.Lang.Compiler.TypeSystem.CallableResolutionService::CalculateArgumentScore(Boo.Lang.Compiler.TypeSystem.IParameter,Boo.Lang.Compiler.TypeSystem.IType,Boo.Lang.Compiler.Ast.Node) M: Boo.Lang.Compiler.TypeSystem.Core.GlobalNamespace Boo.Lang.Compiler.TypeSystem.Services.NameResolutionService::GetGlobalNamespace() M: Boo.Lang.Compiler.CompilerError Boo.Lang.Compiler.CompilerErrorFactory::ValueTypeParameterCannotUseDefaultAttribute(Boo.Lang.Compiler.Ast.Node,System.String) R: Gendarme.Rules.Performance.AvoidUnneededFieldInitializationRule M: System.Void Boo.Lang.Compiler.CompilerParameters::.ctor(Boo.Lang.Compiler.TypeSystem.Reflection.IReflectionTypeSystemProvider,System.Boolean) R: Gendarme.Rules.Design.EnsureSymmetryForOverloadedOperatorsRule T: Boo.Lang.List`1 T: Boo.Lang.List R: Gendarme.Rules.BadPractice.CheckNewExceptionWithoutThrowingRule M: System.Void 
Boo.Lang.Compiler.Ast.DepthFirstTransformer::.cctor() nant-0.9.5~git20110729.r1.202a430/extras/install-gtksourceview-mode000077500000000000000000000007771161462365500241770ustar00rootroot00000000000000#! /bin/bash cat >> /usr/share/mime-info/gtksourceview-sharp.keys <> /usr/share/mime-info/gtksourceview-sharp.mime < !!!!!! If you are making a deb for public distribution, please make sure you have set 'install.prefix' to '/usr', 'debug' property to false and 'optimize' property to true in default.build 'install.prefix' is currently set to ${install.prefix} 'debug' is currently set to ${debug} 'optimize' is currently set to ${optimize} 'install.nant.task' is currently set to ${install.nant.task} !!!!!! nant-0.9.5~git20110729.r1.202a430/extras/man/000077500000000000000000000000001161462365500175225ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/extras/man/booc.1000066400000000000000000000053221161462365500205300ustar00rootroot00000000000000.\" Hey, EMACS: -*- nroff -*- .\" First parameter, NAME, should be all caps .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection .\" other parameters are allowed: see man(7), man(1) .TH BOOC 1 "maggio 31, 2005" .\" Please adjust this date whenever revising the manpage. .\" .\" Some roff macros, for reference: .\" .nh disable hyphenation .\" .hy enable hyphenation .\" .ad l left justify .\" .ad b justify to both left and right margins .\" .nf disable filling .\" .fi enable filling .\" .br insert line break .\" .sp insert n+1 empty lines .\" for manpage-specific macros, see man(7) .SH NAME booc \- BOO compiler .SH SYNOPSIS .B booc .RI [ options ] " files " ... .SH DESCRIPTION This manual page documents briefly .B booc, the compiler for the BOO language. .SH OPTIONS A summary of options is included below. .TP .B \-v, \-vv, \-vvv Compiles using various levels of verbosity. .TP .B \-r:refname Adds a reference where refname is the reference name. 
.TP .B \-pkg:pkgname Adds references described in the package pkgname, as returned by pkg-config. .TP .B \-o:outfile Place output in file outfile. .TP .B \-t:type Output file will be of given type. Available types are library (to create a \&.dll file) or exe and winexe (to create executable files.) .TP .B \-p:pipeline Adds the step pipeline to the compile process. .TP .B \-c:culture CultureInfo to use. .TP .B \-srcdir:dir Specifies where to look for source files. .TP .B \-resource:[,] Specify a resource file. .TP .B \-embedres:[,] Embed a file as resource. .TP .B \-debug Adds debug flags to your code. Good for non-production. (On by default) .TP .B \-debug- Does not add debug flags to your code. Good for production environment. .TP .B \-checked Arithmetic operations are checked and will throw an error if an overflow occurs. (On by default) .TP .B \-checked- Does not check arithmetic operations. Use this to get the best performance if you are confident that it won't affect negatively your code. .TP .B \-strict Turns on strict mode (off by default). This changes default visibility to private for all members (as in C#), requires explicitely declaring return type and parameter types of public methods (API) and enables a few warnings. .TP .B \-nowarn[:W1,Wn] Suppress all or a list of compiler warnings. .TP .B \-warnaserror[:W1,Wn] Treats all or a list of warnings as errors. .SH SEE ALSO .BR booi (1), .BR booish (1). .br .SH AUTHOR BOO was written by Rodrigo Barreto de Oliveira . .PP This manual page was written by Federico Di Gregorio , for the Debian project (but may be used by others). Updated by Cedric Vivier . 
nant-0.9.5~git20110729.r1.202a430/extras/man/booi.1000066400000000000000000000022461161462365500205400ustar00rootroot00000000000000.\" Hey, EMACS: -*- nroff -*- .\" First parameter, NAME, should be all caps .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection .\" other parameters are allowed: see man(7), man(1) .TH BOOI 1 "maggio 31, 2005" .\" Please adjust this date whenever revising the manpage. .\" .\" Some roff macros, for reference: .\" .nh disable hyphenation .\" .hy enable hyphenation .\" .ad l left justify .\" .ad b justify to both left and right margins .\" .nf disable filling .\" .fi enable filling .\" .br insert line break .\" .sp insert n+1 empty lines .\" for manpage-specific macros, see man(7) .SH NAME booi \- BOO interpreter .SH SYNOPSIS .B booi .RI file .SH DESCRIPTION .B booi is the BOO interpreter. The .B booi command takes no parameters and the name of the file to be run as its only argument. .SH SEE ALSO .BR booc (1), .BR booish (1). .br .SH AUTHOR BOO was written by Rodrigo Barreto de Oliveira . .PP This manual page was written by Federico Di Gregorio , for the Debian project (but may be used by others). Updated by Cedric Vivier . nant-0.9.5~git20110729.r1.202a430/extras/man/booish.1000066400000000000000000000022361161462365500210720ustar00rootroot00000000000000.\" Hey, EMACS: -*- nroff -*- .\" First parameter, NAME, should be all caps .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection .\" other parameters are allowed: see man(7), man(1) .TH BOOISH 1 "maggio 31, 2005" .\" Please adjust this date whenever revising the manpage. 
.\" .\" Some roff macros, for reference: .\" .nh disable hyphenation .\" .hy enable hyphenation .\" .ad l left justify .\" .ad b justify to both left and right margins .\" .nf disable filling .\" .fi enable filling .\" .br insert line break .\" .sp insert n+1 empty lines .\" for manpage-specific macros, see man(7) .SH NAME booish \- BOO interactive shell .SH SYNOPSIS .B booish .SH DESCRIPTION .B booish is the BOO interactive shell. The command takes no parameters. Just invoke the shell and start coding in BOO. .SH SEE ALSO .BR booc (1), .BR booi (1). .br .SH AUTHOR BOO was written by Rodrigo Barreto de Oliveira . .PP This manual page was written by Federico Di Gregorio , for the Debian project (but may be used by others). Updated by Cedric Vivier . nant-0.9.5~git20110729.r1.202a430/extras/rules.xml000066400000000000000000000026111161462365500206230ustar00rootroot00000000000000 nant-0.9.5~git20110729.r1.202a430/extras/suse-install-gtksourceview-mode000077500000000000000000000010211161462365500251330ustar00rootroot00000000000000#! 
/bin/bash cat >> /opt/gnome/share/mime-info/gtksourceview-sharp.keys <> /opt/gnome/share/mime-info/gtksourceview-sharp.mime < @SUPPORTEDVERSIONS@ nant-0.9.5~git20110729.r1.202a430/il000077500000000000000000000001151161462365500157700ustar00rootroot00000000000000#!/bin/sh mono build/booc.exe -out:build/il.exe "$@" && monodis build/il.exe nant-0.9.5~git20110729.r1.202a430/il.bat000066400000000000000000000001431161462365500165330ustar00rootroot00000000000000@echo off build\booc.exe -out:build\il.exe %1 %2 %3 %4 %5 %6 %7 %8 %9 ildasm /text build\il.exe nant-0.9.5~git20110729.r1.202a430/lib/000077500000000000000000000000001161462365500162075ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/000077500000000000000000000000001161462365500177765ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/INSTALL.txt000066400000000000000000001025411161462365500216500ustar00rootroot00000000000000 A N T L R ====================================================================== Contents of this files is about i n s t a l l i n g ANTLR. If you want to get some general information please checkout file README.txt which is part of this package. You may als visit "doc/index.html" for further information as well as http://www.antlr.org in general. If you want to know about the very basic steps on u s i n g ANTLR then have a look into section ANTLR IS INSTALLED - WHAT'S NEXT? in file README.txt or in this file. You may also follow a simple mini-tutorial at http://www.antlr.org/article/cutpaste/index.html You may want to checkout also some target specific information on how to use ANTLR. 
Take a look at: doc/index.html - main entry for all documentation doc/runtime.html - when using JAVA doc/cpp-runtime.html - when using C++ doc/python-runtime.html - when using Python doc/csharp-runtime.html - when using C# If you don't have a precompiled version but rather a source code dis- tribution and you want to know some details on how to build and inst- all then you have come to the right place, PLEASE READ ON Otherwise please skip this document! ---------------------------------------------------------------------- TABLE OF CONTENTS * WHERE CAN I DOWNLOAD ANTLR? * REQUIREMENTS? * BUILD AND INSTALL? BRIEF VERSION! * BUILD AND INSTALL? LONG VERSION! * ANTLR IS INSTALLED - WHAT'S NEXT? * BUILD AND INSTALL ON WINDOWS NT/95/98/2000/XP? * FURTHER DETAILS ON ANTLR'S BUILD SETUP? * FAQ? * LIST OF CONFIGURED VARIABLES? * PLATFORMS? ______________________________________________________________________ WHERE CAN I DOWNLOAD ANTLR? ANTLR can be obtained from http://www.antlr.org either as * precompiled binary (RPM, MSI installer etc) * source distribution (tar + zipped) ______________________________________________________________________ REQUIREMENTS? To get most out of ANTLR you should build ANTLR on UNIX or UNIX like system. For example, you need a bourne shell and a couple of common UNIX tools like "grep", "mkdir", "cp", "rm", "sed", "cat" and "chmod". You need also a GNU make or compatible make. Such a ANTLR friendly eco-system is given on Windows by having CYGWIN or MSYS installed. An out-of-the-box Window system may will not work for installation. If you want to know on which systems ANTLR has been developed, which systems are known to work and known to fail then please move your cursor down near the end of this document. ______________________________________________________________________ BUILD AND INSTALL ANTLR? BRIEF VERSION! Here's a very brief summary of how to build and install ANTLR. 
$ cd $HOME $ gzip -d -c antlr-x.y.z.tar.gz | tar xvf - $ cd antlr-x.y.z $ ./configure --disable-examples $ make install Of course you can unpack ANTLR wherever you want on your machine - given enough space and permissions. For simplicuty, I'm just using $HOME and this may work for any user. ANTLR's source distribution contains lot's of examples. Making them can be quit longish. You may want to skip those examples if you are just interested in building the core libraries. Note that examples are not installed. Python users: Note that "make install" will not add antlr.py to your local Python installation. This needs to be done manually by you. To simplify this you may the script "pyantlr.sh". Checkout this script in directory "scripts" or, when installed, in directory "sbin". ______________________________________________________________________ BUILD AND INSTALL ANTLR? LONG VERSION! This section presents details on the build process. 1. Unpack ========= To build ANTLR you need to unpack ANTLR's source distribution. This will create a new directory named antlr-x.y.z in your current working directory. Typical unpacking steps are: $ cd $HOME ; $ gzip -d -c antlr-x.y.z.tar.gz | tar xvf - Here I'm unpacking ANTLR in my $HOME directory but you may unpack in any directory that suits you. If you have gnu tar, then you can use the simpler $ tar xvfz antlr-x.y.z.tar.gz to unpack the distro. For the rest of this document I will use ${srcdir} as a place holder to name the directory containing ANTLR sources. In the example given above, the following would be true: srcdir == $HOME/antlr-x.y.z 2. Build Directory ================== Create a build directory and change into that directory. A build dir- ectory can be any directory on your system. In the very brief version shown above I'm using the source directory -- ${srcdir} -- as build directory. However, the general way and also the better approach is to use a new, fresh and clean directory. 
If at any later stage some- thing goes wrong you can just remove the whole build directory and start from scratch again. You can also have ${srcdir} in read-only mode as no files are touched or changed with this approach. This is not the case if ${srcdir} and build directory are equal. $ mkdir -p $HOME/tmp/antlr $ cd $HOME/tmp/antlr 3. Configure ============ Building ANTLR is accomplished by the well known GNU autoconf/make approach. That is you run a shell script to create specialized files -- usually Makefiles -- for your particular environment. Within the build directory run configure like $ ${srcdir}/configure --prefix=${installdir} where ${srcdir} points to your package directory (see step 1). Option --prefix==${installdir} tells configure where you want to have ANTLR installed on your system. Using this option is not mandatory. If you are not using it then configure will use "/usr/local" as default installation directory. That is binaries will be installed in /usr/local/bin, libraries go into "/usr/local/lib" etc. Note that "configure" will search your $PATH for command line tools to compile Java, C++ and C# source code, execute Python etc. This behaviour of configure can be changed by using command line options and/or environment variables. To see a list of available configure options just run ${srcdir}/configure --help 4. Build ======== $ make This will (hopefully) build all needed ANTLR libraries within the "lib" directory. If something fails for your system, and you want to submit a bug report, you may wish to include your "config.status" file, your host type, operating system and compiler information, make output, and anything else you think will be helpful. If "make" complains about unknown rules etc. then it's usually because you are not running GNU make. Configure will search for a GNU make in $PATH and in some well known locations. 
If there's no GNU make on your system, then configure will report an error in step 3, otherwise the first suitable GNU make wins. However, if configure detects that "make" is not identical with the found one, configure will show a reminder message as last action. The reminder message will then tell you which make your are supposed to use. 5. Test ======= You may also wish to validate the correctness of the new ANTLR by running regression tests. It will makes you very confident if all test run through: $ make test However, please don't panic if a test fails. In most cases it is rather a problem of running the test itself than a problem with ANTLR. When sure about a bug let us know please. Be sure to include information on your system, the ANTLR version used and other relevant information. 6. Install ========== Depending on your ${installdir} directory you may need to have write permission to copy files. If you gained permissions required -- perhaps by becoming super user -- install ANTLR by $ make install Python users: Note that "make install" will NOT add antlr.py to your local Python installation. This needs to be done manually by you. To simplify this you may the script "pyantlr.sh". Checkout this script in directory "scripts" or, when installed, in directory "sbin". 7. Clean ======== Having installed ANTLR it is save to remove source and build directory. 8. Enjoy! ========= ______________________________________________________________________ ANTLR IS INSTALLED - WHAT'S NEXT? Please read "doc/getting-started.html" on what you are supposed to do. Here's a very brief summary for the impatient: ANTLR is a command line tool. To run ANTLR you need to have JAVA installed. The basic steps are: a. write a grammar file - mygrammar.g b. run ANTLR like $ CLASSPATH=/antlr.jar $ java antlr.Tool mygrammar.g c. write a driver program using source code generated by ANTLR, ie. Main.java, main.cpp, Main.cs or main.py d. 
link generated code, your driver code, ANTLR's core library and any additional library you are using together to get an executable f. run the executable on arbitrary input to be parsed For a set of standard examples have a look into directory "examples" and appropriate subdirectories. You may want to run make like $ make verbose=1 to see which compiler is used and which flags are passed etc. You may also follow a simple mini-tutorial at http://www.antlr.org/article/cutpaste/index.html if you are absolutly new to ANTLR. ______________________________________________________________________ BUILD AND INSTALL ON WINDOWS NT/95/98/2000/XP? There is no difference in building and installing Windows. However, you are need have either Cygwin or MSYS installed. We haven't tried MKS yet. If you run configure with Cygwin (or MSYS) then gcj will usually be chosen for compiling Java and gcc for compiling C++. In most cases however, C# will be automatically disabled as no compiler can be found. Configure is not looking up the registry to check for installed soft- ware. Instead, configure will just check the $PATH for known compiler names and also check some well know locations like "/usr/local/bin" etc. In order to make configure aware of a certain compiler or tool you need, make sure that your compiler or tool can be found by looking up $PATH. For example, Microsoft Visual C++ comes with a batch file named vcvars32.bat. Just run this batch file prior of running bash to have "cl" in your $PATH. Configure knows about this compiler names: bcc32 cl g++ for C++ jikes javac gcj for Java csc mcs cscc for C# python for Python The order in which the names appear is also the search order. That means that the whole $PATH gets searched for bcc32, then for cl and finally for g++. In other words, it is sufficient to have "cl" in $PATH to have it selected by configure, regardless whether there is g++ available or not. 
Similar, if you also have bcc32 in $PATH, then the Borland C++ Compiler will be choosen. If you have more that one compiler in your $PATH and the "wrong" compiler is selected - Do you have to give up on this? Not at all. In case you have more than one compiler/tool in your $PATH, you can tell configure which one you like to have. For example: --with-cxx=g++ This will favor g++ regardless whether there's a cl or bcc in PATH. You can archive the very same by CXX=g++ ${srcdir}/configure That is by using an environment variable. Try --help for the full list of --with-xxx options and environment variables. ______________________________________________________________________ DETAILS ON CONFIGURE? This section will present some further details on how you can tell configure to behave the way you want: 1. Choose Language ================== ANTLR is implemented in Java and has code generator plugins for Java, C++, C# and Python. The default behaviour of configure is to check whether a suitable compiler or tool for a particular language is available. If not, then configure will show a warning message and will automatically disable support for this language. In a very bizarre case it is therefore possible to end up with a configuration with no language support at all. Please study therefore configure's output whether you end up with the system you want. On the contrary, configure will enable, as mentioned, every target language with sufficient compiler/tool support. To speed up the build process you may also disable languages (and configure them later again). This can be done by command line options: --disable-cxx | --enable-cxx=no --> disable C++ --disable-java | --enable-java=no --> disable Java --disable-csharp | --enable-csharp=no --> disable C# --disable-python | --enable-python=no --> disable Python Be warned that when disabling Java you can't even build ANTLR itself. 2. 
Be Verbose ============= A typical 'make' run reports all kinds of actions exactly the way they get carried out. This makes the output of a make run hard to read and it's difficult to keep track "where" make is right now and what's going on. ANTLR's make run has been designed to be readable. By default make will report what's going on in a kind of logical way. For example, when compiling ANTLR itself you will see a message like *** compiling 209 Java file(s) This information is usually enough to keep track what's going on. If compilation fails, then the exact command line causing the problem will be shown and you can study the arguments whether additional flags are required etc. However, you can tell configure to be verbose as usual by --enable-verbose Having enabled verbosity, the command to compile ANTLR will be shown similar like this (using javac): CLASSPATH= /opt/jdk32/142_03/bin/javac \ -d . \ -sourcepath /home/geronimo/src/antlrmain \ -classpath /home/geronimo/obj/antlr-linux/lib/antlr.jar \ Version.java \ ANTLRParser.java \ ... [[skipped 206 files]] ... /home/geronimo/src/antlrmain/antlr/debug/misc/JTreeASTPanel.java You have used --enable-verbose and output is too much. Do I need to configure again? No. Just run make like $ make verbose=0 to turn off verbosity for this make run. You can also do this $ verbose=0 $ export verbose $ make to turn verbosity off without using arguments to make. Of course, you can also turn verbosity on (verbose=1) in the very same way, regardless of what you have configured earlier. 3. Debug Version ================ A typical open source configuration sets compiler options to contain debug information. ANTLR's approach is a bit different. We believe that you want to u s e ANTLR and not to d e b u g it. In other words, we believe you are voting for speed. Therefore configure will set compiler flags that go for speed and we omit all flags introducing some debug information. 
If you need to go for debug information, turn those flags on by using --enable-debug Similar to verbose discussed in the previous section you can override this configuration setting by using $ make debug=0 -- no debug information $ make debug=1 -- turn debugging on without the need to reconfigure. But be aware that --enable-debug is just changing flags given to your compiler. It will not change any names. For example, the name of ANTLR's core library is libantlr.a (using g++) regardless of whether debug is on or off. 4. Examples =========== You may leave out the examples just by --disable-examples Note that you can't undo this without reconfiguration. Nevertheless, we recommend to configure with examples, test them and to study them to get a full understanding on how ANTLR works and - last but not least - about all it's capabilities. 5. Bootstrap ============ ANTLR's parser engine (ie. the parser that accepts a grammar file) is written in ANTLR itself. From a logical point of view you would need ANTLR to build ANTLR. This chicken and egg problem is solved simply by having generated source files enclosed in the package. However, if you are doing some surgery on ANTLR's internals you need to have a existing ANTLR version around. You can tell configure about this external ANTLR version by --with-antlr-jar=${some-install-dir}/lib/antlr.jar or by --with-antlr-cmd=${some-tool-running-antlr} The former version will add given jar file to the $CLASSPATH when trying to compile grammar files ( *.g). Very similar you can also use --with-antlr-cmd to provide a program or shell script to compile grammar files. The name of the program does not matter but it needs to be exectuable and it should support all flags that can be given to ANTLR (check java antlr.Tools -h). NOTE: Bootstraping ANTLR with an external jar file or program does will only taken into account if there's no lib/antlr.jar available in the build directory. 
In other words, if you bootstraped once and you make changes later on ANTLR's internals, then the previously build jar file antlr.jar will be used - regardless of your configuration options. 6. Which Make? ============== You need a GNU make to run the build. This is especially true when building ANTLR on Windows. Nevertheless, some efforts has been done to lower the dependency on GNU make for portability reasons. But we are not done yet. If you have GNU make installed on your system but it's not in your PATH or you want to have a special version of make, you may tell this configure by either --with-make=${list-of-candidates} or by using environment variable $MAKE like MAKE=${list-of-candidates} ${srcdir}/configure Which variant you are using is a matter of your personal taste. But be aware that the command line argument is overriding the environment variable. Consider this example: MAKE=x-make ${srcdir}/configure --with-make="y-make z-make" Here configure will check your $PATH for y-make and z-make bug x-make is ignored. Note that the withespace seperated ${list-of-candidates} may also contain absolute path names. In that case $PATH is not consulted but the file is directly checked for existence. Here's an example: MAKE='make /usr/bin/make' ${srcdir}/configure Here your $PATH is consulted whether a "make" exsists. Then /usr/bin/make is checked for existence. The first make found which is then a GNU make is the one choose to be used to build ANTLR. For consistency reasons you may also use --with-makeflags or variable $MAKEFLAGS to pass specific flags to make. However, this information is not yet used. Right now the flags used are the flags provided when calling make. 7. Which Java? ============== ANTLR has been tested with SUN Java 1.4.x using either SUN's javac Java compiler, IBM's jikes or GNU's gcj. Other systems have not been tested and are not expected to work. 
The default search order is jikes javac gcj It is therefore sufficient to have "jikes" in your $PATH to get it choosen by configure - regardless whether it precedes a javac or not in $PATH. You may change this default search by providing a list of candidates by either --with-javac=${candidate-list} or by JAVAC=${candidate-list} The candidates should be seperated by whitespace and may be relative or absolute path names. Only in the former case a lookup in $PATH takes place. By default options passed to a Java compiler are those who are sufficient to compile ANTLR. You may pass other flags by either --with-javacflags=${flags} JAVACFLAGS=${flags} In most cases you want to leave the default flags intact but rather add your additional flags. To support this, ANTLR's build management interprets ${flags} like this: ${flags} STANDARD FLAGS RESULT --------------+-----------------+---------------------- + f1 f2 .. fn | F1 .. FN | F1 .. FN f1 f2 .. fn - f1 f2 .. fn | F1 .. FN | f1 f2 .. fn F1 .. FN = f1 f2 .. fn | F1 .. FN | f1 f2 .. fn f1 f2 .... fn | F1 .. F | f1 f2 .. fn --------------+-----------------+---------------------- In other words, you can either tell configure to append (+), to prepend (-) or to substitute(=) your flags. Note that this is also possible when running make. For example, $ make JAVAC=jikes JAVACFLAGS="+ -verbose" would use jikes to compile regardless of what has been configured before. Additionally the flag "-verbose" is used. So far we talked about compiling Java. Let's have now a look on how to execute class files. By default configure searches for java gij to run Java. As discussed before it is sufficient to have a Java in $PATH to get java selected. It has also to be noted that gij is still in experimental stage and not fully supported now. 
As before you may also provide additional flags to Java by using either --with-javaflags=${flags} or by using environment variable JAVAFLAGS=${flags} Again, ${flags} are getting interpreted according to table shown above. 8. Which C++ Compiler? ====================== The algorithm on how the C++ compiler is choosen and how to pass additional flags are very much the same as discussed before for the Java compiler. The default candidate list for choosing a C++ compiler is bcc32 cl g++ on Windows ; and aCC CC xlC xlC_r g++ cxx c++ on UNIX. To change this list use --with-cxx=${candidates} and use --with-cxxflags=${flags} to pass additional flags to the standard compiler flags. You can also use $CXX and $CXXFLAGS if you like. This will then also work when running make. 9. Which C# Compiler? ===================== The only candidate for C# as time of writing is csc for compiling C# source code. To change this option --with-csharpc=${candidates} and use --with-csharpcflags=${flags} for providing a list of additional options. Note that ${flags} are getting interpreted as discussed in the previous sections. Instead of command line arguments to configure you may also use $CSHARPC and $CSHARPCFLAGS. This variables are also working when running make. 10. Which Python? ================= Configure is searching for "python" in your $PATH and in some well known locations. If no "python" can be found, language support for Python is disabled. You may use --with-python="${candidates}" to provide a list of Python candidates and further you may use --with-pythonflags="${flags}" to provide a list of (additional) flags to be passed to Python on execution. Note that ${flags} is getting interpreted as discussed in the previous sections. You may also use $PYTHON and $PYTHONFLAGS instead. This variables are then also working when running make. Note that those variable may override what has been configured before without any warning. 
______________________________________________________________________ FURTHER DETAILS ON ANTLR'S BUILD SETUP? In allmost all cases it is assumed to be sufficient to change compiler settings by using either --with-${lang}flags or by using ${LANG}FLAGS. It's most likely not sufficient if you want to support a new compiler, tool or a new operating system. The general idea is to have a configured shell script for each tool or compiler being used rather than doing the scripting within the Makefile itself. The reason for this is that scripting in Makefiles is rather hard to read and a pain to debug (for example, there's no line information). The configured shell scripts are to be found in subdirectory "scripts" within the build directory. Their originals are located in ${srcdir}/scripts This scripts are known at time of writing: antlr.sh.in - runs "java antlr.Tools" cxx.sh.in - run C++ compiler cpp.sh.in - run C/C++ preprocessor (for dependencies). jars.sh.in - how to make a static library (aka jar file)(Java) javac.sh.in - run Java compiler (also when using jikes etc) lib.sh.in - how to make a static library (C++) link.sh.in - how to make a executable (C++) python.sh.in - how to run python A Makefile contains nothing more than variables and rules and in all- most all cases (but simple exceptions) the command behind a rule is just executing a script. Note that these scripts are not intended to be used to compile C++, Java etc in general. Instead this scripts are specialized for ANTLR. For example, the script "lib.sh" has a very simple interface. All you can provide is a list of object files. There is no option to tell about the library name to be build. Instead the library name is set by configure and available by using @ANTLR_LIB@ in all configured files. Unfortunatly, all scripts are rather complex and limiting ourselfs of not using shell functions (portability) is not going make changes easier. 
In general you should only edit the upper part of any script till there's a line like "**NO CHANGE NECESSARY BELOW THIS LINE - EXPERTS ONLY**". Then let's have now a closer look at cxx.sh.in. The other scripts are very similar. I'm going to discuss here only the relevant parts - this is open source after all any you may try to understand it by reading the source code :-) ARGV="$*" In general all arguments given to the script file are collected with- in variable $ARGV. In some scripts, the very first argument has a special meaning - usually it tells about the target to be created. In such a case TARGET would hold the first argument given and ARGV would hold all others. if test -z "${CXX}" ; then CXX="@CXX@" cxx="@cxx@" else cxx="`basename $CXX`" cxx="`echo $cxx|sed 's,\..*$,,'`" fi This script snippet checks about the compiler to be used for compiling C++ source. Note that we have two variables, CXX and cxx. The former holds usually the absolute path of the compiler as configured. The later, cxx, contains the logical compiler name. The logical compiler name is "gcc" for GNU, "cl" for Microsoft C++ etc. etc. The logical compiler name is usually configured and available as @cxx@. However, a user overrides the configuration by using environment variable CXX, the logical compiler name is computed by removing any extension. In a further section (similar in other scripts) we set specific flags depending on the l o g i c a l compiler name. As you can see it is rather important to the logical name proper. case "${cxx}" in gcc) cxxflags="-felide-constructors -pipe" case "${DEBUG}" in 0) cxxflags="-O2 -DNDEBUG ${cxxflags}" ;; 1) cxxflags="-g ${cxxflags} -W -Wall" ;; esac ;; cl) cxxflags="/nologo -GX -GR" [[skipped]] esac In the snippet shown you can see the handling of "gcc" and "cl". Note that the compiler flags are saved in $cxxflags and not in $CXXFLAGS. 
Reason for this is that handling of environment variable $CXXFLAGS is rather longish due to ANTLR's special interpretation mechanism (as discussed in an earlier section). In some cases flags may depend on platform in use. In that case you may do something like: case ${cxx} in gcc) case @build_os@ in cygwin) ## cygwin specific flags .. ;; sparc) ## sparc specific .. ;; *) ## all others ;; esac .. esac Of course you can utilize here the full scripting power to set flags required to get compilation job done. ______________________________________________________________________ FAQ? 1. How to setup jikes boot classpath? ===================================== Jikes (http://www-124.ibm.com/developerworks/oss/jikes) is just a Java compiler that needs to know about Java's core classes, ie. rt.jar, for a successful build of ANTLR. By default configure tries to locate rt.jar by searching $PATH for a java executable and relative to where found, for ../jre/lib/rt.jar. If this search fails or if you want to pass a different Java core or further core libraries, you may use either option --with-bootclasspath="${args}" or environment variable BOOTCLASSPATH="${args}" The list of arguments, ${args}, is supposed to be list of whitespace seperated files or directories. Configure will validate that each argument exists and will fail otherwise. Besides this there are no further validations done - you are supposed to know what you are doing. Note also that configure will not perform any search in case a bootclasspath is given. The arguments given are concatenated using platform specific argument seperator (ie. ";" or ":") and passed to option --bootclasspath. 2. Can I just say "make" after having unpacked ANTLR? ===================================================== No - as explained previously you need to run "configure" before you can issue a "make". 3. How do I cleanup afterwards? =============================== You do a "make clean" to remove all object files. 
If you do a "make distclean", then all files generated by ANTLR are also removed. In this case you need a external antlr.jar somewhere to rebuild. If you are using a external build directory you may just remove the whole directory without any harm. 4. Is it safe to edit a Makefile? ================================= In general do not edit files named Makefile. Instead edit files named Makefile.in. Configure or config.status will override changes you made in a Makefile on any run. 5. I changed Makefile.in - what's next? ======================================= Just run make again. Each Makefile contains a rule that automatically remakes itself if corresponding Makefile.in has changed. This automatic rule works fine in general. However, if you change Makefile/Makefile.in in such a way that make rejects it's execution then you need to remake Makefile yourself. This can be done by $ cd ${builddir} $ ./config.status ${your-makefile} Here's an example. Assume that "antlr/Makefile" is corrupted. The do this: ./config.status antlr/Makefile You can also issue just a $ ./config.status In this case all configured files are regenerated. 6. My configure has changed - what's next? ============================================= You need to reconfigure each build directory to get the changes. So far no automatic rule has been implemented. The easiest way of being up-to-date is: $ cd ${builddir} && ./config.status --recheck && make 7. Where do I find antlr.jar? ============================= Have a look at "lib". Should be there - but it is subject of having enabled Java or not. Note that Java gets automatically disabled if there's no sufficient Java compiler found (warning message would be dumped in that case). 8. How can I make "make" nosiy? =============================== By default make just tells you what's going on in a very brief way. To change this just do this: $ make verbose=1 You can also tell configure to turn verbosity on by default by using option --enable-verbose. 
Run "configure --help" for a list of options available. 9. Am I able to run "make" in a subdirectory? ============================================= Sure. 10. Is it safe to remove configured subdirectory? ================================================= In general "yes" but you want to do this only within the build tree. For example, let's assume that you are tired of getting standard examples tested. Then just move on and remove subdirectory "examples". ______________________________________________________________________ LIST OF CONFIGURED VARIABLES? ANTLRFLAGS ANTLR_ACTION_FILES ANTLR_ANTLR_FILES ANTLR_COMPILE_CMD ANTLR_CYGWIN ANTLR_JAR ANTLR_LIB ANTLR_MINGW ANTLR_NET ANTLR_PY ANTLR_TOKDEF_FILES ANTLR_WIN32 ANTLR_WITH_ANTLR_CMD ANTLR_WITH_ANTLR_JAR AR ARFLAGS AS BOOTCLASSPATH CSHARPC CSHARPCFLAGS CSHARP_COMPILE_CMD CXX_COMPILE_CMD CXX_LIB_CMD CXX_LINK_CMD CYGPATH DEBUG EXEEXT JAR JARFLAGS JAR_CMD JAVA JAVAC JAVACFLAGS JAVAFLAGS JAVA_CMD JAVA_COMPILE_CMD LIBEXT MAKE OBJEXT]) PATCHLEVEL PYTHON PYTHONFLAGS SUBVERSION VERBOSE VERSION WITH_EXAMPLES abs_this_builddir cxx jar java javac ______________________________________________________________________ PLATFORMS? ANTLR has been developed and tested on platforms listed below. A platform is considered supported and tested if basically all standard examples are working. Devel for ANTLR 2.7.5 took place on: i686-linux2.6.3-7mk - Java 1.4.2, gcj 3.3.4, jikes 1.16, 1.17, 1.18, 1.19, 1.20, 1.21, 1.22 - gcc 3.3.2, gcc 3.3.4 - Python 2.3, 2.4 - DotGNU 0.6.0 i686-cygwin_nt-5.1 - Java 1.4.2, gcj 3.3.3, jikes 1.22 - gcc 3.3.3, bcc32 5.6 (Borland C++), cl 13.10.3077 (Microsoft C++) - csc 7.10.6001.4 (Visual C# .NET Compiler) - Python 2.2, 2.3, 2.4 - Mono 1.0.5 ANTLR has been tested on: MinGW-3.1.0.1 (needs manual install of cygpath!) - MSys 1.0.10 - Java 1.5.0-b64 - gcc 3.4.1 PowerPC or RS/6000: * powerpc-apple-darwin6.8 (MacOS 1o, "Jaguar") - Java 1.4.1, jikes 1.15 - gcc 3.1 - Python 2.2 - Mono (?) also reported to work. 
SPARC: * sparc-sun-solaris2.8 - Java 1.4.1 - SUN CC 5.6 Known n o t to work: - jikes 1.13 and older due to missing classpath and bootclasspath options. Jikes 1.14 and 1.15 are not tested due to compilation problems compiling jikes itself (on Mandrake 1o). - Python < 2.2 will not work. - gcc 2.* will not work. - You need to have GNU make (for building). ====================================================================== INSTALL.txt - last update January 11th, 2005 nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/LICENSE.txt000066400000000000000000000022531161462365500216230ustar00rootroot00000000000000 SOFTWARE RIGHTS ANTLR 1989-2004 Developed by Terence Parr Partially supported by University of San Francisco & jGuru.com We reserve no legal rights to the ANTLR--it is fully in the public domain. An individual or company may do whatever they wish with source code distributed with ANTLR or the code generated by ANTLR, including the incorporation of ANTLR, or its output, into commerical software. We encourage users to develop software with ANTLR. However, we do ask that credit is given to us for developing ANTLR. By "credit", we mean that if you use ANTLR or incorporate any source code into one of your programs (commercial product, research project, or otherwise) that you acknowledge this fact somewhere in the documentation, research report, etc... If you like ANTLR and have developed a nice tool with the output, please mention that you developed it using ANTLR. In addition, we ask that the headers remain intact in our source code. As long as these guidelines are kept, we expect to continue enhancing this system and expect to make other tools available as they are completed. The primary ANTLR guy: Terence Parr parrt@cs.usfca.edu parrt@antlr.org nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/Makefile.in000066400000000000000000000116531161462365500220510ustar00rootroot00000000000000## do not change this value subdir=. 
## get standard variables from autoconf - autoconf will replace ## this variable with content of "scripts/config.vars". @stdvars@ ## By default, make will jump into any sub directory containing ## a file named "Makefile". This is done in the order implied by ## /bin/ls. You can override this by using variable SUBDIRS. For ## example, if not set, then make behaves as if ## SUBDIRS = antlr doc examples lib ## has been set. SUBDIRS = antlr lib doc ## When using stdmake before any other rule, then the default ## rule is "all" and behaviour of make is first to make all ## subdirectories and then all "local" targets with name all, ## clean, install, test etc. ## Sometimes it's usefull to make the local target first and then ## subdirs. To enforce this, just listen target in question be- ## fore "stdmake". By doing so, your target will become the ## default. This is usually not what you want. Therefore introduce ## a pseude rule (like this) to tell make about the default target. this : all ## get standard make rules from autoconf @stdmake@ test clean distclean install :: @ if test -f examples/Makefile ; then \ $(MAKE) -C examples $@ ; \ fi # Rule to remove all objects, cores, ANTLR generated, # configure generated, etc. This is not a recursive rule # because distclean removes files unconditionally # included by subdirectory Makefiles. 
# maintainer-clean: distclean -$(RM) -f configure # # Rule to make a tarball exclusive all kinds of fluff # TIMESTAMP = $(shell date +%Y%m%d) TAR_DIR = $(versioneddir) TAR_FILE = $(versioneddir).tar _tar: -rm -rf $(TAR_DIR) ln -s $(srcdir) $(TAR_DIR) $(TAR) cfh $(TAR_FILE) \ --exclude CVS \ --exclude *.pyc \ --exclude *.o \ --exclude *.d \ --exclude *.lo \ --exclude *.a \ --exclude *.la \ --exclude *.lai \ --exclude *.so \ --exclude *.class \ --exclude .deps \ --exclude .depend \ --exclude config.cache \ --exclude config.status \ --exclude Config.make \ --exclude antlr-config \ --exclude run-antlr \ --exclude *~ \ --exclude core \ --exclude dmalloc.log \ --exclude .gdb_history \ --exclude ".nfs*" \ --exclude "$(TAR_DIR)/gen_doc/html" \ --exclude "$(TAR_DIR)/gen_doc/html/*" \ --exclude Makefile \ $(TAR_DIR) @CHMOD@ 660 $(TAR_FILE) rm -rf $(TAR_DIR) tar backup : _tar gzip -f --best $(TAR_FILE) ## When building a release, it's crucial that time stamps are up-to-date and that ## files have proper permission bit set. Since $(srcdir) might be under the ## control of Perforce (or an other versioning system), I'm going to unpack the ## tar file again in a local directory and update time stamps as well as ## permission. 
release : @ANTLR_JAR@ _tar @TAR@ xpf $(TAR_FILE) cp @ANTLR_JAR@ $(TAR_DIR) find $(TAR_DIR) -type f | xargs @CHMOD@ 644 find $(TAR_DIR) -type d | xargs @CHMOD@ 755 @CHMOD@ 777 $(TAR_DIR)/configure find $(TAR_DIR) | xargs @TOUCH@ @TAR@ cf $(TAR_FILE) $(TAR_DIR) gzip -f --best $(TAR_FILE) gzip -t -v $(TAR_FILE).gz rm -rf $(TAR_DIR) ## This one for RK: new_version antlr/Version.java: @echo "Rebuilding Version.java" @echo "package antlr;" > antlr/Version.java @echo "public class Version {" >> antlr/Version.java @echo " public static final String version = \"$(VERSION)\";" >> antlr/Version.java @echo " public static final String subversion = \"$(SUBVERSION)\";" >> antlr/Version.java @echo " public static final String patchlevel = \"$(PATCHLEVEL)\";" >> antlr/Version.java @echo " public static final String datestamp = \"$(TIMESTAMP)\";" >> antlr/Version.java @echo " public static final String project_version = \"$(VERSION).$(SUBVERSION).$(PATCHLEVEL) ($(TIMESTAMP))\";" >> antlr/Version.java @echo "}" >> antlr/Version.java ## Installation is delegated to sub directories - as configured. ## Here we just create a bin directory that should contain ## scripts to mess up with ANTLR. Other things to do? docdir = $(datadir)/doc/$(versioneddir) extradir = $(datadir)/$(versioneddir) install :: $(MKDIR) -p "$(bindir)" $(MKDIR) -p "$(extradir)" $(MKDIR) -p "$(docdir)" $(INSTALL) -m 755 scripts/run-antlr "$(bindir)/antlr" $(INSTALL) -m 755 scripts/antlr-config "$(bindir)/antlr-config" $(INSTALL) -m 444 @abs_top_srcdir@/extras/antlr-mode.el "$(extradir)" $(INSTALL) -m 444 @abs_top_srcdir@/extras/antlr-jedit.xml "$(extradir)" $(INSTALL) -m 444 @abs_top_srcdir@/LICENSE.txt "$(docdir)" $(INSTALL) -m 444 @abs_top_srcdir@/README.txt "$(docdir)" $(INSTALL) -m 444 @abs_top_srcdir@/INSTALL.txt "$(docdir)" install :: echo "installation done" ### phony targets - make this targets even if file with same name exists. 
.PHONY: bootstrap backup maintainer-clean ##xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx @stddeps@ ##xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/README.txt000066400000000000000000000344341161462365500215040ustar00rootroot00000000000000 A N T L R ====================================================================== *SOFTWARE RIGHTS* ANTLR 1989-2004 Developed by Terence Parr Partially supported by University of San Francisco & jGuru.com We reserve no legal rights to the ANTLR--it is fully in the public domain. An individual or company may do whatever they wish with source code distributed with ANTLR or the code generated by ANTLR, including the incorporation of ANTLR, or its output, into commerical software. We encourage users to develop software with ANTLR. However, we do ask that credit is given to us for developing ANTLR. By "credit", we mean that if you use ANTLR or incorporate any source code into one of your programs (commercial product, research project, or otherwise) that you acknowledge this fact somewhere in the documentation, research report, etc... If you like ANTLR and have developed a nice tool with the output, please mention that you developed it using ANTLR. In addition, we ask that the headers remain intact in our source code. As long as these guidelines are kept, we expect to continue enhancing this system and expect to make other tools available as they are completed. The primary ANTLR guy: Terence Parr parrt@cs.usfca.edu parrt@antlr.org ______________________________________________________________________ WELCOME TO ANTLR! If you have problems or think you have found a bug in ANTLR, see the section BUGS in the ANTLR manual. Please consult the INSTALL.txt file for information on tested configurations. If you have a comment about an already tested configuration, or have tried ANTKR on a new configuration, please let us know as described in INSTALL.txt. 
Free software only works if we all help out. Finally, we cannot guarantee that this release will not completely wipe out all of your work from your system. We do some simple testing before each release, but you are completely on your own. We recommend testing this release on a source repository that is not critical to your work. THIS SOFTWARE IS SUPPLIED COMPLETELY "AS IS". NO WARRANTY.... Thanks for your support! -The ANTLR Team- ______________________________________________________________________ WHAT IS ANTLR? ANTLR, (AN)other (T)ool for (L)anguage (R)ecognition - formerly known as PCCTS - is a language tool that provides a framework for constructing recognizers, compilers, and translators from grammatical descriptions containing actions in the following languages: Java, C++, C# or Python (You can use PCCTS 1.xx to generate C-based parsers). Computer language translation has become a common task. While compilers and tools for traditional computer languages (such as C or Java) are still being built, their number is dwarfed by the thousands of mini-languages for which recognizers and translators are being developed. Programmers construct translators for database formats, graphical data files (e.g., PostScript, AutoCAD), text processing files (e.g., HTML, SGML). ANTLR is designed to handle all of your translation tasks. Prof. Terence Parr has been working on ANTLR since 1989 and, together with his colleagues, has made a number of fundamental contributions to parsing theory and language tool construction, leading to the resurgence of LL(k)-based recognition tools. Have a look at the history section at the end of this document on how ANTLR has evolved over time. For most up-to-date informaton read http://www.antlr.org/history.html. ______________________________________________________________________ UPGRADING? See http://www.antlr.org/blog/CHANGES-2.7.5.txt for a description of features new in this version. 
There are no incompatibilties known to a previous 2.7.x installation. If you found a problem please let us know. ______________________________________________________________________ INSTALLATION? Please read the INSTALL.txt file for installation instructions. The brief summary is: $ ./configure $ make $ make test # optional $ su root # optional $ make install ______________________________________________________________________ ANTLR IS INSTALLED - WHAT'S NEXT? Please read "doc/getting-started.html" on what you are supposed to do. Here's a very brief summary for the impatient: ANTLR is a command line tool. To run ANTLR you need to have JAVA installed. The basic steps are: a. write a grammar file - mygrammar.g b. run ANTLR like $ CLASSPATH=antlr.jar $ java antlr.Tool mygrammar.g c. write a driver program using source code generated by ANTLR, ie. Main.java, main.cpp, Main.cs or main.py d. link generated code, your driver code, ANTLR's core library and any additional library you are using together to get an executable f. run the executable on arbitrary input to be parsed For a set of standard examples have a look into directory "examples" and appropriate subdirectories. ______________________________________________________________________ WANT TO KNOW MORE? The documentation is in the "doc" subdirectory and "index.html" is the main entry point. Further information available at http://www.antlr.org ______________________________________________________________________ WHO CONTRIBUTED TO THIS MESS? Project Lead and Supreme Dictator Terence Parr, University of San Franciso Help with initial coding John Lilly, Empathy Software C++ code generator by Peter Wells and Ric Klaren C# code generation by Micheal Jordan, Kunle Odutola and Anthony Oguntimehin. 
Python's universe has been extended by Wolfgang Hfelinger and Marq Kole Substantial intellectual effort donated by Loring Craymer, Monty Zukowski, Jim Coker, Scott Stanchfield, John Mitchell, Chapman Flack (UNICODE, streams) Source changes for Eclipse and NetBeans by Marco van Meegen and Brian Smith Infrastructure support from Perforce - The world's best source code control system ______________________________________________________________________ WANNA KNOW ABOUT ANTLR's HISTORY? The PCCTS project began as a parser-generator project for a graduate course at Purdue University in the Fall of 1988 taught by Hank Dietz "translator-writing systems". Under the guidance of Professor Dietz, the parser generator, ANTLR (originally called YUCC), continued after the termination of the course and eventually became the subject of Terence Parrs Masters thesis. Originally, lexical analysis was performed via a simple scanner generator which was soon replaced by Will Cohens DLG in the Fall of 1989 (DFA-based lexical-analyzer generator, also an offshoot of the graduate translation course). The alpha version of ANTLR was totally rewritten resulting in 1.00B. Version 1.00B was released via an internet newsgroup (comp.compilers) posting in February of 1990 and quickly gathered a large following. 1.00B generated only LL(1) parsers, but allowed the merged description of lexical and syntactic analysis. It had rudimentary attribute handling similar to that of YACC and did not incorporate rule parameters or return values; downward inheritance was very awkward. 1.00B-generated parsers terminated upon the first syntax error. Lexical classes (modes) were not allowed and DLG did not have an interactive mode. Upon starting his Ph.D. at Purdue in the Fall of 1990, Terence Parr began the second total rewrite of ANTLR. The method by which grammars may be practically analyzed to generate LL(k) lookahead information was discovered in August of 1990 just before Terences return to Purdue. 
Version 1.00 incorporated this algorithm and included the AST mechanism, lexical classes, error classes, and automatic error recovery; code quality and portability were higher. In February of 1992 1.00 was released via an article in SIGPLAN Notices. Peter Dahl, then Ph.D. candidate, and Professor Matt OKeefe (both at the University of Minnesota) tested this version extensively. Dana Hoggatt (Micro Data Base Systems, Inc.) tested 1.00 heavily. Version 1.06 was released in December 1992 and represented a large feature enhancement over 1.00. For example, rudimentary semantic predicates were introduced, error messages were significantly improved for k>1 lookahead and ANTLR parsers could indicate that lookahead fetches were to occur only when necessary for the parse (normally, the lookahead "pipe" was constantly full). Russell Quong joined the project in the Spring of 1992 to aid in the semantic predicate design. Beginning and advanced tutorials were created and released as well. A makefile generator was included that sets up dependencies and such correctly for ANTLR and DLG. Very few 1.00 incompatibilities were introduced (1.00 was quite different from 1.00B in some areas). Version 1.10 was released on August 31, 1993 after Terences release from Purdue and incorporated bug fixes, a few feature enhancements and a major new capability -- an arbitrary lookahead operator (syntactic predicate), "(a)?b". This feature was codesigned with Professor Russell Quong also at Purdue. To support infinite lookahead, a preprocessor flag, ZZINF_LOOK, was created that forced the ANTLR() macro to tokenize all input prior to parsing. Hence, at any moment, an action or predicate could see the entire input sentence. The predicate mechanism of 1.06 was extended to allow multiple predicates to be hoisted; the syntactic context of a predicate could also be moved along with the predicate. In February of 1994, SORCERER was released. 
This tool allowed the user to parse child-sibling trees by specifying a grammar rather than building a recursive-descent tree walker by hand. Aaron Sawdey at The University of Minnesota became a second author of SORCERER after the initial release. On April 1, 1994, PCCTS 1.20 was released. This was the first version to actively support C++ output. It also included important fixes regarding semantic predicates and (..)+ subrules. This version also introduced token classes, the "not" operator, and token ranges. On June 19, 1994, SORCERER 1.00B9 was released. Gary Funck of Intrepid Technology joined the SORCERER team and provided very valuable suggestions regarding the "transform" mode of SORCERER. On August 8, 1994, PCCTS 1.21 was released. It mainly cleaned up the C++ output and included a number of bug fixes. From the 1.21 release forward, the maintenance and support of all PCCTS tools was picked up by Parr Research Corporation. A sophisticated error handling mechanism called "parser exception handling" was released for version 1.30. 1.31 fixed a few bugs. Release 1.33 is the version corresponding to the initial book release. ANTLR 2.0.0 came out around May 1997 and was partially funded so Terence hired John Lilley, a maniac coder and serious ANTLR hacker, to build much of the initial version. Terence did the grammar analyzer, naturally. John Mitchell, Jim Coker, Scott Stanchfield, and Monty Zukowski donate lots of brain power to ANTLR 2.xx in general. ANTLR 2.1.0, July 1997, mainly improved parsing performance, decreased parser memory requirements, and added a lot of cool lexer features including a case-insensitivity option. ANTLR 2.2.0, December 1997, saw the introduction of the new http://www.antlr.org website. This release also added grammar inheritance, enhanced AST support, and enhanced lexical translation support (each lexical rule now was considered to return a Token object even when referenced by another lexical rule). 
ANTLR 2.3.0, June 1998, was the first version to have Peter Wells C++ code generator. ANTLR 2.4.0, September 1998, introduced the ParseView parser debugger by Scott Stanchfield. This version also had a semi-functional -html option to generate HTML from your grammar for reading purposes. Scott and Terence updated the file I/O to be JDK 1.1. ANTLR 2.5.0, November 1998, introduced the filter option for the lexer that lets ANTLR behave like SED or AWK. ANTLR 2.6.0, March 1999, introduced token streams. Chapman Flack, Purdue Graduate student, pounded me at the right moment about streams, nudging me in the right direction. MageLang Institute currently provides support and continues development of ANTLR. MageLang becomes jGuru.com as we quit doing Java training and start building the jGuru Java developer's website. 2.7.0 released January 19, 2000 had the following enhancements: * Nongreedy subrules * Heterogeneous trees * Element options. To support heterogeneous trees, elements such as token references may now include options. * Exception hierarchy redesign * XML serialization * Improved C++ code generator * New Sather code generator 2.7.1 released October 1, 2000 had the following enhancements * ANTLR now allows UNICODE characters because Terence made case- statement expressions more efficient ;) See the unicode example in the distribution and the brief blurb in the documentation. * Massively improved C++ code generator (Thanks to Ric Klaren). * Added automatic column setting support. * Ter added throws to tree and regular parsers . 2.7.2 release January 19, 2003 was mainly a bug fix release, * but also included a C# code generator by Micheal Jordan, Kunle Odutola and Anthony Oguntimehin. :) * I (who, Ter?) added an antlr.build.Tool 'cause I hate ANT. This release does UNICODE properly now. Added limited lexical lookahead hoisting. Sather code generator disappears. Source changes for Eclipse and NetBeans by Marco van Meegen and Brian Smith. 
2.7.3 released March 22, 2004 was mainly a bug fix release, * but included the parse-tree/derivation code to aid in debugging * plus the cool TokenStreamRewriteEngine that makes rewriting or tweaking input files particularly easy. 2.7.4 released May 9, 2004 was mainly a bug fix release. 2.7.5 release Xmas 2004 had the following enhancements: * A Python code generator has been implemented and contributed by Wolfang Haefelinger and Marq Kole. * A new make/autoconf framework as been contributed by Wolfgang Haefelinger * A MSI based installer has been contributed by Wolfgang Haefelinger. ====================================================================== README.txt - last update December 18th, 2004 nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/aclocal.m4000066400000000000000000000477101161462365500216470ustar00rootroot00000000000000dnl --*- sh -*-- ##xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx## ## This file is part of ANTLR. See LICENSE.txt for licence ## ## details. Written by W. Haefelinger - initial version by ## ## R. Laren. ## ## ...............Copyright (C) Wolfgang Haefelinger, 2004 ## ## ## ##xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx## dnl dnl =============================================================== dnl A couple of Macros have been copied from the GNU Autoconf Macro dnl Archive: dnl http://www.gnu.org/software/ac-archive dnl =============================================================== dnl AC_DEFUN( [AC_CHECK_CLASSPATH], [ test "x$CLASSPATH" = x && AC_MSG_ERROR( [CLASSPATH not set. Please set it to include the directory containing configure.]) if test "x$CLASSPATH" = x; then : else echo "CLASSPATH set to:" echo "$CLASSPATH" echo "IMPORTANT: make sure the current directory containing configure" echo "is in the CLASSPATH" fi ] ) #dnl /** #dnl * Test.java: used to test dynamicaly if a class exists. 
#dnl */ #dnl public class Test #dnl { #dnl #dnl public static void #dnl main( String[] argv ) #dnl { #dnl Class lib; #dnl if (argv.length < 1) #dnl { #dnl System.err.println ("Missing argument"); #dnl System.exit (77); #dnl } #dnl try #dnl { #dnl lib = Class.forName (argv[0]); #dnl } #dnl catch (ClassNotFoundException e) #dnl { #dnl System.exit (1); #dnl } #dnl lib = null; #dnl System.exit (0); #dnl } #dnl #dnl } AC_DEFUN( [AC_CHECK_CLASS],[ AC_REQUIRE([AC_PROG_JAVA]) ac_var_name=`echo $1 | sed 's/\./_/g'` #dnl Normaly I'd use a AC_CACHE_CHECK here but since the variable name is #dnl dynamic I need an extra level of extraction AC_MSG_CHECKING([for $1 class]) AC_CACHE_VAL(ac_cv_class_$ac_var_name,[ if test x$ac_cv_prog_uudecode_base64 = xyes; then cat << \EOF > Test.uue begin-base64 644 Test.class yv66vgADAC0AKQcAAgEABFRlc3QHAAQBABBqYXZhL2xhbmcvT2JqZWN0AQAE bWFpbgEAFihbTGphdmEvbGFuZy9TdHJpbmc7KVYBAARDb2RlAQAPTGluZU51 bWJlclRhYmxlDAAKAAsBAANlcnIBABVMamF2YS9pby9QcmludFN0cmVhbTsJ AA0ACQcADgEAEGphdmEvbGFuZy9TeXN0ZW0IABABABBNaXNzaW5nIGFyZ3Vt ZW50DAASABMBAAdwcmludGxuAQAVKExqYXZhL2xhbmcvU3RyaW5nOylWCgAV ABEHABYBABNqYXZhL2lvL1ByaW50U3RyZWFtDAAYABkBAARleGl0AQAEKEkp VgoADQAXDAAcAB0BAAdmb3JOYW1lAQAlKExqYXZhL2xhbmcvU3RyaW5nOylM amF2YS9sYW5nL0NsYXNzOwoAHwAbBwAgAQAPamF2YS9sYW5nL0NsYXNzBwAi AQAgamF2YS9sYW5nL0NsYXNzTm90Rm91bmRFeGNlcHRpb24BAAY8aW5pdD4B AAMoKVYMACMAJAoAAwAlAQAKU291cmNlRmlsZQEACVRlc3QuamF2YQAhAAEA AwAAAAAAAgAJAAUABgABAAcAAABtAAMAAwAAACkqvgSiABCyAAwSD7YAFBBN uAAaKgMyuAAeTKcACE0EuAAaAUwDuAAasQABABMAGgAdACEAAQAIAAAAKgAK AAAACgAAAAsABgANAA4ADgATABAAEwASAB4AFgAiABgAJAAZACgAGgABACMA JAABAAcAAAAhAAEAAQAAAAUqtwAmsQAAAAEACAAAAAoAAgAAAAQABAAEAAEA JwAAAAIAKA== ==== EOF if uudecode$EXEEXT Test.uue; then : else echo "configure: __oline__: uudecode had trouble decoding base 64 file 'Test.uue'" >&AC_FD_CC echo "configure: failed file was:" >&AC_FD_CC cat Test.uue >&AC_FD_CC ac_cv_prog_uudecode_base64=no fi rm -f Test.uue if AC_TRY_COMMAND($JAVA $JAVAFLAGS Test $1) >/dev/null 2>&1; 
then eval "ac_cv_class_$ac_var_name=yes" else eval "ac_cv_class_$ac_var_name=no" fi rm -f Test.class else AC_TRY_COMPILE_JAVA([$1], , [eval "ac_cv_class_$ac_var_name=yes"], [eval "ac_cv_class_$ac_var_name=no"]) fi eval "ac_var_val=$`eval echo ac_cv_class_$ac_var_name`" eval "HAVE_$ac_var_name=$`echo ac_cv_class_$ac_var_val`" HAVE_LAST_CLASS=$ac_var_val if test x$ac_var_val = xyes; then ifelse([$2], , :, [$2]) else ifelse([$3], , :, [$3]) fi ] ) #dnl for some reason the above statment didn't fall though here? #dnl do scripts have variable scoping? eval "ac_var_val=$`eval echo ac_cv_class_$ac_var_name`" AC_MSG_RESULT($ac_var_val) ] ) AC_DEFUN([AC_CHECK_JAVA_HOME],[ AC_REQUIRE([AC_EXEEXT])dnl TRY_JAVA_HOME=`ls -dr /usr/java/* 2> /dev/null | head -n 1` if test x$TRY_JAVA_HOME != x; then PATH=$PATH:$TRY_JAVA_HOME/bin fi AC_PATH_PROG(JAVA_PATH_NAME, java$EXEEXT) if test x$JAVA_PATH_NAME != x; then JAVA_HOME=`echo $JAVA_PATH_NAME | sed "s/\(.*\)[[/]]bin[[/]]java$EXEEXT$/\1/"` fi;dnl ] ) AC_DEFUN([AC_PROG_JAR], [ AC_REQUIRE([AC_EXEEXT])dnl if test "x$JAVAPREFIX" = x; then test "x$JAR" = x && AC_CHECK_PROGS(JAR, jar$EXEEXT) else test "x$JAR" = x && AC_CHECK_PROGS(JAR, jar, $JAVAPREFIX) fi test "x$JAR" = x && AC_MSG_ERROR([no acceptable jar program found in \$PATH]) AC_PROVIDE([$0])dnl ] ) AC_DEFUN([AC_PROG_JAVA],[ AC_REQUIRE([AC_EXEEXT])dnl if test x$JAVAPREFIX = x; then test x$JAVA = x && AC_CHECK_PROGS(JAVA, kaffe$EXEEXT java$EXEEXT) else test x$JAVA = x && AC_CHECK_PROGS(JAVA, kaffe$EXEEXT java$EXEEXT, $JAVAPREFIX) fi test x$JAVA = x && AC_MSG_ERROR([no acceptable Java virtual machine found in \$PATH]) AC_PROG_JAVA_WORKS AC_PROVIDE([$0])dnl ] ) #dnl /** #dnl * Test.java: used to test if java compiler works. 
#dnl */ #dnl public class Test #dnl { #dnl #dnl public static void #dnl main( String[] argv ) #dnl { #dnl System.exit (0); #dnl } #dnl #dnl } AC_DEFUN([AC_PROG_JAVA_WORKS], [ AC_CHECK_PROG(uudecode, uudecode$EXEEXT, yes) if test x$uudecode = xyes; then AC_CACHE_CHECK([if uudecode can decode base 64 file], ac_cv_prog_uudecode_base64, [ cat << \EOF > Test.uue begin-base64 644 Test.class yv66vgADAC0AFQcAAgEABFRlc3QHAAQBABBqYXZhL2xhbmcvT2JqZWN0AQAE bWFpbgEAFihbTGphdmEvbGFuZy9TdHJpbmc7KVYBAARDb2RlAQAPTGluZU51 bWJlclRhYmxlDAAKAAsBAARleGl0AQAEKEkpVgoADQAJBwAOAQAQamF2YS9s YW5nL1N5c3RlbQEABjxpbml0PgEAAygpVgwADwAQCgADABEBAApTb3VyY2VG aWxlAQAJVGVzdC5qYXZhACEAAQADAAAAAAACAAkABQAGAAEABwAAACEAAQAB AAAABQO4AAyxAAAAAQAIAAAACgACAAAACgAEAAsAAQAPABAAAQAHAAAAIQAB AAEAAAAFKrcAErEAAAABAAgAAAAKAAIAAAAEAAQABAABABMAAAACABQ= ==== EOF if uudecode$EXEEXT Test.uue; then ac_cv_prog_uudecode_base64=yes else echo "configure: __oline__: uudecode had trouble decoding base 64 file 'Test.uue'" >&AC_FD_CC echo "configure: failed file was:" >&AC_FD_CC cat Test.uue >&AC_FD_CC ac_cv_prog_uudecode_base64=no fi rm -f Test.uue]) fi if test x$ac_cv_prog_uudecode_base64 != xyes; then rm -f Test.class AC_MSG_WARN([I have to compile Test.class from scratch]) if test x$ac_cv_prog_javac_works = xno; then AC_MSG_ERROR([Cannot compile java source. 
$JAVAC does not work properly]) fi if test x$ac_cv_prog_javac_works = x; then AC_PROG_JAVAC fi fi AC_CACHE_CHECK(if $JAVA works, ac_cv_prog_java_works, [ JAVA_TEST=Test.java CLASS_TEST=Test.class TEST=Test changequote(, )dnl cat << \EOF > $JAVA_TEST /* [#]line __oline__ "configure" */ public class Test { public static void main (String args[]) { System.exit (0); } } EOF changequote([, ])dnl if test x$ac_cv_prog_uudecode_base64 != xyes; then if AC_TRY_COMMAND($JAVAC $JAVACFLAGS $JAVA_TEST) && test -s $CLASS_TEST; then : else echo "configure: failed program was:" >&AC_FD_CC cat $JAVA_TEST >&AC_FD_CC AC_MSG_ERROR(The Java compiler $JAVAC failed (see config.log, check the CLASSPATH?)) fi fi if AC_TRY_COMMAND($JAVA $JAVAFLAGS $TEST) >/dev/null 2>&1; then ac_cv_prog_java_works=yes else echo "configure: failed program was:" >&AC_FD_CC cat $JAVA_TEST >&AC_FD_CC AC_MSG_ERROR(The Java VM $JAVA failed (see config.log, check the CLASSPATH?)) fi rm -fr $JAVA_TEST $CLASS_TEST Test.uue ]) AC_PROVIDE([$0])dnl ] ) AC_DEFUN([AC_PROG_JAVAC], [ AC_REQUIRE([AC_EXEEXT])dnl if test "x$JAVAPREFIX" = x; then test "x$JAVAC" = x && AC_CHECK_PROGS(JAVAC, javac$EXEEXT "gcj$EXEEXT -C" guavac$EXEEXT jikes$EXEEXT) else test "x$JAVAC" = x && AC_CHECK_PROGS(JAVAC, javac$EXEEXT "gcj$EXEEXT -C" guavac$EXEEXT jikes$EXEEXT, $JAVAPREFIX) fi test "x$JAVAC" = x && AC_MSG_ERROR([no acceptable Java compiler found in \$PATH]) AC_PROG_JAVAC_WORKS AC_PROVIDE([$0])dnl ] ) AC_DEFUN([AC_PROG_JAVAC_WORKS],[ AC_CACHE_CHECK([if $JAVAC works], ac_cv_prog_javac_works, [ JAVA_TEST=Test.java CLASS_TEST=Test.class cat << \EOF > $JAVA_TEST /* [#]line __oline__ "configure" */ public class Test { } EOF if AC_TRY_COMMAND($JAVAC $JAVACFLAGS $JAVA_TEST) >/dev/null 2>&1; then ac_cv_prog_javac_works=yes else AC_MSG_ERROR([The Java compiler $JAVAC failed (see config.log, check the CLASSPATH?)]) echo "configure: failed program was:" >&AC_FD_CC cat $JAVA_TEST >&AC_FD_CC fi rm -f $JAVA_TEST $CLASS_TEST ]) AC_PROVIDE([$0])dnl ]) 
AC_DEFUN([AC_TRY_COMPILE_JAVA],[ AC_REQUIRE([AC_PROG_JAVAC])dnl cat << \EOF > Test.java /* [#]line __oline__ "configure" */ ifelse([$1], , , [import $1;]) public class Test { [$2] } EOF if AC_TRY_COMMAND($JAVAC $JAVACFLAGS Test.java) && test -s Test.class ; then #dnl Don't remove the temporary files here, so they can be examined. ifelse([$3], , :, [$3]) else echo "configure: failed program was:" >&AC_FD_CC cat Test.java >&AC_FD_CC ifelse([$4], , , [ rm -fr Test* $4 ])dnl fi rm -fr Test* ] ) AC_DEFUN([AC_TRY_RUN_JAVA],[ AC_REQUIRE([AC_PROG_JAVAC])dnl AC_REQUIRE([AC_PROG_JAVA])dnl cat << \EOF > Test.java /* [#]line __oline__ "configure" */ ifelse([$1], , , [include $1;]) public class Test { [$2] } EOF if AC_TRY_COMMAND($JAVAC $JAVACFLAGS Test.java) && test -s Test.class && ($JAVA $JAVAFLAGS Test; exit) 2>/dev/null then #dnl Don't remove the temporary files here, so they can be examined. ifelse([$3], , :, [$3]) else echo "configure: failed program was:" >&AC_FD_CC cat Test.java >&AC_FD_CC ifelse([$4], , , [ rm -fr Test* $4 ])dnl fi rm -fr Test*]) #dnl#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx #dnl# AX_TRY_COMPILE_JAVA #dnl#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx #dnl $1 => import section #dnl $2 => class body section #dnl $3 => if_good_action #dnl $4 => if_fails_action [implicit action: candidate is removed from #dnl list]. This cannot be overridden by providing a action. AC_DEFUN([AX_TRY_COMPILE_JAVA], [ ## make sure that we do not use an existing file i=0;cn="Test\${i}"; eval "fj=${cn}.java" while test -f "${fj}" do i=`expr $i + 1` eval "fj=${cn}.java" done eval "fc=${cn}.class" eval "cn=${cn}" cat << [_ACEOF] > ${fj} [$1] public class ${cn} { [$2] } [_ACEOF] ## wh: how do I check that a file has a non-zero size (test -s) ## wh: is not portable. if AC_TRY_COMMAND($JAVAC $JAVACFLAGS ${fj}) && test -f "${fc}" then $3 else ifelse([$4], ,[ echo "" echo "@configure:__oline__: failed to compile java input ...." 
echo "=======================================================" cat ${fj} echo "=======================================================" echo "exec $JAVAC $JAVACFLAGS ${fj}" echo "" rm -rf "${fc}" "${fj}" ],[$4]) fi rm -rf "${fc}" "${fj}" ## eof [AX_TRY_COMPILE_JAVA] ])dnl #dnl AX_GNU_MAKE #dnl $1->var that contains list of suitable candidates [not empty] #dnl $2->action_if_not_found || empty #dnl $3->action_if_found || empty #dnl => $MAKE AC_DEFUN( [AX_GNU_MAKE], [ #Search all the common names for GNU make ax_gnu_make_list="${[$1]}" [$1]= for a in . ${ax_gnu_make_list} ; do if test "$a" == "." ; then continue fi AC_MSG_CHECKING([whether ${a} is GNU make]) if (/bin/sh -c "$a --version" 2> /dev/null | grep GNU 2>&1 > /dev/null ); then [$1]="$a" AC_MSG_RESULT(yes) break else AC_MSG_RESULT(no) fi done ## handle search result if test "x${[$1]}" == "x" ; then : $2 else : $3 fi ] )dnl ###dnl Like AC_PATH_PROGS. However, each argument in $2 will be checked. ###dnl The result will be added to $1. There's no caching etc. ###dnl AC_DEFUN( [AX_TYPE_DASHA], [ for ac_prog in [$2] ; do set dummy $ac_prog; ac_word=${2} ## if argument is absolute we check whether such a file exists, ## otherwise we lookup PATH. Each hit will be added to main ## variable. case $ac_word in @<:@\\/@:>@* | ?:@<:@\\/@:>@*) AC_MSG_CHECKING([for $ac_word]) if test -f $ac_word ; then [$1]="${[$1]} ${ac_word}" AC_MSG_RESULT(yes) else AC_MSG_RESULT(no) fi ;; *) as_found= as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then [$1]="${[$1]} $as_dir/$ac_word$ac_exec_ext" AC_MSG_CHECKING([for $ac_word]) AC_MSG_RESULT([$as_dir/$ac_word$ac_exec_ext]) as_found=1 fi done done test "x$as_found" == "x" && { AC_MSG_CHECKING([for $ac_word]) AC_MSG_RESULT([no]) } ;; esac done ] )dnl ###dnl Like AC_PATH_PROGS but if is given, then it's argument ###dnl is taken unconditionally(?). AC_DEFUN( [AX_PATH_PROGS], [ ax_arg_list="[$2]" if test "x${[$1]}" != "x" ; then ax_arg_list="${[$1]}" fi [$1]="" AX_TYPE_DASHA([$1],[${ax_arg_list}]) if test "x${[$1]}" != "x" ; then ifelse([$3], ,[:],$3) else ifelse([$4], ,[ AC_MSG_ERROR([no suitable value has been found for [$1]]) ],$4) fi ] ) AC_DEFUN([AX_JAVAC], [ ## make sure that we do not use an existing file i=0;cn="Test\${i}"; eval "fj=${cn}.java" while test -f "${fj}" do i=`expr $i + 1` eval "fj=${cn}.java" done eval "fc=${cn}.class" eval "cn=${cn}" cat << [_ACEOF] > ${fj} [$1] public class ${cn} { [$2] } [_ACEOF] ## wh: how do I check that a file has a non-zero size (test -s) ## wh: is not portable. if AC_TRY_COMMAND($JAVAC $JAVACFLAGS ${fj}) && test -f "${fc}" then $3 else ifelse([$4], ,[ echo "" echo "@configure:__oline__: failed to compile java input ...." 
echo "=======================================================" cat ${fj} echo "=======================================================" echo "exec $JAVAC $JAVACFLAGS ${fj}" echo "" rm -rf "${fc}" "${fj}" ],[$4]) fi rm -rf "${fc}" "${fj}" ## eof [AX_TRY_COMPILE_JAVA] ])dnl AC_DEFUN([AX_WHICH_JAVAC],[ AC_SUBST([$1]) if (/bin/sh -c "$JAVAC --version" 2>&1 | grep -i 'GCC' 2>&1 > /dev/null ) ; then [$1]=gcj elif (/bin/sh -c "$JAVAC --version" 2>&1 | grep -i 'jikes' 2>&1 > /dev/null ) ; then [$1]=jikes else [$1]=javac fi ] ) AC_DEFUN([AX_VAR_HEAD],[ set x ${[$1]} [$1]="${2}" ] ) AC_DEFUN([AX_VAR_ADD],[ ifelse([$3], ,,[$1=$3]) $1="${[$1]} $2" ] ) AC_DEFUN([AX_JAVA_PROGS],[ case $LANG_JAVA in 1) AX_PATH_PROGS([$1],[$2],[$3],[ LANG_JAVA=0 cat <> "$[$1]" < "ANTLRLexer.java"$ package antlr; import java.io.InputStream; import antlr.TokenStreamException; import antlr.TokenStreamIOException; import antlr.TokenStreamRecognitionException; import antlr.CharStreamException; import antlr.CharStreamIOException; import antlr.ANTLRException; import java.io.Reader; import java.util.Hashtable; import antlr.CharScanner; import antlr.InputBuffer; import antlr.ByteBuffer; import antlr.CharBuffer; import antlr.Token; import antlr.CommonToken; import antlr.RecognitionException; import antlr.NoViableAltForCharException; import antlr.MismatchedCharException; import antlr.TokenStream; import antlr.ANTLRHashString; import antlr.LexerSharedInputState; import antlr.collections.impl.BitSet; import antlr.SemanticException; public class ANTLRLexer extends antlr.CharScanner implements ANTLRTokenTypes, TokenStream { /**Convert 'c' to an integer char value. 
*/ public static int escapeCharValue(String cs) { //System.out.println("escapeCharValue("+cs+")"); if ( cs.charAt(1)!='\\' ) return 0; switch ( cs.charAt(2) ) { case 'b' : return '\b'; case 'r' : return '\r'; case 't' : return '\t'; case 'n' : return '\n'; case 'f' : return '\f'; case '"' : return '\"'; case '\'' :return '\''; case '\\' :return '\\'; case 'u' : // Unicode char if (cs.length() != 8) { return 0; } else { return Character.digit(cs.charAt(3), 16) * 16 * 16 * 16 + Character.digit(cs.charAt(4), 16) * 16 * 16 + Character.digit(cs.charAt(5), 16) * 16 + Character.digit(cs.charAt(6), 16); } case '0' : case '1' : case '2' : case '3' : if ( cs.length()>5 && Character.isDigit(cs.charAt(4)) ) { return (cs.charAt(2)-'0')*8*8 + (cs.charAt(3)-'0')*8 + (cs.charAt(4)-'0'); } if ( cs.length()>4 && Character.isDigit(cs.charAt(3)) ) { return (cs.charAt(2)-'0')*8 + (cs.charAt(3)-'0'); } return cs.charAt(2)-'0'; case '4' : case '5' : case '6' : case '7' : if ( cs.length()>4 && Character.isDigit(cs.charAt(3)) ) { return (cs.charAt(2)-'0')*8 + (cs.charAt(3)-'0'); } return cs.charAt(2)-'0'; default : return 0; } } public static int tokenTypeForCharLiteral(String lit) { if ( lit.length()>3 ) { // does char contain escape? 
return escapeCharValue(lit); } else { return lit.charAt(1); } } public ANTLRLexer(InputStream in) { this(new ByteBuffer(in)); } public ANTLRLexer(Reader in) { this(new CharBuffer(in)); } public ANTLRLexer(InputBuffer ib) { this(new LexerSharedInputState(ib)); } public ANTLRLexer(LexerSharedInputState state) { super(state); caseSensitiveLiterals = true; setCaseSensitive(true); literals = new Hashtable(); literals.put(new ANTLRHashString("public", this), new Integer(31)); literals.put(new ANTLRHashString("class", this), new Integer(10)); literals.put(new ANTLRHashString("header", this), new Integer(5)); literals.put(new ANTLRHashString("throws", this), new Integer(37)); literals.put(new ANTLRHashString("lexclass", this), new Integer(9)); literals.put(new ANTLRHashString("catch", this), new Integer(40)); literals.put(new ANTLRHashString("private", this), new Integer(32)); literals.put(new ANTLRHashString("options", this), new Integer(51)); literals.put(new ANTLRHashString("extends", this), new Integer(11)); literals.put(new ANTLRHashString("protected", this), new Integer(30)); literals.put(new ANTLRHashString("TreeParser", this), new Integer(13)); literals.put(new ANTLRHashString("Parser", this), new Integer(29)); literals.put(new ANTLRHashString("Lexer", this), new Integer(12)); literals.put(new ANTLRHashString("returns", this), new Integer(35)); literals.put(new ANTLRHashString("charVocabulary", this), new Integer(18)); literals.put(new ANTLRHashString("tokens", this), new Integer(4)); literals.put(new ANTLRHashString("exception", this), new Integer(39)); } public Token nextToken() throws TokenStreamException { Token theRetToken=null; tryAgain: for (;;) { Token _token = null; int _ttype = Token.INVALID_TYPE; resetText(); try { // for char stream error handling try { // for lexical error handling switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { mWS(true); theRetToken=_returnToken; break; } case '/': { mCOMMENT(true); theRetToken=_returnToken; break; } 
case '<': { mOPEN_ELEMENT_OPTION(true); theRetToken=_returnToken; break; } case '>': { mCLOSE_ELEMENT_OPTION(true); theRetToken=_returnToken; break; } case ',': { mCOMMA(true); theRetToken=_returnToken; break; } case '?': { mQUESTION(true); theRetToken=_returnToken; break; } case '#': { mTREE_BEGIN(true); theRetToken=_returnToken; break; } case '(': { mLPAREN(true); theRetToken=_returnToken; break; } case ')': { mRPAREN(true); theRetToken=_returnToken; break; } case ':': { mCOLON(true); theRetToken=_returnToken; break; } case '*': { mSTAR(true); theRetToken=_returnToken; break; } case '+': { mPLUS(true); theRetToken=_returnToken; break; } case ';': { mSEMI(true); theRetToken=_returnToken; break; } case '^': { mCARET(true); theRetToken=_returnToken; break; } case '!': { mBANG(true); theRetToken=_returnToken; break; } case '|': { mOR(true); theRetToken=_returnToken; break; } case '~': { mNOT_OP(true); theRetToken=_returnToken; break; } case '}': { mRCURLY(true); theRetToken=_returnToken; break; } case '\'': { mCHAR_LITERAL(true); theRetToken=_returnToken; break; } case '"': { mSTRING_LITERAL(true); theRetToken=_returnToken; break; } case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': { mINT(true); theRetToken=_returnToken; break; } case '[': { mARG_ACTION(true); theRetToken=_returnToken; break; } case '{': { mACTION(true); theRetToken=_returnToken; break; } case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': { mTOKEN_REF(true); theRetToken=_returnToken; break; } case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 
'z': { mRULE_REF(true); theRetToken=_returnToken; break; } default: if ((LA(1)=='=') && (LA(2)=='>')) { mIMPLIES(true); theRetToken=_returnToken; } else if ((LA(1)=='.') && (LA(2)=='.')) { mRANGE(true); theRetToken=_returnToken; } else if ((LA(1)=='=') && (true)) { mASSIGN(true); theRetToken=_returnToken; } else if ((LA(1)=='.') && (true)) { mWILDCARD(true); theRetToken=_returnToken; } else { if (LA(1)==EOF_CHAR) {uponEOF(); _returnToken = makeToken(Token.EOF_TYPE);} else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());} } } if ( _returnToken==null ) continue tryAgain; // found SKIP token _ttype = _returnToken.getType(); _returnToken.setType(_ttype); return _returnToken; } catch (RecognitionException e) { throw new TokenStreamRecognitionException(e); } } catch (CharStreamException cse) { if ( cse instanceof CharStreamIOException ) { throw new TokenStreamIOException(((CharStreamIOException)cse).io); } else { throw new TokenStreamException(cse.getMessage()); } } } } public final void mWS(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = WS; int _saveIndex; { switch ( LA(1)) { case ' ': { match(' '); break; } case '\t': { match('\t'); break; } case '\n': { match('\n'); newline(); break; } default: if ((LA(1)=='\r') && (LA(2)=='\n')) { match('\r'); match('\n'); newline(); } else if ((LA(1)=='\r') && (true)) { match('\r'); newline(); } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } _ttype = Token.SKIP; if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } public final void mCOMMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int 
_begin=text.length(); _ttype = COMMENT; int _saveIndex; Token t=null; { if ((LA(1)=='/') && (LA(2)=='/')) { mSL_COMMENT(false); } else if ((LA(1)=='/') && (LA(2)=='*')) { mML_COMMENT(true); t=_returnToken; _ttype = t.getType(); } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } if ( _ttype != DOC_COMMENT ) _ttype = Token.SKIP; if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mSL_COMMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = SL_COMMENT; int _saveIndex; match("//"); { _loop153: do { if ((_tokenSet_0.member(LA(1)))) { { match(_tokenSet_0); } } else { break _loop153; } } while (true); } { if ((LA(1)=='\r') && (LA(2)=='\n')) { match('\r'); match('\n'); } else if ((LA(1)=='\r') && (true)) { match('\r'); } else if ((LA(1)=='\n')) { match('\n'); } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } newline(); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mML_COMMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = ML_COMMENT; int _saveIndex; match("/*"); { if (((LA(1)=='*') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')))&&( LA(2)!='/' )) { match('*'); _ttype = DOC_COMMENT; } else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) { } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } { _loop159: do { // nongreedy exit test if 
((LA(1)=='*') && (LA(2)=='/')) break _loop159; if ((LA(1)=='\r') && (LA(2)=='\n')) { match('\r'); match('\n'); newline(); } else if ((LA(1)=='\r') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) { match('\r'); newline(); } else if ((_tokenSet_0.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) { { match(_tokenSet_0); } } else if ((LA(1)=='\n')) { match('\n'); newline(); } else { break _loop159; } } while (true); } match("*/"); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } public final void mOPEN_ELEMENT_OPTION(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = OPEN_ELEMENT_OPTION; int _saveIndex; match('<'); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } public final void mCLOSE_ELEMENT_OPTION(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = CLOSE_ELEMENT_OPTION; int _saveIndex; match('>'); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } public final void mCOMMA(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = COMMA; int _saveIndex; match(','); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } public final void mQUESTION(boolean _createToken) throws 
RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = QUESTION; int _saveIndex; match('?'); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } public final void mTREE_BEGIN(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = TREE_BEGIN; int _saveIndex; match("#("); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } public final void mLPAREN(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = LPAREN; int _saveIndex; match('('); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } public final void mRPAREN(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = RPAREN; int _saveIndex; match(')'); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } public final void mCOLON(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = COLON; int _saveIndex; match(':'); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } 
_returnToken = _token; } public final void mSTAR(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = STAR; int _saveIndex; match('*'); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } public final void mPLUS(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = PLUS; int _saveIndex; match('+'); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } public final void mASSIGN(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = ASSIGN; int _saveIndex; match('='); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } public final void mIMPLIES(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = IMPLIES; int _saveIndex; match("=>"); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } public final void mSEMI(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = SEMI; int _saveIndex; match(';'); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); 
_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } public final void mCARET(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = CARET; int _saveIndex; match('^'); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } public final void mBANG(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = BANG; int _saveIndex; match('!'); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } public final void mOR(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = OR; int _saveIndex; match('|'); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } public final void mWILDCARD(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = WILDCARD; int _saveIndex; match('.'); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } public final void mRANGE(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = RANGE; int _saveIndex; match(".."); if ( _createToken && 
_token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } public final void mNOT_OP(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = NOT_OP; int _saveIndex; match('~'); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } public final void mRCURLY(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = RCURLY; int _saveIndex; match('}'); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } public final void mCHAR_LITERAL(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = CHAR_LITERAL; int _saveIndex; match('\''); { if ((LA(1)=='\\')) { mESC(false); } else if ((_tokenSet_1.member(LA(1)))) { matchNot('\''); } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } match('\''); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mESC(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = ESC; int _saveIndex; match('\\'); { switch ( LA(1)) { case 'n': { match('n'); break; } case 'r': { match('r'); break; } case 't': { match('t'); break; } case 'b': { match('b'); 
break; } case 'f': { match('f'); break; } case 'w': { match('w'); break; } case 'a': { match('a'); break; } case '"': { match('"'); break; } case '\'': { match('\''); break; } case '\\': { match('\\'); break; } case '0': case '1': case '2': case '3': { { matchRange('0','3'); } { if (((LA(1) >= '0' && LA(1) <= '7')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) { matchRange('0','7'); { if (((LA(1) >= '0' && LA(1) <= '7')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) { matchRange('0','7'); } else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && (true)) { } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && (true)) { } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } break; } case '4': case '5': case '6': case '7': { { matchRange('4','7'); } { if (((LA(1) >= '0' && LA(1) <= '7')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) { matchRange('0','7'); } else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && (true)) { } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } break; } case 'u': { match('u'); mXDIGIT(false); mXDIGIT(false); mXDIGIT(false); mXDIGIT(false); break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } public final void mSTRING_LITERAL(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = STRING_LITERAL; int _saveIndex; match('"'); { _loop184: do { if ((LA(1)=='\\')) { mESC(false); } else if ((_tokenSet_2.member(LA(1)))) { matchNot('"'); } else { break _loop184; } } while (true); } 
match('"'); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mXDIGIT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = XDIGIT; int _saveIndex; switch ( LA(1)) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': { matchRange('0','9'); break; } case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': { matchRange('a','f'); break; } case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': { matchRange('A','F'); break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mDIGIT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = DIGIT; int _saveIndex; matchRange('0','9'); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } public final void mINT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = INT; int _saveIndex; { int _cnt196=0; _loop196: do { if (((LA(1) >= '0' && LA(1) <= '9'))) { matchRange('0','9'); } else { if ( _cnt196>=1 ) { break _loop196; } else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());} } _cnt196++; } while (true); } if ( _createToken && _token==null && 
_ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } public final void mARG_ACTION(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = ARG_ACTION; int _saveIndex; mNESTED_ARG_ACTION(false); setText(StringUtils.stripFrontBack(getText(), "[", "]")); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mNESTED_ARG_ACTION(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = NESTED_ARG_ACTION; int _saveIndex; match('['); { _loop200: do { switch ( LA(1)) { case '[': { mNESTED_ARG_ACTION(false); break; } case '\n': { match('\n'); newline(); break; } case '\'': { mCHAR_LITERAL(false); break; } case '"': { mSTRING_LITERAL(false); break; } default: if ((LA(1)=='\r') && (LA(2)=='\n')) { match('\r'); match('\n'); newline(); } else if ((LA(1)=='\r') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) { match('\r'); newline(); } else if ((_tokenSet_3.member(LA(1)))) { matchNot(']'); } else { break _loop200; } } } while (true); } match(']'); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } public final void mACTION(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = ACTION; int _saveIndex; int actionLine=getLine(); int actionColumn = getColumn(); mNESTED_ACTION(false); { if ((LA(1)=='?')) { match('?'); _ttype = SEMPRED; } else { } } if ( _ttype==ACTION ) { 
setText(StringUtils.stripFrontBack(getText(), "{", "}")); } else { setText(StringUtils.stripFrontBack(getText(), "{", "}?")); } CommonToken t = new CommonToken(_ttype,new String(text.getBuffer(),_begin,text.length()-_begin)); t.setLine(actionLine); // set action line to start t.setColumn(actionColumn); _token = t; if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mNESTED_ACTION(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = NESTED_ACTION; int _saveIndex; match('{'); { _loop206: do { // nongreedy exit test if ((LA(1)=='}') && (true)) break _loop206; if ((LA(1)=='\n'||LA(1)=='\r') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) { { if ((LA(1)=='\r') && (LA(2)=='\n')) { match('\r'); match('\n'); newline(); } else if ((LA(1)=='\r') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) { match('\r'); newline(); } else if ((LA(1)=='\n')) { match('\n'); newline(); } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } else if ((LA(1)=='{') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) { mNESTED_ACTION(false); } else if ((LA(1)=='\'') && (_tokenSet_4.member(LA(2)))) { mCHAR_LITERAL(false); } else if ((LA(1)=='/') && (LA(2)=='*'||LA(2)=='/')) { mCOMMENT(false); } else if ((LA(1)=='"') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) { mSTRING_LITERAL(false); } else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) { matchNot(EOF_CHAR); } else { break _loop206; } } while (true); } match('}'); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } public final void mTOKEN_REF(boolean 
_createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = TOKEN_REF; int _saveIndex; matchRange('A','Z'); { _loop209: do { switch ( LA(1)) { case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { matchRange('a','z'); break; } case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': { matchRange('A','Z'); break; } case '_': { match('_'); break; } case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': { matchRange('0','9'); break; } default: { break _loop209; } } } while (true); } _ttype = testLiteralsTable(_ttype); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } public final void mRULE_REF(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = RULE_REF; int _saveIndex; int t=0; t=mINTERNAL_RULE_REF(false); _ttype=t; { if (( true )&&(t==LITERAL_options)) { mWS_LOOP(false); { if ((LA(1)=='{')) { match('{'); _ttype = OPTIONS; } else { } } } else if (( true )&&(t==LITERAL_tokens)) { mWS_LOOP(false); { if ((LA(1)=='{')) { match('{'); _ttype = TOKENS; } else { } } } else { } } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final int 
mINTERNAL_RULE_REF(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int t; int _ttype; Token _token=null; int _begin=text.length(); _ttype = INTERNAL_RULE_REF; int _saveIndex; t = RULE_REF; matchRange('a','z'); { _loop219: do { switch ( LA(1)) { case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { matchRange('a','z'); break; } case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': { matchRange('A','Z'); break; } case '_': { match('_'); break; } case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': { matchRange('0','9'); break; } default: { break _loop219; } } } while (true); } t = testLiteralsTable(t); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; return t; } protected final void mWS_LOOP(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = WS_LOOP; int _saveIndex; { _loop216: do { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { mWS(false); break; } case '/': { mCOMMENT(false); break; } default: { break _loop216; } } } while (true); } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mWS_OPT(boolean _createToken) throws RecognitionException, 
CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = WS_OPT; int _saveIndex; { if ((_tokenSet_5.member(LA(1)))) { mWS(false); } else { } } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } private static final long[] mk_tokenSet_0() { long[] data = new long[8]; data[0]=-9224L; for (int i = 1; i<=3; i++) { data[i]=-1L; } return data; } public static final BitSet _tokenSet_0 = new BitSet(mk_tokenSet_0()); private static final long[] mk_tokenSet_1() { long[] data = new long[8]; data[0]=-549755813896L; data[1]=-268435457L; for (int i = 2; i<=3; i++) { data[i]=-1L; } return data; } public static final BitSet _tokenSet_1 = new BitSet(mk_tokenSet_1()); private static final long[] mk_tokenSet_2() { long[] data = new long[8]; data[0]=-17179869192L; data[1]=-268435457L; for (int i = 2; i<=3; i++) { data[i]=-1L; } return data; } public static final BitSet _tokenSet_2 = new BitSet(mk_tokenSet_2()); private static final long[] mk_tokenSet_3() { long[] data = new long[8]; data[0]=-566935692296L; data[1]=-671088641L; for (int i = 2; i<=3; i++) { data[i]=-1L; } return data; } public static final BitSet _tokenSet_3 = new BitSet(mk_tokenSet_3()); private static final long[] mk_tokenSet_4() { long[] data = new long[8]; data[0]=-549755813896L; for (int i = 1; i<=3; i++) { data[i]=-1L; } return data; } public static final BitSet _tokenSet_4 = new BitSet(mk_tokenSet_4()); private static final long[] mk_tokenSet_5() { long[] data = { 4294977024L, 0L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_5 = new BitSet(mk_tokenSet_5()); } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/ANTLRParser.java000066400000000000000000001474501161462365500240310ustar00rootroot00000000000000// $ANTLR 2.7.3rc3: "antlr.g" -> "ANTLRParser.java"$ package antlr; import antlr.TokenBuffer; 
import antlr.TokenStreamException; import antlr.TokenStreamIOException; import antlr.ANTLRException; import antlr.LLkParser; import antlr.Token; import antlr.TokenStream; import antlr.RecognitionException; import antlr.NoViableAltException; import antlr.MismatchedTokenException; import antlr.SemanticException; import antlr.ParserSharedInputState; import antlr.collections.impl.BitSet; import java.util.Enumeration; import java.io.DataInputStream; import java.io.InputStream; import java.io.FileInputStream; import java.io.IOException; public class ANTLRParser extends antlr.LLkParser implements ANTLRTokenTypes { private static final boolean DEBUG_PARSER = false; ANTLRGrammarParseBehavior behavior; Tool antlrTool; protected int blockNesting= -1; public ANTLRParser( TokenBuffer tokenBuf, ANTLRGrammarParseBehavior behavior_, Tool tool_ ) { super(tokenBuf, 1); tokenNames = _tokenNames; behavior = behavior_; antlrTool = tool_; } public void reportError(String s) { antlrTool.error(s, getFilename(), -1, -1); } public void reportError(RecognitionException e) { reportError(e, e.getErrorMessage()); } public void reportError(RecognitionException e, String s) { antlrTool.error(s, e.getFilename(), e.getLine(), e.getColumn()); } public void reportWarning(String s) { antlrTool.warning(s, getFilename(), -1, -1); } private boolean lastInRule() throws TokenStreamException { if ( blockNesting==0 && (LA(1)==SEMI || LA(1)==LITERAL_exception || LA(1)==OR) ) { return true; } return false; } private void checkForMissingEndRule(Token label) { if ( label.getColumn()==1 ) { antlrTool.warning("did you forget to terminate previous rule?", getFilename(), label.getLine(), label.getColumn()); } } protected ANTLRParser(TokenBuffer tokenBuf, int k) { super(tokenBuf,k); tokenNames = _tokenNames; } public ANTLRParser(TokenBuffer tokenBuf) { this(tokenBuf,2); } protected ANTLRParser(TokenStream lexer, int k) { super(lexer,k); tokenNames = _tokenNames; } public ANTLRParser(TokenStream lexer) { 
this(lexer,2); } public ANTLRParser(ParserSharedInputState state) { super(state,2); tokenNames = _tokenNames; } public final void grammar() throws RecognitionException, TokenStreamException { Token n = null; Token h = null; try { // for error handling { _loop4: do { if ((LA(1)==LITERAL_header)) { if ( inputState.guessing==0 ) { n = null; // RK: prevent certain orders of header actions // overwriting eachother. } match(LITERAL_header); { switch ( LA(1)) { case STRING_LITERAL: { n = LT(1); match(STRING_LITERAL); break; } case ACTION: { break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } h = LT(1); match(ACTION); if ( inputState.guessing==0 ) { // store the header action // FIXME: 'n' should be checked for validity behavior.refHeaderAction(n,h); } } else { break _loop4; } } while (true); } { switch ( LA(1)) { case OPTIONS: { fileOptionsSpec(); break; } case EOF: case ACTION: case DOC_COMMENT: case LITERAL_lexclass: case LITERAL_class: { break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } { _loop7: do { if (((LA(1) >= ACTION && LA(1) <= LITERAL_class))) { classDef(); } else { break _loop7; } } while (true); } match(Token.EOF_TYPE); } catch (RecognitionException ex) { if (inputState.guessing==0) { reportError(ex, "rule grammar trapped:\n"+ex.toString()); consumeUntil(EOF); } else { throw ex; } } } public final void fileOptionsSpec() throws RecognitionException, TokenStreamException { Token idTok; Token value; match(OPTIONS); { _loop18: do { if ((LA(1)==TOKEN_REF||LA(1)==RULE_REF)) { idTok=id(); match(ASSIGN); value=optionValue(); if ( inputState.guessing==0 ) { behavior.setFileOption(idTok, value,getInputState().filename); } match(SEMI); } else { break _loop18; } } while (true); } match(RCURLY); } public final void classDef() throws RecognitionException, TokenStreamException { Token a = null; Token d = null; String doc=null; try { // for error handling { switch ( LA(1)) { case ACTION: { a = LT(1); match(ACTION); if 
( inputState.guessing==0 ) { behavior.refPreambleAction(a); } break; } case DOC_COMMENT: case LITERAL_lexclass: case LITERAL_class: { break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } { switch ( LA(1)) { case DOC_COMMENT: { d = LT(1); match(DOC_COMMENT); if ( inputState.guessing==0 ) { doc=d.getText(); } break; } case LITERAL_lexclass: case LITERAL_class: { break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } { boolean synPredMatched13 = false; if (((LA(1)==LITERAL_lexclass||LA(1)==LITERAL_class) && (LA(2)==TOKEN_REF||LA(2)==RULE_REF))) { int _m13 = mark(); synPredMatched13 = true; inputState.guessing++; try { { switch ( LA(1)) { case LITERAL_lexclass: { match(LITERAL_lexclass); break; } case LITERAL_class: { match(LITERAL_class); id(); match(LITERAL_extends); match(LITERAL_Lexer); break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } } catch (RecognitionException pe) { synPredMatched13 = false; } rewind(_m13); inputState.guessing--; } if ( synPredMatched13 ) { lexerSpec(doc); } else { boolean synPredMatched15 = false; if (((LA(1)==LITERAL_class) && (LA(2)==TOKEN_REF||LA(2)==RULE_REF))) { int _m15 = mark(); synPredMatched15 = true; inputState.guessing++; try { { match(LITERAL_class); id(); match(LITERAL_extends); match(LITERAL_TreeParser); } } catch (RecognitionException pe) { synPredMatched15 = false; } rewind(_m15); inputState.guessing--; } if ( synPredMatched15 ) { treeParserSpec(doc); } else if ((LA(1)==LITERAL_class) && (LA(2)==TOKEN_REF||LA(2)==RULE_REF)) { parserSpec(doc); } else { throw new NoViableAltException(LT(1), getFilename()); } } } rules(); if ( inputState.guessing==0 ) { behavior.endGrammar(); } } catch (RecognitionException ex) { if (inputState.guessing==0) { if ( ex instanceof NoViableAltException ) { NoViableAltException e = (NoViableAltException)ex; // RK: These probably generate inconsequent error messages... // have to see how this comes out.. 
if ( e.token.getType()==DOC_COMMENT ) { reportError(ex, "JAVADOC comments may only prefix rules and grammars"); } else { reportError(ex, "rule classDef trapped:\n"+ex.toString()); } } else { reportError(ex, "rule classDef trapped:\n"+ex.toString()); } behavior.abortGrammar(); boolean consuming = true; // consume everything until the next class definition or EOF while (consuming) { consume(); switch(LA(1)) { case LITERAL_class: case LITERAL_lexclass: case EOF: consuming = false; break; } } } else { throw ex; } } } public final Token id() throws RecognitionException, TokenStreamException { Token idTok ; Token a = null; Token b = null; idTok = null; switch ( LA(1)) { case TOKEN_REF: { a = LT(1); match(TOKEN_REF); if ( inputState.guessing==0 ) { idTok = a; } break; } case RULE_REF: { b = LT(1); match(RULE_REF); if ( inputState.guessing==0 ) { idTok = b; } break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } return idTok ; } public final void lexerSpec( String doc ) throws RecognitionException, TokenStreamException { Token lc = null; Token a = null; Token idTok; String sup=null; { switch ( LA(1)) { case LITERAL_lexclass: { lc = LT(1); match(LITERAL_lexclass); idTok=id(); if ( inputState.guessing==0 ) { antlrTool.warning("lexclass' is deprecated; use 'class X extends Lexer'", getFilename(), lc.getLine(), lc.getColumn()); // System.out.println("warning: line " + lc.getLine() + ": 'lexclass' is deprecated; use 'class X extends Lexer'"); } break; } case LITERAL_class: { match(LITERAL_class); idTok=id(); match(LITERAL_extends); match(LITERAL_Lexer); { switch ( LA(1)) { case LPAREN: { sup=superClass(); break; } case SEMI: { break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } if ( inputState.guessing==0 ) { behavior.startLexer(getFilename(), idTok,sup,doc); } match(SEMI); { switch ( LA(1)) { case OPTIONS: { lexerOptionsSpec(); break; } case 
ACTION: case DOC_COMMENT: case TOKENS: case TOKEN_REF: case LITERAL_protected: case LITERAL_public: case LITERAL_private: case RULE_REF: { break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } if ( inputState.guessing==0 ) { behavior.endOptions(); } { switch ( LA(1)) { case TOKENS: { tokensSpec(); break; } case ACTION: case DOC_COMMENT: case TOKEN_REF: case LITERAL_protected: case LITERAL_public: case LITERAL_private: case RULE_REF: { break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } { switch ( LA(1)) { case ACTION: { a = LT(1); match(ACTION); if ( inputState.guessing==0 ) { behavior.refMemberAction(a); } break; } case DOC_COMMENT: case TOKEN_REF: case LITERAL_protected: case LITERAL_public: case LITERAL_private: case RULE_REF: { break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } } public final void treeParserSpec( String doc ) throws RecognitionException, TokenStreamException { Token a = null; Token idTok; String sup=null; match(LITERAL_class); idTok=id(); match(LITERAL_extends); match(LITERAL_TreeParser); { switch ( LA(1)) { case LPAREN: { sup=superClass(); break; } case SEMI: { break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } if ( inputState.guessing==0 ) { behavior.startTreeWalker(getFilename(), idTok,sup,doc); } match(SEMI); { switch ( LA(1)) { case OPTIONS: { treeParserOptionsSpec(); break; } case ACTION: case DOC_COMMENT: case TOKENS: case TOKEN_REF: case LITERAL_protected: case LITERAL_public: case LITERAL_private: case RULE_REF: { break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } if ( inputState.guessing==0 ) { behavior.endOptions(); } { switch ( LA(1)) { case TOKENS: { tokensSpec(); break; } case ACTION: case DOC_COMMENT: case TOKEN_REF: case LITERAL_protected: case LITERAL_public: case LITERAL_private: case RULE_REF: { break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } { switch ( 
LA(1)) { case ACTION: { a = LT(1); match(ACTION); if ( inputState.guessing==0 ) { behavior.refMemberAction(a); } break; } case DOC_COMMENT: case TOKEN_REF: case LITERAL_protected: case LITERAL_public: case LITERAL_private: case RULE_REF: { break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } } public final void parserSpec( String doc ) throws RecognitionException, TokenStreamException { Token a = null; Token idTok; String sup=null; match(LITERAL_class); idTok=id(); { switch ( LA(1)) { case LITERAL_extends: { match(LITERAL_extends); match(LITERAL_Parser); { switch ( LA(1)) { case LPAREN: { sup=superClass(); break; } case SEMI: { break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } break; } case SEMI: { if ( inputState.guessing==0 ) { antlrTool.warning("use 'class X extends Parser'", getFilename(), idTok.getLine(), idTok.getColumn()); // System.out.println("warning: line " + // idTok.getLine() + ": use 'class X extends Parser'"); } break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } if ( inputState.guessing==0 ) { behavior.startParser(getFilename(), idTok, sup, doc); } match(SEMI); { switch ( LA(1)) { case OPTIONS: { parserOptionsSpec(); break; } case ACTION: case DOC_COMMENT: case TOKENS: case TOKEN_REF: case LITERAL_protected: case LITERAL_public: case LITERAL_private: case RULE_REF: { break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } if ( inputState.guessing==0 ) { behavior.endOptions(); } { switch ( LA(1)) { case TOKENS: { tokensSpec(); break; } case ACTION: case DOC_COMMENT: case TOKEN_REF: case LITERAL_protected: case LITERAL_public: case LITERAL_private: case RULE_REF: { break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } { switch ( LA(1)) { case ACTION: { a = LT(1); match(ACTION); if ( inputState.guessing==0 ) { behavior.refMemberAction(a); } break; } case DOC_COMMENT: case TOKEN_REF: case LITERAL_protected: case 
LITERAL_public: case LITERAL_private: case RULE_REF: { break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } } public final void rules() throws RecognitionException, TokenStreamException { { int _cnt68=0; _loop68: do { if ((_tokenSet_0.member(LA(1))) && (_tokenSet_1.member(LA(2)))) { rule(); } else { if ( _cnt68>=1 ) { break _loop68; } else {throw new NoViableAltException(LT(1), getFilename());} } _cnt68++; } while (true); } } public final Token optionValue() throws RecognitionException, TokenStreamException { Token retval ; Token sl = null; Token cl = null; Token il = null; retval = null; switch ( LA(1)) { case TOKEN_REF: case RULE_REF: { retval=qualifiedID(); break; } case STRING_LITERAL: { sl = LT(1); match(STRING_LITERAL); if ( inputState.guessing==0 ) { retval = sl; } break; } case CHAR_LITERAL: { cl = LT(1); match(CHAR_LITERAL); if ( inputState.guessing==0 ) { retval = cl; } break; } case INT: { il = LT(1); match(INT); if ( inputState.guessing==0 ) { retval = il; } break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } return retval ; } public final void parserOptionsSpec() throws RecognitionException, TokenStreamException { Token idTok; Token value; match(OPTIONS); { _loop21: do { if ((LA(1)==TOKEN_REF||LA(1)==RULE_REF)) { idTok=id(); match(ASSIGN); value=optionValue(); if ( inputState.guessing==0 ) { behavior.setGrammarOption(idTok, value); } match(SEMI); } else { break _loop21; } } while (true); } match(RCURLY); } public final void treeParserOptionsSpec() throws RecognitionException, TokenStreamException { Token idTok; Token value; match(OPTIONS); { _loop24: do { if ((LA(1)==TOKEN_REF||LA(1)==RULE_REF)) { idTok=id(); match(ASSIGN); value=optionValue(); if ( inputState.guessing==0 ) { behavior.setGrammarOption(idTok, value); } match(SEMI); } else { break _loop24; } } while (true); } match(RCURLY); } public final void lexerOptionsSpec() throws RecognitionException, TokenStreamException { Token idTok; Token 
value; BitSet b; match(OPTIONS); { _loop27: do { switch ( LA(1)) { case LITERAL_charVocabulary: { match(LITERAL_charVocabulary); match(ASSIGN); b=charSet(); match(SEMI); if ( inputState.guessing==0 ) { behavior.setCharVocabulary(b); } break; } case TOKEN_REF: case RULE_REF: { idTok=id(); match(ASSIGN); value=optionValue(); if ( inputState.guessing==0 ) { behavior.setGrammarOption(idTok, value); } match(SEMI); break; } default: { break _loop27; } } } while (true); } match(RCURLY); } public final BitSet charSet() throws RecognitionException, TokenStreamException { BitSet b ; b = null; BitSet tmpSet = null; b=setBlockElement(); { _loop34: do { if ((LA(1)==OR)) { match(OR); tmpSet=setBlockElement(); if ( inputState.guessing==0 ) { b.orInPlace(tmpSet); } } else { break _loop34; } } while (true); } return b ; } public final void subruleOptionsSpec() throws RecognitionException, TokenStreamException { Token idTok; Token value; match(OPTIONS); { _loop30: do { if ((LA(1)==TOKEN_REF||LA(1)==RULE_REF)) { idTok=id(); match(ASSIGN); value=optionValue(); if ( inputState.guessing==0 ) { behavior.setSubruleOption(idTok, value); } match(SEMI); } else { break _loop30; } } while (true); } match(RCURLY); } /** Match a.b.c.d qualified ids; WILDCARD here is overloaded as * id separator; that is, I need a reference to the '.' token. */ public final Token qualifiedID() throws RecognitionException, TokenStreamException { Token qidTok=null; StringBuffer buf = new StringBuffer(30); Token a; a=id(); if ( inputState.guessing==0 ) { buf.append(a.getText()); } { _loop144: do { if ((LA(1)==WILDCARD)) { match(WILDCARD); a=id(); if ( inputState.guessing==0 ) { buf.append('.'); buf.append(a.getText()); } } else { break _loop144; } } while (true); } if ( inputState.guessing==0 ) { // can use either TOKEN_REF or RULE_REF; should // really create a QID or something instead. 
qidTok = new CommonToken(TOKEN_REF, buf.toString()); qidTok.setLine(a.getLine()); } return qidTok; } public final BitSet setBlockElement() throws RecognitionException, TokenStreamException { BitSet b ; Token c1 = null; Token c2 = null; b = null; int rangeMin = 0; c1 = LT(1); match(CHAR_LITERAL); if ( inputState.guessing==0 ) { rangeMin = ANTLRLexer.tokenTypeForCharLiteral(c1.getText()); b = BitSet.of(rangeMin); } { switch ( LA(1)) { case RANGE: { match(RANGE); c2 = LT(1); match(CHAR_LITERAL); if ( inputState.guessing==0 ) { int rangeMax = ANTLRLexer.tokenTypeForCharLiteral(c2.getText()); if (rangeMax < rangeMin) { antlrTool.error("Malformed range line ", getFilename(), c1.getLine(), c1.getColumn()); } for (int i = rangeMin+1; i <= rangeMax; i++) { b.add(i); } } break; } case SEMI: case OR: { break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } return b ; } public final void tokensSpec() throws RecognitionException, TokenStreamException { Token t1 = null; Token s1 = null; Token s3 = null; match(TOKENS); { int _cnt43=0; _loop43: do { if ((LA(1)==STRING_LITERAL||LA(1)==TOKEN_REF)) { { switch ( LA(1)) { case TOKEN_REF: { if ( inputState.guessing==0 ) { s1=null; } t1 = LT(1); match(TOKEN_REF); { switch ( LA(1)) { case ASSIGN: { match(ASSIGN); s1 = LT(1); match(STRING_LITERAL); break; } case SEMI: case OPEN_ELEMENT_OPTION: { break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } if ( inputState.guessing==0 ) { behavior.defineToken(t1, s1); } { switch ( LA(1)) { case OPEN_ELEMENT_OPTION: { tokensSpecOptions(t1); break; } case SEMI: { break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } break; } case STRING_LITERAL: { s3 = LT(1); match(STRING_LITERAL); if ( inputState.guessing==0 ) { behavior.defineToken(null, s3); } { switch ( LA(1)) { case OPEN_ELEMENT_OPTION: { tokensSpecOptions(s3); break; } case SEMI: { break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } break; } 
default: { throw new NoViableAltException(LT(1), getFilename()); } } } match(SEMI); } else { if ( _cnt43>=1 ) { break _loop43; } else {throw new NoViableAltException(LT(1), getFilename());} } _cnt43++; } while (true); } match(RCURLY); } public final void tokensSpecOptions( Token t ) throws RecognitionException, TokenStreamException { Token o=null, v=null; match(OPEN_ELEMENT_OPTION); o=id(); match(ASSIGN); v=optionValue(); if ( inputState.guessing==0 ) { behavior.refTokensSpecElementOption(t,o,v); } { _loop46: do { if ((LA(1)==SEMI)) { match(SEMI); o=id(); match(ASSIGN); v=optionValue(); if ( inputState.guessing==0 ) { behavior.refTokensSpecElementOption(t,o,v); } } else { break _loop46; } } while (true); } match(CLOSE_ELEMENT_OPTION); } public final String superClass() throws RecognitionException, TokenStreamException { String sup; sup=null; match(LPAREN); if ( inputState.guessing==0 ) { sup = LT(1).getText(); sup = StringUtils.stripFrontBack(sup, "\"", "\""); } { match(STRING_LITERAL); } match(RPAREN); return sup; } public final void rule() throws RecognitionException, TokenStreamException { Token d = null; Token p1 = null; Token p2 = null; Token p3 = null; Token aa = null; Token rt = null; Token a = null; String access="public"; Token idTok; String doc=null; boolean ruleAutoGen = true; blockNesting = -1; // block increments, so -1 to make rule at level 0 { switch ( LA(1)) { case DOC_COMMENT: { d = LT(1); match(DOC_COMMENT); if ( inputState.guessing==0 ) { doc=d.getText(); } break; } case TOKEN_REF: case LITERAL_protected: case LITERAL_public: case LITERAL_private: case RULE_REF: { break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } { switch ( LA(1)) { case LITERAL_protected: { p1 = LT(1); match(LITERAL_protected); if ( inputState.guessing==0 ) { access=p1.getText(); } break; } case LITERAL_public: { p2 = LT(1); match(LITERAL_public); if ( inputState.guessing==0 ) { access=p2.getText(); } break; } case LITERAL_private: { p3 = LT(1); 
match(LITERAL_private); if ( inputState.guessing==0 ) { access=p3.getText(); } break; } case TOKEN_REF: case RULE_REF: { break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } idTok=id(); { switch ( LA(1)) { case BANG: { match(BANG); if ( inputState.guessing==0 ) { ruleAutoGen = false; } break; } case ACTION: case OPTIONS: case ARG_ACTION: case LITERAL_returns: case COLON: case LITERAL_throws: { break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } if ( inputState.guessing==0 ) { behavior.defineRuleName(idTok, access, ruleAutoGen, doc); } { switch ( LA(1)) { case ARG_ACTION: { aa = LT(1); match(ARG_ACTION); if ( inputState.guessing==0 ) { behavior.refArgAction(aa); } break; } case ACTION: case OPTIONS: case LITERAL_returns: case COLON: case LITERAL_throws: { break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } { switch ( LA(1)) { case LITERAL_returns: { match(LITERAL_returns); rt = LT(1); match(ARG_ACTION); if ( inputState.guessing==0 ) { behavior.refReturnAction(rt); } break; } case ACTION: case OPTIONS: case COLON: case LITERAL_throws: { break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } { switch ( LA(1)) { case LITERAL_throws: { throwsSpec(); break; } case ACTION: case OPTIONS: case COLON: { break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } { switch ( LA(1)) { case OPTIONS: { ruleOptionsSpec(); break; } case ACTION: case COLON: { break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } { switch ( LA(1)) { case ACTION: { a = LT(1); match(ACTION); if ( inputState.guessing==0 ) { behavior.refInitAction(a); } break; } case COLON: { break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } match(COLON); block(); match(SEMI); { switch ( LA(1)) { case LITERAL_exception: { exceptionGroup(); break; } case EOF: case ACTION: case DOC_COMMENT: case LITERAL_lexclass: case LITERAL_class: case 
TOKEN_REF: case LITERAL_protected: case LITERAL_public: case LITERAL_private: case RULE_REF: { break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } if ( inputState.guessing==0 ) { behavior.endRule(idTok.getText()); } } public final void throwsSpec() throws RecognitionException, TokenStreamException { String t=null; Token a,b; match(LITERAL_throws); a=id(); if ( inputState.guessing==0 ) { t=a.getText(); } { _loop84: do { if ((LA(1)==COMMA)) { match(COMMA); b=id(); if ( inputState.guessing==0 ) { t+=","+b.getText(); } } else { break _loop84; } } while (true); } if ( inputState.guessing==0 ) { behavior.setUserExceptions(t); } } public final void ruleOptionsSpec() throws RecognitionException, TokenStreamException { Token idTok; Token value; match(OPTIONS); { _loop81: do { if ((LA(1)==TOKEN_REF||LA(1)==RULE_REF)) { idTok=id(); match(ASSIGN); value=optionValue(); if ( inputState.guessing==0 ) { behavior.setRuleOption(idTok, value); } match(SEMI); } else { break _loop81; } } while (true); } match(RCURLY); } public final void block() throws RecognitionException, TokenStreamException { if ( inputState.guessing==0 ) { blockNesting++; } alternative(); { _loop87: do { if ((LA(1)==OR)) { match(OR); alternative(); } else { break _loop87; } } while (true); } if ( inputState.guessing==0 ) { blockNesting--; } } public final void exceptionGroup() throws RecognitionException, TokenStreamException { if ( inputState.guessing==0 ) { behavior.beginExceptionGroup(); } { int _cnt95=0; _loop95: do { if ((LA(1)==LITERAL_exception)) { exceptionSpec(); } else { if ( _cnt95>=1 ) { break _loop95; } else {throw new NoViableAltException(LT(1), getFilename());} } _cnt95++; } while (true); } if ( inputState.guessing==0 ) { behavior.endExceptionGroup(); } } public final void alternative() throws RecognitionException, TokenStreamException { boolean altAutoGen = true; { switch ( LA(1)) { case BANG: { match(BANG); if ( inputState.guessing==0 ) { altAutoGen=false; } break; } 
case STRING_LITERAL: case ACTION: case SEMI: case CHAR_LITERAL: case OR: case TOKEN_REF: case LPAREN: case RPAREN: case LITERAL_exception: case RULE_REF: case NOT_OP: case SEMPRED: case TREE_BEGIN: case WILDCARD: { break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } if ( inputState.guessing==0 ) { behavior.beginAlt(altAutoGen); } { _loop91: do { if ((_tokenSet_2.member(LA(1)))) { element(); } else { break _loop91; } } while (true); } { switch ( LA(1)) { case LITERAL_exception: { exceptionSpecNoLabel(); break; } case SEMI: case OR: case RPAREN: { break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } if ( inputState.guessing==0 ) { behavior.endAlt(); } } public final void element() throws RecognitionException, TokenStreamException { elementNoOptionSpec(); { switch ( LA(1)) { case OPEN_ELEMENT_OPTION: { elementOptionSpec(); break; } case STRING_LITERAL: case ACTION: case SEMI: case CHAR_LITERAL: case OR: case TOKEN_REF: case LPAREN: case RPAREN: case LITERAL_exception: case RULE_REF: case NOT_OP: case SEMPRED: case TREE_BEGIN: case WILDCARD: { break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } } public final void exceptionSpecNoLabel() throws RecognitionException, TokenStreamException { match(LITERAL_exception); if ( inputState.guessing==0 ) { behavior.beginExceptionSpec(null); } { _loop102: do { if ((LA(1)==LITERAL_catch)) { exceptionHandler(); } else { break _loop102; } } while (true); } if ( inputState.guessing==0 ) { behavior.endExceptionSpec(); } } public final void exceptionSpec() throws RecognitionException, TokenStreamException { Token aa = null; Token labelAction = null; match(LITERAL_exception); { switch ( LA(1)) { case ARG_ACTION: { aa = LT(1); match(ARG_ACTION); if ( inputState.guessing==0 ) { labelAction = aa; } break; } case EOF: case ACTION: case DOC_COMMENT: case LITERAL_lexclass: case LITERAL_class: case TOKEN_REF: case LITERAL_protected: case LITERAL_public: case 
LITERAL_private: case LITERAL_exception: case LITERAL_catch: case RULE_REF: { break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } if ( inputState.guessing==0 ) { behavior.beginExceptionSpec(labelAction); } { _loop99: do { if ((LA(1)==LITERAL_catch)) { exceptionHandler(); } else { break _loop99; } } while (true); } if ( inputState.guessing==0 ) { behavior.endExceptionSpec(); } } public final void exceptionHandler() throws RecognitionException, TokenStreamException { Token a1 = null; Token a2 = null; Token exType; Token exName; match(LITERAL_catch); a1 = LT(1); match(ARG_ACTION); a2 = LT(1); match(ACTION); if ( inputState.guessing==0 ) { behavior.refExceptionHandler(a1, a2); } } public final void elementNoOptionSpec() throws RecognitionException, TokenStreamException { Token rr = null; Token aa = null; Token tr = null; Token aa2 = null; Token r2 = null; Token aa3 = null; Token a = null; Token p = null; Token label = null; Token assignId = null; Token args = null; int autoGen = GrammarElement.AUTO_GEN_NONE; switch ( LA(1)) { case ACTION: { a = LT(1); match(ACTION); if ( inputState.guessing==0 ) { behavior.refAction(a); } break; } case SEMPRED: { p = LT(1); match(SEMPRED); if ( inputState.guessing==0 ) { behavior.refSemPred(p); } break; } case TREE_BEGIN: { tree(); break; } default: if ((LA(1)==TOKEN_REF||LA(1)==RULE_REF) && (LA(2)==ASSIGN)) { assignId=id(); match(ASSIGN); { if ((LA(1)==TOKEN_REF||LA(1)==RULE_REF) && (LA(2)==COLON)) { label=id(); match(COLON); if ( inputState.guessing==0 ) { checkForMissingEndRule(label); } } else if ((LA(1)==TOKEN_REF||LA(1)==RULE_REF) && (_tokenSet_3.member(LA(2)))) { } else { throw new NoViableAltException(LT(1), getFilename()); } } { switch ( LA(1)) { case RULE_REF: { rr = LT(1); match(RULE_REF); { switch ( LA(1)) { case ARG_ACTION: { aa = LT(1); match(ARG_ACTION); if ( inputState.guessing==0 ) { args=aa; } break; } case STRING_LITERAL: case ACTION: case SEMI: case CHAR_LITERAL: case OR: case TOKEN_REF: 
case OPEN_ELEMENT_OPTION: case LPAREN: case RPAREN: case BANG: case LITERAL_exception: case RULE_REF: case NOT_OP: case SEMPRED: case TREE_BEGIN: case WILDCARD: { break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } { switch ( LA(1)) { case BANG: { match(BANG); if ( inputState.guessing==0 ) { autoGen = GrammarElement.AUTO_GEN_BANG; } break; } case STRING_LITERAL: case ACTION: case SEMI: case CHAR_LITERAL: case OR: case TOKEN_REF: case OPEN_ELEMENT_OPTION: case LPAREN: case RPAREN: case LITERAL_exception: case RULE_REF: case NOT_OP: case SEMPRED: case TREE_BEGIN: case WILDCARD: { break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } if ( inputState.guessing==0 ) { behavior.refRule(assignId, rr, label, args, autoGen); } break; } case TOKEN_REF: { tr = LT(1); match(TOKEN_REF); { switch ( LA(1)) { case ARG_ACTION: { aa2 = LT(1); match(ARG_ACTION); if ( inputState.guessing==0 ) { args=aa2; } break; } case STRING_LITERAL: case ACTION: case SEMI: case CHAR_LITERAL: case OR: case TOKEN_REF: case OPEN_ELEMENT_OPTION: case LPAREN: case RPAREN: case LITERAL_exception: case RULE_REF: case NOT_OP: case SEMPRED: case TREE_BEGIN: case WILDCARD: { break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } if ( inputState.guessing==0 ) { behavior.refToken(assignId, tr, label, args, false, autoGen, lastInRule()); } break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } } else if ((_tokenSet_4.member(LA(1))) && (_tokenSet_5.member(LA(2)))) { { if ((LA(1)==TOKEN_REF||LA(1)==RULE_REF) && (LA(2)==COLON)) { label=id(); match(COLON); if ( inputState.guessing==0 ) { checkForMissingEndRule(label); } } else if ((_tokenSet_4.member(LA(1))) && (_tokenSet_6.member(LA(2)))) { } else { throw new NoViableAltException(LT(1), getFilename()); } } { switch ( LA(1)) { case RULE_REF: { r2 = LT(1); match(RULE_REF); { switch ( LA(1)) { case ARG_ACTION: { aa3 = LT(1); match(ARG_ACTION); if ( 
inputState.guessing==0 ) { args=aa3; } break; } case STRING_LITERAL: case ACTION: case SEMI: case CHAR_LITERAL: case OR: case TOKEN_REF: case OPEN_ELEMENT_OPTION: case LPAREN: case RPAREN: case BANG: case LITERAL_exception: case RULE_REF: case NOT_OP: case SEMPRED: case TREE_BEGIN: case WILDCARD: { break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } { switch ( LA(1)) { case BANG: { match(BANG); if ( inputState.guessing==0 ) { autoGen = GrammarElement.AUTO_GEN_BANG; } break; } case STRING_LITERAL: case ACTION: case SEMI: case CHAR_LITERAL: case OR: case TOKEN_REF: case OPEN_ELEMENT_OPTION: case LPAREN: case RPAREN: case LITERAL_exception: case RULE_REF: case NOT_OP: case SEMPRED: case TREE_BEGIN: case WILDCARD: { break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } if ( inputState.guessing==0 ) { behavior.refRule(assignId, r2, label, args, autoGen); } break; } case NOT_OP: { match(NOT_OP); { switch ( LA(1)) { case CHAR_LITERAL: case TOKEN_REF: { notTerminal(label); break; } case LPAREN: { ebnf(label,true); break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } break; } case LPAREN: { ebnf(label,false); break; } default: if ((LA(1)==STRING_LITERAL||LA(1)==CHAR_LITERAL||LA(1)==TOKEN_REF) && (LA(2)==RANGE)) { range(label); } else if ((_tokenSet_7.member(LA(1))) && (_tokenSet_8.member(LA(2)))) { terminal(label); } else { throw new NoViableAltException(LT(1), getFilename()); } } } } else { throw new NoViableAltException(LT(1), getFilename()); } } } public final void elementOptionSpec() throws RecognitionException, TokenStreamException { Token o=null, v=null; match(OPEN_ELEMENT_OPTION); o=id(); match(ASSIGN); v=optionValue(); if ( inputState.guessing==0 ) { behavior.refElementOption(o,v); } { _loop108: do { if ((LA(1)==SEMI)) { match(SEMI); o=id(); match(ASSIGN); v=optionValue(); if ( inputState.guessing==0 ) { behavior.refElementOption(o,v); } } else { break _loop108; } } while (true); } 
match(CLOSE_ELEMENT_OPTION); } public final void range( Token label ) throws RecognitionException, TokenStreamException { Token crLeft = null; Token crRight = null; Token t = null; Token u = null; Token v = null; Token w = null; Token trLeft=null; Token trRight=null; int autoGen=GrammarElement.AUTO_GEN_NONE; switch ( LA(1)) { case CHAR_LITERAL: { crLeft = LT(1); match(CHAR_LITERAL); match(RANGE); crRight = LT(1); match(CHAR_LITERAL); { switch ( LA(1)) { case BANG: { match(BANG); if ( inputState.guessing==0 ) { autoGen = GrammarElement.AUTO_GEN_BANG; } break; } case STRING_LITERAL: case ACTION: case SEMI: case CHAR_LITERAL: case OR: case TOKEN_REF: case OPEN_ELEMENT_OPTION: case LPAREN: case RPAREN: case LITERAL_exception: case RULE_REF: case NOT_OP: case SEMPRED: case TREE_BEGIN: case WILDCARD: { break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } if ( inputState.guessing==0 ) { behavior.refCharRange(crLeft, crRight, label, autoGen, lastInRule()); } break; } case STRING_LITERAL: case TOKEN_REF: { { switch ( LA(1)) { case TOKEN_REF: { t = LT(1); match(TOKEN_REF); if ( inputState.guessing==0 ) { trLeft=t; } break; } case STRING_LITERAL: { u = LT(1); match(STRING_LITERAL); if ( inputState.guessing==0 ) { trLeft=u; } break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } match(RANGE); { switch ( LA(1)) { case TOKEN_REF: { v = LT(1); match(TOKEN_REF); if ( inputState.guessing==0 ) { trRight=v; } break; } case STRING_LITERAL: { w = LT(1); match(STRING_LITERAL); if ( inputState.guessing==0 ) { trRight=w; } break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } autoGen=ast_type_spec(); if ( inputState.guessing==0 ) { behavior.refTokenRange(trLeft, trRight, label, autoGen, lastInRule()); } break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } public final void terminal( Token label ) throws RecognitionException, TokenStreamException { Token cl = null; Token tr = null; 
Token aa = null; Token sl = null; Token wi = null; int autoGen=GrammarElement.AUTO_GEN_NONE; Token args=null; switch ( LA(1)) { case CHAR_LITERAL: { cl = LT(1); match(CHAR_LITERAL); { switch ( LA(1)) { case BANG: { match(BANG); if ( inputState.guessing==0 ) { autoGen = GrammarElement.AUTO_GEN_BANG; } break; } case STRING_LITERAL: case ACTION: case SEMI: case CHAR_LITERAL: case OR: case TOKEN_REF: case OPEN_ELEMENT_OPTION: case LPAREN: case RPAREN: case LITERAL_exception: case RULE_REF: case NOT_OP: case SEMPRED: case TREE_BEGIN: case WILDCARD: { break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } if ( inputState.guessing==0 ) { behavior.refCharLiteral(cl, label, false, autoGen, lastInRule()); } break; } case TOKEN_REF: { tr = LT(1); match(TOKEN_REF); autoGen=ast_type_spec(); { switch ( LA(1)) { case ARG_ACTION: { aa = LT(1); match(ARG_ACTION); if ( inputState.guessing==0 ) { args=aa; } break; } case STRING_LITERAL: case ACTION: case SEMI: case CHAR_LITERAL: case OR: case TOKEN_REF: case OPEN_ELEMENT_OPTION: case LPAREN: case RPAREN: case LITERAL_exception: case RULE_REF: case NOT_OP: case SEMPRED: case TREE_BEGIN: case WILDCARD: { break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } if ( inputState.guessing==0 ) { behavior.refToken(null, tr, label, args, false, autoGen, lastInRule()); } break; } case STRING_LITERAL: { sl = LT(1); match(STRING_LITERAL); autoGen=ast_type_spec(); if ( inputState.guessing==0 ) { behavior.refStringLiteral(sl, label, autoGen, lastInRule()); } break; } case WILDCARD: { wi = LT(1); match(WILDCARD); autoGen=ast_type_spec(); if ( inputState.guessing==0 ) { behavior.refWildcard(wi, label, autoGen); } break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } public final void notTerminal( Token label ) throws RecognitionException, TokenStreamException { Token cl = null; Token tr = null; int autoGen=GrammarElement.AUTO_GEN_NONE; switch ( LA(1)) { case CHAR_LITERAL: { 
cl = LT(1); match(CHAR_LITERAL); { switch ( LA(1)) { case BANG: { match(BANG); if ( inputState.guessing==0 ) { autoGen = GrammarElement.AUTO_GEN_BANG; } break; } case STRING_LITERAL: case ACTION: case SEMI: case CHAR_LITERAL: case OR: case TOKEN_REF: case OPEN_ELEMENT_OPTION: case LPAREN: case RPAREN: case LITERAL_exception: case RULE_REF: case NOT_OP: case SEMPRED: case TREE_BEGIN: case WILDCARD: { break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } if ( inputState.guessing==0 ) { behavior.refCharLiteral(cl, label, true, autoGen, lastInRule()); } break; } case TOKEN_REF: { tr = LT(1); match(TOKEN_REF); autoGen=ast_type_spec(); if ( inputState.guessing==0 ) { behavior.refToken(null, tr, label, null, true, autoGen, lastInRule()); } break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } public final void ebnf( Token label, boolean not ) throws RecognitionException, TokenStreamException { Token lp = null; Token aa = null; Token ab = null; lp = LT(1); match(LPAREN); if ( inputState.guessing==0 ) { behavior.beginSubRule(label, lp, not); } { if ((LA(1)==OPTIONS)) { subruleOptionsSpec(); { switch ( LA(1)) { case ACTION: { aa = LT(1); match(ACTION); if ( inputState.guessing==0 ) { behavior.refInitAction(aa); } break; } case COLON: { break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } match(COLON); } else if ((LA(1)==ACTION) && (LA(2)==COLON)) { ab = LT(1); match(ACTION); if ( inputState.guessing==0 ) { behavior.refInitAction(ab); } match(COLON); } else if ((_tokenSet_9.member(LA(1))) && (_tokenSet_10.member(LA(2)))) { } else { throw new NoViableAltException(LT(1), getFilename()); } } block(); match(RPAREN); { switch ( LA(1)) { case STRING_LITERAL: case ACTION: case SEMI: case CHAR_LITERAL: case OR: case TOKEN_REF: case OPEN_ELEMENT_OPTION: case LPAREN: case RPAREN: case BANG: case LITERAL_exception: case RULE_REF: case NOT_OP: case SEMPRED: case TREE_BEGIN: case QUESTION: case STAR: case PLUS: 
case WILDCARD: { { switch ( LA(1)) { case QUESTION: { match(QUESTION); if ( inputState.guessing==0 ) { behavior.optionalSubRule(); } break; } case STAR: { match(STAR); if ( inputState.guessing==0 ) { behavior.zeroOrMoreSubRule(); } break; } case PLUS: { match(PLUS); if ( inputState.guessing==0 ) { behavior.oneOrMoreSubRule(); } break; } case STRING_LITERAL: case ACTION: case SEMI: case CHAR_LITERAL: case OR: case TOKEN_REF: case OPEN_ELEMENT_OPTION: case LPAREN: case RPAREN: case BANG: case LITERAL_exception: case RULE_REF: case NOT_OP: case SEMPRED: case TREE_BEGIN: case WILDCARD: { break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } { switch ( LA(1)) { case BANG: { match(BANG); if ( inputState.guessing==0 ) { behavior.noASTSubRule(); } break; } case STRING_LITERAL: case ACTION: case SEMI: case CHAR_LITERAL: case OR: case TOKEN_REF: case OPEN_ELEMENT_OPTION: case LPAREN: case RPAREN: case LITERAL_exception: case RULE_REF: case NOT_OP: case SEMPRED: case TREE_BEGIN: case WILDCARD: { break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } break; } case IMPLIES: { match(IMPLIES); if ( inputState.guessing==0 ) { behavior.synPred(); } break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } if ( inputState.guessing==0 ) { behavior.endSubRule(); } } public final void tree() throws RecognitionException, TokenStreamException { Token lp = null; lp = LT(1); match(TREE_BEGIN); if ( inputState.guessing==0 ) { behavior.beginTree(lp); } rootNode(); if ( inputState.guessing==0 ) { behavior.beginChildList(); } { int _cnt122=0; _loop122: do { if ((_tokenSet_2.member(LA(1)))) { element(); } else { if ( _cnt122>=1 ) { break _loop122; } else {throw new NoViableAltException(LT(1), getFilename());} } _cnt122++; } while (true); } if ( inputState.guessing==0 ) { behavior.endChildList(); } match(RPAREN); if ( inputState.guessing==0 ) { behavior.endTree(); } } public final void rootNode() throws 
RecognitionException, TokenStreamException { Token label = null; { if ((LA(1)==TOKEN_REF||LA(1)==RULE_REF) && (LA(2)==COLON)) { label=id(); match(COLON); if ( inputState.guessing==0 ) { checkForMissingEndRule(label); } } else if ((_tokenSet_7.member(LA(1))) && (_tokenSet_11.member(LA(2)))) { } else { throw new NoViableAltException(LT(1), getFilename()); } } terminal(label); } public final int ast_type_spec() throws RecognitionException, TokenStreamException { int autoGen ; autoGen = GrammarElement.AUTO_GEN_NONE; { switch ( LA(1)) { case CARET: { match(CARET); if ( inputState.guessing==0 ) { autoGen = GrammarElement.AUTO_GEN_CARET; } break; } case BANG: { match(BANG); if ( inputState.guessing==0 ) { autoGen = GrammarElement.AUTO_GEN_BANG; } break; } case STRING_LITERAL: case ACTION: case SEMI: case CHAR_LITERAL: case OR: case TOKEN_REF: case OPEN_ELEMENT_OPTION: case LPAREN: case RPAREN: case ARG_ACTION: case LITERAL_exception: case RULE_REF: case NOT_OP: case SEMPRED: case TREE_BEGIN: case WILDCARD: { break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } return autoGen ; } public static final String[] _tokenNames = { "<0>", "EOF", "<2>", "NULL_TREE_LOOKAHEAD", "\"tokens\"", "\"header\"", "STRING_LITERAL", "ACTION", "DOC_COMMENT", "\"lexclass\"", "\"class\"", "\"extends\"", "\"Lexer\"", "\"TreeParser\"", "OPTIONS", "ASSIGN", "SEMI", "RCURLY", "\"charVocabulary\"", "CHAR_LITERAL", "INT", "OR", "RANGE", "TOKENS", "TOKEN_REF", "OPEN_ELEMENT_OPTION", "CLOSE_ELEMENT_OPTION", "LPAREN", "RPAREN", "\"Parser\"", "\"protected\"", "\"public\"", "\"private\"", "BANG", "ARG_ACTION", "\"returns\"", "COLON", "\"throws\"", "COMMA", "\"exception\"", "\"catch\"", "RULE_REF", "NOT_OP", "SEMPRED", "TREE_BEGIN", "QUESTION", "STAR", "PLUS", "IMPLIES", "CARET", "WILDCARD", "\"options\"", "WS", "COMMENT", "SL_COMMENT", "ML_COMMENT", "ESC", "DIGIT", "XDIGIT", "NESTED_ARG_ACTION", "NESTED_ACTION", "WS_LOOP", "INTERNAL_RULE_REF", "WS_OPT" }; private static final 
long[] mk_tokenSet_0() { long[] data = { 2206556225792L, 0L}; return data; } public static final BitSet _tokenSet_0 = new BitSet(mk_tokenSet_0()); private static final long[] mk_tokenSet_1() { long[] data = { 2472844214400L, 0L}; return data; } public static final BitSet _tokenSet_1 = new BitSet(mk_tokenSet_1()); private static final long[] mk_tokenSet_2() { long[] data = { 1158885407195328L, 0L}; return data; } public static final BitSet _tokenSet_2 = new BitSet(mk_tokenSet_2()); private static final long[] mk_tokenSet_3() { long[] data = { 1159461236965568L, 0L}; return data; } public static final BitSet _tokenSet_3 = new BitSet(mk_tokenSet_3()); private static final long[] mk_tokenSet_4() { long[] data = { 1132497128128576L, 0L}; return data; } public static final BitSet _tokenSet_4 = new BitSet(mk_tokenSet_4()); private static final long[] mk_tokenSet_5() { long[] data = { 1722479914074304L, 0L}; return data; } public static final BitSet _tokenSet_5 = new BitSet(mk_tokenSet_5()); private static final long[] mk_tokenSet_6() { long[] data = { 1722411194597568L, 0L}; return data; } public static final BitSet _tokenSet_6 = new BitSet(mk_tokenSet_6()); private static final long[] mk_tokenSet_7() { long[] data = { 1125899924144192L, 0L}; return data; } public static final BitSet _tokenSet_7 = new BitSet(mk_tokenSet_7()); private static final long[] mk_tokenSet_8() { long[] data = { 1722411190386880L, 0L}; return data; } public static final BitSet _tokenSet_8 = new BitSet(mk_tokenSet_8()); private static final long[] mk_tokenSet_9() { long[] data = { 1159444023476416L, 0L}; return data; } public static final BitSet _tokenSet_9 = new BitSet(mk_tokenSet_9()); private static final long[] mk_tokenSet_10() { long[] data = { 2251345007067328L, 0L}; return data; } public static final BitSet _tokenSet_10 = new BitSet(mk_tokenSet_10()); private static final long[] mk_tokenSet_11() { long[] data = { 1721861130420416L, 0L}; return data; } public static final BitSet _tokenSet_11 
= new BitSet(mk_tokenSet_11()); } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/ANTLRStringBuffer.java000066400000000000000000000041161161462365500251640ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/ANTLRStringBuffer.java#1 $ */ // Implementation of a StringBuffer-like object that does not have the // unfortunate side-effect of creating Strings with very large buffers. public class ANTLRStringBuffer { protected char[] buffer = null; protected int length = 0; // length and also where to store next char public ANTLRStringBuffer() { buffer = new char[50]; } public ANTLRStringBuffer(int n) { buffer = new char[n]; } public final void append(char c) { // This would normally be an "ensureCapacity" method, but inlined // here for speed. if (length >= buffer.length) { // Compute a new length that is at least double old length int newSize = buffer.length; while (length >= newSize) { newSize *= 2; } // Allocate new array and copy buffer char[] newBuffer = new char[newSize]; for (int i = 0; i < length; i++) { newBuffer[i] = buffer[i]; } buffer = newBuffer; } buffer[length] = c; length++; } public final void append(String s) { for (int i = 0; i < s.length(); i++) { append(s.charAt(i)); } } public final char charAt(int index) { return buffer[index]; } final public char[] getBuffer() { return buffer; } public final int length() { return length; } public final void setCharAt(int index, char ch) { buffer[index] = ch; } public final void setLength(int newLength) { if (newLength < length) { length = newLength; } else { while (newLength > length) { append('\0'); } } } public final String toString() { return new String(buffer, 0, length); } } 
nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/ANTLRTokdefLexer.java000066400000000000000000000372041161462365500250040ustar00rootroot00000000000000// $ANTLR : "tokdef.g" -> "ANTLRTokdefLexer.java"$ package antlr; import java.io.InputStream; import antlr.TokenStreamException; import antlr.TokenStreamIOException; import antlr.TokenStreamRecognitionException; import antlr.CharStreamException; import antlr.CharStreamIOException; import antlr.ANTLRException; import java.io.Reader; import java.util.Hashtable; import antlr.CharScanner; import antlr.InputBuffer; import antlr.ByteBuffer; import antlr.CharBuffer; import antlr.Token; import antlr.CommonToken; import antlr.RecognitionException; import antlr.NoViableAltForCharException; import antlr.MismatchedCharException; import antlr.TokenStream; import antlr.ANTLRHashString; import antlr.LexerSharedInputState; import antlr.collections.impl.BitSet; import antlr.SemanticException; public class ANTLRTokdefLexer extends antlr.CharScanner implements ANTLRTokdefParserTokenTypes, TokenStream { public ANTLRTokdefLexer(InputStream in) { this(new ByteBuffer(in)); } public ANTLRTokdefLexer(Reader in) { this(new CharBuffer(in)); } public ANTLRTokdefLexer(InputBuffer ib) { this(new LexerSharedInputState(ib)); } public ANTLRTokdefLexer(LexerSharedInputState state) { super(state); caseSensitiveLiterals = true; setCaseSensitive(true); literals = new Hashtable(); } public Token nextToken() throws TokenStreamException { Token theRetToken=null; tryAgain: for (;;) { Token _token = null; int _ttype = Token.INVALID_TYPE; resetText(); try { // for char stream error handling try { // for lexical error handling switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { mWS(true); theRetToken=_returnToken; break; } case '(': { mLPAREN(true); theRetToken=_returnToken; break; } case ')': { mRPAREN(true); theRetToken=_returnToken; break; } case '=': { mASSIGN(true); theRetToken=_returnToken; break; } case '"': { mSTRING(true); 
theRetToken=_returnToken; break; } case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { mID(true); theRetToken=_returnToken; break; } case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': { mINT(true); theRetToken=_returnToken; break; } default: if ((LA(1)=='/') && (LA(2)=='/')) { mSL_COMMENT(true); theRetToken=_returnToken; } else if ((LA(1)=='/') && (LA(2)=='*')) { mML_COMMENT(true); theRetToken=_returnToken; } else { if (LA(1)==EOF_CHAR) {uponEOF(); _returnToken = makeToken(Token.EOF_TYPE);} else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());} } } if ( _returnToken==null ) continue tryAgain; // found SKIP token _ttype = _returnToken.getType(); _returnToken.setType(_ttype); return _returnToken; } catch (RecognitionException e) { throw new TokenStreamRecognitionException(e); } } catch (CharStreamException cse) { if ( cse instanceof CharStreamIOException ) { throw new TokenStreamIOException(((CharStreamIOException)cse).io); } else { throw new TokenStreamException(cse.getMessage()); } } } } public final void mWS(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = WS; int _saveIndex; { switch ( LA(1)) { case ' ': { match(' '); break; } case '\t': { match('\t'); break; } case '\r': { match('\r'); { if ((LA(1)=='\n')) { match('\n'); } else { } } newline(); break; } case '\n': { match('\n'); newline(); break; } 
default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } _ttype = Token.SKIP; if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } public final void mSL_COMMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = SL_COMMENT; int _saveIndex; match("//"); { _loop234: do { if ((_tokenSet_0.member(LA(1)))) { { match(_tokenSet_0); } } else { break _loop234; } } while (true); } { switch ( LA(1)) { case '\n': { match('\n'); break; } case '\r': { match('\r'); { if ((LA(1)=='\n')) { match('\n'); } else { } } break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } _ttype = Token.SKIP; newline(); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } public final void mML_COMMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = ML_COMMENT; int _saveIndex; match("/*"); { _loop239: do { if ((LA(1)=='*') && (_tokenSet_1.member(LA(2)))) { match('*'); matchNot('/'); } else if ((LA(1)=='\n')) { match('\n'); newline(); } else if ((_tokenSet_2.member(LA(1)))) { matchNot('*'); } else { break _loop239; } } while (true); } match("*/"); _ttype = Token.SKIP; if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } public final void mLPAREN(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token 
_token=null; int _begin=text.length(); _ttype = LPAREN; int _saveIndex; match('('); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } public final void mRPAREN(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = RPAREN; int _saveIndex; match(')'); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } public final void mASSIGN(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = ASSIGN; int _saveIndex; match('='); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } public final void mSTRING(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = STRING; int _saveIndex; match('"'); { _loop245: do { if ((LA(1)=='\\')) { mESC(false); } else if ((_tokenSet_3.member(LA(1)))) { matchNot('"'); } else { break _loop245; } } while (true); } match('"'); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mESC(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = ESC; int _saveIndex; match('\\'); { switch ( LA(1)) { case 'n': { match('n'); break; } case 'r': { match('r'); break; } case 't': 
{ match('t'); break; } case 'b': { match('b'); break; } case 'f': { match('f'); break; } case '"': { match('"'); break; } case '\'': { match('\''); break; } case '\\': { match('\\'); break; } case '0': case '1': case '2': case '3': { { matchRange('0','3'); } { if (((LA(1) >= '0' && LA(1) <= '9')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) { mDIGIT(false); { if (((LA(1) >= '0' && LA(1) <= '9')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) { mDIGIT(false); } else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && (true)) { } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && (true)) { } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } break; } case '4': case '5': case '6': case '7': { { matchRange('4','7'); } { if (((LA(1) >= '0' && LA(1) <= '9')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) { mDIGIT(false); } else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && (true)) { } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } break; } case 'u': { match('u'); mXDIGIT(false); mXDIGIT(false); mXDIGIT(false); mXDIGIT(false); break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mDIGIT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = DIGIT; int _saveIndex; matchRange('0','9'); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected 
final void mXDIGIT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = XDIGIT; int _saveIndex; switch ( LA(1)) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': { matchRange('0','9'); break; } case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': { matchRange('a','f'); break; } case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': { matchRange('A','F'); break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } public final void mID(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = ID; int _saveIndex; { switch ( LA(1)) { case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { matchRange('a','z'); break; } case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': { matchRange('A','Z'); break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } { _loop258: do { switch ( LA(1)) { case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 
'v': case 'w': case 'x': case 'y': case 'z': { matchRange('a','z'); break; } case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': { matchRange('A','Z'); break; } case '_': { match('_'); break; } case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': { matchRange('0','9'); break; } default: { break _loop258; } } } while (true); } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } public final void mINT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = INT; int _saveIndex; { int _cnt261=0; _loop261: do { if (((LA(1) >= '0' && LA(1) <= '9'))) { mDIGIT(false); } else { if ( _cnt261>=1 ) { break _loop261; } else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());} } _cnt261++; } while (true); } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } private static final long[] mk_tokenSet_0() { long[] data = new long[8]; data[0]=-9224L; for (int i = 1; i<=3; i++) { data[i]=-1L; } return data; } public static final BitSet _tokenSet_0 = new BitSet(mk_tokenSet_0()); private static final long[] mk_tokenSet_1() { long[] data = new long[8]; data[0]=-140737488355336L; for (int i = 1; i<=3; i++) { data[i]=-1L; } return data; } public static final BitSet _tokenSet_1 = new BitSet(mk_tokenSet_1()); private static final long[] mk_tokenSet_2() { long[] data = new long[8]; data[0]=-4398046512136L; for (int i = 1; i<=3; 
i++) { data[i]=-1L; } return data; } public static final BitSet _tokenSet_2 = new BitSet(mk_tokenSet_2()); private static final long[] mk_tokenSet_3() { long[] data = new long[8]; data[0]=-17179869192L; data[1]=-268435457L; for (int i = 2; i<=3; i++) { data[i]=-1L; } return data; } public static final BitSet _tokenSet_3 = new BitSet(mk_tokenSet_3()); } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/ANTLRTokdefParser.java000066400000000000000000000133651161462365500251630ustar00rootroot00000000000000// $ANTLR : "tokdef.g" -> "ANTLRTokdefParser.java"$ package antlr; import antlr.TokenBuffer; import antlr.TokenStreamException; import antlr.TokenStreamIOException; import antlr.ANTLRException; import antlr.LLkParser; import antlr.Token; import antlr.TokenStream; import antlr.RecognitionException; import antlr.NoViableAltException; import antlr.MismatchedTokenException; import antlr.SemanticException; import antlr.ParserSharedInputState; import antlr.collections.impl.BitSet; /** Simple lexer/parser for reading token definition files in support of the import/export vocab option for grammars. */ public class ANTLRTokdefParser extends antlr.LLkParser implements ANTLRTokdefParserTokenTypes { // This chunk of error reporting code provided by Brian Smith private antlr.Tool antlrTool; /** In order to make it so existing subclasses don't break, we won't require * that the antlr.Tool instance be passed as a constructor element. 
Instead, * the antlr.Tool instance should register itself via {@link #initTool(antlr.Tool)} * @throws IllegalStateException if a tool has already been registered * @since 2.7.2 */ public void setTool(antlr.Tool tool) { if (antlrTool == null) { antlrTool = tool; } else { throw new IllegalStateException("antlr.Tool already registered"); } } /** @since 2.7.2 */ protected antlr.Tool getTool() { return antlrTool; } /** Delegates the error message to the tool if any was registered via * {@link #initTool(antlr.Tool)} * @since 2.7.2 */ public void reportError(String s) { if (getTool() != null) { getTool().error(s, getFilename(), -1, -1); } else { super.reportError(s); } } /** Delegates the error message to the tool if any was registered via * {@link #initTool(antlr.Tool)} * @since 2.7.2 */ public void reportError(RecognitionException e) { if (getTool() != null) { getTool().error(e.getErrorMessage(), e.getFilename(), e.getLine(), e.getColumn()); } else { super.reportError(e); } } /** Delegates the warning message to the tool if any was registered via * {@link #initTool(antlr.Tool)} * @since 2.7.2 */ public void reportWarning(String s) { if (getTool() != null) { getTool().warning(s, getFilename(), -1, -1); } else { super.reportWarning(s); } } protected ANTLRTokdefParser(TokenBuffer tokenBuf, int k) { super(tokenBuf,k); tokenNames = _tokenNames; } public ANTLRTokdefParser(TokenBuffer tokenBuf) { this(tokenBuf,3); } protected ANTLRTokdefParser(TokenStream lexer, int k) { super(lexer,k); tokenNames = _tokenNames; } public ANTLRTokdefParser(TokenStream lexer) { this(lexer,3); } public ANTLRTokdefParser(ParserSharedInputState state) { super(state,3); tokenNames = _tokenNames; } public final void file( ImportVocabTokenManager tm ) throws RecognitionException, TokenStreamException { Token name = null; try { // for error handling name = LT(1); match(ID); { _loop225: do { if ((LA(1)==ID||LA(1)==STRING)) { line(tm); } else { break _loop225; } } while (true); } } catch 
(RecognitionException ex) { reportError(ex); consume(); consumeUntil(_tokenSet_0); } } public final void line( ImportVocabTokenManager tm ) throws RecognitionException, TokenStreamException { Token s1 = null; Token lab = null; Token s2 = null; Token id = null; Token para = null; Token id2 = null; Token i = null; Token t=null; Token s=null; try { // for error handling { if ((LA(1)==STRING)) { s1 = LT(1); match(STRING); s = s1; } else if ((LA(1)==ID) && (LA(2)==ASSIGN) && (LA(3)==STRING)) { lab = LT(1); match(ID); t = lab; match(ASSIGN); s2 = LT(1); match(STRING); s = s2; } else if ((LA(1)==ID) && (LA(2)==LPAREN)) { id = LT(1); match(ID); t=id; match(LPAREN); para = LT(1); match(STRING); match(RPAREN); } else if ((LA(1)==ID) && (LA(2)==ASSIGN) && (LA(3)==INT)) { id2 = LT(1); match(ID); t=id2; } else { throw new NoViableAltException(LT(1), getFilename()); } } match(ASSIGN); i = LT(1); match(INT); Integer value = Integer.valueOf(i.getText()); // if literal found, define as a string literal if ( s!=null ) { tm.define(s.getText(), value.intValue()); // if label, then label the string and map label to token symbol also if ( t!=null ) { StringLiteralSymbol sl = (StringLiteralSymbol) tm.getTokenSymbol(s.getText()); sl.setLabel(t.getText()); tm.mapToTokenSymbol(t.getText(), sl); } } // define token (not a literal) else if ( t!=null ) { tm.define(t.getText(), value.intValue()); if ( para!=null ) { TokenSymbol ts = tm.getTokenSymbol(t.getText()); ts.setParaphrase( para.getText() ); } } } catch (RecognitionException ex) { reportError(ex); consume(); consumeUntil(_tokenSet_1); } } public static final String[] _tokenNames = { "<0>", "EOF", "<2>", "NULL_TREE_LOOKAHEAD", "ID", "STRING", "ASSIGN", "LPAREN", "RPAREN", "INT", "WS", "SL_COMMENT", "ML_COMMENT", "ESC", "DIGIT", "XDIGIT" }; private static final long[] mk_tokenSet_0() { long[] data = { 2L, 0L}; return data; } public static final BitSet _tokenSet_0 = new BitSet(mk_tokenSet_0()); private static final long[] mk_tokenSet_1() { 
long[] data = { 50L, 0L}; return data; } public static final BitSet _tokenSet_1 = new BitSet(mk_tokenSet_1()); } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/ANTLRTokdefParserTokenTypes.java000066400000000000000000000005551161462365500272060ustar00rootroot00000000000000// $ANTLR : "tokdef.g" -> "ANTLRTokdefParser.java"$ package antlr; public interface ANTLRTokdefParserTokenTypes { int EOF = 1; int NULL_TREE_LOOKAHEAD = 3; int ID = 4; int STRING = 5; int ASSIGN = 6; int LPAREN = 7; int RPAREN = 8; int INT = 9; int WS = 10; int SL_COMMENT = 11; int ML_COMMENT = 12; int ESC = 13; int DIGIT = 14; int XDIGIT = 15; } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/ANTLRTokenTypes.java000066400000000000000000000026631161462365500246760ustar00rootroot00000000000000// $ANTLR 2.7.3rc3: "antlr.g" -> "ANTLRLexer.java"$ package antlr; public interface ANTLRTokenTypes { int EOF = 1; int NULL_TREE_LOOKAHEAD = 3; int LITERAL_tokens = 4; int LITERAL_header = 5; int STRING_LITERAL = 6; int ACTION = 7; int DOC_COMMENT = 8; int LITERAL_lexclass = 9; int LITERAL_class = 10; int LITERAL_extends = 11; int LITERAL_Lexer = 12; int LITERAL_TreeParser = 13; int OPTIONS = 14; int ASSIGN = 15; int SEMI = 16; int RCURLY = 17; int LITERAL_charVocabulary = 18; int CHAR_LITERAL = 19; int INT = 20; int OR = 21; int RANGE = 22; int TOKENS = 23; int TOKEN_REF = 24; int OPEN_ELEMENT_OPTION = 25; int CLOSE_ELEMENT_OPTION = 26; int LPAREN = 27; int RPAREN = 28; int LITERAL_Parser = 29; int LITERAL_protected = 30; int LITERAL_public = 31; int LITERAL_private = 32; int BANG = 33; int ARG_ACTION = 34; int LITERAL_returns = 35; int COLON = 36; int LITERAL_throws = 37; int COMMA = 38; int LITERAL_exception = 39; int LITERAL_catch = 40; int RULE_REF = 41; int NOT_OP = 42; int SEMPRED = 43; int TREE_BEGIN = 44; int QUESTION = 45; int STAR = 46; int PLUS = 47; int IMPLIES = 48; int CARET = 49; int WILDCARD = 50; int LITERAL_options = 51; int WS = 52; int COMMENT = 53; int SL_COMMENT = 54; int 
ML_COMMENT = 55; int ESC = 56; int DIGIT = 57; int XDIGIT = 58; int NESTED_ARG_ACTION = 59; int NESTED_ACTION = 60; int WS_LOOP = 61; int INTERNAL_RULE_REF = 62; int WS_OPT = 63; } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/ASTFactory.java000066400000000000000000000276121161462365500237500ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/ASTFactory.java#1 $ */ import antlr.collections.AST; import antlr.collections.impl.ASTArray; import java.util.Hashtable; import java.lang.reflect.Constructor; /** AST Support code shared by TreeParser and Parser. * We use delegation to share code (and have only one * bit of code to maintain) rather than subclassing * or superclassing (forces AST support code to be * loaded even when you don't want to do AST stuff). * * Typically, setASTNodeType is used to specify the * homogeneous type of node to create, but you can override * create to make heterogeneous nodes etc... */ public class ASTFactory { /** Name of AST class to create during tree construction. * Null implies that the create method should create * a default AST type such as CommonAST. This is for * homogeneous nodes. */ protected String theASTNodeType = null; protected Class theASTNodeTypeClass = null; /** How to specify the classname to create for a particular * token type. Note that ANTLR allows you to say, for example, * tokens { PLUS; ... } * * and it tracks everything statically. #[PLUS] will make you * a PLUSNode w/o use of this table. * * For tokens that ANTLR cannot track statically like #[i], * you can use this table to map PLUS (Integer) -> PLUSNode (Class) * etc... ANTLR sets the class map from the tokens {...} section * via the ASTFactory(Hashtable) ctor in antlr.Parser. 
*/ protected Hashtable tokenTypeToASTClassMap = null; public ASTFactory() { } /** Create factory with a specific mapping from token type * to Java AST node type. Your subclasses of ASTFactory * can override and reuse the map stuff. */ public ASTFactory(Hashtable tokenTypeToClassMap) { setTokenTypeToASTClassMap(tokenTypeToClassMap); } /** Specify an "override" for the Java AST object created for a * specific token. It is provided as a convenience so * you can specify node types dynamically. ANTLR sets * the token type mapping automatically from the tokens{...} * section, but you can change that mapping with this method. * ANTLR does it's best to statically determine the node * type for generating parsers, but it cannot deal with * dynamic values like #[LT(1)]. In this case, it relies * on the mapping. Beware differences in the tokens{...} * section and what you set via this method. Make sure * they are the same. * * Set className to null to remove the mapping. * * @since 2.7.2 */ public void setTokenTypeASTNodeType(int tokenType, String className) throws IllegalArgumentException { if ( tokenTypeToASTClassMap==null ) { tokenTypeToASTClassMap = new Hashtable(); } if ( className==null ) { tokenTypeToASTClassMap.remove(new Integer(tokenType)); return; } Class c = null; try { c = Class.forName(className); tokenTypeToASTClassMap.put(new Integer(tokenType), c); } catch (Exception e) { throw new IllegalArgumentException("Invalid class, "+className); } } /** For a given token type, what is the AST node object type to create * for it? 
* @since 2.7.2 */ public Class getASTNodeType(int tokenType) { // try node specific class if ( tokenTypeToASTClassMap!=null ) { Class c = (Class)tokenTypeToASTClassMap.get(new Integer(tokenType)); if ( c!=null ) { return c; } } // try a global specified class if (theASTNodeTypeClass != null) { return theASTNodeTypeClass; } // default to the common type return CommonAST.class; } /** Add a child to the current AST */ public void addASTChild(ASTPair currentAST, AST child) { if (child != null) { if (currentAST.root == null) { // Make new child the current root currentAST.root = child; } else { if (currentAST.child == null) { // Add new child to current root currentAST.root.setFirstChild(child); } else { currentAST.child.setNextSibling(child); } } // Make new child the current child currentAST.child = child; currentAST.advanceChildToEnd(); } } /** Create a new empty AST node; if the user did not specify * an AST node type, then create a default one: CommonAST. */ public AST create() { return create(Token.INVALID_TYPE); } public AST create(int type) { Class c = getASTNodeType(type); AST t = create(c); if ( t!=null ) { t.initialize(type, ""); } return t; } public AST create(int type, String txt) { AST t = create(type); if ( t!=null ) { t.initialize(type, txt); } return t; } /** Create an AST node with the token type and text passed in, but * with a specific Java object type. Typically called when you * say @[PLUS,"+",PLUSNode] in an antlr action. * @since 2.7.2 */ public AST create(int type, String txt, String className) { AST t = create(className); if ( t!=null ) { t.initialize(type, txt); } return t; } /** Create a new empty AST node; if the user did not specify * an AST node type, then create a default one: CommonAST. 
*/ public AST create(AST tr) { if (tr == null) return null; // create(null) == null AST t = create(tr.getType()); if ( t!=null ) { t.initialize(tr); } return t; } public AST create(Token tok) { AST t = create(tok.getType()); if ( t!=null ) { t.initialize(tok); } return t; } /** ANTLR generates reference to this when you reference a token * that has a specified heterogeneous AST node type. This is * also a special case node creation routine for backward * compatibility. Before, ANTLR generated "new T(tokenObject)" * and so I must call the appropriate constructor not T(). * * @since 2.7.2 */ public AST create(Token tok, String className) { AST t = createUsingCtor(tok,className); return t; } /** * @since 2.7.2 */ public AST create(String className) { Class c = null; try { c = Class.forName(className); } catch (Exception e) { throw new IllegalArgumentException("Invalid class, "+className); } return create(c); } /** * @since 2.7.2 */ protected AST createUsingCtor(Token token, String className) { Class c = null; AST t = null; try { c = Class.forName(className); Class[] tokenArgType = new Class[] { antlr.Token.class }; try { Constructor ctor = c.getConstructor(tokenArgType); t = (AST)ctor.newInstance(new Object[]{token}); // make a new one } catch (NoSuchMethodException e){ // just do the regular thing if you can't find the ctor // Your AST must have default ctor to use this. t = create(c); if ( t!=null ) { t.initialize(token); } } } catch (Exception e) { throw new IllegalArgumentException("Invalid class or can't make instance, "+className); } return t; } /** * @since 2.7.2 */ protected AST create(Class c) { AST t = null; try { t = (AST)c.newInstance(); // make a new one } catch (Exception e) { error("Can't create AST Node " + c.getName()); return null; } return t; } /** Copy a single node with same Java AST objec type. * Ignore the tokenType->Class mapping since you know * the type of the node, t.getClass(), and doing a dup. 
* * clone() is not used because we want all AST creation * to go thru the factory so creation can be * tracked. Returns null if t is null. */ public AST dup(AST t) { if ( t==null ) { return null; } AST dup_t = create(t.getClass()); dup_t.initialize(t); return dup_t; } /** Duplicate tree including siblings of root. */ public AST dupList(AST t) { AST result = dupTree(t); // if t == null, then result==null AST nt = result; while (t != null) { // for each sibling of the root t = t.getNextSibling(); nt.setNextSibling(dupTree(t)); // dup each subtree, building new tree nt = nt.getNextSibling(); } return result; } /**Duplicate a tree, assuming this is a root node of a tree-- * duplicate that node and what's below; ignore siblings of root node. */ public AST dupTree(AST t) { AST result = dup(t); // make copy of root // copy all children of root. if (t != null) { result.setFirstChild(dupList(t.getFirstChild())); } return result; } /** Make a tree from a list of nodes. The first element in the * array is the root. If the root is null, then the tree is * a simple list not a tree. Handles null children nodes correctly. * For example, build(a, b, null, c) yields tree (a b c). build(null,a,b) * yields tree (nil a b). 
*/ public AST make(AST[] nodes) { if (nodes == null || nodes.length == 0) return null; AST root = nodes[0]; AST tail = null; if (root != null) { root.setFirstChild(null); // don't leave any old pointers set } // link in children; for (int i = 1; i < nodes.length; i++) { if (nodes[i] == null) continue; // ignore null nodes if (root == null) { // Set the root and set it up for a flat list root = tail = nodes[i]; } else if (tail == null) { root.setFirstChild(nodes[i]); tail = root.getFirstChild(); } else { tail.setNextSibling(nodes[i]); tail = tail.getNextSibling(); } // Chase tail to last sibling while (tail.getNextSibling() != null) { tail = tail.getNextSibling(); } } return root; } /** Make a tree from a list of nodes, where the nodes are contained * in an ASTArray object */ public AST make(ASTArray nodes) { return make(nodes.array); } /** Make an AST the root of current AST */ public void makeASTRoot(ASTPair currentAST, AST root) { if (root != null) { // Add the current root as a child of new root root.addChild(currentAST.root); // The new current child is the last sibling of the old root currentAST.child = currentAST.root; currentAST.advanceChildToEnd(); // Set the new root currentAST.root = root; } } public void setASTNodeClass(Class c) { if ( c!=null ) { theASTNodeTypeClass = c; theASTNodeType = c.getName(); } } public void setASTNodeClass(String t) { theASTNodeType = t; try { theASTNodeTypeClass = Class.forName(t); // get class def } catch (Exception e) { // either class not found, // class is interface/abstract, or // class or initializer is not accessible. error("Can't find/access AST Node type" + t); } } /** Specify the type of node to create during tree building. 
* @deprecated since 2.7.1 */ public void setASTNodeType(String t) { setASTNodeClass(t); } public Hashtable getTokenTypeToASTClassMap() { return tokenTypeToASTClassMap; } public void setTokenTypeToASTClassMap(Hashtable tokenTypeToClassMap) { this.tokenTypeToASTClassMap = tokenTypeToClassMap; } /** To change where error messages go, can subclass/override this method * and then setASTFactory in Parser and TreeParser. This method removes * a prior dependency on class antlr.Tool. */ public void error(String e) { System.err.println(e); } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/ASTIterator.java000066400000000000000000000045001161462365500241210ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/ASTIterator.java#1 $ */ import antlr.collections.AST; public class ASTIterator { protected AST cursor = null; protected AST original = null; public ASTIterator(AST t) { original = cursor = t; } /** Is 'sub' a subtree of 't' beginning at the root? */ public boolean isSubtree(AST t, AST sub) { AST sibling; // the empty tree is always a subset of any tree. if (sub == null) { return true; } // if the tree is empty, return true if the subtree template is too. if (t == null) { if (sub != null) return false; return true; } // Otherwise, start walking sibling lists. First mismatch, return false. for (sibling = t; sibling != null && sub != null; sibling = sibling.getNextSibling(), sub = sub.getNextSibling()) { // as a quick optimization, check roots first. if (sibling.getType() != sub.getType()) return false; // if roots match, do full match test on children. if (sibling.getFirstChild() != null) { if (!isSubtree(sibling.getFirstChild(), sub.getFirstChild())) return false; } } return true; } /** Find the next subtree with structure and token types equal to * those of 'template'. 
*/ public AST next(AST template) { AST t = null; AST sibling = null; if (cursor == null) { // do nothing if no tree to work on return null; } // Start walking sibling list looking for subtree matches. for (; cursor != null; cursor = cursor.getNextSibling()) { // as a quick optimization, check roots first. if (cursor.getType() == template.getType()) { // if roots match, do full match test on children. if (cursor.getFirstChild() != null) { if (isSubtree(cursor.getFirstChild(), template.getFirstChild())) { return cursor; } } } } return t; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/ASTNULLType.java000066400000000000000000000036021161462365500237460ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/ASTNULLType.java#1 $ */ import antlr.collections.AST; import antlr.collections.ASTEnumeration; import antlr.Token; /** There is only one instance of this class **/ public class ASTNULLType implements AST { public void addChild(AST c) { } public boolean equals(AST t) { return false; } public boolean equalsList(AST t) { return false; } public boolean equalsListPartial(AST t) { return false; } public boolean equalsTree(AST t) { return false; } public boolean equalsTreePartial(AST t) { return false; } public ASTEnumeration findAll(AST tree) { return null; } public ASTEnumeration findAllPartial(AST subtree) { return null; } public AST getFirstChild() { return this; } public AST getNextSibling() { return this; } public String getText() { return ""; } public int getType() { return Token.NULL_TREE_LOOKAHEAD; } public int getLine() { return 0; } public int getColumn() { return 0; } public int getNumberOfChildren() { return 0; } public void initialize(int t, String txt) { } public void initialize(AST t) { } public void initialize(Token t) { } public void setFirstChild(AST c) { } 
public void setNextSibling(AST n) { } public void setText(String text) { } public void setType(int ttype) { } public String toString() { return getText(); } public String toStringList() { return getText(); } public String toStringTree() { return getText(); } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/ASTPair.java000066400000000000000000000024251161462365500232270ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/ASTPair.java#1 $ */ import antlr.collections.AST; /** ASTPair: utility class used for manipulating a pair of ASTs * representing the current AST root and current AST sibling. * This exists to compensate for the lack of pointers or 'var' * arguments in Java. */ public class ASTPair { public AST root; // current root of tree public AST child; // current child to which siblings are added /** Make sure that child is the last sibling */ public final void advanceChildToEnd() { if (child != null) { while (child.getNextSibling() != null) { child = child.getNextSibling(); } } } /** Copy an ASTPair. Don't call it clone() because we want type-safety */ public ASTPair copy() { ASTPair tmp = new ASTPair(); tmp.root = root; tmp.child = child; return tmp; } public String toString() { String r = root == null ? "null" : root.getText(); String c = child == null ? 
"null" : child.getText(); return "[" + r + "," + c + "]"; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/ASTVisitor.java000066400000000000000000000005201161462365500237650ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/ASTVisitor.java#1 $ */ import antlr.collections.AST; public interface ASTVisitor { public void visit(AST node); } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/ActionElement.java000066400000000000000000000014331161462365500245110ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/ActionElement.java#1 $ */ class ActionElement extends AlternativeElement { protected String actionText; protected boolean isSemPred = false; public ActionElement(Grammar g, Token t) { super(g); actionText = t.getText(); line = t.getLine(); column = t.getColumn(); } public void generate() { grammar.generator.gen(this); } public Lookahead look(int k) { return grammar.theLLkAnalyzer.look(k, this); } public String toString() { return " " + actionText + (isSemPred?"?":""); } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/ActionTransInfo.java000066400000000000000000000014721161462365500250260ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/ActionTransInfo.java#1 $ */ /** * This class contains information about how an action * was translated (using the AST conversion rules). 
*/ public class ActionTransInfo { public boolean assignToRoot = false; // somebody did a "#rule = " public String refRuleRoot = null; // somebody referenced #rule; string is translated var public String followSetName = null; // somebody referenced $FOLLOW; string is the name of the lookahead set public String toString() { return "assignToRoot:" + assignToRoot + ", refRuleRoot:" + refRuleRoot + ", FOLLOW Set:" + followSetName; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/Alternative.java000066400000000000000000000041111161462365500242340ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/Alternative.java#1 $ */ import java.util.Hashtable; /** Intermediate data class holds information about an alternative */ class Alternative { // Tracking alternative linked list AlternativeElement head; // head of alt element list AlternativeElement tail; // last element added // Syntactic predicate block if non-null protected SynPredBlock synPred; // Semantic predicate action if non-null protected String semPred; // Exception specification if non-null protected ExceptionSpec exceptionSpec; // Init action if non-null; protected Lookahead[] cache; // lookahead for alt. Filled in by // deterministic() only!!!!!!! Used for // code gen after calls to deterministic() // and used by deterministic for (...)*, (..)+, // and (..)? blocks. 1..k protected int lookaheadDepth; // each alt has different look depth possibly. // depth can be NONDETERMINISTIC too. 
// 0..n-1 // If non-null, Tree specification ala -> A B C (not implemented) protected Token treeSpecifier = null; // True of AST generation is on for this alt private boolean doAutoGen; public Alternative() { } public Alternative(AlternativeElement firstElement) { addElement(firstElement); } public void addElement(AlternativeElement e) { // Link the element into the list if (head == null) { head = tail = e; } else { tail.next = e; tail = e; } } public boolean atStart() { return head == null; } public boolean getAutoGen() { // Don't build an AST if there is a tree-rewrite-specifier return doAutoGen && treeSpecifier == null; } public Token getTreeSpecifier() { return treeSpecifier; } public void setAutoGen(boolean doAutoGen_) { doAutoGen = doAutoGen_; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/AlternativeBlock.java000066400000000000000000000167011161462365500252170ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/AlternativeBlock.java#1 $ */ import antlr.collections.impl.Vector; /**A list of alternatives */ class AlternativeBlock extends AlternativeElement { protected String initAction = null; // string for init action {...} protected Vector alternatives; // Contains Alternatives protected String label; // can label a looping block to break out of it. protected int alti, altj; // which alts are being compared at the moment with // deterministic()? protected int analysisAlt; // which alt are we computing look on? Must be alti or altj protected boolean hasAnAction = false; // does any alt have an action? protected boolean hasASynPred = false; // does any alt have a syntactic predicate? protected int ID = 0; // used to generate unique variables protected static int nblks; // how many blocks have we allocated? boolean not = false; // true if block is inverted. 
boolean greedy = true; // Blocks are greedy by default boolean greedySet = false; // but, if not explicitly greedy, warning might be generated protected boolean doAutoGen = true; // false if no AST (or text) to be generated for block protected boolean warnWhenFollowAmbig = true; // warn when an empty path or exit path protected boolean generateAmbigWarnings = true; // the general warning "shut-up" mechanism // conflicts with alt of subrule. // Turning this off will suppress stuff // like the if-then-else ambig. public AlternativeBlock(Grammar g) { super(g); alternatives = new Vector(5); this.not = false; nblks++; ID = nblks; } public AlternativeBlock(Grammar g, Token start, boolean not) { super(g, start); alternatives = new Vector(5); // this.line = start.getLine(); // this.column = start.getColumn(); this.not = not; nblks++; ID = nblks; } public void addAlternative(Alternative alt) { alternatives.appendElement(alt); } public void generate() { grammar.generator.gen(this); } public Alternative getAlternativeAt(int i) { return (Alternative)alternatives.elementAt(i); } public Vector getAlternatives() { return alternatives; } public boolean getAutoGen() { return doAutoGen; } public String getInitAction() { return initAction; } public String getLabel() { return label; } public Lookahead look(int k) { return grammar.theLLkAnalyzer.look(k, this); } public void prepareForAnalysis() { for (int i = 0; i < alternatives.size(); i++) { // deterministic() uses an alternative cache and sets lookahead depth Alternative a = (Alternative)alternatives.elementAt(i); a.cache = new Lookahead[grammar.maxk + 1]; a.lookaheadDepth = GrammarAnalyzer.LOOKAHEAD_DEPTH_INIT; } } /**Walk the syntactic predicate and, for a rule ref R, remove * the ref from the list of FOLLOW references for R (stored * in the symbol table. 
*/ public void removeTrackingOfRuleRefs(Grammar g) { for (int i = 0; i < alternatives.size(); i++) { Alternative alt = getAlternativeAt(i); AlternativeElement elem = alt.head; while (elem != null) { if (elem instanceof RuleRefElement) { RuleRefElement rr = (RuleRefElement)elem; RuleSymbol rs = (RuleSymbol)g.getSymbol(rr.targetRule); if (rs == null) { grammar.antlrTool.error("rule " + rr.targetRule + " referenced in (...)=>, but not defined"); } else { rs.references.removeElement(rr); } } else if (elem instanceof AlternativeBlock) {// recurse into subrules ((AlternativeBlock)elem).removeTrackingOfRuleRefs(g); } elem = elem.next; } } } public void setAlternatives(Vector v) { alternatives = v; } public void setAutoGen(boolean doAutoGen_) { doAutoGen = doAutoGen_; } public void setInitAction(String initAction_) { initAction = initAction_; } public void setLabel(String label_) { label = label_; } public void setOption(Token key, Token value) { if (key.getText().equals("warnWhenFollowAmbig")) { if (value.getText().equals("true")) { warnWhenFollowAmbig = true; } else if (value.getText().equals("false")) { warnWhenFollowAmbig = false; } else { grammar.antlrTool.error("Value for warnWhenFollowAmbig must be true or false", grammar.getFilename(), key.getLine(), key.getColumn()); } } else if (key.getText().equals("generateAmbigWarnings")) { if (value.getText().equals("true")) { generateAmbigWarnings = true; } else if (value.getText().equals("false")) { generateAmbigWarnings = false; } else { grammar.antlrTool.error("Value for generateAmbigWarnings must be true or false", grammar.getFilename(), key.getLine(), key.getColumn()); } } else if (key.getText().equals("greedy")) { if (value.getText().equals("true")) { greedy = true; greedySet = true; } else if (value.getText().equals("false")) { greedy = false; greedySet = true; } else { grammar.antlrTool.error("Value for greedy must be true or false", grammar.getFilename(), key.getLine(), key.getColumn()); } } else { 
grammar.antlrTool.error("Invalid subrule option: " + key.getText(), grammar.getFilename(), key.getLine(), key.getColumn()); } } public String toString() { String s = " ("; if (initAction != null) { s += initAction; } for (int i = 0; i < alternatives.size(); i++) { Alternative alt = getAlternativeAt(i); Lookahead cache[] = alt.cache; int k = alt.lookaheadDepth; // dump lookahead set if (k == GrammarAnalyzer.LOOKAHEAD_DEPTH_INIT) { } else if (k == GrammarAnalyzer.NONDETERMINISTIC) { s += "{?}:"; } else { s += " {"; for (int j = 1; j <= k; j++) { s += cache[j].toString(",", grammar.tokenManager.getVocabulary()); if (j < k && cache[j + 1] != null) s += ";"; } s += "}:"; } // dump alternative including pred (if any) AlternativeElement p = alt.head; String pred = alt.semPred; if (pred != null) { s += pred; } while (p != null) { s += p; p = p.next; } if (i < (alternatives.size() - 1)) { s += " |"; } } s += " )"; return s; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/AlternativeElement.java000066400000000000000000000017141161462365500255540ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/AlternativeElement.java#1 $ */ abstract class AlternativeElement extends GrammarElement { AlternativeElement next; protected int autoGenType = AUTO_GEN_NONE; protected String enclosingRuleName; public AlternativeElement(Grammar g) { super(g); } public AlternativeElement(Grammar g, Token start) { super(g, start); } public AlternativeElement(Grammar g, Token start, int autoGenType_) { super(g, start); autoGenType = autoGenType_; } public int getAutoGenType() { return autoGenType; } public void setAutoGenType(int a) { autoGenType = a; } public String getLabel() { return null; } public void setLabel(String label) { } } 
nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/BaseAST.java000066400000000000000000000351271161462365500232130ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/BaseAST.java#1 $ */ import antlr.collections.AST; import antlr.collections.ASTEnumeration; import antlr.collections.impl.ASTEnumerator; import antlr.collections.impl.Vector; import java.io.Serializable; import java.io.IOException; import java.io.Writer; /** * A Child-Sibling Tree. * * A tree with PLUS at the root and with two children 3 and 4 is * structured as: * * PLUS * | * 3 -- 4 * * and can be specified easily in LISP notation as * * (PLUS 3 4) * * where every '(' starts a new subtree. * * These trees are particular useful for translators because of * the flexibility of the children lists. They are also very easy * to walk automatically, whereas trees with specific children * reference fields can't easily be walked automatically. * * This class contains the basic support for an AST. * Most people will create ASTs that are subclasses of * BaseAST or of CommonAST. */ public abstract class BaseAST implements AST, Serializable { protected BaseAST down; protected BaseAST right; private static boolean verboseStringConversion = false; private static String[] tokenNames = null; /**Add a node to the end of the child list for this node */ public void addChild(AST node) { if (node == null) return; BaseAST t = this.down; if (t != null) { while (t.right != null) { t = t.right; } t.right = (BaseAST)node; } else { this.down = (BaseAST)node; } } /** How many children does this node have? 
*/ public int getNumberOfChildren() { BaseAST t = this.down; int n = 0; if (t != null) { n = 1; while (t.right != null) { t = t.right; n++; } return n; } return n; } private void doWorkForFindAll(Vector v, AST target, boolean partialMatch) { AST sibling; // Start walking sibling lists, looking for matches. siblingWalk: for (sibling = this; sibling != null; sibling = sibling.getNextSibling()) { if ((partialMatch && sibling.equalsTreePartial(target)) || (!partialMatch && sibling.equalsTree(target))) { v.appendElement(sibling); } // regardless of match or not, check any children for matches if (sibling.getFirstChild() != null) { ((BaseAST)sibling.getFirstChild()).doWorkForFindAll(v, target, partialMatch); } } } /** Is node t equal to this in terms of token type and text? */ public boolean equals(AST t) { if (t == null) return false; if ( (this.getText()==null && t.getText()!=null) || (this.getText()!=null && t.getText()==null) ) { return false; } if ( this.getText()==null && t.getText()==null ) { return this.getType() == t.getType(); } return this.getText().equals(t.getText()) && this.getType() == t.getType(); } /** Is t an exact structural and equals() match of this tree. The * 'this' reference is considered the start of a sibling list. */ public boolean equalsList(AST t) { AST sibling; // the empty tree is not a match of any non-null tree. if (t == null) { return false; } // Otherwise, start walking sibling lists. First mismatch, return false. for (sibling = this; sibling != null && t != null; sibling = sibling.getNextSibling(), t = t.getNextSibling()) { // as a quick optimization, check roots first. if (!sibling.equals(t)) { return false; } // if roots match, do full list match test on children. 
if (sibling.getFirstChild() != null) { if (!sibling.getFirstChild().equalsList(t.getFirstChild())) { return false; } } // sibling has no kids, make sure t doesn't either else if (t.getFirstChild() != null) { return false; } } if (sibling == null && t == null) { return true; } // one sibling list has more than the other return false; } /** Is 'sub' a subtree of this list? * The siblings of the root are NOT ignored. */ public boolean equalsListPartial(AST sub) { AST sibling; // the empty tree is always a subset of any tree. if (sub == null) { return true; } // Otherwise, start walking sibling lists. First mismatch, return false. for (sibling = this; sibling != null && sub != null; sibling = sibling.getNextSibling(), sub = sub.getNextSibling()) { // as a quick optimization, check roots first. if (!sibling.equals(sub)) return false; // if roots match, do partial list match test on children. if (sibling.getFirstChild() != null) { if (!sibling.getFirstChild().equalsListPartial(sub.getFirstChild())) return false; } } if (sibling == null && sub != null) { // nothing left to match in this tree, but subtree has more return false; } // either both are null or sibling has more, but subtree doesn't return true; } /** Is tree rooted at 'this' equal to 't'? The siblings * of 'this' are ignored. */ public boolean equalsTree(AST t) { // check roots first. if (!this.equals(t)) return false; // if roots match, do full list match test on children. if (this.getFirstChild() != null) { if (!this.getFirstChild().equalsList(t.getFirstChild())) return false; } // sibling has no kids, make sure t doesn't either else if (t.getFirstChild() != null) { return false; } return true; } /** Is 't' a subtree of the tree rooted at 'this'? The siblings * of 'this' are ignored. */ public boolean equalsTreePartial(AST sub) { // the empty tree is always a subset of any tree. if (sub == null) { return true; } // check roots first. 
if (!this.equals(sub)) return false; // if roots match, do full list partial match test on children. if (this.getFirstChild() != null) { if (!this.getFirstChild().equalsListPartial(sub.getFirstChild())) return false; } return true; } /** Walk the tree looking for all exact subtree matches. Return * an ASTEnumerator that lets the caller walk the list * of subtree roots found herein. */ public ASTEnumeration findAll(AST target) { Vector roots = new Vector(10); AST sibling; // the empty tree cannot result in an enumeration if (target == null) { return null; } doWorkForFindAll(roots, target, false); // find all matches recursively return new ASTEnumerator(roots); } /** Walk the tree looking for all subtrees. Return * an ASTEnumerator that lets the caller walk the list * of subtree roots found herein. */ public ASTEnumeration findAllPartial(AST sub) { Vector roots = new Vector(10); AST sibling; // the empty tree cannot result in an enumeration if (sub == null) { return null; } doWorkForFindAll(roots, sub, true); // find all matches recursively return new ASTEnumerator(roots); } /** Get the first child of this node; null if not children */ public AST getFirstChild() { return down; } /** Get the next sibling in line after this one */ public AST getNextSibling() { return right; } /** Get the token text for this node */ public String getText() { return ""; } /** Get the token type for this node */ public int getType() { return 0; } public int getLine() { return 0; } public int getColumn() { return 0; } public abstract void initialize(int t, String txt); public abstract void initialize(AST t); public abstract void initialize(Token t); /** Remove all children */ public void removeChildren() { down = null; } public void setFirstChild(AST c) { down = (BaseAST)c; } public void setNextSibling(AST n) { right = (BaseAST)n; } /** Set the token text for this node */ public void setText(String text) { } /** Set the token type for this node */ public void setType(int ttype) { } public 
static void setVerboseStringConversion(boolean verbose, String[] names) { verboseStringConversion = verbose; tokenNames = names; } /** Return an array of strings that maps token ID to it's text. @since 2.7.3 */ public static String[] getTokenNames() { return tokenNames; } public String toString() { StringBuffer b = new StringBuffer(); // if verbose and type name not same as text (keyword probably) if (verboseStringConversion && !getText().equalsIgnoreCase(tokenNames[getType()]) && !getText().equalsIgnoreCase(StringUtils.stripFrontBack(tokenNames[getType()], "\"", "\""))) { b.append('['); b.append(getText()); b.append(",<"); b.append(tokenNames[getType()]); b.append(">]"); return b.toString(); } return getText(); } /** Print out a child-sibling tree in LISP notation */ public String toStringList() { AST t = this; String ts = ""; if (t.getFirstChild() != null) ts += " ("; ts += " " + this.toString(); if (t.getFirstChild() != null) { ts += ((BaseAST)t.getFirstChild()).toStringList(); } if (t.getFirstChild() != null) ts += " )"; if (t.getNextSibling() != null) { ts += ((BaseAST)t.getNextSibling()).toStringList(); } return ts; } public String toStringTree() { AST t = this; String ts = ""; if (t.getFirstChild() != null) ts += " ("; ts += " " + this.toString(); if (t.getFirstChild() != null) { ts += ((BaseAST)t.getFirstChild()).toStringList(); } if (t.getFirstChild() != null) ts += " )"; return ts; } public static String decode(String text) { char c, c1, c2, c3, c4, c5; StringBuffer n = new StringBuffer(); for (int i = 0; i < text.length(); i++) { c = text.charAt(i); if (c == '&') { c1 = text.charAt(i + 1); c2 = text.charAt(i + 2); c3 = text.charAt(i + 3); c4 = text.charAt(i + 4); c5 = text.charAt(i + 5); if (c1 == 'a' && c2 == 'm' && c3 == 'p' && c4 == ';') { n.append("&"); i += 5; } else if (c1 == 'l' && c2 == 't' && c3 == ';') { n.append("<"); i += 4; } else if (c1 == 'g' && c2 == 't' && c3 == ';') { n.append(">"); i += 4; } else if (c1 == 'q' && c2 == 'u' && c3 == 'o' 
&& c4 == 't' && c5 == ';') { n.append("\""); i += 6; } else if (c1 == 'a' && c2 == 'p' && c3 == 'o' && c4 == 's' && c5 == ';') { n.append("'"); i += 6; } else n.append("&"); } else n.append(c); } return new String(n); } public static String encode(String text) { char c; StringBuffer n = new StringBuffer(); for (int i = 0; i < text.length(); i++) { c = text.charAt(i); switch (c) { case '&': { n.append("&"); break; } case '<': { n.append("<"); break; } case '>': { n.append(">"); break; } case '"': { n.append("""); break; } case '\'': { n.append("'"); break; } default : { n.append(c); break; } } } return new String(n); } public void xmlSerializeNode(Writer out) throws IOException { StringBuffer buf = new StringBuffer(100); buf.append("<"); buf.append(getClass().getName() + " "); buf.append("text=\"" + encode(getText()) + "\" type=\"" + getType() + "\"/>"); out.write(buf.toString()); } public void xmlSerializeRootOpen(Writer out) throws IOException { StringBuffer buf = new StringBuffer(100); buf.append("<"); buf.append(getClass().getName() + " "); buf.append("text=\"" + encode(getText()) + "\" type=\"" + getType() + "\">\n"); out.write(buf.toString()); } public void xmlSerializeRootClose(Writer out) throws IOException { out.write("\n"); } public void xmlSerialize(Writer out) throws IOException { // print out this node and all siblings for (AST node = this; node != null; node = node.getNextSibling()) { if (node.getFirstChild() == null) { // print guts (class name, attributes) ((BaseAST)node).xmlSerializeNode(out); } else { ((BaseAST)node).xmlSerializeRootOpen(out); // print children ((BaseAST)node.getFirstChild()).xmlSerialize(out); // print end tag ((BaseAST)node).xmlSerializeRootClose(out); } } } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/BlockContext.java000066400000000000000000000017261161462365500243660ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: 
http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/BlockContext.java#1 $ */ /**BlockContext stores the information needed when creating an * alternative (list of elements). Entering a subrule requires * that we save this state as each block of alternatives * requires state such as "tail of current alternative." */ class BlockContext { AlternativeBlock block; // current block of alternatives int altNum; // which alt are we accepting 0..n-1 BlockEndElement blockEnd; // used if nested public void addAlternativeElement(AlternativeElement e) { currentAlt().addElement(e); } public Alternative currentAlt() { return (Alternative)block.alternatives.elementAt(altNum); } public AlternativeElement currentElement() { return currentAlt().tail; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/BlockEndElement.java000066400000000000000000000015661161462365500247640ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/BlockEndElement.java#1 $ */ /**All alternative blocks are "terminated" by BlockEndElements unless * they are rule blocks (in which case they use RuleEndElement). 
*/ class BlockEndElement extends AlternativeElement { protected boolean[] lock; // for analysis; used to avoid infinite loops protected AlternativeBlock block;// ending blocks know what block they terminate public BlockEndElement(Grammar g) { super(g); lock = new boolean[g.maxk + 1]; } public Lookahead look(int k) { return grammar.theLLkAnalyzer.look(k, this); } public String toString() { //return " [BlkEnd]"; return ""; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/BlockWithImpliedExitPath.java000066400000000000000000000013501161462365500266210ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/BlockWithImpliedExitPath.java#1 $ */ abstract class BlockWithImpliedExitPath extends AlternativeBlock { protected int exitLookaheadDepth; // lookahead needed to handle optional path /** lookahead to bypass block; set * by deterministic(). 1..k of Lookahead */ protected Lookahead[] exitCache = new Lookahead[grammar.maxk + 1]; public BlockWithImpliedExitPath(Grammar g) { super(g); } public BlockWithImpliedExitPath(Grammar g, Token start) { super(g, start, false); } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/BooBlockFinishingInfo.java000066400000000000000000000017371161462365500261360ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id:$ */ // // ANTLR C# Code Generator by Kunle Odutola : kunle UNDERSCORE odutola AT hotmail DOT com // class BooBlockFinishingInfo { String postscript; // what to generate to terminate block boolean generatedSwitch;// did block finish with "default:" of switch? 
boolean generatedAnIf; /** When generating an if or switch, end-of-token lookahead sets * will become the else or default clause, don't generate an * error clause in this case. */ boolean needAnErrorClause; public BooBlockFinishingInfo() { postscript=null; generatedSwitch=generatedSwitch = false; needAnErrorClause = true; } public BooBlockFinishingInfo(String ps, boolean genS, boolean generatedAnIf, boolean n) { postscript = ps; generatedSwitch = genS; this.generatedAnIf = generatedAnIf; needAnErrorClause = n; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/BooCharFormatter.java000066400000000000000000000055011161462365500251630ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id:$ */ // // ANTLR C# Code Generator by Kunle Odutola : kunle UNDERSCORE odutola AT hotmail DOT com // class BooCharFormatter implements CharFormatter { /** Given a character value, return a string representing the character * that can be embedded inside a string literal or character literal * This works for Java/C/C++ code-generation and languages with compatible * special-character-escapment. * Code-generators for languages should override this method. * @param c The character of interest. * @param forCharLiteral true to escape for char literal, false for string literal */ public String escapeChar(int c, boolean forCharLiteral) { switch (c) { // case GrammarAnalyzer.EPSILON_TYPE : return ""; case '\n' : return "\\n"; case '\t' : return "\\t"; case '\r' : return "\\r"; case '\\' : return "\\\\"; case '\'' : return forCharLiteral ? "\\'" : "'"; case '"' : return forCharLiteral ? 
"\"" : "\\\""; default : if ( c<' '||c>126 ) { if ( ( 0x0000 <= c ) && ( c <= 0x000F ) ) { return "\\u000" + Integer.toString(c,16); } else if ( ( 0x0010 <= c ) && ( c <= 0x00FF ) ) { return "\\u00" + Integer.toString(c,16); } else if ( ( 0x0100 <= c ) && ( c <= 0x0FFF )) { return "\\u0" + Integer.toString(c,16); } else { return "\\u" + Integer.toString(c,16); } } else { return String.valueOf((char)c); } } } /** Converts a String into a representation that can be use as a literal * when surrounded by double-quotes. * @param s The String to be changed into a literal */ public String escapeString(String s) { String retval = new String(); for (int i = 0; i < s.length(); i++) { retval += escapeChar(s.charAt(i), false); } return retval; } /** Given a character value, return a string representing the character * literal that can be recognized by the target language compiler. * This works for languages that use single-quotes for character literals. * Code-generators for languages should override this method. * @param c The character of interest. */ public String literalChar(int c) { return "char('" + escapeChar(c, true) + "')"; } /** Converts a String into a string literal * This works for languages that use double-quotes for string literals. * Code-generators for languages should override this method. * @param s The String to be changed into a literal */ public String literalString(String s) { return "\'" + escapeString(s) + "\'"; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/BooCodeGenerator.java000066400000000000000000003631211161462365500251500ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id:$ */ // // ANTLR C# Code Generator by Micheal Jordan // Kunle Odutola : kunle UNDERSCORE odutola AT hotmail DOT com // Anthony Oguntimehin // // With many thanks to Eric V. Smith from the ANTLR list. 
// // HISTORY: // // 17-May-2002 kunle Fixed bug in OctalToUnicode() - was processing non-Octal escape sequences // Also added namespace support based on Cpp version. // 07-Jun-2002 kunle Added Scott Ellis's _saveIndex creation optimizations // 09-Sep-2002 richardN Richard Ney's bug-fix for literals table construction. // [ Hashtable ctor needed instance of hash code provider not it's class name. ] // 17-Sep-2002 kunle & Added all Token ID definitions as data member of every Lexer/Parser/TreeParser // AOg [ A by-product of problem-solving phase of the hetero-AST changes below // but, it breaks nothing and restores "normal" ANTLR codegen behaviour. ] // 19-Oct-2002 kunle & Completed the work required to support heterogenous ASTs (many changes) // AOg & // michealj // 14-Nov-2002 michealj Added "initializeASTFactory()" to support flexible ASTFactory initialization. // [ Thanks to Ric Klaren - for suggesting it and implementing it for Cpp. ] // 18-Nov-2002 kunle Added fix to make xx_tokenSet_xx names CLS compliant. // 01-Dec-2002 richardN Patch to reduce "unreachable code" warnings // 01-Dec-2002 richardN Fix to generate correct TreeParser token-type classnames. // 12-Jan-2003 kunle & Generated Lexers, Parsers and TreeParsers now support ANTLR's tracing option. // michealj // 12-Jan-2003 kunle Fixed issue where initializeASTFactory() was generated when "buildAST=false" // 14-Jan-2003 AOg initializeASTFactory(AST factory) method was modifying the Parser's "astFactory" // member rather than it's own "factory" parameter. Fixed. 
// 18-Jan-2003 kunle & Fixed reported issues with ASTFactory create() calls for hetero ASTs // michealj - code generated for LEXER token with hetero-AST option specified does not compile // - code generated for imaginary tokens with hetero-AST option specified uses // default AST type // - code generated for per-TokenRef hetero-AST option specified does not compile // 18-Jan-2003 kunle initializeASTFactory(AST) method is now a static public member // 18-May-2003 kunle Changes to address outstanding reported issues:: // - Fixed reported issues with support for case-sensitive literals // - antlr.SemanticException now imported for all Lexers. // [ This exception is thrown on predicate failure. ] // 12-Jan-2004 kunle Added fix for reported issue with un-compileable generated lexers // // import java.io.IOException; import java.util.Enumeration; import java.util.Hashtable; import java.util.Iterator; import java.util.List; import java.util.StringTokenizer; import antlr.collections.impl.BitSet; import antlr.collections.impl.Vector; /** Generates MyParser.cs, MyLexer.cs and MyParserTokenTypes.cs */ public class BooCodeGenerator extends CodeGenerator { // non-zero if inside syntactic predicate generation protected int syntacticPredLevel = 0; // Are we generating ASTs (for parsers and tree parsers) right now? protected boolean genAST = false; // Are we saving the text consumed (for lexers) right now? protected boolean saveText = false; // Grammar parameters set up to handle different grammar classes. // These are used to get instanceof tests out of code generation boolean usingCustomAST = false; String labeledElementType; String labeledElementASTType; String labeledElementInit; String commonExtraArgs; String commonExtraParams; String commonLocalVars; String lt1Value; String exceptionThrown; String throwNoViable; // Tracks the rule being generated. Used for mapTreeId RuleBlock currentRule; // Tracks the rule or labeled subrule being generated. Used for AST // generation. 
String currentASTResult; /** * Mapping between the ids used in the current alt, and the names of * variables used to represent their AST values. */ Hashtable treeVariableMap = new Hashtable(); /** * Used to keep track of which AST variables have been defined in a rule * (except for the #rule_name and #rule_name_in var's */ Hashtable declaredASTVariables = new Hashtable(); /* Count of unnamed generated variables */ int astVarNumber = 1; /** Special value used to mark duplicate in treeVariableMap */ protected static final String NONUNIQUE = new String(); public static final int caseSizeThreshold = 127; // ascii is max private Vector semPreds; // Used to keep track of which (heterogeneous AST types are used) // which need to be set in the ASTFactory of the generated parser private java.util.Vector astTypes; private static BooNameSpace nameSpace = null; /** * Create a Boo code-generator using the given Grammar. The caller must * still call setTool, setBehavior, and setAnalyzer before generating code. */ public BooCodeGenerator() { super(); charFormatter = new BooCharFormatter(); } /** * Adds a semantic predicate string to the sem pred vector These strings * will be used to build an array of sem pred names when building a * debugging parser. 
This method should only be called when the debug option * is specified */ protected int addSemPred(String predicate) { semPreds.appendElement(predicate); return semPreds.size() - 1; } public void exitIfError() { if (antlrTool.hasError()) { antlrTool.fatalError("Exiting due to errors."); } } /** Generate the parser, lexer, treeparser, and token types in Boo */ public void gen() { // Do the code generation try { // Loop over all grammars Enumeration grammarIter = behavior.grammars.elements(); while (grammarIter.hasMoreElements()) { Grammar g = (Grammar) grammarIter.nextElement(); // Connect all the components to each other g.setGrammarAnalyzer(analyzer); g.setCodeGenerator(this); analyzer.setGrammar(g); // To get right overloading behavior across heterogeneous // grammars setupGrammarParameters(g); g.generate(); exitIfError(); } // Loop over all token managers (some of which are lexers) Enumeration tmIter = behavior.tokenManagers.elements(); while (tmIter.hasMoreElements()) { TokenManager tm = (TokenManager) tmIter.nextElement(); if (!tm.isReadOnly()) { // Write the token manager tokens as Boo // this must appear before genTokenInterchange so that // labels are set on string literals genTokenTypes(tm); // Write the token manager tokens as plain text genTokenInterchange(tm); } exitIfError(); } } catch (IOException e) { antlrTool.reportException(e, null); } } /** * Generate code for the given grammar element. 
* * @param blk * The {...} action to generate */ public void gen(ActionElement action) { if (DEBUG_CODE_GENERATOR) System.out.println("genAction(" + action + ")"); if (action.isSemPred) { genSemPred(action.actionText, action.line); } else { if (grammar.hasSyntacticPredicate) { println("if 0 == inputState.guessing:"); tabs++; } ActionTransInfo tInfo = new ActionTransInfo(); String actionStr = processActionForSpecialSymbols( action.actionText, action.getLine(), currentRule, tInfo); if (tInfo.refRuleRoot != null) { // Somebody referenced "#rule", make sure translated var is // valid // assignment to #rule is left as a ref also, meaning that // assignments // with no other refs like "#rule = foo();" still forces this // code to be // generated (unnecessarily). println(tInfo.refRuleRoot + " = cast(" + labeledElementASTType + ", currentAST).root"); } // dump the translated action printAction(actionStr); if (tInfo.assignToRoot) { // Somebody did a "#rule=", reset internal currentAST.root println("currentAST.root = " + tInfo.refRuleRoot); // reset the child pointer too to be last sibling in sibling // list println("if (" + tInfo.refRuleRoot + " is not null) and (" + tInfo.refRuleRoot + ".getFirstChild() is not null):"); tabs++; println("currentAST.child = " + tInfo.refRuleRoot + ".getFirstChild()"); tabs--; println("else:"); tabs++; println("currentAST.child = " + tInfo.refRuleRoot); tabs--; println("currentAST.advanceChildToEnd()"); } if (grammar.hasSyntacticPredicate) { tabs--; } } } protected void printAction(String s) { if (null == s) { return; } List nonEmptyLines = new java.util.ArrayList(); String[] lines = s.replaceAll("\r\n", "\n").split("\n"); for (int i=0; i 0) { nonEmptyLines.add(line); } } if (0 == nonEmptyLines.size()) { return; } Iterator iterator = nonEmptyLines.iterator(); String indent = getStartingWhitespace((String)nonEmptyLines.get(0)); if (0 == indent.length()) { while (iterator.hasNext()) { String line = (String) iterator.next(); println(line); } } 
else { while (iterator.hasNext()) { String line = (String) iterator.next(); println(line.substring(indent.length())); } } } private static String getStartingWhitespace(String s) { for (int i=0; i= 1) and " + predictExit + "):"); printSingleLineBlock("goto " + label + "_breakloop"); } BooBlockFinishingInfo howToFinish = genCommonBlock(blk, false); final String finalcnt = cnt; final String finalLabel = label; genBlockFinish(howToFinish, new Runnable() { public void run() { println("if (" + finalcnt + " >= 1):"); printSingleLineBlock("goto " + finalLabel + "_breakloop"); println("else:"); printSingleLineBlock(throwNoViable); } }); println("++" + cnt); tabs--; println(":" + label + "_breakloop"); //tabs--; //println("// ( ... )+"); // Restore previous AST generation currentASTResult = saveCurrentASTResult; } private void printSingleLineBlock(String stmt) { tabs++; println(stmt); tabs--; } /** Generate the parser Boo file */ public void gen(ParserGrammar g) throws IOException { // if debugging, set up a new vector to keep track of sempred // strings for this grammar if (g.debuggingOutput) semPreds = new Vector(); setGrammar(g); if (!(grammar instanceof ParserGrammar)) { antlrTool.panic("Internal error generating parser"); } genBody(g); } /** * Generate code for the given grammar element. * * @param blk * The rule-reference to generate */ public void gen(RuleRefElement rr) { if (DEBUG_CODE_GENERATOR) System.out.println("genRR(" + rr + ")"); RuleSymbol rs = (RuleSymbol) grammar.getSymbol(rr.targetRule); if (rs == null || !rs.isDefined()) { // Is this redundant??? antlrTool.error("Rule '" + rr.targetRule + "' is not defined", grammar.getFilename(), rr.getLine(), rr.getColumn()); return; } if (!(rs instanceof RuleSymbol)) { // Is this redundant??? antlrTool.error("'" + rr.targetRule + "' does not name a grammar rule", grammar.getFilename(), rr.getLine(), rr.getColumn()); return; } genErrorTryForElement(rr); // AST value for labeled rule refs in tree walker. 
// This is not AST construction; it is just the input tree node value. if (grammar instanceof TreeWalkerGrammar && rr.getLabel() != null && syntacticPredLevel == 0) { println(rr.getLabel() + " = _t == ASTNULL ? null : " + lt1Value); } // if in lexer and ! on rule ref or alt or rule, save buffer index to // kill later if (grammar instanceof LexerGrammar && (!saveText || rr.getAutoGenType() == GrammarElement.AUTO_GEN_BANG)) { println("_saveIndex = text.Length"); } // Process return value assignment if any printTabs(); if (rr.idAssign != null) { // Warn if the rule has no return type if (rs.block.returnAction == null) { antlrTool.warning("Rule '" + rr.targetRule + "' has no return type", grammar.getFilename(), rr .getLine(), rr.getColumn()); } _print(rr.idAssign + "="); } else { // Warn about return value if any, but not inside syntactic // predicate if (!(grammar instanceof LexerGrammar) && syntacticPredLevel == 0 && rs.block.returnAction != null) { antlrTool.warning("Rule '" + rr.targetRule + "' returns a value", grammar.getFilename(), rr .getLine(), rr.getColumn()); } } // Call the rule GenRuleInvocation(rr); // if in lexer and ! 
on element or alt or rule, save buffer index to // kill later if (grammar instanceof LexerGrammar && (!saveText || rr.getAutoGenType() == GrammarElement.AUTO_GEN_BANG)) { println("text.Length = _saveIndex"); } // if not in a syntactic predicate if (syntacticPredLevel == 0) { boolean doNoGuessTest = (grammar.hasSyntacticPredicate && (grammar.buildAST && rr.getLabel() != null || (genAST && rr.getAutoGenType() == GrammarElement.AUTO_GEN_NONE))); if (doNoGuessTest) { println("if (0 == inputState.guessing):"); tabs++; } if (grammar.buildAST && rr.getLabel() != null) { // always gen variable for rule return on labeled rules println(rr.getLabel() + "_AST = cast(" + labeledElementASTType + ", returnAST)"); } if (genAST) { switch (rr.getAutoGenType()) { case GrammarElement.AUTO_GEN_NONE: if (usingCustomAST) println("astFactory.addASTChild(currentAST, cast(AST, returnAST))"); else println("astFactory.addASTChild(currentAST, returnAST)"); break; case GrammarElement.AUTO_GEN_CARET: antlrTool .error("Internal: encountered ^ after rule reference"); break; default: break; } } // if a lexer and labeled, Token label defined at rule level, just // set it here if (grammar instanceof LexerGrammar && rr.getLabel() != null) { println(rr.getLabel() + " = returnToken_"); } if (doNoGuessTest) { tabs--; } } genErrorCatchForElement(rr); } /** * Generate code for the given grammar element. * * @param blk * The string-literal reference to generate */ public void gen(StringLiteralElement atom) { if (DEBUG_CODE_GENERATOR) System.out.println("genString(" + atom + ")"); // Variable declarations for labeled elements if (atom.getLabel() != null && syntacticPredLevel == 0) { println(atom.getLabel() + " = " + lt1Value); } // AST genElementAST(atom); // is there a bang on the literal? 
boolean oldsaveText = saveText; saveText = saveText && atom.getAutoGenType() == GrammarElement.AUTO_GEN_NONE; // matching genMatch(atom); saveText = oldsaveText; // tack on tree cursor motion if doing a tree walker if (grammar instanceof TreeWalkerGrammar) { println("_t = _t.getNextSibling()"); } } /** * Generate code for the given grammar element. * * @param blk * The token-range reference to generate */ public void gen(TokenRangeElement r) { genErrorTryForElement(r); if (r.getLabel() != null && syntacticPredLevel == 0) { println(r.getLabel() + " = " + lt1Value); } // AST genElementAST(r); // match println("matchRange(" + OctalToUnicode(r.beginText) + "," + OctalToUnicode(r.endText) + ")"); genErrorCatchForElement(r); } /** * Generate code for the given grammar element. * * @param blk * The token-reference to generate */ public void gen(TokenRefElement atom) { if (DEBUG_CODE_GENERATOR) System.out.println("genTokenRef(" + atom + ")"); if (grammar instanceof LexerGrammar) { antlrTool.panic("Token reference found in lexer"); } genErrorTryForElement(atom); // Assign Token value to token label variable if (atom.getLabel() != null && syntacticPredLevel == 0) { println(atom.getLabel() + " = " + lt1Value); } // AST genElementAST(atom); // matching genMatch(atom); genErrorCatchForElement(atom); // tack on tree cursor motion if doing a tree walker if (grammar instanceof TreeWalkerGrammar) { println("_t = _t.getNextSibling()"); } } public void gen(TreeElement t) { // save AST cursor println("__t" + t.ID + " as AST " + " = _t"); // If there is a label on the root, then assign that to the variable if (t.root.getLabel() != null) { println(t.root.getLabel() + " = (ASTNULL == _t) ? null : cast(" + labeledElementASTType + ", _t)"); } // check for invalid modifiers ! and ^ on tree element roots if (t.root.getAutoGenType() == GrammarElement.AUTO_GEN_BANG) { antlrTool.error( "Suffixing a root node with '!' 
is not implemented", grammar.getFilename(), t.getLine(), t.getColumn()); t.root.setAutoGenType(GrammarElement.AUTO_GEN_NONE); } if (t.root.getAutoGenType() == GrammarElement.AUTO_GEN_CARET) { antlrTool .warning( "Suffixing a root node with '^' is redundant; already a root", grammar.getFilename(), t.getLine(), t.getColumn()); t.root.setAutoGenType(GrammarElement.AUTO_GEN_NONE); } // Generate AST variables genElementAST(t.root); if (grammar.buildAST) { // Save the AST construction state println("__currentAST" + t.ID + " as ASTPair = currentAST.copy()"); // Make the next item added a child of the TreeElement root println("currentAST.root = currentAST.child"); println("currentAST.child = null"); } // match root if (t.root instanceof WildcardElement) { println("raise MismatchedTokenException() if _t is null"); } else { genMatch(t.root); } // move to list of children println("_t = _t.getFirstChild()"); // walk list of children, generating code for each for (int i = 0; i < t.getAlternatives().size(); i++) { Alternative a = t.getAlternativeAt(i); AlternativeElement e = a.head; while (e != null) { e.generate(); e = e.next; } } if (grammar.buildAST) { // restore the AST construction state to that just after the // tree root was added println("ASTPair.PutInstance(currentAST)"); println("currentAST = __currentAST" + t.ID); } // restore AST cursor println("_t = __t" + t.ID); // move cursor to sibling of tree just parsed println("_t = _t.getNextSibling()"); } /** Generate the tree-parser Boo file */ public void gen(TreeWalkerGrammar g) throws IOException { // SAS: debugging stuff removed for now... setGrammar(g); if (!(grammar instanceof TreeWalkerGrammar)) { antlrTool.panic("Internal error generating tree-walker"); } genBody(g); } /** * Generate code for the given grammar element. 
* * @param wc * The wildcard element to generate */ public void gen(WildcardElement wc) { // Variable assignment for labeled elements if (wc.getLabel() != null && syntacticPredLevel == 0) { println(wc.getLabel() + " = " + lt1Value); } // AST genElementAST(wc); // Match anything but EOF if (grammar instanceof TreeWalkerGrammar) { println("raise MismatchedTokenException() if _t is null"); } else if (grammar instanceof LexerGrammar) { if (grammar instanceof LexerGrammar && (!saveText || wc.getAutoGenType() == GrammarElement.AUTO_GEN_BANG)) { println("_saveIndex = text.Length"); } println("matchNot(EOF/*_CHAR*/)"); if (grammar instanceof LexerGrammar && (!saveText || wc.getAutoGenType() == GrammarElement.AUTO_GEN_BANG)) { println("text.Length = _saveIndex"); // kill text atom put in // buffer } } else { println("matchNot(" + getValueString(Token.EOF_TYPE) + ")"); } // tack on tree cursor motion if doing a tree walker if (grammar instanceof TreeWalkerGrammar) { println("_t = _t.getNextSibling()"); } } /** * Generate code for the given grammar element. * * @param blk * The (...)* block to generate */ public void gen(ZeroOrMoreBlock blk) { if (DEBUG_CODE_GENERATOR) System.out.println("gen*(" + blk + ")"); //println("block: // ( ... )*"); //tabs++; genBlockPreamble(blk); String label; if (blk.getLabel() != null) { label = blk.getLabel(); } else { label = "_loop" + blk.ID; } println("while true:"); tabs++; // generate the init action for ()+ ()* inside the loop // this allows us to do usefull EOF checking... genBlockInitAction(blk); // Tell AST generation to build subrule result String saveCurrentASTResult = currentASTResult; if (blk.getLabel() != null) { currentASTResult = blk.getLabel(); } boolean ok = grammar.theLLkAnalyzer.deterministic(blk); // generate exit test if greedy set to false // and an alt is ambiguous with exit branch // or when lookahead derived purely from end-of-file // Lookahead analysis stops when end-of-file is hit, // returning set {epsilon}. 
Since {epsilon} is not // ambig with any real tokens, no error is reported // by deterministic() routines and we have to check // for the case where the lookahead depth didn't get // set to NONDETERMINISTIC (this only happens when the // FOLLOW contains real atoms + epsilon). boolean generateNonGreedyExitPath = false; int nonGreedyExitDepth = grammar.maxk; if (!blk.greedy && blk.exitLookaheadDepth <= grammar.maxk && blk.exitCache[blk.exitLookaheadDepth].containsEpsilon()) { generateNonGreedyExitPath = true; nonGreedyExitDepth = blk.exitLookaheadDepth; } else if (!blk.greedy && blk.exitLookaheadDepth == LLkGrammarAnalyzer.NONDETERMINISTIC) { generateNonGreedyExitPath = true; } if (generateNonGreedyExitPath) { if (DEBUG_CODE_GENERATOR) { System.out.println("nongreedy (...)* loop; exit depth is " + blk.exitLookaheadDepth); } String predictExit = getLookaheadTestExpression(blk.exitCache, nonGreedyExitDepth); println("// nongreedy exit test"); println("goto " + label + "_breakloop if " + predictExit); } BooBlockFinishingInfo howToFinish = genCommonBlock(blk, false); genBlockFinish(howToFinish, "goto " + label + "_breakloop"); tabs--; println(":" + label + "_breakloop"); //tabs--; //println("// ( ... )*"); // Restore previous AST generation currentASTResult = saveCurrentASTResult; } /** * Generate an alternative. 
* * @param alt * The alternative to generate * @param blk * The block to which the alternative belongs */ protected void genAlt(Alternative alt, AlternativeBlock blk) { // Save the AST generation state, and set it to that of the alt boolean savegenAST = genAST; genAST = genAST && alt.getAutoGen(); boolean oldsaveTest = saveText; saveText = saveText && alt.getAutoGen(); // Reset the variable name map for the alternative Hashtable saveMap = treeVariableMap; treeVariableMap = new Hashtable(); // Generate try block around the alt for error handling if (alt.exceptionSpec != null) { println("try: // for error handling"); tabs++; } int generatedElements = 0; AlternativeElement elem = alt.head; while (!(elem instanceof BlockEndElement)) { elem.generate(); // alt can begin with anything. Ask target to // gen. ++generatedElements; elem = elem.next; } if (0 == generatedElements) { println("pass // 947"); } if (genAST) { if (blk instanceof RuleBlock) { // Set the AST return value for the rule RuleBlock rblk = (RuleBlock) blk; if (usingCustomAST) { println(rblk.getRuleName() + "_AST = cast(" + labeledElementASTType + ", currentAST.root)"); } else { println(rblk.getRuleName() + "_AST = currentAST.root"); } } else if (blk.getLabel() != null) { // ### future: also set AST value for labeled subrules. 
// println(blk.getLabel() + "_AST = // ("+labeledElementASTType+")currentAST.root;"); antlrTool.warning("Labeled subrules not yet supported", grammar .getFilename(), blk.getLine(), blk.getColumn()); } } if (alt.exceptionSpec != null) { // close try block tabs--; genErrorHandler(alt.exceptionSpec); } genAST = savegenAST; saveText = oldsaveTest; treeVariableMap = saveMap; } /** * Generate all the bitsets to be used in the parser or lexer Generate the * raw bitset data like "long _tokenSet1_data[] = {...};" and the BitSet * object declarations like "BitSet _tokenSet1 = new * BitSet(_tokenSet1_data);" Note that most languages do not support object * initialization inside a class definition, so other code-generators may * have to separate the bitset declarations from the initializations (e.g., * put the initializations in the generated constructor instead). * * @param bitsetList * The list of bitsets to generate. * @param maxVocabulary * Ensure that each generated bitset can contain at least this * value. */ protected void genBitsets(Vector bitsetList, int maxVocabulary) { println(""); for (int i = 0; i < bitsetList.size(); i++) { BitSet p = (BitSet) bitsetList.elementAt(i); // Ensure that generated BitSet is large enough for vocabulary p.growToInclude(maxVocabulary); genBitSet(p, i); } } /** * Do something simple like: private static final long[] mk_tokenSet_0() { * long[] data = { -2305839160922996736L, 63L, 16777216L, 0L, 0L, 0L }; * return data; } public static final BitSet _tokenSet_0 = new * BitSet(mk_tokenSet_0()); Or, for large bitsets, optimize init so ranges * are collapsed into loops. This is most useful for lexers using unicode. 
*/ private void genBitSet(BitSet p, int id) { // initialization data println("private static def mk_" + getBitsetName(id) + "() as (long):"); tabs++; int n = p.lengthInLongWords(); if (n < BITSET_OPTIMIZE_INIT_THRESHOLD) { println("data = (" + p.toStringOfWords() + ", )"); } else { // will init manually, allocate space then set values println("data = array(long, " + n + ")"); long[] elems = p.toPackedArray(); for (int i = 0; i < elems.length;) { if ((i + 1) == elems.length || elems[i] != elems[i + 1]) { // last number or no run of numbers, just dump assignment println("data[" + i + "]=" + elems[i] + "L"); i++; } else { // scan to find end of run int j; for (j = i + 1; j < elems.length && elems[j] == elems[i]; j++) { ; } // j-1 is last member of run println("i = " + i); println("while i<=" + (j - 1) + ":"); ++tabs; println("data[i] = " + elems[i] + "L"); println("++i"); --tabs; i = j; } } } println("return data"); tabs--; // BitSet object println("public static final " + getBitsetName(id) + " = BitSet(" + "mk_" + getBitsetName(id) + "()" + ")"); } /** * Given the index of a bitset in the bitset list, generate a unique name. * Specific code-generators may want to override this if the language does * not allow '_' or numerals in identifiers. * * @param index * The index of the bitset in the bitset list. 
*/ protected String getBitsetName(int index) { return "tokenSet_" + index + "_"; } /** * Generate the finish of a block, using a combination of the info returned * from genCommonBlock() and the action to perform when no alts were taken * * @param howToFinish * The return of genCommonBlock() * @param noViableAction * What to generate when no alt is taken */ private void genBlockFinish(BooBlockFinishingInfo howToFinish, Runnable noViableAction) { boolean ifOrSwitch = (howToFinish.generatedAnIf || howToFinish.generatedSwitch); if (howToFinish.needAnErrorClause && ifOrSwitch) { if (howToFinish.generatedAnIf) { println("else:"); } ++tabs; noViableAction.run(); --tabs; } if (howToFinish.postscript != null) { println(howToFinish.postscript); } if (howToFinish.generatedSwitch) { --tabs; } } private void genBlockFinish(BooBlockFinishingInfo howToFinish, final String noViableAction) { genBlockFinish(howToFinish, new Runnable() { public void run() { println(noViableAction); } }); } /** * Generate the init action for a block, which may be a RuleBlock or a plain * AlternativeBLock. * * @blk The block for which the preamble is to be generated. */ protected void genBlockInitAction(AlternativeBlock blk) { // dump out init action if (blk.initAction != null) { printAction(processActionForSpecialSymbols(blk.initAction, blk .getLine(), currentRule, null)); } } /** * Generate the header for a block, which may be a RuleBlock or a plain * AlternativeBLock. This generates any variable declarations and * syntactic-predicate-testing variables. * * @blk The block for which the preamble is to be generated. */ protected void genBlockPreamble(AlternativeBlock blk) { // define labels for rule blocks. 
if (blk instanceof RuleBlock) { RuleBlock rblk = (RuleBlock) blk; if (rblk.labeledElements != null) { for (int i = 0; i < rblk.labeledElements.size(); i++) { AlternativeElement a = (AlternativeElement) rblk.labeledElements .elementAt(i); // System.out.println("looking at labeled element: "+a); // Variables for labeled rule refs and // subrules are different than variables for // grammar atoms. This test is a little tricky // because we want to get all rule refs and ebnf, // but not rule blocks or syntactic predicates if (a instanceof RuleRefElement || a instanceof AlternativeBlock && !(a instanceof RuleBlock) && !(a instanceof SynPredBlock)) { if (!(a instanceof RuleRefElement) && ((AlternativeBlock) a).not && analyzer.subruleCanBeInverted( ((AlternativeBlock) a), grammar instanceof LexerGrammar)) { // Special case for inverted subrules that // will be inlined. Treat these like // token or char literal references println(a.getLabel() + " as " + labeledElementType + " = " + labeledElementInit); if (grammar.buildAST) { genASTDeclaration(a); } } else { if (grammar.buildAST) { // Always gen AST variables for // labeled elements, even if the // element itself is marked with ! genASTDeclaration(a); } if (grammar instanceof LexerGrammar) { println(a.getLabel() + " as IToken"); } if (grammar instanceof TreeWalkerGrammar) { // always generate rule-ref variables // for tree walker println(a.getLabel() + " as " + labeledElementType + " = " + labeledElementInit); } } } else { // It is a token or literal reference. 
Generate the // correct variable type for this grammar println(a.getLabel() + " as " + labeledElementType + " = " + labeledElementInit); // In addition, generate *_AST variables if building // ASTs if (grammar.buildAST) { // println(labeledElementASTType+" " + a.getLabel() // + "_AST = null;"); if (a instanceof GrammarAtom && ((GrammarAtom) a).getASTNodeType() != null) { GrammarAtom ga = (GrammarAtom) a; genASTDeclaration(a, ga.getASTNodeType()); } else { genASTDeclaration(a); } } } } } } } public void genBody(LexerGrammar g) throws IOException { // SAS: moved output creation to method so a subclass can change // how the output is generated (for VAJ interface) setupOutput(grammar.getClassName()); genAST = false; // no way to gen trees. saveText = true; // save consumed characters. tabs = 0; // Generate header common to all Boo output files genHeader(); // Do not use printAction because we assume tabs==0 println(behavior.getHeaderAction("")); // Generate the Boo namespace declaration (if specified) if (nameSpace != null) nameSpace.emitDeclarations(currentOutput); // Generate header specific to lexer Boo file println("// Generate header specific to lexer Boo file"); println("import System"); println("import System.IO.Stream as Stream"); println("import System.IO.TextReader as TextReader"); println("import System.Collections.Hashtable as Hashtable"); println("import System.Collections.Comparer as Comparer"); if (!(g.caseSensitiveLiterals)) { println("import System.Collections.CaseInsensitiveHashCodeProvider as CaseInsensitiveHashCodeProvider"); println("import System.Collections.CaseInsensitiveComparer as CaseInsensitiveComparer"); } println(""); println("import antlr.TokenStreamException as TokenStreamException"); println("import antlr.TokenStreamIOException as TokenStreamIOException"); println("import antlr.TokenStreamRecognitionException as TokenStreamRecognitionException"); println("import antlr.CharStreamException as CharStreamException"); println("import 
antlr.CharStreamIOException as CharStreamIOException"); println("import antlr.ANTLRException as ANTLRException"); println("import antlr.CharScanner as CharScanner"); println("import antlr.InputBuffer as InputBuffer"); println("import antlr.ByteBuffer as ByteBuffer"); println("import antlr.CharBuffer as CharBuffer"); println("import antlr.Token as Token"); println("import antlr.IToken as IToken"); println("import antlr.CommonToken as CommonToken"); println("import antlr.SemanticException as SemanticException"); println("import antlr.RecognitionException as RecognitionException"); println("import antlr.NoViableAltForCharException as NoViableAltForCharException"); println("import antlr.MismatchedCharException as MismatchedCharException"); println("import antlr.TokenStream as TokenStream"); println("import antlr.LexerSharedInputState as LexerSharedInputState"); println("import antlr.collections.impl.BitSet as BitSet"); // Generate user-defined lexer file preamble println(grammar.preambleAction.getText()); // Generate lexer class definition String sup = null; if (grammar.superClass != null) { sup = grammar.superClass; } else { sup = "antlr." 
+ grammar.getSuperClass(); } // print javadoc comment if any if (grammar.comment != null) { _println(grammar.comment); } Token tprefix = (Token) grammar.options.get("classHeaderPrefix"); if (tprefix != null) { String p = StringUtils .stripFrontBack(tprefix.getText(), "\"", "\""); if (p != null) { print(p + " "); } } print("class " + grammar.getClassName() + "(" + sup); print(", TokenStream"); Token tsuffix = (Token) grammar.options.get("classHeaderSuffix"); if (tsuffix != null) { String suffix = StringUtils.stripFrontBack(tsuffix.getText(), "\"", "\""); if (suffix != null) { print(", " + suffix); // must be an interface name for Boo } } println("):"); tabs++; // Generate 'const' definitions for Token IDs genTokenDefinitions(grammar.tokenManager); // Generate user-defined lexer class members print(processActionForSpecialSymbols(grammar.classMemberAction .getText(), grammar.classMemberAction.getLine(), currentRule, null)); // // Generate the constructor from InputStream, which in turn // calls the ByteBuffer constructor // println("def constructor(ins as Stream):"); printSingleLineBlock("self(ByteBuffer(ins))"); println(""); // // Generate the constructor from Reader, which in turn // calls the CharBuffer constructor // println("def constructor(r as TextReader):"); printSingleLineBlock("self(CharBuffer(r))"); println(""); println("def constructor(ib as InputBuffer):"); // if debugging, wrap the input buffer in a debugger if (grammar.debuggingOutput) printSingleLineBlock("self(LexerSharedInputState(antlr.debug.DebuggingInputBuffer(ib)))"); else printSingleLineBlock("self(LexerSharedInputState(ib))"); println(""); // // Generate the constructor from InputBuffer (char or byte) // println("def constructor(state as LexerSharedInputState):"); ++tabs; println("super(state)"); println("initialize()"); tabs--; println(""); // Generate the initialize function println("private def initialize():"); tabs++; // if debugging, set up array variables and call user-overridable // 
debugging setup method if (grammar.debuggingOutput) { println("ruleNames = _ruleNames"); println("semPredNames = _semPredNames"); println("setupDebugging()"); } // Generate the setting of various generated options. // These need to be before the literals since ANTLRHashString depends on // the casesensitive stuff. println("caseSensitiveLiterals = " + g.caseSensitiveLiterals); println("setCaseSensitive(" + g.caseSensitive + ")"); // Generate the initialization of a hashtable // containing the string literals used in the lexer // The literals variable itself is in CharScanner if (g.caseSensitiveLiterals) println("literals = Hashtable(100, 0.4f, null, Comparer.Default)"); else println("literals = Hashtable(100, 0.4f, CaseInsensitiveHashCodeProvider.Default, CaseInsensitiveComparer.Default)"); Enumeration keys = grammar.tokenManager.getTokenSymbolKeys(); while (keys.hasMoreElements()) { String key = (String) keys.nextElement(); if (key.charAt(0) != '"') { continue; } TokenSymbol sym = grammar.tokenManager.getTokenSymbol(key); if (sym instanceof StringLiteralSymbol) { StringLiteralSymbol s = (StringLiteralSymbol) sym; println("literals.Add(" + s.getId() + ", " + s.getTokenType() + ")"); } } Enumeration ids; tabs--; // generate the rule name array for debugging if (grammar.debuggingOutput) { println("private static final _ruleNames = ("); ids = grammar.rules.elements(); int ruleNum = 0; while (ids.hasMoreElements()) { GrammarSymbol sym = (GrammarSymbol) ids.nextElement(); if (sym instanceof RuleSymbol) println(" \"" + ((RuleSymbol) sym).getId() + "\","); } println(")"); } // Generate nextToken() rule. // nextToken() is a synthetic lexer rule that is the implicit OR of all // user-defined lexer rules. 
genNextToken(); // Generate code for each rule in the lexer ids = grammar.rules.elements(); int ruleNum = 0; while (ids.hasMoreElements()) { RuleSymbol sym = (RuleSymbol) ids.nextElement(); // Don't generate the synthetic rules if (!sym.getId().equals("mnextToken")) { genRule(sym, false, ruleNum++, grammar.tokenManager); } exitIfError(); } // Generate the semantic predicate map for debugging if (grammar.debuggingOutput) genSemPredMap(); // Generate the bitsets used throughout the lexer genBitsets(bitsetsUsed, ((LexerGrammar) grammar).charVocabulary.size()); println(""); tabs--; // Generate the Boo namespace closures (if required) if (nameSpace != null) nameSpace.emitClosures(currentOutput); // Close the lexer output stream currentOutput.close(); currentOutput = null; } public void genInitFactory(Grammar g) { if (g.buildAST) { // Generate the method to initialize an ASTFactory when we're // building AST's println("static def initializeASTFactory(factory as ASTFactory):"); tabs++; println("factory.setMaxNodeType(" + g.tokenManager.maxTokenType() + ")"); // Walk the token vocabulary and generate code to register every // TokenID->ASTNodeType // mapping specified in the tokens {...} section with the // ASTFactory. Vector v = g.tokenManager.getVocabulary(); for (int i = 0; i < v.size(); i++) { String s = (String) v.elementAt(i); if (s != null) { TokenSymbol ts = g.tokenManager.getTokenSymbol(s); if (ts != null && ts.getASTNodeType() != null) { println("factory.setTokenTypeASTNodeType(" + s + ", \"" + ts.getASTNodeType() + "\")"); } } } tabs--; } } public void genBody(ParserGrammar g) throws IOException { // Open the output stream for the parser and set the currentOutput // SAS: moved file setup so subclass could do it (for VAJ interface) setupOutput(grammar.getClassName()); genAST = grammar.buildAST; tabs = 0; // Generate the header common to all output files. 
genHeader(); // Do not use printAction because we assume tabs==0 println(behavior.getHeaderAction("")); // Generate the Boo namespace declaration (if specified) if (nameSpace != null) nameSpace.emitDeclarations(currentOutput); // Generate header for the parser println("// Generate the header common to all output files."); println("import System"); println(""); println("import antlr.TokenBuffer as TokenBuffer"); println("import antlr.TokenStreamException as TokenStreamException"); println("import antlr.TokenStreamIOException as TokenStreamIOException"); println("import antlr.ANTLRException as ANTLRException"); String qualifiedClassName = grammar.getSuperClass(); String[] unqualifiedClassName = split(qualifiedClassName, "."); println("import " + "antlr." + qualifiedClassName + " as " + unqualifiedClassName[unqualifiedClassName.length - 1]); println("import antlr.Token as Token"); println("import antlr.IToken as IToken"); println("import antlr.TokenStream as TokenStream"); println("import antlr.RecognitionException as RecognitionException"); println("import antlr.NoViableAltException as NoViableAltException"); println("import antlr.MismatchedTokenException as MismatchedTokenException"); println("import antlr.SemanticException as SemanticException"); println("import antlr.ParserSharedInputState as ParserSharedInputState"); println("import antlr.collections.impl.BitSet as BitSet"); if (genAST) { println("import antlr.collections.AST as AST"); println("import antlr.ASTPair as ASTPair"); println("import antlr.ASTFactory as ASTFactory"); println("import antlr.collections.impl.ASTArray as ASTArray"); } // Output the user-defined parser preamble println(grammar.preambleAction.getText()); // Generate parser class definition String sup = null; if (grammar.superClass != null) sup = grammar.superClass; else sup = "antlr." 
+ grammar.getSuperClass(); // print javadoc comment if any if (grammar.comment != null) { _println(grammar.comment); } Token tprefix = (Token) grammar.options.get("classHeaderPrefix"); if (tprefix != null) { String p = StringUtils .stripFrontBack(tprefix.getText(), "\"", "\""); if (p != null) { print(p + " "); } } print("class " + grammar.getClassName() + "(" + sup); Token tsuffix = (Token) grammar.options.get("classHeaderSuffix"); if (tsuffix != null) { String suffix = StringUtils.stripFrontBack(tsuffix.getText(), "\"", "\""); if (suffix != null) print(", " + suffix); // must be an interface name // for Boo } _println("):"); tabs++; // Generate 'const' definitions for Token IDs genTokenDefinitions(grammar.tokenManager); // set up an array of all the rule names so the debugger can // keep track of them only by number -- less to store in tree... if (grammar.debuggingOutput) { println("private static final _ruleNames = ("); tabs++; Enumeration ids = grammar.rules.elements(); int ruleNum = 0; while (ids.hasMoreElements()) { GrammarSymbol sym = (GrammarSymbol) ids.nextElement(); if (sym instanceof RuleSymbol) println(" \"" + ((RuleSymbol) sym).getId() + "\","); } tabs--; println(")"); } // Generate user-defined parser class members print(processActionForSpecialSymbols(grammar.classMemberAction .getText(), grammar.classMemberAction.getLine(), currentRule, null)); // Generate parser class constructor from TokenBuffer println(""); println("protected def initialize():"); tabs++; println("tokenNames = tokenNames_"); if (grammar.buildAST) println("initializeFactory()"); // if debugging, set up arrays and call the user-overridable // debugging setup method if (grammar.debuggingOutput) { println("ruleNames = _ruleNames"); println("semPredNames = _semPredNames"); println("setupDebugging(tokenBuf)"); } tabs--; println(""); println(""); println("protected def constructor(tokenBuf as TokenBuffer, k as int):"); tabs++; println("super(tokenBuf, k)"); println("initialize()"); tabs--; 
println(""); println("def constructor(tokenBuf as TokenBuffer):"); printSingleLineBlock("self(tokenBuf, " + grammar.maxk + ")"); println(""); // Generate parser class constructor from TokenStream println("protected def constructor(lexer as TokenStream, k as int):"); tabs++; println("super(lexer, k)"); println("initialize()"); tabs--; println(""); println("public def constructor(lexer as TokenStream):"); printSingleLineBlock("self(lexer, " + grammar.maxk + ")"); println(""); println("public def constructor(state as ParserSharedInputState):"); tabs++; println("super(state, " + grammar.maxk + ")"); println("initialize()"); tabs--; println(""); astTypes = new java.util.Vector(100); // Generate code for each rule in the grammar Enumeration ids = grammar.rules.elements(); int ruleNum = 0; while (ids.hasMoreElements()) { GrammarSymbol sym = (GrammarSymbol) ids.nextElement(); if (sym instanceof RuleSymbol) { RuleSymbol rs = (RuleSymbol) sym; genRule(rs, rs.references.size() == 0, ruleNum++, grammar.tokenManager); } exitIfError(); } // Generate the method that initializes the ASTFactory when we're // building AST's if (grammar.buildAST) { println("private def initializeFactory():"); tabs++; println("if (astFactory is null):"); tabs++; if (usingCustomAST) { println("astFactory = ASTFactory(\"" + labeledElementASTType + "\")"); } else println("astFactory = ASTFactory()"); tabs--; println("initializeASTFactory(astFactory)"); tabs--; genInitFactory(g); } // Generate the token names genTokenStrings(); // Generate the bitsets used throughout the grammar genBitsets(bitsetsUsed, grammar.tokenManager.maxTokenType()); // Generate the semantic predicate map for debugging if (grammar.debuggingOutput) genSemPredMap(); // Close class definition println(""); tabs--; // Generate the Boo namespace closures (if required) if (nameSpace != null) nameSpace.emitClosures(currentOutput); // Close the parser output stream currentOutput.close(); currentOutput = null; } public void 
genBody(TreeWalkerGrammar g) throws IOException { // Open the output stream for the parser and set the currentOutput // SAS: move file open to method so subclass can override it // (mainly for VAJ interface) setupOutput(grammar.getClassName()); genAST = grammar.buildAST; tabs = 0; // Generate the header common to all output files. genHeader(); // Do not use printAction because we assume tabs==0 println(behavior.getHeaderAction("")); // Generate the Boo namespace declaration (if specified) if (nameSpace != null) nameSpace.emitDeclarations(currentOutput); // Generate header specific to the tree-parser Boo file println("// Generate header specific to the tree-parser Boo file"); println("import System"); println(""); println("import antlr." + grammar.getSuperClass() + " as " + grammar.getSuperClass()); println("import antlr.Token as Token"); println("import antlr.IToken as IToken"); println("import antlr.collections.AST as AST"); println("import antlr.RecognitionException as RecognitionException"); println("import antlr.ANTLRException as ANTLRException"); println("import antlr.NoViableAltException as NoViableAltException"); println("import antlr.MismatchedTokenException as MismatchedTokenException"); println("import antlr.SemanticException as SemanticException"); println("import antlr.collections.impl.BitSet as BitSet"); println("import antlr.ASTPair as ASTPair"); println("import antlr.ASTFactory as ASTFactory"); println("import antlr.collections.impl.ASTArray as ASTArray"); // Output the user-defined parser premamble println(grammar.preambleAction.getText()); // Generate parser class definition String sup = null; if (grammar.superClass != null) { sup = grammar.superClass; } else { sup = "antlr." 
+ grammar.getSuperClass(); } println(""); // print javadoc comment if any if (grammar.comment != null) { _println(grammar.comment); } Token tprefix = (Token) grammar.options.get("classHeaderPrefix"); if (tprefix != null) { String p = StringUtils .stripFrontBack(tprefix.getText(), "\"", "\""); if (p != null) { print(p + " "); } } print("class " + grammar.getClassName() + "(" + sup); Token tsuffix = (Token) grammar.options.get("classHeaderSuffix"); if (tsuffix != null) { String suffix = StringUtils.stripFrontBack(tsuffix.getText(), "\"", "\""); if (suffix != null) { print(", " + suffix); // must be an interface name // for Boo } } _println("):"); tabs++; // Generate 'const' definitions for Token IDs genTokenDefinitions(grammar.tokenManager); // Generate user-defined parser class members print(processActionForSpecialSymbols(grammar.classMemberAction .getText(), grammar.classMemberAction.getLine(), currentRule, null)); // Generate default parser class constructor println("def constructor():"); tabs++; println("tokenNames = tokenNames_"); tabs--; println(""); astTypes = new java.util.Vector(); // Generate code for each rule in the grammar Enumeration ids = grammar.rules.elements(); int ruleNum = 0; String ruleNameInits = ""; while (ids.hasMoreElements()) { GrammarSymbol sym = (GrammarSymbol) ids.nextElement(); if (sym instanceof RuleSymbol) { RuleSymbol rs = (RuleSymbol) sym; genRule(rs, rs.references.size() == 0, ruleNum++, grammar.tokenManager); } exitIfError(); } // Generate the ASTFactory initialization function genInitFactory(grammar); // Generate the token names genTokenStrings(); // Generate the bitsets used throughout the grammar genBitsets(bitsetsUsed, grammar.tokenManager.maxTokenType()); // Close class definition tabs--; println(""); // Generate the Boo namespace closures (if required) if (nameSpace != null) nameSpace.emitClosures(currentOutput); // Close the parser output stream currentOutput.close(); currentOutput = null; } /** * Generate a series of case 
statements that implement a BitSet test. * * @param p * The Bitset for which cases are to be generated */ protected void genCases(String stmt, BitSet p) { if (DEBUG_CODE_GENERATOR) System.out.println("genCases(" + p + ")"); int[] elems; elems = p.toArray(); print(stmt + " ((_givenValue == " + getValueString(elems[0]) + ")"); if (elems.length > 1) { _println(""); ++tabs; int last = elems.length - 1; for (int i = 1; i < elems.length; i++) { //println("when " + getValueString(elems[i]) + ":"); println(" or (_givenValue ==" + getValueString(elems[i]) + ")"); } --tabs; println("): // 1827"); } else { _println("): // 1831"); } } /** * Generate common code for a block of alternatives; return a postscript * that needs to be generated at the end of the block. Other routines may * append else-clauses and such for error checking before the postfix is * generated. If the grammar is a lexer, then generate alternatives in an * order where alternatives requiring deeper lookahead are generated first, * and EOF in the lookahead set reduces the depth of the lookahead. * * @param blk * The block to generate * @param noTestForSingle * If true, then it does not generate a test for a single * alternative. */ public BooBlockFinishingInfo genCommonBlock(AlternativeBlock blk, boolean noTestForSingle) { int nIF = 0; boolean createdLL1Switch = false; int closingBracesOfIFSequence = 0; BooBlockFinishingInfo finishingInfo = new BooBlockFinishingInfo(); if (DEBUG_CODE_GENERATOR) System.out.println("genCommonBlock(" + blk + ")"); // Save the AST generation state, and set it to that of the block boolean savegenAST = genAST; genAST = genAST && blk.getAutoGen(); boolean oldsaveTest = saveText; saveText = saveText && blk.getAutoGen(); // Is this block inverted? 
If so, generate special-case code if (blk.not && analyzer.subruleCanBeInverted(blk, grammar instanceof LexerGrammar)) { if (DEBUG_CODE_GENERATOR) System.out.println("special case: ~(subrule)"); Lookahead p = analyzer.look(1, blk); // Variable assignment for labeled elements if (blk.getLabel() != null && syntacticPredLevel == 0) { println(blk.getLabel() + " = " + lt1Value); } // AST genElementAST(blk); String astArgs = ""; if (grammar instanceof TreeWalkerGrammar) { if (usingCustomAST) astArgs = "cast(AST, _t),"; else astArgs = "_t,"; } // match the bitset for the alternative println("match(" + astArgs + getBitsetName(markBitsetForGen(p.fset)) + ")"); // tack on tree cursor motion if doing a tree walker if (grammar instanceof TreeWalkerGrammar) { println("_t = _t.getNextSibling()"); } return finishingInfo; } // Special handling for single alt if (blk.getAlternatives().size() == 1) { Alternative alt = blk.getAlternativeAt(0); // Generate a warning if there is a synPred for single alt. if (alt.synPred != null) { antlrTool .warning( "Syntactic predicate superfluous for single alternative", grammar.getFilename(), blk.getAlternativeAt(0).synPred.getLine(), blk .getAlternativeAt(0).synPred .getColumn()); } if (noTestForSingle) { if (alt.semPred != null) { // Generate validating predicate genSemPred(alt.semPred, blk.line); } genAlt(alt, blk); return finishingInfo; } } // count number of simple LL(1) cases; only do switch for // many LL(1) cases (no preds, no end of token refs) // We don't care about exit paths for (...)*, (...)+ // because we don't explicitly have a test for them // as an alt in the loop. // // Also, we now count how many unicode lookahead sets // there are--they must be moved to DEFAULT or ELSE // clause. 
int nLL1 = 0; for (int i = 0; i < blk.getAlternatives().size(); i++) { Alternative a = blk.getAlternativeAt(i); if (suitableForCaseExpression(a)) { nLL1++; } } // do LL(1) cases if (nLL1 >= makeSwitchThreshold) { // Determine the name of the item to be compared String testExpr = lookaheadString(1); createdLL1Switch = true; // when parsing trees, convert null to valid tree node with NULL // lookahead if (grammar instanceof TreeWalkerGrammar) { println("if _t is null:"); printSingleLineBlock("_t = ASTNULL"); } // given is not supported yet... //println("given " + testExpr + ":"); //tabs++; println("_givenValue = " + testExpr); for (int i = 0; i < blk.alternatives.size(); i++) { Alternative alt = blk.getAlternativeAt(i); // ignore any non-LL(1) alts, predicated alts, // or end-of-token alts for case expressions if (!suitableForCaseExpression(alt)) { continue; } Lookahead p = alt.cache[1]; if (p.fset.degree() == 0 && !p.containsEpsilon()) { antlrTool.warning( "Alternate omitted due to empty prediction set", grammar.getFilename(), alt.head.getLine(), alt.head .getColumn()); } else { String stmt = 0 == i ? "if" : "elif"; genCases(stmt, p.fset); tabs++; genAlt(alt, blk); tabs--; } } println("else: // line 1969"); tabs++; } // do non-LL(1) and nondeterministic cases This is tricky in // the lexer, because of cases like: STAR : '*' ; ASSIGN_STAR // : "*="; Since nextToken is generated without a loop, then // the STAR will have end-of-token as it's lookahead set for // LA(2). So, we must generate the alternatives containing // trailing end-of-token in their lookahead sets *after* the // alternatives without end-of-token. This implements the // usual lexer convention that longer matches come before // shorter ones, e.g. "*=" matches ASSIGN_STAR not STAR // // For non-lexer grammars, this does not sort the alternates // by depth Note that alts whose lookahead is purely // end-of-token at k=1 end up as default or else clauses. 
int startDepth = (grammar instanceof LexerGrammar) ? grammar.maxk : 0; for (int altDepth = startDepth; altDepth >= 0; altDepth--) { if (DEBUG_CODE_GENERATOR) System.out.println("checking depth " + altDepth); for (int i = 0; i < blk.alternatives.size(); i++) { Alternative alt = blk.getAlternativeAt(i); if (DEBUG_CODE_GENERATOR) System.out.println("genAlt: " + i); // if we made a switch above, ignore what we already took care // of. Specifically, LL(1) alts with no preds // that do not have end-of-token in their prediction set // and that are not giant unicode sets. if (createdLL1Switch && suitableForCaseExpression(alt)) { if (DEBUG_CODE_GENERATOR) System.out .println("ignoring alt because it was in the switch"); continue; } String e; boolean unpredicted = false; if (grammar instanceof LexerGrammar) { // Calculate the "effective depth" of the alt, // which is the max depth at which // cache[depth]!=end-of-token int effectiveDepth = alt.lookaheadDepth; if (effectiveDepth == GrammarAnalyzer.NONDETERMINISTIC) { // use maximum lookahead effectiveDepth = grammar.maxk; } while (effectiveDepth >= 1 && alt.cache[effectiveDepth].containsEpsilon()) { effectiveDepth--; } // Ignore alts whose effective depth is other than // the ones we are generating for this iteration. if (effectiveDepth != altDepth) { if (DEBUG_CODE_GENERATOR) System.out .println("ignoring alt because effectiveDepth!=altDepth;" + effectiveDepth + "!=" + altDepth); continue; } unpredicted = lookaheadIsEmpty(alt, effectiveDepth); e = getLookaheadTestExpression(alt, effectiveDepth); } else { unpredicted = lookaheadIsEmpty(alt, grammar.maxk); e = getLookaheadTestExpression(alt, grammar.maxk); } // Was it a big unicode range that forced unsuitability // for a case expression? 
if (alt.cache[1].fset.degree() > caseSizeThreshold && suitableForCaseExpression(alt)) { if (nIF == 0) { println("if " + e + ":"); } else { println("elif " + e + ": // 2053"); } } else if (unpredicted && alt.semPred == null && alt.synPred == null) { // The alt has empty prediction set and no // predicate to help out. if we have not // generated a previous if, just put {...} around // the end-of-token clause if (nIF != 0) { println("else: // line 2053"); } finishingInfo.needAnErrorClause = false; } else { // check for sem and syn preds // Add any semantic predicate expression to the lookahead // test if (alt.semPred != null) { // if debugging, wrap the evaluation of the predicate in // a method // // translate $ and # references ActionTransInfo tInfo = new ActionTransInfo(); String actionStr = processActionForSpecialSymbols( alt.semPred, blk.line, currentRule, tInfo); // ignore translation info...we don't need to // do anything with it. call that will inform // SemanticPredicateListeners of the result if (((grammar instanceof ParserGrammar) || (grammar instanceof LexerGrammar)) && grammar.debuggingOutput) { e = "(" + e + "&& fireSemanticPredicateEvaluated(antlr.debug.SemanticPredicateEventArgs.PREDICTING," + // FIXME addSemPred(charFormatter .escapeString(actionStr)) + "," + actionStr + "))"; } else { e = "(" + e + " and (" + actionStr + "))"; } } // Generate any syntactic predicates if (nIF > 0) { if (alt.synPred != null) { println("else: // line 2088"); tabs++; genSynPred(alt.synPred, e); closingBracesOfIFSequence++; } else { println("elif " + e + ": // line 2102"); } } else { if (alt.synPred != null) { genSynPred(alt.synPred, e); } else { // when parsing trees, convert null to valid tree // node // with NULL lookahead. 
// Tree-walker alts test a possibly-null cursor: guard by replacing a
// null _t with ASTNULL before emitting the lookahead test.
if (grammar instanceof TreeWalkerGrammar) {
    println("if _t is null:");
    printSingleLineBlock("_t = ASTNULL");
}
println("if " + e + ":");
					}
				}
			}
			nIF++;
			tabs++;
			genAlt(alt, blk);
			tabs--;
		}
	}

	// Restore the AST generation state
	genAST = savegenAST;
	// restore save text state
	saveText = oldsaveTest;

	// Return the finishing info.
	if (createdLL1Switch) {
		//tabs--;
		finishingInfo.generatedSwitch = true;
		finishingInfo.generatedAnIf = nIF > 0;
	} else {
		finishingInfo.generatedSwitch = false;
		finishingInfo.generatedAnIf = nIF > 0;
	}
	return finishingInfo;
}

/**
 * True only for an LL(1) alternative with no semantic predicate, no
 * epsilon in its k=1 lookahead cache, and a lookahead set small enough
 * (degree &lt;= caseSizeThreshold) for an if/elif dispatch chain.
 */
private static boolean suitableForCaseExpression(Alternative a) {
	return a.lookaheadDepth == 1 && a.semPred == null
			&& !a.cache[1].containsEpsilon()
			&& a.cache[1].fset.degree() <= caseSizeThreshold;
}

/** Generate code to link an element reference into the AST */
private void genElementAST(AlternativeElement el) {
	// handle case where you're not building trees, but are in tree walker.
	// Just need to get labels set up.
	if (grammar instanceof TreeWalkerGrammar && !grammar.buildAST) {
		String elementRef;
		String astName;

		// Generate names and declarations of the AST variable(s)
		if (el.getLabel() == null) {
			elementRef = lt1Value;

			// Generate AST variables for unlabeled stuff
			astName = "tmp" + astVarNumber + "_AST";
			astVarNumber++;

			// Map the generated AST variable in the alternate
			mapTreeVariable(el, astName);

			// Generate an "input" AST variable also
			println(astName + "_in as " + labeledElementASTType + " = "
					+ elementRef);
		}
		return;
	}

	if (grammar.buildAST && syntacticPredLevel == 0) {
		boolean needASTDecl = (genAST && (el.getLabel() != null || (el
				.getAutoGenType() != GrammarElement.AUTO_GEN_BANG)));

		// RK: if we have a grammar element always generate the decl
		// since some guy can access it from an action and we can't
		// peek ahead (well not without making a mess).
		// I'd prefer taking this out.
		if (el.getAutoGenType() != GrammarElement.AUTO_GEN_BANG
				&& (el instanceof TokenRefElement))
			needASTDecl = true;

		boolean doNoGuessTest = (grammar.hasSyntacticPredicate && needASTDecl);

		String elementRef;
		String astNameBase;

		// Generate names and declarations of the AST variable(s)
		if (el.getLabel() != null) {
			// if the element is labeled use that name...
			elementRef = el.getLabel();
			astNameBase = el.getLabel();
		} else {
			// else generate a temporary name...
			elementRef = lt1Value;

			// Generate AST variables for unlabeled stuff
			astNameBase = "tmp" + astVarNumber;
			astVarNumber++;
		}

		// Generate the declaration if required.
		if (needASTDecl) {
			// Generate the declaration
			if (el instanceof GrammarAtom) {
				GrammarAtom ga = (GrammarAtom) el;
				if (ga.getASTNodeType() != null) {
					genASTDeclaration(el, astNameBase, ga.getASTNodeType());
				} else {
					genASTDeclaration(el, astNameBase, labeledElementASTType);
				}
			} else {
				genASTDeclaration(el, astNameBase, labeledElementASTType);
			}
		}

		// for convenience..
		String astName = astNameBase + "_AST";

		// Map the generated AST variable in the alternate
		mapTreeVariable(el, astName);
		if (grammar instanceof TreeWalkerGrammar) {
			// Generate an "input" AST variable also
			println(astName + "_in as " + labeledElementASTType + " = null");
		}

		// Enclose actions with !guessing
		if (doNoGuessTest) {
			// println("if (0 == inputState.guessing)");
			// println("{");
			// tabs++;
		}

		// if something has a label assume it will be used
		// so we must initialize the RefAST
		if (el.getLabel() != null) {
			if (el instanceof GrammarAtom) {
				println(astName + " = "
						+ getASTCreateString((GrammarAtom) el, elementRef));
			} else {
				println(astName + " = " + getASTCreateString(elementRef));
			}
		}

		// if it has no label but a declaration exists initialize it.
		if (el.getLabel() == null && needASTDecl) {
			elementRef = lt1Value;
			if (el instanceof GrammarAtom) {
				println(astName + " = "
						+ getASTCreateString((GrammarAtom) el, elementRef));
			} else {
				println(astName + " = " + getASTCreateString(elementRef));
			}
			// Map the generated AST variable in the alternate
			if (grammar instanceof TreeWalkerGrammar) {
				// set "input" AST variable also
				println(astName + "_in = " + elementRef);
			}
		}

		if (genAST) {
			switch (el.getAutoGenType()) {
			case GrammarElement.AUTO_GEN_NONE:
				if (usingCustomAST
						|| ((el instanceof GrammarAtom) && (((GrammarAtom) el)
								.getASTNodeType() != null)))
					println("astFactory.addASTChild(currentAST, cast(AST, "
							+ astName + "))");
				else
					println("astFactory.addASTChild(currentAST, " + astName
							+ ")");
				break;
			case GrammarElement.AUTO_GEN_CARET:
				if (usingCustomAST
						|| ((el instanceof GrammarAtom) && (((GrammarAtom) el)
								.getASTNodeType() != null)))
					println("astFactory.makeASTRoot(currentAST, cast(AST, "
							+ astName + "))");
				else
					println("astFactory.makeASTRoot(currentAST, " + astName
							+ ")");
				break;
			default:
				break;
			}
		}
		if (doNoGuessTest) {
			// tabs--;
			// println("}");
		}
	}
}

/**
 * Close the try block and generate catch phrases if the element has a
 * labeled handler in the rule
 */
private void genErrorCatchForElement(AlternativeElement el) {
	if (el.getLabel() == null)
		return;
	String r = el.enclosingRuleName;
	if (grammar instanceof LexerGrammar) {
		r = CodeGenerator.encodeLexerRuleName(el.enclosingRuleName);
	}
	RuleSymbol rs = (RuleSymbol) grammar.getSymbol(r);
	if (rs == null) {
		antlrTool.panic("Enclosing rule not found!");
	}
	ExceptionSpec ex = rs.block.findExceptionSpec(el.getLabel());
	if (ex != null) {
		tabs--;
		genErrorHandler(ex);
	}
}

/** Generate the catch phrases for a user-specified error handler */
private void genErrorHandler(ExceptionSpec ex) {
	// Each ExceptionHandler in the ExceptionSpec is a separate catch
	for (int i = 0; i < ex.handlers.size(); i++) {
		ExceptionHandler handler = (ExceptionHandler) ex.handlers
				.elementAt(i);
		// Generate catch phrase
		// NOTE(review): this emits Java/C#-style "catch (...)" text, while
		// the rest of this generator emits Boo's "except x as T:" form —
		// TODO confirm the emitted grammar actually compiles under Boo.
		println("catch (" + handler.exceptionTypeAndName.getText() + "):");
		tabs++;
		if (grammar.hasSyntacticPredicate) {
			println("if (0 == inputState.guessing):");
			tabs++;
		}

		// When not guessing, execute user handler action
		ActionTransInfo tInfo = new ActionTransInfo();
		printAction(processActionForSpecialSymbols(
				handler.action.getText(), handler.action.getLine(),
				currentRule, tInfo));

		if (grammar.hasSyntacticPredicate) {
			tabs--;
			println("else:");
			tabs++;
			// When guessing, rethrow exception
			// println("throw " +
			// extractIdOfAction(handler.exceptionTypeAndName) + ";");
			println("raise");
			tabs--;
		}
		// Close catch phrase
		tabs--;
	}
}

/** Generate a try { opening if the element has a labeled handler in the rule */
private void genErrorTryForElement(AlternativeElement el) {
	if (el.getLabel() == null)
		return;
	String r = el.enclosingRuleName;
	if (grammar instanceof LexerGrammar) {
		r = CodeGenerator.encodeLexerRuleName(el.enclosingRuleName);
	}
	RuleSymbol rs = (RuleSymbol) grammar.getSymbol(r);
	if (rs == null) {
		antlrTool.panic("Enclosing rule not found!");
	}
	ExceptionSpec ex = rs.block.findExceptionSpec(el.getLabel());
	if (ex != null) {
		println("try: // for error handling");
		tabs++;
	}
}

// Convenience overloads funneling into the three-argument form below.
protected void genASTDeclaration(AlternativeElement el) {
	genASTDeclaration(el, labeledElementASTType);
}

protected void genASTDeclaration(AlternativeElement el, String node_type) {
	genASTDeclaration(el, el.getLabel(), node_type);
}

protected void genASTDeclaration(AlternativeElement el, String var_name,
		String node_type) {
	// already declared?
	if (declaredASTVariables.contains(el))
		return;

	// emit code
	// String s = StringUtils.stripFrontBack(node_type, "\"", "\"");
	// println(s + " " + var_name + "_AST = null;");
	println(var_name + "_AST as " + node_type + " = null");

	// mark as declared
	declaredASTVariables.put(el, el);
}

/** Generate a header that is common to all Boo files */
protected void genHeader() {
	println("// $ANTLR " + Tool.version + ": " + "\""
			+ antlrTool.fileMinusPath(antlrTool.grammarFile) + "\"" + " -> "
			+ "\"" + grammar.getClassName() + ".boo\"$");
}

private void genLiteralsTest() {
	println("_ttype = testLiteralsTable(_ttype)");
}

private void genLiteralsTestForPartialToken() {
	println("_ttype = testLiteralsTable(text.ToString(_begin, text.Length-_begin), _ttype)");
}

protected void genMatch(BitSet b) {
}

// Dispatch to text- or token-type-based matching depending on the atom
// kind and on whether we are generating a lexer.
protected void genMatch(GrammarAtom atom) {
	if (atom instanceof StringLiteralElement) {
		if (grammar instanceof LexerGrammar) {
			genMatchUsingAtomText(atom);
		} else {
			genMatchUsingAtomTokenType(atom);
		}
	} else if (atom instanceof CharLiteralElement) {
		if (grammar instanceof LexerGrammar) {
			genMatchUsingAtomText(atom);
		} else {
			antlrTool.error("cannot ref character literals in grammar: "
					+ atom);
		}
	} else if (atom instanceof TokenRefElement) {
		genMatchUsingAtomText(atom);
	} else if (atom instanceof WildcardElement) {
		gen((WildcardElement) atom);
	}
}

protected void genMatchUsingAtomText(GrammarAtom atom) {
	// match() for trees needs the _t cursor
	String astArgs = "";
	if (grammar instanceof TreeWalkerGrammar) {
		if (usingCustomAST)
			astArgs = "cast(AST, _t),";
		else
			astArgs = "_t,";
	}

	// if in lexer and ! on element, save buffer index to kill later
	if (grammar instanceof LexerGrammar
			&& (!saveText || atom.getAutoGenType() == GrammarElement.AUTO_GEN_BANG)) {
		println("_saveIndex = text.Length");
	}
	print(atom.not ?
"matchNot(" : "match("); _print(astArgs); // print out what to match if (atom.atomText.equals("EOF")) { // horrible hack to handle EOF case _print("Token.EOF_TYPE"); } else { _print(atom.atomText); } _println(")"); if (grammar instanceof LexerGrammar && (!saveText || atom.getAutoGenType() == GrammarElement.AUTO_GEN_BANG)) { println("text.Length = _saveIndex"); // kill text atom put in // buffer } } protected void genMatchUsingAtomTokenType(GrammarAtom atom) { // match() for trees needs the _t cursor String astArgs = ""; if (grammar instanceof TreeWalkerGrammar) { if (usingCustomAST) astArgs = "cast(AST, _t),"; else astArgs = "_t,"; } // If the literal can be mangled, generate the symbolic constant instead String mangledName = null; String s = astArgs + getValueString(atom.getType()); // matching println((atom.not ? "matchNot(" : "match(") + s + ")"); } /** * Generate the nextToken() rule. nextToken() is a synthetic lexer rule that * is the implicit OR of all user-defined lexer rules. */ public void genNextToken() { // Are there any public rules? If not, then just generate a // fake nextToken(). 
boolean hasPublicRules = false;
	for (int i = 0; i < grammar.rules.size(); i++) {
		RuleSymbol rs = (RuleSymbol) grammar.rules.elementAt(i);
		if (rs.isDefined() && rs.access.equals("public")) {
			hasPublicRules = true;
			break;
		}
	}
	if (!hasPublicRules) {
		// No public rules at all: nextToken() only ever reports EOF.
		println("");
		println("override def nextToken() as IToken:");
		tabs++;
		println("try:");
		tabs++;
		println("uponEOF()");
		tabs--;
		println("except csioe as CharStreamIOException:");
		tabs++;
		println("raise TokenStreamIOException(csioe.io)");
		tabs--;
		println("except cse as CharStreamException:");
		tabs++;
		println("raise TokenStreamException(cse.Message)");
		tabs--;
		println("return CommonToken(Token.EOF_TYPE, \"\")");
		tabs--;
		println("");
		return;
	}

	// Create the synthesized nextToken() rule
	RuleBlock nextTokenBlk = MakeGrammar.createNextTokenRule(grammar,
			grammar.rules, "nextToken");
	// Define the nextToken rule symbol
	RuleSymbol nextTokenRs = new RuleSymbol("mnextToken");
	nextTokenRs.setDefined();
	nextTokenRs.setBlock(nextTokenBlk);
	nextTokenRs.access = "private";
	grammar.define(nextTokenRs);
	// Analyze the nextToken rule
	boolean ok = grammar.theLLkAnalyzer.deterministic(nextTokenBlk);

	// Generate the next token rule
	String filterRule = null;
	if (((LexerGrammar) grammar).filterMode) {
		filterRule = ((LexerGrammar) grammar).filterRule;
	}

	println("");
	println("override def nextToken() as IToken:");
	tabs++;
	// delay creation of _saveIndex until we need it OK?
	println("theRetToken as IToken");
	println(":tryAgain");
	println("while true:");
	tabs++;
	println("_token as IToken = null");
	println("_ttype = Token.INVALID_TYPE");
	if (((LexerGrammar) grammar).filterMode) {
		println("setCommitToPath(false)");
		if (filterRule != null) {
			// Here's a good place to ensure that the filter rule actually
			// exists
			if (!grammar.isDefined(CodeGenerator
					.encodeLexerRuleName(filterRule))) {
				grammar.antlrTool.error("Filter rule " + filterRule
						+ " does not exist in this lexer");
			} else {
				RuleSymbol rs = (RuleSymbol) grammar
						.getSymbol(CodeGenerator
								.encodeLexerRuleName(filterRule));
				if (!rs.isDefined()) {
					grammar.antlrTool.error("Filter rule " + filterRule
							+ " does not exist in this lexer");
				} else if (rs.access.equals("public")) {
					grammar.antlrTool.error("Filter rule " + filterRule
							+ " must be protected");
				}
			}
			println("_m as int");
			println("_m = mark()");
		}
	}
	println("resetText()");

	println("try: // for char stream error handling");
	tabs++;

	// Generate try around whole thing to trap scanner errors
	println("try: // for lexical error handling");
	tabs++;

	// Test for public lexical rules with empty paths
	for (int i = 0; i < nextTokenBlk.getAlternatives().size(); i++) {
		Alternative a = nextTokenBlk.getAlternativeAt(i);
		if (a.cache[1].containsEpsilon()) {
			// String r = a.head.toString();
			RuleRefElement rr = (RuleRefElement) a.head;
			String r = CodeGenerator.decodeLexerRuleName(rr.targetRule);
			antlrTool.warning("public lexical rule " + r
					+ " is optional (can match \"nothing\")");
		}
	}

	// Generate the block
	BooBlockFinishingInfo howToFinish = genCommonBlock(nextTokenBlk, false);
	final String finalFilterRule = filterRule;
	genBlockFinish(howToFinish, new Runnable() {
		public void run() {
			println("if cached_LA1 == EOF_CHAR:");
			printSingleLineBlock("uponEOF(); returnToken_ = makeToken(Token.EOF_TYPE)");
			if (((LexerGrammar) grammar).filterMode) {
				if (finalFilterRule == null) {
					// kunle: errFinish += "else { consume(); continue tryAgain; }";
					println("else:");
++tabs; println("consume()"); println("goto tryAgain"); --tabs; } else { println("else:"); ++tabs; println("commit()"); println("try:"); ++tabs; println("m" + finalFilterRule + "(false)"); --tabs; println("except e as RecognitionException:"); ++tabs; println("// catastrophic failure"); println("reportError(e)"); println("consume()"); --tabs; println("goto tryAgain"); --tabs; } } else { println("else:"); printSingleLineBlock(throwNoViable); } } }); // at this point a valid token has been matched, undo "mark" that was // done if (((LexerGrammar) grammar).filterMode && filterRule != null) { println("commit()"); } // Generate literals test if desired // make sure _ttype is set first; note returnToken_ must be // non-null as the rule was required to create it. println("goto tryAgain if returnToken_ is null // found SKIP token"); println("_ttype = returnToken_.Type"); if (((LexerGrammar) grammar).getTestLiterals()) { genLiteralsTest(); } // return token created by rule reference in switch println("returnToken_.Type = _ttype"); println("return returnToken_"); // Close try block tabs--; println("except e as RecognitionException:"); tabs++; if (((LexerGrammar) grammar).filterMode) { if (filterRule == null) { println("if (!getCommitToPath()):"); tabs++; println("consume()"); println("goto tryAgain"); tabs--; } else { println("if (!getCommitToPath()):"); tabs++; println("rewind(_m)"); println("resetText()"); println("try:"); printSingleLineBlock("m" + filterRule + "(false)"); println("exception ee as RecognitionException:"); println(" // horrendous failure: error in filter rule"); println(" reportError(ee)"); println(" consume()"); // println("goto tryAgain;"); tabs--; println("else:"); } } if (nextTokenBlk.getDefaultErrorHandler()) { tabs++; println("reportError(e)"); println("consume()"); tabs--; } else { // pass on to invoking routine tabs++; println("raise TokenStreamRecognitionException(e)"); tabs--; } tabs--; // close CharStreamException try tabs--; println("except cse 
as CharStreamException:"); println(" if cse isa CharStreamIOException:"); println(" raise TokenStreamIOException(cast(CharStreamIOException, cse).io)"); println(" else:"); println(" raise TokenStreamException(cse.Message)"); // close for-loop tabs--; // close method nextToken tabs--; println(""); } /** * Gen a named rule block. ASTs are generated for each element of an * alternative unless the rule or the alternative have a '!' modifier. If an * alternative defeats the default tree construction, it must set _AST * to the root of the returned AST. Each alternative that does automatic * tree construction, builds up root and child list pointers in an ASTPair * structure. A rule finishes by setting the returnAST variable from the * ASTPair. * * @param rule * The name of the rule to generate * @param startSymbol * true if the rule is a start symbol (i.e., not referenced * elsewhere) */ public void genRule(RuleSymbol s, boolean startSymbol, int ruleNum, TokenManager tm) { tabs = 1; if (DEBUG_CODE_GENERATOR) System.out.println("genRule(" + s.getId() + ")"); if (!s.isDefined()) { antlrTool.error("undefined rule: " + s.getId()); return; } // Generate rule return type, name, arguments RuleBlock rblk = s.getBlock(); currentRule = rblk; currentASTResult = s.getId(); // clear list of declared ast variables.. 
declaredASTVariables.clear(); // Save the AST generation state, and set it to that of the rule boolean savegenAST = genAST; genAST = genAST && rblk.getAutoGen(); // boolean oldsaveTest = saveText; saveText = rblk.getAutoGen(); // print javadoc comment if any if (s.comment != null) { _println(s.comment); } // Gen method access and final qualifier // print(s.access + " final "); print(s.access + " def "); // Gen method name _print(s.getId() + "("); // Additional rule parameters common to all rules for this grammar _print(commonExtraParams); if (commonExtraParams.length() != 0 && rblk.argAction != null) { _print(", "); } // Gen arguments if (rblk.argAction != null) { // Has specified arguments _println(""); tabs++; println( extractIdOfAction(rblk.argAction, rblk.line, rblk.column) + " as " + extractTypeOfAction(rblk.argAction, rblk.line, rblk.column) ); tabs--; print(")"); } else { // No specified arguments _print(")"); } _print(" as "); // Gen method return type (note lexer return action set at rule // creation) if (rblk.returnAction != null) { // Has specified return value _print(extractReturnTypeOfRuleBlock(rblk) + ""); } else { // No specified return value _print("void"); } _print(":"); // Gen throws clause and open curly _print(" //throws " + exceptionThrown); if (grammar instanceof ParserGrammar) { _print(", TokenStreamException"); } else if (grammar instanceof LexerGrammar) { _print(", CharStreamException, TokenStreamException"); } // Add user-defined exceptions unless lexer (for now) if (rblk.throwsSpec != null) { if (grammar instanceof LexerGrammar) { antlrTool .error("user-defined throws spec not allowed (yet) for lexer rule " + rblk.ruleName); } else { _print(", " + rblk.throwsSpec); } } _println(""); tabs++; // Convert return action to variable declaration if (rblk.returnAction != null) println(extractReturnIdOfRuleBlock(rblk) + " as " + extractReturnTypeOfRuleBlock(rblk)); // print out definitions needed by rules for various grammar types 
println(commonLocalVars); if (grammar.traceRules) { if (grammar instanceof TreeWalkerGrammar) { if (usingCustomAST) println("traceIn(\"" + s.getId() + "\", cast(AST, _t))"); else println("traceIn(\"" + s.getId() + "\",_t)"); } else { println("traceIn(\"" + s.getId() + "\")"); } } if (grammar instanceof LexerGrammar) { // lexer rule default return value is the rule's token name // This is a horrible hack to support the built-in EOF lexer rule. if (s.getId().equals("mEOF")) println("_ttype = Token.EOF_TYPE"); else println("_ttype = " + s.getId().substring(1)); /* * println("boolean old_saveConsumedInput=saveConsumedInput;"); if ( * !rblk.getAutoGen() ) { // turn off "save input" if ! on rule * println("saveConsumedInput=false;"); } */ } // if debugging, write code to mark entry to the rule if (grammar.debuggingOutput) if (grammar instanceof ParserGrammar) println("fireEnterRule(" + ruleNum + ",0)"); else if (grammar instanceof LexerGrammar) println("fireEnterRule(" + ruleNum + ",_ttype)"); // Generate trace code if desired if (grammar.debuggingOutput || grammar.traceRules) { println("try: // debugging"); tabs++; } // Initialize AST variables if (grammar instanceof TreeWalkerGrammar) { // "Input" value for rule println(s.getId() + "_AST_in as " + labeledElementASTType + " = cast(" + labeledElementASTType + ", _t)"); } if (grammar.buildAST) { // Parser member used to pass AST returns from rule invocations println("returnAST = null"); // Tracks AST construction // println("ASTPair currentAST = (inputState.guessing==0) ? new // ASTPair() : null;"); println("currentAST as ASTPair = ASTPair.GetInstance()"); // User-settable return value for rule. 
println(s.getId() + "_AST as " + labeledElementASTType); } genBlockPreamble(rblk); genBlockInitAction(rblk); println(""); // Search for an unlabeled exception specification attached to the rule ExceptionSpec unlabeledUserSpec = rblk.findExceptionSpec(""); // Generate try block around the entire rule for error handling if (unlabeledUserSpec != null || rblk.getDefaultErrorHandler()) { println("try: // for error handling"); tabs++; } // Generate the alternatives if (rblk.alternatives.size() == 1) { // One alternative -- use simple form Alternative alt = rblk.getAlternativeAt(0); String pred = alt.semPred; if (pred != null) genSemPred(pred, currentRule.line); if (alt.synPred != null) { antlrTool.warning( "Syntactic predicate ignored for single alternative", grammar.getFilename(), alt.synPred.getLine(), alt.synPred.getColumn()); } genAlt(alt, rblk); } else { // Multiple alternatives -- generate complex form boolean ok = grammar.theLLkAnalyzer.deterministic(rblk); BooBlockFinishingInfo howToFinish = genCommonBlock(rblk, false); genBlockFinish(howToFinish, throwNoViable); } // Generate catch phrase for error handling if (unlabeledUserSpec != null || rblk.getDefaultErrorHandler()) { // Close the try block tabs--; } // Generate user-defined or default catch phrases if (unlabeledUserSpec != null) { genErrorHandler(unlabeledUserSpec); } else if (rblk.getDefaultErrorHandler()) { // Generate default catch phrase println("except ex as " + exceptionThrown + ":"); tabs++; // Generate code to handle error if not guessing if (grammar.hasSyntacticPredicate) { println("if (0 == inputState.guessing):"); tabs++; } println("reportError(ex)"); if (!(grammar instanceof TreeWalkerGrammar)) { // Generate code to consume until token in k==1 follow set Lookahead follow = grammar.theLLkAnalyzer.FOLLOW(1, rblk.endNode); String followSetName = getBitsetName(markBitsetForGen(follow.fset)); println("recover(ex," + followSetName + ")"); } else { // Just consume one token println("if _t is not 
null:"); tabs++; println("_t = _t.getNextSibling()"); tabs--; } if (grammar.hasSyntacticPredicate) { tabs--; // When guessing, rethrow exception println("else:"); tabs++; println("raise"); tabs--; } // Close catch phrase tabs--; } // Squirrel away the AST "return" value if (grammar.buildAST) { println("returnAST = " + s.getId() + "_AST"); } // Set return tree value for tree walkers if (grammar instanceof TreeWalkerGrammar) { println("retTree_ = _t"); } // Generate literals test for lexer rules so marked if (rblk.getTestLiterals()) { if (s.access.equals("protected")) { genLiteralsTestForPartialToken(); } else { genLiteralsTest(); } } // if doing a lexer rule, dump code to create token if necessary if (grammar instanceof LexerGrammar) { println("if (_createToken and (_token is null) and (_ttype != Token.SKIP)):"); tabs++; println("_token = makeToken(_ttype)"); println("_token.setText(text.ToString(_begin, text.Length-_begin))"); tabs--; println("returnToken_ = _token"); } // Gen the return statement if there is one (lexer has hard-wired return // action) if (rblk.returnAction != null) { println("return " + extractReturnIdOfRuleBlock(rblk)); } if (grammar.debuggingOutput || grammar.traceRules) { println("ASTPair.PutInstance(currentAST)"); tabs--; println("finally:"); tabs++; println("// debugging"); // If debugging, generate calls to mark exit of rule if (grammar.debuggingOutput) if (grammar instanceof ParserGrammar) println("fireExitRule(" + ruleNum + ", 0)"); else if (grammar instanceof LexerGrammar) println("fireExitRule(" + ruleNum + ", _ttype)"); if (grammar.traceRules) { if (grammar instanceof TreeWalkerGrammar) { println("traceOut(\"" + s.getId() + "\",_t)"); } else { println("traceOut(\"" + s.getId() + "\")"); } } tabs--; } // Release the ASTPair instance (if we're not in trace or debug mode) if (grammar.buildAST && !(grammar.debuggingOutput || grammar.traceRules)) { println("ASTPair.PutInstance(currentAST)"); } tabs--; println(""); // Restore the AST 
generation state genAST = savegenAST; // restore char save state // saveText = oldsaveTest; } private String extractReturnIdOfRuleBlock(RuleBlock rblk) { return extractIdOfAction(rblk.returnAction, rblk.getLine(), rblk .getColumn()); } private String extractReturnTypeOfRuleBlock(RuleBlock rblk) { return extractTypeOfAction(rblk.returnAction, rblk.getLine(), rblk.getColumn()); } private void GenRuleInvocation(RuleRefElement rr) { // dump rule name _print(rr.targetRule + "("); // lexers must tell rule if it should set returnToken_ if (grammar instanceof LexerGrammar) { // if labeled, could access Token, so tell rule to create if (rr.getLabel() != null) { _print("true"); } else { _print("false"); } if (commonExtraArgs.length() != 0 || rr.args != null) { _print(", "); } } // Extra arguments common to all rules for this grammar _print(commonExtraArgs); if (commonExtraArgs.length() != 0 && rr.args != null) { _print(", "); } // Process arguments to method, if any RuleSymbol rs = (RuleSymbol) grammar.getSymbol(rr.targetRule); if (rr.args != null) { // When not guessing, execute user arg action ActionTransInfo tInfo = new ActionTransInfo(); String args = processActionForSpecialSymbols(rr.args, 0, currentRule, tInfo); if (tInfo.assignToRoot || tInfo.refRuleRoot != null) { antlrTool.error("Arguments of rule reference '" + rr.targetRule + "' cannot set or ref #" + currentRule.getRuleName(), grammar.getFilename(), rr.getLine(), rr.getColumn()); } _print(args); // Warn if the rule accepts no arguments if (rs.block.argAction == null) { antlrTool.warning("Rule '" + rr.targetRule + "' accepts no arguments", grammar.getFilename(), rr .getLine(), rr.getColumn()); } } else { // For C++, no warning if rule has parameters, because there may be // default // values for all of the parameters if (rs.block.argAction != null) { antlrTool.warning("Missing parameters on reference to rule " + rr.targetRule, grammar.getFilename(), rr.getLine(), rr.getColumn()); } } _println(")"); // move down to 
the first child while parsing if (grammar instanceof TreeWalkerGrammar) { println("_t = retTree_"); } } protected void genSemPred(String pred, int line) { // translate $ and # references ActionTransInfo tInfo = new ActionTransInfo(); pred = processActionForSpecialSymbols(pred, line, currentRule, tInfo); // ignore translation info...we don't need to do anything with it. String escapedPred = charFormatter.escapeString(pred); // if debugging, wrap the semantic predicate evaluation in a method // that can tell SemanticPredicateListeners the result if (grammar.debuggingOutput && ((grammar instanceof ParserGrammar) || (grammar instanceof LexerGrammar))) pred = "fireSemanticPredicateEvaluated(antlr.debug.SemanticPredicateEvent.VALIDATING," + addSemPred(escapedPred) + "," + pred + ")"; println("if (!(" + pred + ")):"); println(" raise SemanticException(\"" + escapedPred + "\")"); } /** * Write an array of Strings which are the semantic predicate expressions. * The debugger will reference them by number only */ protected void genSemPredMap() { Enumeration e = semPreds.elements(); println("_semPredNames = ("); tabs++; while (e.hasMoreElements()) println("\'" + e.nextElement() + "\', "); tabs--; println(")"); } protected void genSynPred(SynPredBlock blk, String lookaheadExpr) { if (DEBUG_CODE_GENERATOR) System.out.println("gen=>(" + blk + ")"); // Dump synpred result variable println("synPredMatched" + blk.ID + " as bool = false"); // Gen normal lookahead test println("if " + lookaheadExpr + ":"); tabs++; // Save input state if (grammar instanceof TreeWalkerGrammar) { println("__t" + blk.ID + "as AST = _t"); } else { println("_m" + blk.ID + " as int = mark()"); } // Once inside the try, assume synpred works unless exception caught println("synPredMatched" + blk.ID + " = true"); println("++inputState.guessing"); // if debugging, tell listeners that a synpred has started if (grammar.debuggingOutput && ((grammar instanceof ParserGrammar) || (grammar instanceof LexerGrammar))) { 
println("fireSyntacticPredicateStarted()"); } syntacticPredLevel++; println("try:"); tabs++; gen((AlternativeBlock) blk); // gen code to test predicate tabs--; println("except x as " + exceptionThrown + ":"); tabs++; println("synPredMatched" + blk.ID + " = false"); tabs--; // Restore input state if (grammar instanceof TreeWalkerGrammar) { println("_t = __t" + blk.ID); } else { println("rewind(_m" + blk.ID + ")"); } println("--inputState.guessing"); // if debugging, tell listeners how the synpred turned out if (grammar.debuggingOutput && ((grammar instanceof ParserGrammar) || (grammar instanceof LexerGrammar))) { println("if synPredMatched" + blk.ID + ":"); println(" fireSyntacticPredicateSucceeded()"); println("else:"); println(" fireSyntacticPredicateFailed()"); } syntacticPredLevel--; tabs--; // Test synred result println("if synPredMatched" + blk.ID + ":"); } /** * Generate a static array containing the names of the tokens, indexed by * the token type values. This static array is used to format error messages * so that the token identifers or literal strings are displayed instead of * the token numbers. If a lexical rule has a paraphrase, use it rather than * the token label. */ public void genTokenStrings() { // Generate a string for each token. This creates a static // array of Strings indexed by token type. println(""); println("public static final tokenNames_ = ("); tabs++; // Walk the token vocabulary and generate a Vector of strings // from the tokens. 
Vector v = grammar.tokenManager.getVocabulary(); for (int i = 0; i < v.size(); i++) { String s = (String) v.elementAt(i); if (s == null) { s = "<" + String.valueOf(i) + ">"; } if (!s.startsWith("\"") && !s.startsWith("<")) { TokenSymbol ts = (TokenSymbol) grammar.tokenManager .getTokenSymbol(s); if (ts != null && ts.getParaphrase() != null) { s = StringUtils.stripFrontBack(ts.getParaphrase(), "\"", "\""); } } else if (s.startsWith("\"")) { s = StringUtils.stripFrontBack(s, "\"", "\""); } print(charFormatter.literalString(s)); _print(","); _println(""); } // Close the string array initailizer tabs--; println(")"); } /** Generate the token types Boo file */ protected void genTokenTypes(TokenManager tm) throws IOException { // Open the token output Boo file and set the currentOutput stream // SAS: file open was moved to a method so a subclass can override // This was mainly for the VAJ interface setupOutput(tm.getName() + TokenTypesFileSuffix); tabs = 0; // Generate the header common to all Boo files genHeader(); // Do not use printAction because we assume tabs==0 println(behavior.getHeaderAction("")); // Generate the Boo namespace declaration (if specified) if (nameSpace != null) { nameSpace.emitDeclarations(currentOutput); } // Encapsulate the definitions in a class. This has to be done as a // class because // they are all constants and Boo inteface types cannot contain // constants. 
println("class " + tm.getName() + TokenTypesFileSuffix + ":"); tabs++; genTokenDefinitions(tm); // Close the interface tabs--; // Generate the Boo namespace closures (if required) if (nameSpace != null) { nameSpace.emitClosures(currentOutput); } // Close the tokens output file currentOutput.close(); currentOutput = null; exitIfError(); } protected void genTokenDefinitions(TokenManager tm) throws IOException { // Generate a definition for each token type Vector v = tm.getVocabulary(); // Do special tokens manually println("public static final EOF = " + Token.EOF_TYPE); println("public static final NULL_TREE_LOOKAHEAD = " + Token.NULL_TREE_LOOKAHEAD); for (int i = Token.MIN_USER_TYPE; i < v.size(); i++) { String s = (String) v.elementAt(i); if (s != null) { if (s.startsWith("\"")) { // a string literal StringLiteralSymbol sl = (StringLiteralSymbol) tm .getTokenSymbol(s); if (sl == null) { antlrTool.panic("String literal " + s + " not in symbol table"); } else if (sl.label != null) { println("public static final " + sl.label + " = " + i); } else { String mangledName = mangleLiteral(s); if (mangledName != null) { // We were able to create a meaningful mangled token // name println("public static final " + mangledName + " = " + i); // if no label specified, make the label equal to // the mangled name sl.label = mangledName; } else { println("// " + s + " = " + i); } } } else if (!s.startsWith("<")) { println("public static final " + s + " = " + i); } } } println(""); } /** * Process a string for an simple expression for use in xx/action.g it is * used to cast simple tokens/references to the right type for the generated * language. Basically called for every element in the vector to * getASTCreateString(vector V) * * @param str * A String. 
*/ public String processStringForASTConstructor(String str) { /* * System.out.println("processStringForASTConstructor: str = "+str+ ", * custom = "+(new Boolean(usingCustomAST)).toString()+ ", tree = "+(new * Boolean((grammar instanceof TreeWalkerGrammar))).toString()+ ", * parser = "+(new Boolean((grammar instanceof * ParserGrammar))).toString()+ ", notDefined = "+(new * Boolean((!(grammar.tokenManager.tokenDefined(str))))).toString() ); */ if (usingCustomAST && ((grammar instanceof TreeWalkerGrammar) || (grammar instanceof ParserGrammar)) && !(grammar.tokenManager.tokenDefined(str))) { // System.out.println("processStringForASTConstructor: "+str+" with // cast"); return "cast(AST, " + str + ")"; } else { // System.out.println("processStringForASTConstructor: "+str); return str; } } /** * Get a string for an expression to generate creation of an AST subtree. * * @param v * A Vector of String, where each element is an expression in the * target language yielding an AST node. */ public String getASTCreateString(Vector v) { if (v.size() == 0) { return ""; } StringBuffer buf = new StringBuffer(); buf.append("cast(" + labeledElementASTType + ", astFactory.make("); buf.append(v.elementAt(0)); for (int i = 1; i < v.size(); i++) { buf.append(", " + v.elementAt(i)); } buf.append("))"); return buf.toString(); } /** * Get a string for an expression to generate creating of an AST node * * @param atom * The grammar node for which you are creating the node * @param str * The arguments to the AST constructor */ public String getASTCreateString(GrammarAtom atom, String astCtorArgs) { String astCreateString = "astFactory.create(" + astCtorArgs + ")"; if (atom == null) return getASTCreateString(astCtorArgs); else { if (atom.getASTNodeType() != null) { // this Atom was instantiated from a Token that had an "AST" // option - associating // it with a specific heterogeneous AST type - applied to // either: // 1) it's underlying TokenSymbol (in the "tokens {} section" // or, // 2) a 
particular token reference in the grammar // // For option (1), we simply generate a cast to hetero-AST type // For option (2), we generate a call to factory.create(Token, // ASTNodeType) and cast it too TokenSymbol ts = grammar.tokenManager.getTokenSymbol(atom .getText()); if ((ts == null) || (ts.getASTNodeType() != atom.getASTNodeType())) astCreateString = "cast(" + atom.getASTNodeType() + ", astFactory.create(" + astCtorArgs + ", \"" + atom.getASTNodeType() + "\"))"; else if ((ts != null) && (ts.getASTNodeType() != null)) astCreateString = "cast(" + ts.getASTNodeType() + ", " + astCreateString + ")"; } else if (usingCustomAST) astCreateString = "cast(" + labeledElementASTType + ", " + astCreateString + ")"; } return astCreateString; } /** * Returns a string expression that creates an AST node using the specified * AST constructor argument string. Parses the first (possibly only) * argument in the supplied AST ctor argument string to obtain the token * type -- ctorID. IF the token type is a valid token symbol AND it has an * associated AST node type AND this is not a #[ID, "T", "ASTType"] * constructor THEN generate a call to factory.create(ID, Text, * token.ASTNodeType()) #[ID, "T", "ASTType"] constructors are mapped to * astFactory.create(ID, "T", "ASTType") The supported AST constructor forms * are: #[ID] #[ID, "text"] #[ID, "text", ASTclassname] -- introduced in * 2.7.2 * * @param astCtorArgs * The arguments to the AST constructor */ public String getASTCreateString(String astCtorArgs) { // kunle: 19-Aug-2002 // This AST creation string is almost certainly[*1] a manual tree // construction request. 
// From the manual [I couldn't read ALL of the code ;-)], this can only // be one of: // 1) #[ID] -- 'astCtorArgs' contains: 'ID' (without quotes) or, // 2) #[ID, "T"] -- 'astCtorArgs' contains: 'ID, "Text"' (without single // quotes) or, // kunle: 08-Dec-2002 - 2.7.2a6 // 3) #[ID, "T", "ASTTypeName"] -- 'astCtorArgs' contains: 'ID, "T", // "ASTTypeName"' (without single quotes) // // [*1] In my tests, 'atom' was '== null' only for manual tree // construction requests if (astCtorArgs == null) { astCtorArgs = ""; } String astCreateString = "astFactory.create(" + astCtorArgs + ")"; String ctorID = astCtorArgs; String ctorText = null; int commaIndex; boolean ctorIncludesCustomType = false; // Is this a #[ID, "t", // "ASTType"] constructor? commaIndex = astCtorArgs.indexOf(','); if (commaIndex != -1) { ctorID = astCtorArgs.substring(0, commaIndex); // the 'ID' portion // of #[ID, "Text"] ctorText = astCtorArgs.substring(commaIndex + 1, astCtorArgs .length()); // the 'Text' portion of #[ID, "Text"] commaIndex = ctorText.indexOf(','); if (commaIndex != -1) { // This is an AST creation of the form: #[ID, "Text", // "ASTTypename"] // Support for this was introduced with 2.7.2a6 // create default type or (since 2.7.2) 3rd arg is classname ctorIncludesCustomType = true; } } TokenSymbol ts = grammar.tokenManager.getTokenSymbol(ctorID); if ((null != ts) && (null != ts.getASTNodeType())) astCreateString = "cast(" + ts.getASTNodeType() + ", " + astCreateString + ")"; else if (usingCustomAST) astCreateString = "cast(" + labeledElementASTType + ", " + astCreateString + ")"; return astCreateString; } protected String getLookaheadTestExpression(Lookahead[] look, int k) { StringBuffer e = new StringBuffer(100); boolean first = true; e.append("("); for (int i = 1; i <= k; i++) { BitSet p = look[i].fset; if (!first) { e.append(") and ("); } first = false; // Syn preds can yield (epsilon) lookahead. // There is no way to predict what that token would be. Just // allow anything instead. 
if (look[i].containsEpsilon()) { e.append("true"); } else { e.append(getLookaheadTestTerm(i, p)); } } e.append(")"); return e.toString(); } /** * Generate a lookahead test expression for an alternate. This will be a * series of tests joined by '&&' and enclosed by '()', the number of such * tests being determined by the depth of the lookahead. */ protected String getLookaheadTestExpression(Alternative alt, int maxDepth) { int depth = alt.lookaheadDepth; if (depth == GrammarAnalyzer.NONDETERMINISTIC) { // if the decision is nondeterministic, do the best we can: LL(k) // any predicates that are around will be generated later. depth = grammar.maxk; } if (maxDepth == 0) { // empty lookahead can result from alt with sem pred // that can see end of token. E.g., A : {pred}? ('a')? ; return "( true )"; } return "(" + getLookaheadTestExpression(alt.cache, depth) + ")"; } /** * Generate a depth==1 lookahead test expression given the BitSet. This may * be one of: 1) a series of 'x==X||' tests 2) a range test using >= && <= * where possible, 3) a bitset membership test for complex comparisons * * @param k * The lookahead level * @param p * The lookahead set for level k */ protected String getLookaheadTestTerm(int k, BitSet p) { // Determine the name of the item to be compared String ts = lookaheadString(k); // Generate a range expression if possible int[] elems = p.toArray(); if (elementsAreRange(elems)) { return getRangeExpression(k, elems); } // Generate a bitset membership test if possible StringBuffer e; int degree = p.degree(); if (degree == 0) { return "true"; } if (degree >= bitsetTestThreshold) { int bitsetIdx = markBitsetForGen(p); return getBitsetName(bitsetIdx) + ".member(cast(int, " + ts + "))"; } // Otherwise, generate the long-winded series of "x==X||" tests e = new StringBuffer(); for (int i = 0; i < elems.length; i++) { // Get the compared-to item (token or character value) String cs = getValueString(elems[i]); // Generate the element comparison if (i > 0) 
e.append(" or "); e.append(ts); e.append("=="); e.append(cs); } return e.toString(); } /** * Return an expression for testing a contiguous renage of elements * * @param k * The lookahead level * @param elems * The elements representing the set, usually from * BitSet.toArray(). * @return String containing test expression. */ public String getRangeExpression(int k, int[] elems) { if (!elementsAreRange(elems)) { antlrTool.panic("getRangeExpression called with non-range"); } int begin = elems[0]; int end = elems[elems.length - 1]; return "((" + lookaheadString(k) + " >= " + getValueString(begin) + ") and (" + lookaheadString(k) + " <= " + getValueString(end) + "))"; } /** * getValueString: get a string representation of a token or char value * * @param value * The token or char value */ private String getValueString(int value) { String cs; if (grammar instanceof LexerGrammar) { cs = charFormatter.literalChar(value); } else { TokenSymbol ts = grammar.tokenManager.getTokenSymbolAt(value); if (ts == null) { return "" + value; // return token type as string // antlrTool.panic("vocabulary for token type " + value + " is // null"); } String tId = ts.getId(); if (ts instanceof StringLiteralSymbol) { // if string literal, use predefined label if any // if no predefined, try to mangle into LITERAL_xxx. // if can't mangle, use int value as last resort StringLiteralSymbol sl = (StringLiteralSymbol) ts; String label = sl.getLabel(); if (label != null) { cs = label; } else { cs = mangleLiteral(tId); if (cs == null) { cs = String.valueOf(value); } } } else { cs = tId; } } return cs; } /** Is the lookahead for this alt empty? 
*/ protected boolean lookaheadIsEmpty(Alternative alt, int maxDepth) { int depth = alt.lookaheadDepth; if (depth == GrammarAnalyzer.NONDETERMINISTIC) { depth = grammar.maxk; } for (int i = 1; i <= depth && i <= maxDepth; i++) { BitSet p = alt.cache[i].fset; if (p.degree() != 0) { return false; } } return true; } private String lookaheadString(int k) { if (grammar instanceof TreeWalkerGrammar) { return "_t.Type"; } if (grammar instanceof LexerGrammar) { if (k == 1) { return "cached_LA1"; } if (k == 2) { return "cached_LA2"; } } return "LA(" + k + ")"; } /** * Mangle a string literal into a meaningful token name. This is only * possible for literals that are all characters. The resulting mangled * literal name is literalsPrefix with the text of the literal appended. * * @return A string representing the mangled literal, or null if not * possible. */ private String mangleLiteral(String s) { String mangled = antlrTool.literalsPrefix; for (int i = 1; i < s.length() - 1; i++) { if (!Character.isLetter(s.charAt(i)) && s.charAt(i) != '_') { return null; } mangled += s.charAt(i); } if (antlrTool.upperCaseMangledLiterals) { mangled = mangled.toUpperCase(); } return mangled; } /** * Map an identifier to it's corresponding tree-node variable. This is * context-sensitive, depending on the rule and alternative being generated * * @param idParam * The identifier name to map * @return The mapped id (which may be the same as the input), or null if * the mapping is invalid due to duplicates */ public String mapTreeId(String idParam, ActionTransInfo transInfo) { // if not in an action of a rule, nothing to map. 
if (currentRule == null) return idParam; boolean in_var = false; String id = idParam; if (grammar instanceof TreeWalkerGrammar) { if (!grammar.buildAST) { in_var = true; } // If the id ends with "_in", then map it to the input variable else if (id.length() > 3 && id.lastIndexOf("_in") == id.length() - 3) { // Strip off the "_in" id = id.substring(0, id.length() - 3); in_var = true; } } // Check the rule labels. If id is a label, then the output // variable is label_AST, and the input variable is plain label. for (int i = 0; i < currentRule.labeledElements.size(); i++) { AlternativeElement elt = (AlternativeElement) currentRule.labeledElements .elementAt(i); if (elt.getLabel().equals(id)) { return in_var ? id : id + "_AST"; } } // Failing that, check the id-to-variable map for the alternative. // If the id is in the map, then output variable is the name in the // map, and input variable is name_in String s = (String) treeVariableMap.get(id); if (s != null) { if (s == NONUNIQUE) { // There is more than one element with this id antlrTool.error("Ambiguous reference to AST element " + id + " in rule " + currentRule.getRuleName()); return null; } else if (s.equals(currentRule.getRuleName())) { // a recursive call to the enclosing rule is // ambiguous with the rule itself. // if( in_var ) // System.out.println("returning null (rulename)"); antlrTool.error("Ambiguous reference to AST element " + id + " in rule " + currentRule.getRuleName()); return null; } else { return in_var ? s + "_in" : s; } } // Failing that, check the rule name itself. Output variable // is rule_AST; input variable is rule_AST_in (treeparsers). if (id.equals(currentRule.getRuleName())) { String r = in_var ? id + "_AST_in" : id + "_AST"; if (transInfo != null) { if (!in_var) { transInfo.refRuleRoot = r; } } return r; } else { // id does not map to anything -- return itself. 
return id; } } /** * Given an element and the name of an associated AST variable, create a * mapping between the element "name" and the variable name. */ private void mapTreeVariable(AlternativeElement e, String name) { // For tree elements, defer to the root if (e instanceof TreeElement) { mapTreeVariable(((TreeElement) e).root, name); return; } // Determine the name of the element, if any, for mapping purposes String elName = null; // Don't map labeled items if (e.getLabel() == null) { if (e instanceof TokenRefElement) { // use the token id elName = ((TokenRefElement) e).atomText; } else if (e instanceof RuleRefElement) { // use the rule name elName = ((RuleRefElement) e).targetRule; } } // Add the element to the tree variable map if it has a name if (elName != null) { if (treeVariableMap.get(elName) != null) { // Name is already in the map -- mark it as duplicate treeVariableMap.remove(elName); treeVariableMap.put(elName, NONUNIQUE); } else { treeVariableMap.put(elName, name); } } } /** * Lexically process tree-specifiers in the action. This will replace #id * and #(...) with the appropriate function calls and/or variables. */ protected String processActionForSpecialSymbols(String actionStr, int line, RuleBlock currentRule, ActionTransInfo tInfo) { if (actionStr == null || actionStr.length() == 0) return null; // The action trans info tells us (at the moment) whether an // assignment was done to the rule's tree root. if (grammar == null) return actionStr; // see if we have anything to do... 
if ((grammar.buildAST && actionStr.indexOf('#') != -1) || grammar instanceof TreeWalkerGrammar || ((grammar instanceof LexerGrammar || grammar instanceof ParserGrammar) && actionStr .indexOf('$') != -1)) { // Create a lexer to read an action and return the translated // version antlr.actions.csharp.ActionLexer lexer = new antlr.actions.csharp.ActionLexer( actionStr, currentRule, this, tInfo); lexer.setLineOffset(line); lexer.setFilename(grammar.getFilename()); lexer.setTool(antlrTool); try { lexer.mACTION(true); actionStr = lexer.getTokenObject().getText(); // System.out.println("action translated: "+actionStr); // System.out.println("trans info is "+tInfo); } catch (RecognitionException ex) { lexer.reportError(ex); return actionStr; } catch (TokenStreamException tex) { antlrTool.panic("Error reading action:" + actionStr); return actionStr; } catch (CharStreamException io) { antlrTool.panic("Error reading action:" + actionStr); return actionStr; } } return actionStr; } private void setupGrammarParameters(Grammar g) { if (g instanceof ParserGrammar || g instanceof LexerGrammar || g instanceof TreeWalkerGrammar) { /* * RK: options also have to be added to Grammar.java and for options * on the file level entries have to be defined in * DefineGrammarSymbols.java and passed around via 'globals' in * antlrTool.java */ if (antlrTool.nameSpace != null) nameSpace = new BooNameSpace(antlrTool.nameSpace.getName()); // genHashLines = antlrTool.genHashLines; /* * let grammar level options override filelevel ones... 
*/ if (g.hasOption("namespace")) { Token t = g.getOption("namespace"); if (t != null) { nameSpace = new BooNameSpace(t.getText()); } } /* * if( g.hasOption("genHashLines") ) { Token t = * g.getOption("genHashLines"); if( t != null ) { String val = * StringUtils.stripFrontBack(t.getText(),"\"","\""); genHashLines = * val.equals("true"); } } */ } if (g instanceof ParserGrammar) { labeledElementASTType = "AST"; if (g.hasOption("ASTLabelType")) { Token tsuffix = g.getOption("ASTLabelType"); if (tsuffix != null) { String suffix = StringUtils.stripFrontBack(tsuffix .getText(), "\"", "\""); if (suffix != null) { usingCustomAST = true; labeledElementASTType = suffix; } } } labeledElementType = "IToken "; labeledElementInit = "null"; commonExtraArgs = ""; commonExtraParams = ""; commonLocalVars = ""; lt1Value = "LT(1)"; exceptionThrown = "RecognitionException"; throwNoViable = "raise NoViableAltException(LT(1), getFilename())"; } else if (g instanceof LexerGrammar) { labeledElementType = "char "; labeledElementInit = "'\\0'"; commonExtraArgs = ""; commonExtraParams = "_createToken as bool"; commonLocalVars = "_ttype as int; _token as IToken; _begin = text.Length;"; lt1Value = "cached_LA1"; exceptionThrown = "RecognitionException"; throwNoViable = "raise NoViableAltForCharException(cached_LA1, getFilename(), getLine(), getColumn())"; } else if (g instanceof TreeWalkerGrammar) { labeledElementASTType = "AST"; labeledElementType = "AST"; if (g.hasOption("ASTLabelType")) { Token tsuffix = g.getOption("ASTLabelType"); if (tsuffix != null) { String suffix = StringUtils.stripFrontBack(tsuffix .getText(), "\"", "\""); if (suffix != null) { usingCustomAST = true; labeledElementASTType = suffix; labeledElementType = suffix; } } } if (!g.hasOption("ASTLabelType")) { g.setOption("ASTLabelType", new Token( ANTLRTokenTypes.STRING_LITERAL, "AST")); } labeledElementInit = "null"; commonExtraArgs = "_t"; commonExtraParams = "_t as AST"; commonLocalVars = ""; if (usingCustomAST) lt1Value = 
"(_t == ASTNULL) ? null : cast(" + labeledElementASTType + ", _t)"; else lt1Value = "_t"; exceptionThrown = "RecognitionException"; throwNoViable = "raise NoViableAltException(_t)"; } else { antlrTool.panic("Unknown grammar type"); } } /** * This method exists so a subclass, namely VAJCodeGenerator, can open the * file in its own evil way. JavaCodeGenerator simply opens a text file... */ public void setupOutput(String className) throws IOException { currentOutput = antlrTool.openOutputFile(className + ".boo"); } /** Helper method from Eric Smith's version of BooCodeGenerator. */ private static String OctalToUnicode(String str) { // only do any conversion if the string looks like "'\003'" if ((4 <= str.length()) && ('\'' == str.charAt(0)) && ('\\' == str.charAt(1)) && (('0' <= str.charAt(2)) && ('7' >= str.charAt(2))) && ('\'' == str.charAt(str.length() - 1))) { // convert octal representation to decimal, then to hex Integer x = Integer.valueOf(str.substring(2, str.length() - 1), 8); return "char('\\x" + Integer.toHexString(x.intValue()) + "')"; } else { return "char(" + str + ")"; } } /** * Helper method that returns the name of the interface/class/enum type for * token type constants. */ public String getTokenTypesClassName() { TokenManager tm = grammar.tokenManager; return new String(tm.getName() + TokenTypesFileSuffix); } public String[] split(String str, String sep) { StringTokenizer st = new StringTokenizer(str, sep); int count = st.countTokens(); String[] values = new String[count]; int i = 0; while (st.hasMoreTokens()) { values[i] = st.nextToken(); i++; } return values; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/BooNameSpace.java000066400000000000000000000023641161462365500242620ustar00rootroot00000000000000package antlr; /** * ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * Container for a C++ namespace specification. 
Namespaces can be * nested, so this contains a vector of all the nested names. * * @author David Wagner (JPL/Caltech) 8-12-00 * * $Id:$ */ // // ANTLR C# Code Generator by Micheal Jordan // Kunle Odutola : kunle UNDERSCORE odutola AT hotmail DOT com // Anthony Oguntimehin // // With many thanks to Eric V. Smith from the ANTLR list. // // HISTORY: // // 17-May-2002 kunle Original version // import java.util.Vector; import java.util.Enumeration; import java.io.PrintWriter; import java.util.StringTokenizer; public class BooNameSpace extends NameSpace { public BooNameSpace(String name) { super(name); } /** * Method to generate the required Boo namespace declarations */ void emitDeclarations(PrintWriter out) { out.println("namespace " + getName() ); } /** * Method to generate the required Boo namespace closures */ void emitClosures(PrintWriter out) { } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/ByteBuffer.java000066400000000000000000000031761161462365500240250ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/ByteBuffer.java#1 $ */ /**A Stream of characters fed to the lexer from a InputStream that can * be rewound via mark()/rewind() methods. *

* A dynamic array is used to buffer up all the input characters. Normally, * "k" characters are stored in the buffer. More characters may be stored during * guess mode (testing syntactic predicate), or when LT(i>k) is referenced. * Consumption of characters is deferred. In other words, reading the next * character is not done by conume(), but deferred until needed by LA or LT. *

* * @see antlr.CharQueue */ // SAS: added this class to handle Binary input w/ FileInputStream import java.io.InputStream; import java.io.IOException; public class ByteBuffer extends InputBuffer { // char source transient InputStream input; /** Create a character buffer */ public ByteBuffer(InputStream input_) { super(); input = input_; } /** Ensure that the character buffer is sufficiently full */ public void fill(int amount) throws CharStreamException { try { syncConsume(); // Fill the buffer sufficiently to hold needed characters while (queue.nbrEntries < amount + markerOffset) { // Append the next character queue.append((char)input.read()); } } catch (IOException io) { throw new CharStreamIOException(io); } } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/CSharpBlockFinishingInfo.java000077500000000000000000000017501161462365500265750ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id:$ */ // // ANTLR C# Code Generator by Kunle Odutola : kunle UNDERSCORE odutola AT hotmail DOT com // class CSharpBlockFinishingInfo { String postscript; // what to generate to terminate block boolean generatedSwitch;// did block finish with "default:" of switch? boolean generatedAnIf; /** When generating an if or switch, end-of-token lookahead sets * will become the else or default clause, don't generate an * error clause in this case. 
*/ boolean needAnErrorClause; public CSharpBlockFinishingInfo() { postscript=null; generatedSwitch=generatedSwitch = false; needAnErrorClause = true; } public CSharpBlockFinishingInfo(String ps, boolean genS, boolean generatedAnIf, boolean n) { postscript = ps; generatedSwitch = genS; this.generatedAnIf = generatedAnIf; needAnErrorClause = n; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/CSharpCharFormatter.java000077500000000000000000000055611161462365500256350ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id:$ */ // // ANTLR C# Code Generator by Kunle Odutola : kunle UNDERSCORE odutola AT hotmail DOT com // class CSharpCharFormatter implements CharFormatter { /** Given a character value, return a string representing the character * that can be embedded inside a string literal or character literal * This works for Java/C/C++ code-generation and languages with compatible * special-character-escapment. * Code-generators for languages should override this method. * @param c The character of interest. * @param forCharLiteral true to escape for char literal, false for string literal */ public String escapeChar(int c, boolean forCharLiteral) { switch (c) { // case GrammarAnalyzer.EPSILON_TYPE : return ""; case '\n' : return "\\n"; case '\t' : return "\\t"; case '\r' : return "\\r"; case '\\' : return "\\\\"; case '\'' : return forCharLiteral ? "\\'" : "'"; case '"' : return forCharLiteral ? 
"\"" : "\\\""; default : if ( c<' '||c>126 ) { if ( ( 0x0000 <= c ) && ( c <= 0x000F ) ) { return "\\u000" + Integer.toString(c,16); } else if ( ( 0x0010 <= c ) && ( c <= 0x00FF ) ) { return "\\u00" + Integer.toString(c,16); } else if ( ( 0x0100 <= c ) && ( c <= 0x0FFF )) { return "\\u0" + Integer.toString(c,16); } else { return "\\u" + Integer.toString(c,16); } } else { return String.valueOf((char)c); } } } /** Converts a String into a representation that can be use as a literal * when surrounded by double-quotes. * @param s The String to be changed into a literal */ public String escapeString(String s) { String retval = new String(); for (int i = 0; i < s.length(); i++) { retval += escapeChar(s.charAt(i), false); } return retval; } /** Given a character value, return a string representing the character * literal that can be recognized by the target language compiler. * This works for languages that use single-quotes for character literals. * Code-generators for languages should override this method. * @param c The character of interest. */ public String literalChar(int c) { return "'" + escapeChar(c, true) + "'"; } /** Converts a String into a string literal * This works for languages that use double-quotes for string literals. * Code-generators for languages should override this method. * @param s The String to be changed into a literal */ public String literalString(String s) { //return "\"" + escapeString(s) + "\""; return "@\"\"\"" + escapeString(s) + "\"\"\""; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/CSharpCodeGenerator.java000077500000000000000000003761401161462365500256210ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id:$ */ // // ANTLR C# Code Generator by Micheal Jordan // Kunle Odutola : kunle UNDERSCORE odutola AT hotmail DOT com // Anthony Oguntimehin // // With many thanks to Eric V. 
Smith from the ANTLR list. // // HISTORY: // // 17-May-2002 kunle Fixed bug in OctalToUnicode() - was processing non-Octal escape sequences // Also added namespace support based on Cpp version. // 07-Jun-2002 kunle Added Scott Ellis's _saveIndex creation optimizations // 09-Sep-2002 richardN Richard Ney's bug-fix for literals table construction. // [ Hashtable ctor needed instance of hash code provider not it's class name. ] // 17-Sep-2002 kunle & Added all Token ID definitions as data member of every Lexer/Parser/TreeParser // AOg [ A by-product of problem-solving phase of the hetero-AST changes below // but, it breaks nothing and restores "normal" ANTLR codegen behaviour. ] // 19-Oct-2002 kunle & Completed the work required to support heterogenous ASTs (many changes) // AOg & // michealj // 14-Nov-2002 michealj Added "initializeASTFactory()" to support flexible ASTFactory initialization. // [ Thanks to Ric Klaren - for suggesting it and implementing it for Cpp. ] // 18-Nov-2002 kunle Added fix to make xx_tokenSet_xx names CLS compliant. // 01-Dec-2002 richardN Patch to reduce "unreachable code" warnings // 01-Dec-2002 richardN Fix to generate correct TreeParser token-type classnames. // 12-Jan-2003 kunle & Generated Lexers, Parsers and TreeParsers now support ANTLR's tracing option. // michealj // 12-Jan-2003 kunle Fixed issue where initializeASTFactory() was generated when "buildAST=false" // 14-Jan-2003 AOg initializeASTFactory(AST factory) method was modifying the Parser's "astFactory" // member rather than it's own "factory" parameter. Fixed. 
// 18-Jan-2003 kunle & Fixed reported issues with ASTFactory create() calls for hetero ASTs // michealj - code generated for LEXER token with hetero-AST option specified does not compile // - code generated for imaginary tokens with hetero-AST option specified uses // default AST type // - code generated for per-TokenRef hetero-AST option specified does not compile // 18-Jan-2003 kunle initializeASTFactory(AST) method is now a static public member // 18-May-2003 kunle Changes to address outstanding reported issues:: // - Fixed reported issues with support for case-sensitive literals // - antlr.SemanticException now imported for all Lexers. // [ This exception is thrown on predicate failure. ] // 12-Jan-2004 kunle Added fix for reported issue with un-compileable generated lexers // // import java.util.Enumeration; import java.util.Hashtable; import java.util.StringTokenizer; import antlr.collections.impl.BitSet; import antlr.collections.impl.Vector; import java.io.PrintWriter; //SAS: changed for proper text file io import java.io.IOException; import java.io.FileWriter; /** Generates MyParser.cs, MyLexer.cs and MyParserTokenTypes.cs */ public class CSharpCodeGenerator extends CodeGenerator { // non-zero if inside syntactic predicate generation protected int syntacticPredLevel = 0; // Are we generating ASTs (for parsers and tree parsers) right now? protected boolean genAST = false; // Are we saving the text consumed (for lexers) right now? protected boolean saveText = false; // Grammar parameters set up to handle different grammar classes. // These are used to get instanceof tests out of code generation boolean usingCustomAST = false; String labeledElementType; String labeledElementASTType; String labeledElementInit; String commonExtraArgs; String commonExtraParams; String commonLocalVars; String lt1Value; String exceptionThrown; String throwNoViable; // Tracks the rule being generated. 
Used for mapTreeId RuleBlock currentRule; // Tracks the rule or labeled subrule being generated. Used for AST generation. String currentASTResult; /** Mapping between the ids used in the current alt, and the * names of variables used to represent their AST values. */ Hashtable treeVariableMap = new Hashtable(); /** Used to keep track of which AST variables have been defined in a rule * (except for the #rule_name and #rule_name_in var's */ Hashtable declaredASTVariables = new Hashtable(); /* Count of unnamed generated variables */ int astVarNumber = 1; /** Special value used to mark duplicate in treeVariableMap */ protected static final String NONUNIQUE = new String(); public static final int caseSizeThreshold = 127; // ascii is max private Vector semPreds; // Used to keep track of which (heterogeneous AST types are used) // which need to be set in the ASTFactory of the generated parser private java.util.Vector astTypes; private static CSharpNameSpace nameSpace = null; // _saveIndex creation optimization -- don't create it unless we need to use it int saveIndexCreateLevel; int blockNestingLevel; /** Create a CSharp code-generator using the given Grammar. * The caller must still call setTool, setBehavior, and setAnalyzer * before generating code. */ public CSharpCodeGenerator() { super(); charFormatter = new CSharpCharFormatter(); } /** Adds a semantic predicate string to the sem pred vector These strings will be used to build an array of sem pred names when building a debugging parser. 
This method should only be called when the debug option is specified */ protected int addSemPred(String predicate) { semPreds.appendElement(predicate); return semPreds.size()-1; } public void exitIfError() { if (antlrTool.hasError()) { antlrTool.fatalError("Exiting due to errors."); } } /**Generate the parser, lexer, treeparser, and token types in CSharp */ public void gen() { // Do the code generation try { // Loop over all grammars Enumeration grammarIter = behavior.grammars.elements(); while (grammarIter.hasMoreElements()) { Grammar g = (Grammar)grammarIter.nextElement(); // Connect all the components to each other g.setGrammarAnalyzer(analyzer); g.setCodeGenerator(this); analyzer.setGrammar(g); // To get right overloading behavior across heterogeneous grammars setupGrammarParameters(g); g.generate(); exitIfError(); } // Loop over all token managers (some of which are lexers) Enumeration tmIter = behavior.tokenManagers.elements(); while (tmIter.hasMoreElements()) { TokenManager tm = (TokenManager)tmIter.nextElement(); if (!tm.isReadOnly()) { // Write the token manager tokens as CSharp // this must appear before genTokenInterchange so that // labels are set on string literals genTokenTypes(tm); // Write the token manager tokens as plain text genTokenInterchange(tm); } exitIfError(); } } catch (IOException e) { antlrTool.reportException(e, null); } } /** Generate code for the given grammar element. 
* @param blk The {...} action to generate */ public void gen(ActionElement action) { if ( DEBUG_CODE_GENERATOR ) System.out.println("genAction("+action+")"); if ( action.isSemPred ) { genSemPred(action.actionText, action.line); } else { if ( grammar.hasSyntacticPredicate ) { println("if (0==inputState.guessing)"); println("{"); tabs++; } ActionTransInfo tInfo = new ActionTransInfo(); String actionStr = processActionForSpecialSymbols(action.actionText, action.getLine(), currentRule, tInfo); if ( tInfo.refRuleRoot!=null ) { // Somebody referenced "#rule", make sure translated var is valid // assignment to #rule is left as a ref also, meaning that assignments // with no other refs like "#rule = foo();" still forces this code to be // generated (unnecessarily). println(tInfo.refRuleRoot + " = ("+labeledElementASTType+")currentAST.root;"); } // dump the translated action printAction(actionStr); if ( tInfo.assignToRoot ) { // Somebody did a "#rule=", reset internal currentAST.root println("currentAST.root = "+tInfo.refRuleRoot+";"); // reset the child pointer too to be last sibling in sibling list println("if ( (null != "+tInfo.refRuleRoot+") && (null != "+tInfo.refRuleRoot+".getFirstChild()) )"); tabs++; println("currentAST.child = "+tInfo.refRuleRoot+".getFirstChild();"); tabs--; println("else"); tabs++; println("currentAST.child = "+tInfo.refRuleRoot+";"); tabs--; println("currentAST.advanceChildToEnd();"); } if ( grammar.hasSyntacticPredicate ) { tabs--; println("}"); } } } /** Generate code for the given grammar element. * @param blk The "x|y|z|..." 
block to generate */ public void gen(AlternativeBlock blk) { if ( DEBUG_CODE_GENERATOR ) System.out.println("gen("+blk+")"); println("{"); tabs++; genBlockPreamble(blk); genBlockInitAction(blk); // Tell AST generation to build subrule result String saveCurrentASTResult = currentASTResult; if (blk.getLabel() != null) { currentASTResult = blk.getLabel(); } boolean ok = grammar.theLLkAnalyzer.deterministic(blk); CSharpBlockFinishingInfo howToFinish = genCommonBlock(blk, true); genBlockFinish(howToFinish, throwNoViable); tabs--; println("}"); // Restore previous AST generation currentASTResult = saveCurrentASTResult; } /** Generate code for the given grammar element. * @param blk The block-end element to generate. Block-end * elements are synthesized by the grammar parser to represent * the end of a block. */ public void gen(BlockEndElement end) { if ( DEBUG_CODE_GENERATOR ) System.out.println("genRuleEnd("+end+")"); } /** Generate code for the given grammar element. * @param blk The character literal reference to generate */ public void gen(CharLiteralElement atom) { if ( DEBUG_CODE_GENERATOR ) System.out.println("genChar("+atom+")"); if ( atom.getLabel()!=null ) { println(atom.getLabel() + " = " + lt1Value + ";"); } boolean oldsaveText = saveText; saveText = saveText && atom.getAutoGenType()==GrammarElement.AUTO_GEN_NONE; genMatch(atom); saveText = oldsaveText; } /** Generate code for the given grammar element. 
* @param blk The character-range reference to generate */ public void gen(CharRangeElement r) { if ( r.getLabel()!=null && syntacticPredLevel == 0) { println(r.getLabel() + " = " + lt1Value + ";"); } boolean flag = ( grammar instanceof LexerGrammar && (!saveText || (r.getAutoGenType() == GrammarElement.AUTO_GEN_BANG)) ); if (flag) println("_saveIndex = text.Length;"); println("matchRange("+OctalToUnicode(r.beginText)+","+OctalToUnicode(r.endText)+");"); if (flag) println("text.Length = _saveIndex;"); } /** Generate the lexer CSharp file */ public void gen(LexerGrammar g) throws IOException { // If debugging, create a new sempred vector for this grammar if (g.debuggingOutput) semPreds = new Vector(); setGrammar(g); if (!(grammar instanceof LexerGrammar)) { antlrTool.panic("Internal error generating lexer"); } genBody(g); } /** Generate code for the given grammar element. * @param blk The (...)+ block to generate */ public void gen(OneOrMoreBlock blk) { if ( DEBUG_CODE_GENERATOR ) System.out.println("gen+("+blk+")"); String label; String cnt; println("{ // ( ... )+"); tabs++; blockNestingLevel++; genBlockPreamble(blk); if ( blk.getLabel() != null ) { cnt = "_cnt_"+blk.getLabel(); } else { cnt = "_cnt" + blk.ID; } println("int "+cnt+"=0;"); if ( blk.getLabel() != null ) { label = blk.getLabel(); } else { label = "_loop" + blk.ID; } println("for (;;)"); println("{"); tabs++; blockNestingLevel++; // generate the init action for ()+ ()* inside the loop // this allows us to do usefull EOF checking... genBlockInitAction(blk); // Tell AST generation to build subrule result String saveCurrentASTResult = currentASTResult; if (blk.getLabel() != null) { currentASTResult = blk.getLabel(); } boolean ok = grammar.theLLkAnalyzer.deterministic(blk); // generate exit test if greedy set to false // and an alt is ambiguous with exit branch // or when lookahead derived purely from end-of-file // Lookahead analysis stops when end-of-file is hit, // returning set {epsilon}. 
Since {epsilon} is not // ambig with any real tokens, no error is reported // by deterministic() routines and we have to check // for the case where the lookahead depth didn't get // set to NONDETERMINISTIC (this only happens when the // FOLLOW contains real atoms + epsilon). boolean generateNonGreedyExitPath = false; int nonGreedyExitDepth = grammar.maxk; if ( !blk.greedy && blk.exitLookaheadDepth<=grammar.maxk && blk.exitCache[blk.exitLookaheadDepth].containsEpsilon() ) { generateNonGreedyExitPath = true; nonGreedyExitDepth = blk.exitLookaheadDepth; } else if ( !blk.greedy && blk.exitLookaheadDepth==LLkGrammarAnalyzer.NONDETERMINISTIC ) { generateNonGreedyExitPath = true; } // generate exit test if greedy set to false // and an alt is ambiguous with exit branch if ( generateNonGreedyExitPath ) { if ( DEBUG_CODE_GENERATOR ) { System.out.println("nongreedy (...)+ loop; exit depth is "+ blk.exitLookaheadDepth); } String predictExit = getLookaheadTestExpression(blk.exitCache, nonGreedyExitDepth); println("// nongreedy exit test"); println("if (("+cnt+" >= 1) && "+predictExit+") goto "+label+"_breakloop;"); } CSharpBlockFinishingInfo howToFinish = genCommonBlock(blk, false); genBlockFinish( howToFinish, "if ("+cnt+" >= 1) { goto "+label+"_breakloop; } else { " + throwNoViable + "; }" ); println(cnt+"++;"); tabs--; if (blockNestingLevel-- == saveIndexCreateLevel) saveIndexCreateLevel = 0; println("}"); _print(label + "_breakloop:"); println(";"); tabs--; if (blockNestingLevel-- == saveIndexCreateLevel) saveIndexCreateLevel = 0; println("} // ( ... 
)+"); // Restore previous AST generation currentASTResult = saveCurrentASTResult; } /** Generate the parser CSharp file */ public void gen(ParserGrammar g) throws IOException { // if debugging, set up a new vector to keep track of sempred // strings for this grammar if (g.debuggingOutput) semPreds = new Vector(); setGrammar(g); if (!(grammar instanceof ParserGrammar)) { antlrTool.panic("Internal error generating parser"); } genBody(g); } /** Generate code for the given grammar element. * @param blk The rule-reference to generate */ public void gen(RuleRefElement rr) { if ( DEBUG_CODE_GENERATOR ) System.out.println("genRR("+rr+")"); RuleSymbol rs = (RuleSymbol)grammar.getSymbol(rr.targetRule); if (rs == null || !rs.isDefined()) { // Is this redundant??? antlrTool.error("Rule '" + rr.targetRule + "' is not defined", grammar.getFilename(), rr.getLine(), rr.getColumn()); return; } if (!(rs instanceof RuleSymbol)) { // Is this redundant??? antlrTool.error("'" + rr.targetRule + "' does not name a grammar rule", grammar.getFilename(), rr.getLine(), rr.getColumn()); return; } genErrorTryForElement(rr); // AST value for labeled rule refs in tree walker. // This is not AST construction; it is just the input tree node value. if ( grammar instanceof TreeWalkerGrammar && rr.getLabel() != null && syntacticPredLevel == 0 ) { println(rr.getLabel() + " = _t==ASTNULL ? null : "+lt1Value+";"); } // if in lexer and ! 
on rule ref or alt or rule, save buffer index to kill later if (grammar instanceof LexerGrammar && (!saveText || rr.getAutoGenType() == GrammarElement.AUTO_GEN_BANG)) { declareSaveIndexVariableIfNeeded(); println("_saveIndex = text.Length;"); } // Process return value assignment if any printTabs(); if (rr.idAssign != null) { // Warn if the rule has no return type if (rs.block.returnAction == null) { antlrTool.warning("Rule '" + rr.targetRule + "' has no return type", grammar.getFilename(), rr.getLine(), rr.getColumn()); } _print(rr.idAssign + "="); } else { // Warn about return value if any, but not inside syntactic predicate if ( !(grammar instanceof LexerGrammar) && syntacticPredLevel == 0 && rs.block.returnAction != null) { antlrTool.warning("Rule '" + rr.targetRule + "' returns a value", grammar.getFilename(), rr.getLine(), rr.getColumn()); } } // Call the rule GenRuleInvocation(rr); // if in lexer and ! on element or alt or rule, save buffer index to kill later if ( grammar instanceof LexerGrammar && (!saveText||rr.getAutoGenType()==GrammarElement.AUTO_GEN_BANG) ) { declareSaveIndexVariableIfNeeded(); println("text.Length = _saveIndex;"); } // if not in a syntactic predicate if (syntacticPredLevel == 0) { boolean doNoGuessTest = ( grammar.hasSyntacticPredicate && ( grammar.buildAST && rr.getLabel() != null || (genAST && rr.getAutoGenType() == GrammarElement.AUTO_GEN_NONE) ) ); if (doNoGuessTest) { println("if (0 == inputState.guessing)"); println("{"); tabs++; } if (grammar.buildAST && rr.getLabel() != null) { // always gen variable for rule return on labeled rules println(rr.getLabel() + "_AST = ("+labeledElementASTType+")returnAST;"); } if (genAST) { switch (rr.getAutoGenType()) { case GrammarElement.AUTO_GEN_NONE: if( usingCustomAST ) println("astFactory.addASTChild(currentAST, (AST)returnAST);"); else println("astFactory.addASTChild(currentAST, returnAST);"); break; case GrammarElement.AUTO_GEN_CARET: antlrTool.error("Internal: encountered ^ after rule 
reference"); break; default: break; } } // if a lexer and labeled, Token label defined at rule level, just set it here if ( grammar instanceof LexerGrammar && rr.getLabel() != null ) { println(rr.getLabel()+" = returnToken_;"); } if (doNoGuessTest) { tabs--; println("}"); } } genErrorCatchForElement(rr); } /** Generate code for the given grammar element. * @param blk The string-literal reference to generate */ public void gen(StringLiteralElement atom) { if ( DEBUG_CODE_GENERATOR ) System.out.println("genString("+atom+")"); // Variable declarations for labeled elements if (atom.getLabel()!=null && syntacticPredLevel == 0) { println(atom.getLabel() + " = " + lt1Value + ";"); } // AST genElementAST(atom); // is there a bang on the literal? boolean oldsaveText = saveText; saveText = saveText && atom.getAutoGenType()==GrammarElement.AUTO_GEN_NONE; // matching genMatch(atom); saveText = oldsaveText; // tack on tree cursor motion if doing a tree walker if (grammar instanceof TreeWalkerGrammar) { println("_t = _t.getNextSibling();"); } } /** Generate code for the given grammar element. * @param blk The token-range reference to generate */ public void gen(TokenRangeElement r) { genErrorTryForElement(r); if ( r.getLabel()!=null && syntacticPredLevel == 0) { println(r.getLabel() + " = " + lt1Value + ";"); } // AST genElementAST(r); // match println("matchRange("+OctalToUnicode(r.beginText)+","+OctalToUnicode(r.endText)+");"); genErrorCatchForElement(r); } /** Generate code for the given grammar element. 
* @param blk The token-reference to generate */ public void gen(TokenRefElement atom) { if ( DEBUG_CODE_GENERATOR ) System.out.println("genTokenRef("+atom+")"); if ( grammar instanceof LexerGrammar ) { antlrTool.panic("Token reference found in lexer"); } genErrorTryForElement(atom); // Assign Token value to token label variable if ( atom.getLabel()!=null && syntacticPredLevel == 0) { println(atom.getLabel() + " = " + lt1Value + ";"); } // AST genElementAST(atom); // matching genMatch(atom); genErrorCatchForElement(atom); // tack on tree cursor motion if doing a tree walker if (grammar instanceof TreeWalkerGrammar) { println("_t = _t.getNextSibling();"); } } public void gen(TreeElement t) { // save AST cursor println("AST __t" + t.ID + " = _t;"); // If there is a label on the root, then assign that to the variable if (t.root.getLabel() != null) { println(t.root.getLabel() + " = (ASTNULL == _t) ? null : ("+labeledElementASTType +")_t;"); } // check for invalid modifiers ! and ^ on tree element roots if ( t.root.getAutoGenType() == GrammarElement.AUTO_GEN_BANG ) { antlrTool.error("Suffixing a root node with '!' 
is not implemented", grammar.getFilename(), t.getLine(), t.getColumn()); t.root.setAutoGenType(GrammarElement.AUTO_GEN_NONE); } if ( t.root.getAutoGenType() == GrammarElement.AUTO_GEN_CARET ) { antlrTool.warning("Suffixing a root node with '^' is redundant; already a root", grammar.getFilename(), t.getLine(), t.getColumn()); t.root.setAutoGenType(GrammarElement.AUTO_GEN_NONE); } // Generate AST variables genElementAST(t.root); if (grammar.buildAST) { // Save the AST construction state println("ASTPair __currentAST" + t.ID + " = currentAST.copy();"); // Make the next item added a child of the TreeElement root println("currentAST.root = currentAST.child;"); println("currentAST.child = null;"); } // match root if ( t.root instanceof WildcardElement ) { println("if (null == _t) throw new MismatchedTokenException();"); } else { genMatch(t.root); } // move to list of children println("_t = _t.getFirstChild();"); // walk list of children, generating code for each for (int i=0; iASTNodeType // mapping specified in the tokens {...} section with the ASTFactory. Vector v = g.tokenManager.getVocabulary(); for (int i = 0; i < v.size(); i++) { String s = (String)v.elementAt(i); if (s != null) { TokenSymbol ts = g.tokenManager.getTokenSymbol(s); if (ts != null && ts.getASTNodeType() != null) { println("factory.setTokenTypeASTNodeType(" + s + ", \"" + ts.getASTNodeType() + "\");"); } } } tabs--; println("}"); } } public void genBody(ParserGrammar g) throws IOException { // Open the output stream for the parser and set the currentOutput // SAS: moved file setup so subclass could do it (for VAJ interface) setupOutput(grammar.getClassName()); genAST = grammar.buildAST; tabs = 0; // Generate the header common to all output files. 
genHeader(); // Do not use printAction because we assume tabs==0 println(behavior.getHeaderAction("")); // Generate the CSharp namespace declaration (if specified) if (nameSpace != null) nameSpace.emitDeclarations(currentOutput); tabs++; // Generate header for the parser println("// Generate the header common to all output files."); println("using System;"); println(""); println("using TokenBuffer = antlr.TokenBuffer;"); println("using TokenStreamException = antlr.TokenStreamException;"); println("using TokenStreamIOException = antlr.TokenStreamIOException;"); println("using ANTLRException = antlr.ANTLRException;"); String qualifiedClassName = grammar.getSuperClass(); String[] unqualifiedClassName = split(qualifiedClassName, "."); println("using " + unqualifiedClassName[unqualifiedClassName.length-1] + " = antlr." + qualifiedClassName + ";"); println("using Token = antlr.Token;"); println("using IToken = antlr.IToken;"); println("using TokenStream = antlr.TokenStream;"); println("using RecognitionException = antlr.RecognitionException;"); println("using NoViableAltException = antlr.NoViableAltException;"); println("using MismatchedTokenException = antlr.MismatchedTokenException;"); println("using SemanticException = antlr.SemanticException;"); println("using ParserSharedInputState = antlr.ParserSharedInputState;"); println("using BitSet = antlr.collections.impl.BitSet;"); if ( genAST ) { println("using AST = antlr.collections.AST;"); println("using ASTPair = antlr.ASTPair;"); println("using ASTFactory = antlr.ASTFactory;"); println("using ASTArray = antlr.collections.impl.ASTArray;"); } // Output the user-defined parser preamble println(grammar.preambleAction.getText()); // Generate parser class definition String sup=null; if ( grammar.superClass != null ) sup = grammar.superClass; else sup = "antlr." 
+ grammar.getSuperClass(); // print javadoc comment if any if ( grammar.comment!=null ) { _println(grammar.comment); } Token tprefix = (Token)grammar.options.get("classHeaderPrefix"); if (tprefix == null) { print("public "); } else { String p = StringUtils.stripFrontBack(tprefix.getText(), "\"", "\""); if (p == null) { print("public "); } else { print(p+" "); } } println("class " + grammar.getClassName() + " : "+sup); Token tsuffix = (Token)grammar.options.get("classHeaderSuffix"); if ( tsuffix != null ) { String suffix = StringUtils.stripFrontBack(tsuffix.getText(),"\"","\""); if ( suffix != null ) print(" , "+suffix); // must be an interface name for CSharp } println("{"); tabs++; // Generate 'const' definitions for Token IDs genTokenDefinitions(grammar.tokenManager); // set up an array of all the rule names so the debugger can // keep track of them only by number -- less to store in tree... if (grammar.debuggingOutput) { println("private static readonly string[] _ruleNames = new string[] {"); tabs++; Enumeration ids = grammar.rules.elements(); int ruleNum=0; while ( ids.hasMoreElements() ) { GrammarSymbol sym = (GrammarSymbol) ids.nextElement(); if ( sym instanceof RuleSymbol) println(" \""+((RuleSymbol)sym).getId()+"\","); } tabs--; println("};"); } // Generate user-defined parser class members print( processActionForSpecialSymbols(grammar.classMemberAction.getText(), grammar.classMemberAction.getLine(), currentRule, null) ); // Generate parser class constructor from TokenBuffer println(""); println("protected void initialize()"); println("{"); tabs++; println("tokenNames = tokenNames_;"); if( grammar.buildAST ) println("initializeFactory();"); // if debugging, set up arrays and call the user-overridable // debugging setup method if ( grammar.debuggingOutput ) { println("ruleNames = _ruleNames;"); println("semPredNames = _semPredNames;"); println("setupDebugging(tokenBuf);"); } tabs--; println("}"); println(""); println(""); println("protected " + 
grammar.getClassName() + "(TokenBuffer tokenBuf, int k) : base(tokenBuf, k)"); println("{"); tabs++; println("initialize();"); tabs--; println("}"); println(""); println("public " + grammar.getClassName() + "(TokenBuffer tokenBuf) : this(tokenBuf," + grammar.maxk + ")"); println("{"); println("}"); println(""); // Generate parser class constructor from TokenStream println("protected " + grammar.getClassName()+"(TokenStream lexer, int k) : base(lexer,k)"); println("{"); tabs++; println("initialize();"); tabs--; println("}"); println(""); println("public " + grammar.getClassName()+"(TokenStream lexer) : this(lexer," + grammar.maxk + ")"); println("{"); println("}"); println(""); println("public " + grammar.getClassName()+"(ParserSharedInputState state) : base(state," + grammar.maxk + ")"); println("{"); tabs++; println("initialize();"); tabs--; println("}"); println(""); astTypes = new java.util.Vector(100); // Generate code for each rule in the grammar Enumeration ids = grammar.rules.elements(); int ruleNum=0; while ( ids.hasMoreElements() ) { GrammarSymbol sym = (GrammarSymbol) ids.nextElement(); if ( sym instanceof RuleSymbol) { RuleSymbol rs = (RuleSymbol)sym; genRule(rs, rs.references.size()==0, ruleNum++, grammar.tokenManager); } exitIfError(); } if ( usingCustomAST ) { // when we are using a custom AST, overload Parser.getAST() to return the // custom AST type println("public new " + labeledElementASTType + " getAST()"); println("{"); tabs++; println("return (" + labeledElementASTType + ") returnAST;"); tabs--; println("}"); println(""); } // Generate the method that initializes the ASTFactory when we're // building AST's println("private void initializeFactory()"); println("{"); tabs++; if( grammar.buildAST ) { println("if (astFactory == null)"); println("{"); tabs++; if( usingCustomAST ) { println("astFactory = new ASTFactory(\"" + labeledElementASTType + "\");"); } else println("astFactory = new ASTFactory();"); tabs--; println("}"); 
println("initializeASTFactory( astFactory );"); } tabs--; println("}"); genInitFactory( g ); // Generate the token names genTokenStrings(); // Generate the bitsets used throughout the grammar genBitsets(bitsetsUsed, grammar.tokenManager.maxTokenType()); // Generate the semantic predicate map for debugging if (grammar.debuggingOutput) genSemPredMap(); // Close class definition println(""); tabs--; println("}"); tabs--; // Generate the CSharp namespace closures (if required) if (nameSpace != null) nameSpace.emitClosures(currentOutput); // Close the parser output stream currentOutput.close(); currentOutput = null; } public void genBody(TreeWalkerGrammar g) throws IOException { // Open the output stream for the parser and set the currentOutput // SAS: move file open to method so subclass can override it // (mainly for VAJ interface) setupOutput(grammar.getClassName()); genAST = grammar.buildAST; tabs = 0; // Generate the header common to all output files. genHeader(); // Do not use printAction because we assume tabs==0 println(behavior.getHeaderAction("")); // Generate the CSharp namespace declaration (if specified) if (nameSpace != null) nameSpace.emitDeclarations(currentOutput); tabs++; // Generate header specific to the tree-parser CSharp file println("// Generate header specific to the tree-parser CSharp file"); println("using System;"); println(""); println("using " + grammar.getSuperClass() + " = antlr." 
+ grammar.getSuperClass() + ";"); println("using Token = antlr.Token;"); println("using IToken = antlr.IToken;"); println("using AST = antlr.collections.AST;"); println("using RecognitionException = antlr.RecognitionException;"); println("using ANTLRException = antlr.ANTLRException;"); println("using NoViableAltException = antlr.NoViableAltException;"); println("using MismatchedTokenException = antlr.MismatchedTokenException;"); println("using SemanticException = antlr.SemanticException;"); println("using BitSet = antlr.collections.impl.BitSet;"); println("using ASTPair = antlr.ASTPair;"); println("using ASTFactory = antlr.ASTFactory;"); println("using ASTArray = antlr.collections.impl.ASTArray;"); // Output the user-defined parser premamble println(grammar.preambleAction.getText()); // Generate parser class definition String sup=null; if ( grammar.superClass!=null ) { sup = grammar.superClass; } else { sup = "antlr." + grammar.getSuperClass(); } println(""); // print javadoc comment if any if ( grammar.comment!=null ) { _println(grammar.comment); } Token tprefix = (Token)grammar.options.get("classHeaderPrefix"); if (tprefix == null) { print("public "); } else { String p = StringUtils.stripFrontBack(tprefix.getText(), "\"", "\""); if (p == null) { print("public "); } else { print(p+" "); } } println("class " + grammar.getClassName() + " : "+sup); Token tsuffix = (Token)grammar.options.get("classHeaderSuffix"); if ( tsuffix != null ) { String suffix = StringUtils.stripFrontBack(tsuffix.getText(),"\"","\""); if ( suffix != null ) { print(" , "+suffix); // must be an interface name for CSharp } } println("{"); tabs++; // Generate 'const' definitions for Token IDs genTokenDefinitions(grammar.tokenManager); // Generate user-defined parser class members print( processActionForSpecialSymbols(grammar.classMemberAction.getText(), grammar.classMemberAction.getLine(), currentRule, null) ); // Generate default parser class constructor println("public " + grammar.getClassName() 
+ "()"); println("{"); tabs++; println("tokenNames = tokenNames_;"); tabs--; println("}"); println(""); astTypes = new java.util.Vector(); // Generate code for each rule in the grammar Enumeration ids = grammar.rules.elements(); int ruleNum=0; String ruleNameInits = ""; while ( ids.hasMoreElements() ) { GrammarSymbol sym = (GrammarSymbol) ids.nextElement(); if ( sym instanceof RuleSymbol) { RuleSymbol rs = (RuleSymbol)sym; genRule(rs, rs.references.size()==0, ruleNum++, grammar.tokenManager); } exitIfError(); } if ( usingCustomAST ) { // when we are using a custom ast override Parser.getAST to return the // custom AST type println("public new " + labeledElementASTType + " getAST()"); println("{"); tabs++; println("return (" + labeledElementASTType + ") returnAST;"); tabs--; println("}"); println(""); } // Generate the ASTFactory initialization function genInitFactory( grammar ); // Generate the token names genTokenStrings(); // Generate the bitsets used throughout the grammar genBitsets(bitsetsUsed, grammar.tokenManager.maxTokenType()); // Close class definition tabs--; println("}"); println(""); tabs--; // Generate the CSharp namespace closures (if required) if (nameSpace != null) nameSpace.emitClosures(currentOutput); // Close the parser output stream currentOutput.close(); currentOutput = null; } /** Generate a series of case statements that implement a BitSet test. * @param p The Bitset for which cases are to be generated */ protected void genCases(BitSet p) { if ( DEBUG_CODE_GENERATOR ) System.out.println("genCases("+p+")"); int[] elems; elems = p.toArray(); // Wrap cases four-per-line for lexer, one-per-line for parser int wrap = (grammar instanceof LexerGrammar) ? 
4 : 1;
// NOTE(review): the fragment above/below is the tail of a case-emitting method whose
// header lies before this chunk; preserved verbatim.
int j=1;
boolean startOfLine = true;
for (int i = 0; i < elems.length; i++) {
	if (j==1) {
		print("");
	}
	else {
		_print(" ");
	}
	_print("case " + getValueString(elems[i]) + ":");
	if (j==wrap) {
		_println("");
		startOfLine = true;
		j=1;
	}
	else {
		j++;
		startOfLine = false;
	}
}
if (!startOfLine) {
	_println("");
}
}
/**Generate common code for a block of alternatives; return a
 * postscript that needs to be generated at the end of the
 * block. Other routines may append else-clauses and such for
 * error checking before the postfix is generated. If the
 * grammar is a lexer, then generate alternatives in an order
 * where alternatives requiring deeper lookahead are generated
 * first, and EOF in the lookahead set reduces the depth of
 * the lookahead.
 * @param blk The block to generate
 * @param noTestForSingle If true, then it does not generate a test
 * for a single alternative.
 */
public CSharpBlockFinishingInfo genCommonBlock(AlternativeBlock blk, boolean noTestForSingle) {
	int nIF=0;
	boolean createdLL1Switch = false;
	int closingBracesOfIFSequence = 0;
	CSharpBlockFinishingInfo finishingInfo = new CSharpBlockFinishingInfo();
	if ( DEBUG_CODE_GENERATOR ) System.out.println("genCommonBlock("+blk+")");

	// Save the AST generation state, and set it to that of the block
	boolean savegenAST = genAST;
	genAST = genAST && blk.getAutoGen();

	boolean oldsaveTest = saveText;
	saveText = saveText && blk.getAutoGen();

	// Is this block inverted? If so, generate special-case code
	if ( blk.not &&
		analyzer.subruleCanBeInverted(blk, grammar instanceof LexerGrammar) ) {
		if ( DEBUG_CODE_GENERATOR ) System.out.println("special case: ~(subrule)");
		Lookahead p = analyzer.look(1, blk);
		// Variable assignment for labeled elements
		if (blk.getLabel() != null && syntacticPredLevel == 0) {
			println(blk.getLabel() + " = " + lt1Value + ";");
		}

		// AST
		genElementAST(blk);

		String astArgs="";
		if (grammar instanceof TreeWalkerGrammar) {
			if ( usingCustomAST )
				astArgs = "(AST)_t,";
			else
				astArgs = "_t,";
		}

		// match the bitset for the alternative
		println("match(" + astArgs + getBitsetName(markBitsetForGen(p.fset)) + ");");

		// tack on tree cursor motion if doing a tree walker
		if (grammar instanceof TreeWalkerGrammar) {
			println("_t = _t.getNextSibling();");
		}
		return finishingInfo;
	}

	// Special handling for single alt
	if (blk.getAlternatives().size() == 1) {
		Alternative alt = blk.getAlternativeAt(0);
		// Generate a warning if there is a synPred for single alt.
		if (alt.synPred != null) {
			antlrTool.warning(
				"Syntactic predicate superfluous for single alternative",
				grammar.getFilename(),
				blk.getAlternativeAt(0).synPred.getLine(),
				blk.getAlternativeAt(0).synPred.getColumn()
			);
		}
		if (noTestForSingle) {
			if (alt.semPred != null) {
				// Generate validating predicate
				genSemPred(alt.semPred, blk.line);
			}
			genAlt(alt, blk);
			return finishingInfo;
		}
	}

	// count number of simple LL(1) cases; only do switch for
	// many LL(1) cases (no preds, no end of token refs)
	// We don't care about exit paths for (...)*, (...)+
	// because we don't explicitly have a test for them
	// as an alt in the loop.
	//
	// Also, we now count how many unicode lookahead sets
	// there are--they must be moved to DEFAULT or ELSE
	// clause.
	int nLL1 = 0;
	// NOTE(review): text between "for (int i=0; i" and "= makeSwitchThreshold" is
	// missing from this copy of the file (looks like everything between a '<' and a
	// later '>' was stripped); the remnant is preserved verbatim below.
	for (int i=0; i= makeSwitchThreshold) {
		// Determine the name of the item to be compared
		String testExpr = lookaheadString(1);
		createdLL1Switch = true;
		// when parsing trees, convert null to valid tree node with NULL lookahead
		if ( grammar instanceof TreeWalkerGrammar ) {
			println("if (null == _t)");
			tabs++;
			println("_t = ASTNULL;");
			tabs--;
		}
		println("switch ( " + testExpr+" )");
		println("{");
		//tabs++;
		// update block nesting level as it affects creation of _saveIndex OK?
		blockNestingLevel++;
		// NOTE(review): another stripped span follows — the loop bound, loop body and
		// the start of the per-alternative generation were lost; remnant verbatim.
		for (int i=0; i= 1 && alt.cache[effectiveDepth].containsEpsilon() ) {
			effectiveDepth--;
		}
		// Ignore alts whose effective depth is other than
		// the ones we are generating for this iteration.
		if (effectiveDepth != altDepth) {
			if ( DEBUG_CODE_GENERATOR ) System.out.println("ignoring alt because effectiveDepth!=altDepth;"+effectiveDepth+"!="+altDepth);
			continue;
		}
		unpredicted = lookaheadIsEmpty(alt, effectiveDepth);
		e = getLookaheadTestExpression(alt, effectiveDepth);
	}
	else {
		unpredicted = lookaheadIsEmpty(alt, grammar.maxk);
		e = getLookaheadTestExpression(alt, grammar.maxk);
	}

	// Was it a big unicode range that forced unsuitability
	// for a case expression?
	if (alt.cache[1].fset.degree() > caseSizeThreshold &&
		suitableForCaseExpression(alt)) {
		if ( nIF==0 ) {
			println("if " + e);
			println("{");
		}
		else {
			println("else if " + e);
			println("{");
		}
	}
	else if (unpredicted &&
		alt.semPred==null &&
		alt.synPred==null) {
		// The alt has empty prediction set and no
		// predicate to help out. if we have not
		// generated a previous if, just put {...} around
		// the end-of-token clause
		if ( nIF==0 ) {
			println("{");
		}
		else {
			println("else {");
		}
		finishingInfo.needAnErrorClause = false;
	}
	else {
		// check for sem and syn preds
		// Add any semantic predicate expression to the lookahead test
		if ( alt.semPred != null ) {
			// if debugging, wrap the evaluation of the predicate in a method
			//
			// translate $ and # references
			ActionTransInfo tInfo = new ActionTransInfo();
			String actionStr = processActionForSpecialSymbols(alt.semPred,
				blk.line, currentRule, tInfo);
			// ignore translation info...we don't need to
			// do anything with it. call that will inform
			// SemanticPredicateListeners of the result
			if (((grammar instanceof ParserGrammar) || (grammar instanceof LexerGrammar)) &&
				grammar.debuggingOutput) {
				e = "("+e+"&& fireSemanticPredicateEvaluated(antlr.debug.SemanticPredicateEventArgs.PREDICTING,"+ //FIXME
					addSemPred(charFormatter.escapeString(actionStr))+","+actionStr+"))";
			}
			else {
				e = "("+e+"&&("+actionStr +"))";
			}
		}

		// Generate any syntactic predicates
		if ( nIF>0 ) {
			if ( alt.synPred != null ) {
				println("else {");
				tabs++;
				blockNestingLevel++;
				genSynPred( alt.synPred, e );
				closingBracesOfIFSequence++;
			}
			else {
				println("else if " + e + " {");
			}
		}
		else {
			if ( alt.synPred != null ) {
				genSynPred( alt.synPred, e );
			}
			else {
				// when parsing trees, convert null to valid tree node
				// with NULL lookahead.
				if ( grammar instanceof TreeWalkerGrammar ) {
					println("if (_t == null)");
					tabs++;
					println("_t = ASTNULL;");
					tabs--;
				}
				println("if " + e);
				println("{");
			}
		}
	}
	blockNestingLevel++;
	nIF++;
	tabs++;
	genAlt(alt, blk);
	tabs--;
	if (blockNestingLevel-- == saveIndexCreateLevel)
		saveIndexCreateLevel = 0;
	println("}");
	}
	}
	String ps = "";
	for (int i=1; i<=closingBracesOfIFSequence; i++) {
		ps+="}";
		if (blockNestingLevel-- == saveIndexCreateLevel)
			saveIndexCreateLevel = 0;
	}

	// Restore the AST generation state
	genAST = savegenAST;

	// restore save text state
	saveText=oldsaveTest;

	// Return the finishing info.
	if ( createdLL1Switch ) {
		tabs--;
		finishingInfo.postscript = ps+"break; }";
		if (blockNestingLevel-- == saveIndexCreateLevel)
			saveIndexCreateLevel = 0;
		finishingInfo.generatedSwitch = true;
		finishingInfo.generatedAnIf = nIF>0;
		//return new CSharpBlockFinishingInfo(ps+"}",true,nIF>0);
		// close up switch statement
	}
	else {
		finishingInfo.postscript = ps;
		finishingInfo.generatedSwitch = false;
		finishingInfo.generatedAnIf = nIF>0;
		// return new CSharpBlockFinishingInfo(ps, false,nIF>0);
	}
	return finishingInfo;
}

// True iff this alternative is simple enough to be emitted as a C# switch case:
// LL(1), no semantic predicate, no epsilon in its k=1 set, and a small enough set.
private static boolean suitableForCaseExpression(Alternative a) {
	return a.lookaheadDepth == 1 &&
		a.semPred == null &&
		!a.cache[1].containsEpsilon() &&
		a.cache[1].fset.degree()<=caseSizeThreshold;
}

/** Generate code to link an element reference into the AST */
private void genElementAST(AlternativeElement el) {
	// handle case where you're not building trees, but are in tree walker.
	// Just need to get labels set up.
if ( grammar instanceof TreeWalkerGrammar && !grammar.buildAST ) {
		// (continuation of genElementAST — the method header is on an earlier line)
		String elementRef;
		String astName;

		// Generate names and declarations of the AST variable(s)
		if (el.getLabel() == null) {
			elementRef = lt1Value;
			// Generate AST variables for unlabeled stuff
			astName = "tmp" + astVarNumber + "_AST";
			astVarNumber++;
			// Map the generated AST variable in the alternate
			mapTreeVariable(el, astName);
			// Generate an "input" AST variable also
			println(labeledElementASTType+" "+astName+"_in = "+elementRef+";");
		}
		return;
	}

	if (grammar.buildAST && syntacticPredLevel == 0) {
		boolean needASTDecl =
			(genAST &&
			(el.getLabel() != null ||
			(el.getAutoGenType() != GrammarElement.AUTO_GEN_BANG)));

		// RK: if we have a grammar element always generate the decl
		// since some guy can access it from an action and we can't
		// peek ahead (well not without making a mess).
		// I'd prefer taking this out.
		if (el.getAutoGenType() != GrammarElement.AUTO_GEN_BANG &&
			(el instanceof TokenRefElement))
			needASTDecl = true;

		boolean doNoGuessTest =
			(grammar.hasSyntacticPredicate && needASTDecl);

		String elementRef;
		String astNameBase;

		// Generate names and declarations of the AST variable(s)
		if (el.getLabel() != null) {
			// if the element is labeled use that name...
			elementRef = el.getLabel();
			astNameBase = el.getLabel();
		}
		else {
			// else generate a temporary name...
			elementRef = lt1Value;
			// Generate AST variables for unlabeled stuff
			astNameBase = "tmp" + astVarNumber;
			astVarNumber++;
		}

		// Generate the declaration if required.
		if (needASTDecl) {
			// Generate the declaration
			if ( el instanceof GrammarAtom ) {
				GrammarAtom ga = (GrammarAtom)el;
				if ( ga.getASTNodeType()!=null ) {
					genASTDeclaration(el, astNameBase, ga.getASTNodeType());
					//println(ga.getASTNodeType()+" " + astName+" = null;");
				}
				else {
					genASTDeclaration(el, astNameBase, labeledElementASTType);
					//println(labeledElementASTType+" " + astName + " = null;");
				}
			}
			else {
				genASTDeclaration(el, astNameBase, labeledElementASTType);
				//println(labeledElementASTType+" " + astName + " = null;");
			}
		}

		// for convenience..
		String astName = astNameBase + "_AST";

		// Map the generated AST variable in the alternate
		mapTreeVariable(el, astName);

		if (grammar instanceof TreeWalkerGrammar) {
			// Generate an "input" AST variable also
			println(labeledElementASTType+" " + astName + "_in = null;");
		}

		// Enclose actions with !guessing
		if (doNoGuessTest) {
			//println("if (0 == inputState.guessing)");
			//println("{");
			//tabs++;
		}

		// if something has a label assume it will be used
		// so we must initialize the RefAST
		if (el.getLabel() != null) {
			if ( el instanceof GrammarAtom ) {
				println(astName + " = "+ getASTCreateString((GrammarAtom)el, elementRef) + ";");
			}
			else {
				println(astName + " = "+ getASTCreateString(elementRef) + ";");
			}
		}

		// if it has no label but a declaration exists initialize it.
		if (el.getLabel() == null && needASTDecl) {
			elementRef = lt1Value;
			if ( el instanceof GrammarAtom ) {
				println(astName + " = "+ getASTCreateString((GrammarAtom)el, elementRef) + ";");
			}
			else {
				println(astName + " = "+ getASTCreateString(elementRef) + ";");
			}
			// Map the generated AST variable in the alternate
			if (grammar instanceof TreeWalkerGrammar) {
				// set "input" AST variable also
				println(astName + "_in = " + elementRef + ";");
			}
		}

		if (genAST) {
			switch (el.getAutoGenType()) {
			case GrammarElement.AUTO_GEN_NONE:
				if ( usingCustomAST ||
					( (el instanceof GrammarAtom) &&
					(((GrammarAtom)el).getASTNodeType() != null) ) )
					println("astFactory.addASTChild(currentAST, (AST)" + astName + ");");
				else
					println("astFactory.addASTChild(currentAST, " + astName + ");");
				break;
			case GrammarElement.AUTO_GEN_CARET:
				if ( usingCustomAST ||
					( (el instanceof GrammarAtom) &&
					(((GrammarAtom)el).getASTNodeType() != null) ) )
					println("astFactory.makeASTRoot(currentAST, (AST)" + astName + ");");
				else
					println("astFactory.makeASTRoot(currentAST, " + astName + ");");
				break;
			default:
				break;
			}
		}

		if (doNoGuessTest) {
			//tabs--;
			//println("}");
		}
	}
}

/** Close the try block and generate catch phrases
 * if the element has a labeled handler in the rule */
private void genErrorCatchForElement(AlternativeElement el) {
	if (el.getLabel() == null)
		return;
	String r = el.enclosingRuleName;
	if ( grammar instanceof LexerGrammar ) {
		r = CodeGenerator.encodeLexerRuleName(el.enclosingRuleName);
	}
	RuleSymbol rs = (RuleSymbol)grammar.getSymbol(r);
	if (rs == null) {
		antlrTool.panic("Enclosing rule not found!");
	}
	ExceptionSpec ex = rs.block.findExceptionSpec(el.getLabel());
	if (ex != null) {
		tabs--;
		println("}");
		genErrorHandler(ex);
	}
}

/** Generate the catch phrases for a user-specified error handler */
private void genErrorHandler(ExceptionSpec ex) {
	// Each ExceptionHandler in the ExceptionSpec is a separate catch
	for (int i = 0; i < ex.handlers.size(); i++) {
		ExceptionHandler handler =
(ExceptionHandler)ex.handlers.elementAt(i);
		// (continuation of genErrorHandler — the loop header is on an earlier line)
		// Generate catch phrase
		println("catch (" + handler.exceptionTypeAndName.getText() + ")");
		println("{");
		tabs++;
		if (grammar.hasSyntacticPredicate) {
			println("if (0 == inputState.guessing)");
			println("{");
			tabs++;
		}

		// When not guessing, execute user handler action
		ActionTransInfo tInfo = new ActionTransInfo();
		printAction(processActionForSpecialSymbols(handler.action.getText(),
			handler.action.getLine(), currentRule, tInfo));

		if (grammar.hasSyntacticPredicate) {
			tabs--;
			println("}");
			println("else");
			println("{");
			tabs++;
			// When guessing, rethrow exception
			//println("throw " + extractIdOfAction(handler.exceptionTypeAndName) + ";");
			println("throw;");
			tabs--;
			println("}");
		}
		// Close catch phrase
		tabs--;
		println("}");
	}
}

/** Generate a try { opening if the element has a labeled handler in the rule */
private void genErrorTryForElement(AlternativeElement el) {
	if (el.getLabel() == null)
		return;
	String r = el.enclosingRuleName;
	if ( grammar instanceof LexerGrammar ) {
		r = CodeGenerator.encodeLexerRuleName(el.enclosingRuleName);
	}
	RuleSymbol rs = (RuleSymbol)grammar.getSymbol(r);
	if (rs == null) {
		antlrTool.panic("Enclosing rule not found!");
	}
	ExceptionSpec ex = rs.block.findExceptionSpec(el.getLabel());
	if (ex != null) {
		println("try // for error handling");
		println("{");
		tabs++;
	}
}

// Declare the AST variable for an element using the grammar's default AST node type.
protected void genASTDeclaration(AlternativeElement el) {
	genASTDeclaration(el, labeledElementASTType);
}

// Declare the AST variable for an element, named after its label, with the given type.
protected void genASTDeclaration(AlternativeElement el, String node_type) {
	genASTDeclaration(el, el.getLabel(), node_type);
}

// Emit "<node_type> <var_name>_AST = null;" once per element; repeat calls for the
// same element are no-ops (tracked in declaredASTVariables).
protected void genASTDeclaration(AlternativeElement el, String var_name, String node_type) {
	// already declared?
	if (declaredASTVariables.contains(el))
		return;

	// emit code
	//String s = StringUtils.stripFrontBack(node_type, "\"", "\"");
	//println(s + " " + var_name + "_AST = null;");
	println(node_type + " " + var_name + "_AST = null;");

	// mark as declared
	declaredASTVariables.put(el,el);
}

/** Generate a header that is common to all CSharp files */
protected void genHeader() {
	println("// $ANTLR "+Tool.version+": "+
		"\"" + antlrTool.fileMinusPath(antlrTool.grammarFile) + "\"" +
		" -> "+
		"\""+grammar.getClassName()+".cs\"$");
}

// Emit the literals-table lookup for a whole token (used by public lexer rules).
private void genLiteralsTest() {
	println("_ttype = testLiteralsTable(_ttype);");
}

// Emit the literals-table lookup for a partial token: only the text from _begin
// onward is tested (used by protected lexer rules).
private void genLiteralsTestForPartialToken() {
	println("_ttype = testLiteralsTable(text.ToString(_begin, text.Length-_begin), _ttype);");
}

// Intentionally empty: bitset matches are generated elsewhere.
protected void genMatch(BitSet b) {
}

// Dispatch to the appropriate match-generation routine based on the atom's kind
// and the grammar type.
protected void genMatch(GrammarAtom atom) {
	if ( atom instanceof StringLiteralElement ) {
		if ( grammar instanceof LexerGrammar ) {
			genMatchUsingAtomText(atom);
		}
		else {
			genMatchUsingAtomTokenType(atom);
		}
	}
	else if ( atom instanceof CharLiteralElement ) {
		if ( grammar instanceof LexerGrammar ) {
			genMatchUsingAtomText(atom);
		}
		else {
			antlrTool.error("cannot ref character literals in grammar: "+atom);
		}
	}
	else if ( atom instanceof TokenRefElement ) {
		genMatchUsingAtomText(atom);
	}
	else if (atom instanceof WildcardElement) {
		gen((WildcardElement)atom);
	}
}

// Emit a match()/matchNot() call keyed on the atom's text form; in the lexer,
// optionally suppresses the matched text via _saveIndex when '!' is in effect.
protected void genMatchUsingAtomText(GrammarAtom atom) {
	// match() for trees needs the _t cursor
	String astArgs="";
	if (grammar instanceof TreeWalkerGrammar) {
		if ( usingCustomAST )
			astArgs="(AST)_t,";
		else
			astArgs="_t,";
	}

	// if in lexer and ! on element, save buffer index to kill later
	if ( grammar instanceof LexerGrammar &&
		(!saveText||atom.getAutoGenType()==GrammarElement.AUTO_GEN_BANG) ) {
		declareSaveIndexVariableIfNeeded();
		println("_saveIndex = text.Length;");
	}

	print(atom.not ?
"matchNot(" : "match("); _print(astArgs); // print out what to match if (atom.atomText.equals("EOF")) { // horrible hack to handle EOF case _print("Token.EOF_TYPE"); } else { _print(atom.atomText); } _println(");"); if ( grammar instanceof LexerGrammar && (!saveText||atom.getAutoGenType()==GrammarElement.AUTO_GEN_BANG) ) { declareSaveIndexVariableIfNeeded(); println("text.Length = _saveIndex;"); // kill text atom put in buffer } } protected void genMatchUsingAtomTokenType(GrammarAtom atom) { // match() for trees needs the _t cursor String astArgs=""; if (grammar instanceof TreeWalkerGrammar) { if( usingCustomAST ) astArgs="(AST)_t,"; else astArgs="_t,"; } // If the literal can be mangled, generate the symbolic constant instead String mangledName = null; String s = astArgs + getValueString(atom.getType()); // matching println( (atom.not ? "matchNot(" : "match(") + s + ");"); } /** Generate the nextToken() rule. nextToken() is a synthetic * lexer rule that is the implicit OR of all user-defined * lexer rules. */ public void genNextToken() { // Are there any public rules? If not, then just generate a // fake nextToken(). 
boolean hasPublicRules = false; for (int i = 0; i < grammar.rules.size(); i++) { RuleSymbol rs = (RuleSymbol)grammar.rules.elementAt(i); if ( rs.isDefined() && rs.access.equals("public") ) { hasPublicRules = true; break; } } if (!hasPublicRules) { println(""); println("override public IToken nextToken()\t\t\t//throws TokenStreamException"); println("{"); tabs++; println("try"); println("{"); tabs++; println("uponEOF();"); tabs--; println("}"); println("catch(CharStreamIOException csioe)"); println("{"); tabs++; println("throw new TokenStreamIOException(csioe.io);"); tabs--; println("}"); println("catch(CharStreamException cse)"); println("{"); tabs++; println("throw new TokenStreamException(cse.Message);"); tabs--; println("}"); println("return new CommonToken(Token.EOF_TYPE, \"\");"); tabs--; println("}"); println(""); return; } // Create the synthesized nextToken() rule RuleBlock nextTokenBlk = MakeGrammar.createNextTokenRule(grammar, grammar.rules, "nextToken"); // Define the nextToken rule symbol RuleSymbol nextTokenRs = new RuleSymbol("mnextToken"); nextTokenRs.setDefined(); nextTokenRs.setBlock(nextTokenBlk); nextTokenRs.access = "private"; grammar.define(nextTokenRs); // Analyze the nextToken rule boolean ok = grammar.theLLkAnalyzer.deterministic(nextTokenBlk); // Generate the next token rule String filterRule=null; if ( ((LexerGrammar)grammar).filterMode ) { filterRule = ((LexerGrammar)grammar).filterRule; } println(""); println("override public IToken nextToken()\t\t\t//throws TokenStreamException"); println("{"); tabs++; // delay creation of _saveIndex until we need it OK? 
blockNestingLevel = 1; saveIndexCreateLevel = 0; println("IToken theRetToken = null;"); _println("tryAgain:"); println("for (;;)"); println("{"); tabs++; println("IToken _token = null;"); println("int _ttype = Token.INVALID_TYPE;"); if ( ((LexerGrammar)grammar).filterMode ) { println("setCommitToPath(false);"); if ( filterRule!=null ) { // Here's a good place to ensure that the filter rule actually exists if (!grammar.isDefined(CodeGenerator.encodeLexerRuleName(filterRule))) { grammar.antlrTool.error("Filter rule " + filterRule + " does not exist in this lexer"); } else { RuleSymbol rs = (RuleSymbol)grammar.getSymbol(CodeGenerator.encodeLexerRuleName(filterRule)); if ( !rs.isDefined() ) { grammar.antlrTool.error("Filter rule " + filterRule + " does not exist in this lexer"); } else if ( rs.access.equals("public") ) { grammar.antlrTool.error("Filter rule " + filterRule + " must be protected"); } } println("int _m;"); println("_m = mark();"); } } println("resetText();"); println("try // for char stream error handling"); println("{"); tabs++; // Generate try around whole thing to trap scanner errors println("try // for lexical error handling"); println("{"); tabs++; // Test for public lexical rules with empty paths for (int i=0; i_AST to the root of the returned AST. * * Each alternative that does automatic tree construction, builds * up root and child list pointers in an ASTPair structure. * * A rule finishes by setting the returnAST variable from the * ASTPair. 
* @param rule The name of the rule to generate
 * @param startSymbol true if the rule is a start symbol (i.e., not referenced elsewhere)
 */
public void genRule(RuleSymbol s, boolean startSymbol, int ruleNum, TokenManager tm) {
	tabs=1;
	if ( DEBUG_CODE_GENERATOR ) System.out.println("genRule("+ s.getId() +")");
	if ( !s.isDefined() ) {
		antlrTool.error("undefined rule: "+ s.getId());
		return;
	}

	// Generate rule return type, name, arguments
	RuleBlock rblk = s.getBlock();
	currentRule = rblk;
	currentASTResult = s.getId();

	// clear list of declared ast variables..
	declaredASTVariables.clear();

	// Save the AST generation state, and set it to that of the rule
	boolean savegenAST = genAST;
	genAST = genAST && rblk.getAutoGen();

	// boolean oldsaveTest = saveText;
	saveText = rblk.getAutoGen();

	// print javadoc comment if any
	if ( s.comment!=null ) {
		_println(s.comment);
	}

	// Gen method access and final qualifier
	//print(s.access + " final ");
	print(s.access + " ");

	// Gen method return type (note lexer return action set at rule creation)
	if (rblk.returnAction != null) {
		// Has specified return value
		_print(extractTypeOfAction(rblk.returnAction, rblk.getLine(), rblk.getColumn()) + " ");
	}
	else {
		// No specified return value
		_print("void ");
	}

	// Gen method name
	_print(s.getId() + "(");

	// Additional rule parameters common to all rules for this grammar
	_print(commonExtraParams);
	if (commonExtraParams.length() != 0 && rblk.argAction != null ) {
		_print(",");
	}

	// Gen arguments
	if (rblk.argAction != null) {
		// Has specified arguments
		_println("");
		tabs++;
		println(rblk.argAction);
		tabs--;
		print(")");
	}
	else {
		// No specified arguments
		_print(")");
	}

	// Gen throws clause and open curly
	_print(" //throws " + exceptionThrown);
	if ( grammar instanceof ParserGrammar ) {
		_print(", TokenStreamException");
	}
	else if ( grammar instanceof LexerGrammar ) {
		_print(", CharStreamException, TokenStreamException");
	}
	// Add user-defined exceptions unless lexer (for now)
	if ( rblk.throwsSpec!=null ) {
		if ( grammar instanceof LexerGrammar ) {
			antlrTool.error("user-defined throws spec not allowed (yet) for lexer rule "+rblk.ruleName);
		}
		else {
			_print(", "+rblk.throwsSpec);
		}
	}

	_println("");
	_println("{");
	tabs++;

	// Convert return action to variable declaration
	if (rblk.returnAction != null)
		println(rblk.returnAction + ";");

	// print out definitions needed by rules for various grammar types
	println(commonLocalVars);

	if (grammar.traceRules) {
		if ( grammar instanceof TreeWalkerGrammar ) {
			if ( usingCustomAST )
				println("traceIn(\""+ s.getId() +"\",(AST)_t);");
			else
				println("traceIn(\""+ s.getId() +"\",_t);");
		}
		else {
			println("traceIn(\""+ s.getId() +"\");");
		}
	}

	if ( grammar instanceof LexerGrammar ) {
		// lexer rule default return value is the rule's token name
		// This is a horrible hack to support the built-in EOF lexer rule.
		if (s.getId().equals("mEOF"))
			println("_ttype = Token.EOF_TYPE;");
		else
			println("_ttype = " + s.getId().substring(1)+";");

		// delay creation of _saveIndex until we need it OK?
		blockNestingLevel = 1;
		saveIndexCreateLevel = 0;

		/*
		println("boolean old_saveConsumedInput=saveConsumedInput;");
		if ( !rblk.getAutoGen() ) { // turn off "save input" if ! on rule
			println("saveConsumedInput=false;");
		}
		*/
	}

	// if debugging, write code to mark entry to the rule
	if ( grammar.debuggingOutput)
		if (grammar instanceof ParserGrammar)
			println("fireEnterRule(" + ruleNum + ",0);");
		else if (grammar instanceof LexerGrammar)
			println("fireEnterRule(" + ruleNum + ",_ttype);");

	// Generate trace code if desired
	if ( grammar.debuggingOutput || grammar.traceRules) {
		println("try { // debugging");
		tabs++;
	}

	// Initialize AST variables
	if (grammar instanceof TreeWalkerGrammar) {
		// "Input" value for rule
		println(labeledElementASTType+" " + s.getId() + "_AST_in = ("+labeledElementASTType+")_t;");
	}
	if (grammar.buildAST) {
		// Parser member used to pass AST returns from rule invocations
		println("returnAST = null;");
		// Tracks AST construction
		// println("ASTPair currentAST = (inputState.guessing==0) ? new ASTPair() : null;");
		println("ASTPair currentAST = ASTPair.GetInstance();");
		// User-settable return value for rule.
		println(labeledElementASTType+" " + s.getId() + "_AST = null;");
	}

	genBlockPreamble(rblk);
	genBlockInitAction(rblk);
	println("");

	// Search for an unlabeled exception specification attached to the rule
	ExceptionSpec unlabeledUserSpec = rblk.findExceptionSpec("");

	// Generate try block around the entire rule for error handling
	if (unlabeledUserSpec != null || rblk.getDefaultErrorHandler() ) {
		println("try { // for error handling");
		tabs++;
	}

	// Generate the alternatives
	if ( rblk.alternatives.size()==1 ) {
		// One alternative -- use simple form
		Alternative alt = rblk.getAlternativeAt(0);
		String pred = alt.semPred;
		if ( pred!=null )
			genSemPred(pred, currentRule.line);
		if (alt.synPred != null) {
			antlrTool.warning(
				"Syntactic predicate ignored for single alternative",
				grammar.getFilename(),
				alt.synPred.getLine(),
				alt.synPred.getColumn()
			);
		}
		genAlt(alt, rblk);
	}
	else {
		// Multiple alternatives -- generate complex form
		boolean ok = grammar.theLLkAnalyzer.deterministic(rblk);

		CSharpBlockFinishingInfo howToFinish = genCommonBlock(rblk, false);
		genBlockFinish(howToFinish, throwNoViable);
	}

	// Generate catch phrase for error handling
	if (unlabeledUserSpec != null || rblk.getDefaultErrorHandler() ) {
		// Close the try block
		tabs--;
		println("}");
	}

	// Generate user-defined or default catch phrases
	if (unlabeledUserSpec != null) {
		genErrorHandler(unlabeledUserSpec);
	}
	else if (rblk.getDefaultErrorHandler()) {
		// Generate default catch phrase
		println("catch (" + exceptionThrown + " ex)");
		println("{");
		tabs++;
		// Generate code to handle error if not guessing
		if (grammar.hasSyntacticPredicate) {
			println("if (0 == inputState.guessing)");
			println("{");
			tabs++;
		}
		println("reportError(ex);");
		if ( !(grammar instanceof TreeWalkerGrammar) ) {
			// Generate code to consume until token in k==1 follow set
			Lookahead follow = grammar.theLLkAnalyzer.FOLLOW(1, rblk.endNode);
			String followSetName = getBitsetName(markBitsetForGen(follow.fset));
			println("recover(ex," + followSetName + ");");
		}
		else {
			// Just consume one token
			println("if (null != _t)");
			println("{");
			tabs++;
			println("_t = _t.getNextSibling();");
			tabs--;
			println("}");
		}
		if (grammar.hasSyntacticPredicate) {
			tabs--;
			// When guessing, rethrow exception
			println("}");
			println("else");
			println("{");
			tabs++;
			println("throw ex;");
			tabs--;
			println("}");
		}
		// Close catch phrase
		tabs--;
		println("}");
	}

	// Squirrel away the AST "return" value
	if (grammar.buildAST) {
		println("returnAST = " + s.getId() + "_AST;");
	}

	// Set return tree value for tree walkers
	if ( grammar instanceof TreeWalkerGrammar ) {
		println("retTree_ = _t;");
	}

	// Generate literals test for lexer rules so marked
	if (rblk.getTestLiterals()) {
		if ( s.access.equals("protected") ) {
			genLiteralsTestForPartialToken();
		}
		else {
			genLiteralsTest();
		}
	}

	// if doing a lexer rule, dump code to create token if necessary
	if ( grammar instanceof LexerGrammar ) {
		println("if (_createToken && (null == _token) && (_ttype != Token.SKIP))");
		println("{");
		tabs++;
		println("_token = makeToken(_ttype);");
		println("_token.setText(text.ToString(_begin, text.Length-_begin));");
		tabs--;
		println("}");
		println("returnToken_ = _token;");
	}

	// Gen the return statement if there is one (lexer has hard-wired return action)
	if (rblk.returnAction != null) {
		println("return " + extractIdOfAction(rblk.returnAction, rblk.getLine(), rblk.getColumn()) + ";");
	}

	if ( grammar.debuggingOutput || grammar.traceRules) {
		println("ASTPair.PutInstance(currentAST);");
		tabs--;
		println("}");
		println("finally");
		println("{ // debugging");
		tabs++;

		// If debugging, generate calls to mark exit of rule
		if ( grammar.debuggingOutput)
			if (grammar instanceof ParserGrammar)
				println("fireExitRule(" + ruleNum + ",0);");
			else if (grammar instanceof LexerGrammar)
				println("fireExitRule(" + ruleNum + ",_ttype);");

		if (grammar.traceRules) {
			if ( grammar instanceof TreeWalkerGrammar ) {
				println("traceOut(\""+ s.getId() +"\",_t);");
			}
			else {
				println("traceOut(\""+ s.getId() +"\");");
			}
		}

		tabs--;
		println("}");
	}

	// Release the ASTPair instance (if we're not in trace or debug mode)
	if (grammar.buildAST && !(grammar.debuggingOutput || grammar.traceRules)) {
		println("ASTPair.PutInstance(currentAST);");
	}

	tabs--;
	println("}");
	println("");

	// Restore the AST generation state
	genAST = savegenAST;

	// restore char save state
	// saveText = oldsaveTest;
}

// Emit the call expression for a rule reference: target-rule name, the lexer
// create-token flag, common extra args, user args, and tree-walker cursor motion.
private void GenRuleInvocation(RuleRefElement rr) {
	// dump rule name
	_print(rr.targetRule + "(");

	// lexers must tell rule if it should set returnToken_
	if ( grammar instanceof LexerGrammar ) {
		// if labeled, could access Token, so tell rule to create
		if ( rr.getLabel() != null ) {
			_print("true");
		}
		else {
			_print("false");
		}
		if (commonExtraArgs.length() != 0 || rr.args!=null ) {
			_print(",");
		}
	}

	// Extra arguments common to all rules for this grammar
	_print(commonExtraArgs);
	if (commonExtraArgs.length() != 0 && rr.args!=null ) {
		_print(",");
	}

	// Process arguments to method, if any
	RuleSymbol rs = (RuleSymbol)grammar.getSymbol(rr.targetRule);
	if
(rr.args != null) {
		// (continuation of GenRuleInvocation — the "if" keyword is on an earlier line)
		// When not guessing, execute user arg action
		ActionTransInfo tInfo = new ActionTransInfo();
		String args = processActionForSpecialSymbols(rr.args, 0, currentRule, tInfo);
		if ( tInfo.assignToRoot || tInfo.refRuleRoot!=null ) {
			antlrTool.error("Arguments of rule reference '" + rr.targetRule + "' cannot set or ref #" +
				currentRule.getRuleName(), grammar.getFilename(), rr.getLine(), rr.getColumn());
		}
		_print(args);

		// Warn if the rule accepts no arguments
		if (rs.block.argAction == null) {
			antlrTool.warning("Rule '" + rr.targetRule + "' accepts no arguments",
				grammar.getFilename(), rr.getLine(), rr.getColumn());
		}
	}
	else {
		// For C++, no warning if rule has parameters, because there may be default
		// values for all of the parameters
		if (rs.block.argAction != null) {
			antlrTool.warning("Missing parameters on reference to rule " + rr.targetRule,
				grammar.getFilename(), rr.getLine(), rr.getColumn());
		}
	}
	_println(");");

	// move down to the first child while parsing
	if ( grammar instanceof TreeWalkerGrammar ) {
		println("_t = retTree_;");
	}
}

// Emit a validating semantic-predicate check: the generated C# throws
// SemanticException when the (translated) predicate evaluates to false.
protected void genSemPred(String pred, int line) {
	// translate $ and # references
	ActionTransInfo tInfo = new ActionTransInfo();
	pred = processActionForSpecialSymbols(pred, line, currentRule, tInfo);
	// ignore translation info...we don't need to do anything with it.
	String escapedPred = charFormatter.escapeString(pred);

	// if debugging, wrap the semantic predicate evaluation in a method
	// that can tell SemanticPredicateListeners the result
	if (grammar.debuggingOutput && ((grammar instanceof ParserGrammar) || (grammar instanceof LexerGrammar)))
		pred = "fireSemanticPredicateEvaluated(antlr.debug.SemanticPredicateEvent.VALIDATING," +
			addSemPred(escapedPred) + "," + pred + ")";
	println("if (!(" + pred + "))");
	println(" throw new SemanticException(\"" + escapedPred + "\");");
}

/** Write an array of Strings which are the semantic predicate
 * expressions.  The debugger will reference them by number only
 */
protected void genSemPredMap() {
	Enumeration e = semPreds.elements();
	println("private string[] _semPredNames = {");
	tabs++;
	while(e.hasMoreElements())
		println("\""+e.nextElement()+"\",");
	tabs--;
	println("};");
}

// Emit the guess-mode harness for a syntactic predicate: save input state
// (mark()/_t), run the predicate block with inputState.guessing incremented,
// catch parse failure, restore state, and record the outcome in a
// synPredMatched<ID> flag that the following "if" tests.
protected void genSynPred(SynPredBlock blk, String lookaheadExpr) {
	if ( DEBUG_CODE_GENERATOR ) System.out.println("gen=>("+blk+")");

	// Dump synpred result variable
	println("bool synPredMatched" + blk.ID + " = false;");
	// Gen normal lookahead test
	println("if (" + lookaheadExpr + ")");
	println("{");
	tabs++;

	// Save input state
	if ( grammar instanceof TreeWalkerGrammar ) {
		println("AST __t" + blk.ID + " = _t;");
	}
	else {
		println("int _m" + blk.ID + " = mark();");
	}

	// Once inside the try, assume synpred works unless exception caught
	println("synPredMatched" + blk.ID + " = true;");
	println("inputState.guessing++;");

	// if debugging, tell listeners that a synpred has started
	if (grammar.debuggingOutput && ((grammar instanceof ParserGrammar) ||
		(grammar instanceof LexerGrammar))) {
		println("fireSyntacticPredicateStarted();");
	}

	syntacticPredLevel++;
	println("try {");
	tabs++;
	gen((AlternativeBlock)blk); // gen code to test predicate
	tabs--;
	//println("System.out.println(\"pred "+blk+" succeeded\");");
	println("}");
	//kunle: lose a few warnings cheaply
	// println("catch (" + exceptionThrown + " pe)");
	println("catch (" + exceptionThrown + ")");
	println("{");
	tabs++;
	println("synPredMatched"+blk.ID+" = false;");
	//println("System.out.println(\"pred "+blk+" failed\");");
	tabs--;
	println("}");

	// Restore input state
	if ( grammar instanceof TreeWalkerGrammar ) {
		println("_t = __t"+blk.ID+";");
	}
	else {
		println("rewind(_m"+blk.ID+");");
	}

	println("inputState.guessing--;");

	// if debugging, tell listeners how the synpred turned out
	if (grammar.debuggingOutput && ((grammar instanceof ParserGrammar) ||
		(grammar instanceof LexerGrammar))) {
		println("if (synPredMatched" + blk.ID +")");
		println(" fireSyntacticPredicateSucceeded();");
		println("else");
		println(" fireSyntacticPredicateFailed();");
	}

	syntacticPredLevel--;
	tabs--;

	// Close lookahead test
	println("}");

	// Test synred result
	println("if ( synPredMatched"+blk.ID+" )");
	println("{");
}

/** Generate a static array containing the names of the tokens,
 * indexed by the token type values.  This static array is used
 * to format error messages so that the token identifers or literal
 * strings are displayed instead of the token numbers.
 *
 * If a lexical rule has a paraphrase, use it rather than the
 * token label.
 */
public void genTokenStrings() {
	// Generate a string for each token.  This creates a static
	// array of Strings indexed by token type.
	println("");
	println("public static readonly string[] tokenNames_ = new string[] {");
	tabs++;

	// Walk the token vocabulary and generate a Vector of strings
	// from the tokens.
	Vector v = grammar.tokenManager.getVocabulary();
	for (int i = 0; i < v.size(); i++) {
		String s = (String)v.elementAt(i);
		if (s == null) {
			s = "<"+String.valueOf(i)+">";
		}
		if ( !s.startsWith("\"") && !s.startsWith("<") ) {
			TokenSymbol ts = (TokenSymbol)grammar.tokenManager.getTokenSymbol(s);
			if ( ts!=null && ts.getParaphrase()!=null ) {
				s = StringUtils.stripFrontBack(ts.getParaphrase(), "\"", "\"");
			}
		}
		else if (s.startsWith("\"")) {
			s = StringUtils.stripFrontBack(s, "\"", "\"");
		}
		print(charFormatter.literalString(s));
		if (i != v.size()-1) {
			_print(",");
		}
		_println("");
	}

	// Close the string array initailizer
	tabs--;
	println("};");
}

/** Generate the token types CSharp file */
protected void genTokenTypes(TokenManager tm) throws IOException {
	// Open the token output CSharp file and set the currentOutput stream
	// SAS: file open was moved to a method so a subclass can override
	// This was mainly for the VAJ interface
	setupOutput(tm.getName() + TokenTypesFileSuffix);

	tabs = 0;

	// Generate the header common to all CSharp files
	genHeader();
	// Do not use printAction because we assume tabs==0
println(behavior.getHeaderAction("")); // Generate the CSharp namespace declaration (if specified) if (nameSpace != null) nameSpace.emitDeclarations(currentOutput); tabs++; // Encapsulate the definitions in a class. This has to be done as a class because // they are all constants and CSharp inteface types cannot contain constants. println("public class " + tm.getName() + TokenTypesFileSuffix); //println("public class " + getTokenTypesClassName()); println("{"); tabs++; genTokenDefinitions(tm); // Close the interface tabs--; println("}"); tabs--; // Generate the CSharp namespace closures (if required) if (nameSpace != null) nameSpace.emitClosures(currentOutput); // Close the tokens output file currentOutput.close(); currentOutput = null; exitIfError(); } protected void genTokenDefinitions(TokenManager tm) throws IOException { // Generate a definition for each token type Vector v = tm.getVocabulary(); // Do special tokens manually println("public const int EOF = " + Token.EOF_TYPE + ";"); println("public const int NULL_TREE_LOOKAHEAD = " + Token.NULL_TREE_LOOKAHEAD + ";"); for (int i = Token.MIN_USER_TYPE; i < v.size(); i++) { String s = (String)v.elementAt(i); if (s != null) { if ( s.startsWith("\"") ) { // a string literal StringLiteralSymbol sl = (StringLiteralSymbol)tm.getTokenSymbol(s); if ( sl==null ) { antlrTool.panic("String literal " + s + " not in symbol table"); } else if ( sl.label != null ) { println("public const int " + sl.label + " = " + i + ";"); } else { String mangledName = mangleLiteral(s); if (mangledName != null) { // We were able to create a meaningful mangled token name println("public const int " + mangledName + " = " + i + ";"); // if no label specified, make the label equal to the mangled name sl.label = mangledName; } else { println("// " + s + " = " + i); } } } else if ( !s.startsWith("<") ) { println("public const int " + s + " = " + i + ";"); } } } println(""); } /** Process a string for an simple expression for use in xx/action.g * it 
is used to cast simple tokens/references to the right type for * the generated language. Basically called for every element in * the vector to getASTCreateString(vector V) * @param str A String. */ public String processStringForASTConstructor( String str ) { /* System.out.println("processStringForASTConstructor: str = "+str+ ", custom = "+(new Boolean(usingCustomAST)).toString()+ ", tree = "+(new Boolean((grammar instanceof TreeWalkerGrammar))).toString()+ ", parser = "+(new Boolean((grammar instanceof ParserGrammar))).toString()+ ", notDefined = "+(new Boolean((!(grammar.tokenManager.tokenDefined(str))))).toString() ); */ if( usingCustomAST && ( (grammar instanceof TreeWalkerGrammar) || (grammar instanceof ParserGrammar) ) && !(grammar.tokenManager.tokenDefined(str)) ) { //System.out.println("processStringForASTConstructor: "+str+" with cast"); return "(AST)"+str; } else { //System.out.println("processStringForASTConstructor: "+str); return str; } } /** Get a string for an expression to generate creation of an AST subtree. * @param v A Vector of String, where each element is an expression * in the target language yielding an AST node. 
*/ public String getASTCreateString(Vector v) { if (v.size() == 0) { return ""; } StringBuffer buf = new StringBuffer(); buf.append("("+labeledElementASTType+ ") astFactory.make("); buf.append(v.elementAt(0)); for (int i = 1; i < v.size(); i++) { buf.append(", " + v.elementAt(i)); } buf.append(")"); return buf.toString(); } /** Get a string for an expression to generate creating of an AST node * @param atom The grammar node for which you are creating the node * @param str The arguments to the AST constructor */ public String getASTCreateString(GrammarAtom atom, String astCtorArgs) { String astCreateString = "astFactory.create(" + astCtorArgs + ")"; if (atom == null) return getASTCreateString(astCtorArgs); else { if ( atom.getASTNodeType() != null ) { // this Atom was instantiated from a Token that had an "AST" option - associating // it with a specific heterogeneous AST type - applied to either: // 1) it's underlying TokenSymbol (in the "tokens {} section" or, // 2) a particular token reference in the grammar // // For option (1), we simply generate a cast to hetero-AST type // For option (2), we generate a call to factory.create(Token, ASTNodeType) and cast it too TokenSymbol ts = grammar.tokenManager.getTokenSymbol(atom.getText()); if ( (ts == null) || (ts.getASTNodeType() != atom.getASTNodeType()) ) astCreateString = "(" + atom.getASTNodeType() + ") astFactory.create(" + astCtorArgs + ", \"" + atom.getASTNodeType() + "\")"; else if ( (ts != null) && (ts.getASTNodeType() != null) ) astCreateString = "(" + ts.getASTNodeType() + ") " + astCreateString; } else if ( usingCustomAST ) astCreateString = "(" + labeledElementASTType + ") " + astCreateString; } return astCreateString; } /** Returns a string expression that creates an AST node using the specified * AST constructor argument string. * Parses the first (possibly only) argument in the supplied AST ctor argument * string to obtain the token type -- ctorID. 
* * IF the token type is a valid token symbol AND * it has an associated AST node type AND * this is not a #[ID, "T", "ASTType"] constructor * THEN * generate a call to factory.create(ID, Text, token.ASTNodeType()) * * #[ID, "T", "ASTType"] constructors are mapped to astFactory.create(ID, "T", "ASTType") * * The supported AST constructor forms are: * #[ID] * #[ID, "text"] * #[ID, "text", ASTclassname] -- introduced in 2.7.2 * * @param astCtorArgs The arguments to the AST constructor */ public String getASTCreateString(String astCtorArgs) { // kunle: 19-Aug-2002 // This AST creation string is almost certainly[*1] a manual tree construction request. // From the manual [I couldn't read ALL of the code ;-)], this can only be one of: // 1) #[ID] -- 'astCtorArgs' contains: 'ID' (without quotes) or, // 2) #[ID, "T"] -- 'astCtorArgs' contains: 'ID, "Text"' (without single quotes) or, // kunle: 08-Dec-2002 - 2.7.2a6 // 3) #[ID, "T", "ASTTypeName"] -- 'astCtorArgs' contains: 'ID, "T", "ASTTypeName"' (without single quotes) // // [*1] In my tests, 'atom' was '== null' only for manual tree construction requests if ( astCtorArgs==null ) { astCtorArgs = ""; } String astCreateString = "astFactory.create(" + astCtorArgs + ")"; String ctorID = astCtorArgs; String ctorText = null; int commaIndex; boolean ctorIncludesCustomType = false; // Is this a #[ID, "t", "ASTType"] constructor? 
commaIndex = astCtorArgs.indexOf(','); if ( commaIndex != -1 ) { ctorID = astCtorArgs.substring(0, commaIndex); // the 'ID' portion of #[ID, "Text"] ctorText = astCtorArgs.substring(commaIndex+1, astCtorArgs.length()); // the 'Text' portion of #[ID, "Text"] commaIndex = ctorText.indexOf(','); if (commaIndex != -1 ) { // This is an AST creation of the form: #[ID, "Text", "ASTTypename"] // Support for this was introduced with 2.7.2a6 // create default type or (since 2.7.2) 3rd arg is classname ctorIncludesCustomType = true; } } TokenSymbol ts = grammar.tokenManager.getTokenSymbol(ctorID); if ( (null != ts) && (null != ts.getASTNodeType()) ) astCreateString = "(" + ts.getASTNodeType() + ") " + astCreateString; else if ( usingCustomAST ) astCreateString = "(" + labeledElementASTType + ") " + astCreateString; return astCreateString; } protected String getLookaheadTestExpression(Lookahead[] look, int k) { StringBuffer e = new StringBuffer(100); boolean first = true; e.append("("); for (int i = 1; i <= k; i++) { BitSet p = look[i].fset; if (!first) { e.append(") && ("); } first = false; // Syn preds can yield (epsilon) lookahead. // There is no way to predict what that token would be. Just // allow anything instead. if (look[i].containsEpsilon()) { e.append("true"); } else { e.append(getLookaheadTestTerm(i, p)); } } e.append(")"); return e.toString(); } /**Generate a lookahead test expression for an alternate. This * will be a series of tests joined by '&&' and enclosed by '()', * the number of such tests being determined by the depth of the lookahead. */ protected String getLookaheadTestExpression(Alternative alt, int maxDepth) { int depth = alt.lookaheadDepth; if ( depth == GrammarAnalyzer.NONDETERMINISTIC ) { // if the decision is nondeterministic, do the best we can: LL(k) // any predicates that are around will be generated later. depth = grammar.maxk; } if ( maxDepth==0 ) { // empty lookahead can result from alt with sem pred // that can see end of token. 
E.g., A : {pred}? ('a')? ; return "( true )"; } return "(" + getLookaheadTestExpression(alt.cache,depth) + ")"; } /**Generate a depth==1 lookahead test expression given the BitSet. * This may be one of: * 1) a series of 'x==X||' tests * 2) a range test using >= && <= where possible, * 3) a bitset membership test for complex comparisons * @param k The lookahead level * @param p The lookahead set for level k */ protected String getLookaheadTestTerm(int k, BitSet p) { // Determine the name of the item to be compared String ts = lookaheadString(k); // Generate a range expression if possible int[] elems = p.toArray(); if (elementsAreRange(elems)) { return getRangeExpression(k, elems); } // Generate a bitset membership test if possible StringBuffer e; int degree = p.degree(); if ( degree == 0 ) { return "true"; } if (degree >= bitsetTestThreshold) { int bitsetIdx = markBitsetForGen(p); return getBitsetName(bitsetIdx) + ".member(" + ts + ")"; } // Otherwise, generate the long-winded series of "x==X||" tests e = new StringBuffer(); for (int i = 0; i < elems.length; i++) { // Get the compared-to item (token or character value) String cs = getValueString(elems[i]); // Generate the element comparison if ( i>0 ) e.append("||"); e.append(ts); e.append("=="); e.append(cs); } return e.toString(); } /** Return an expression for testing a contiguous renage of elements * @param k The lookahead level * @param elems The elements representing the set, usually from BitSet.toArray(). * @return String containing test expression. 
*/ public String getRangeExpression(int k, int[] elems) { if (!elementsAreRange(elems)) { antlrTool.panic("getRangeExpression called with non-range"); } int begin = elems[0]; int end = elems[elems.length-1]; return "(" + lookaheadString(k) + " >= " + getValueString(begin) + " && " + lookaheadString(k) + " <= " + getValueString(end) + ")"; } /** getValueString: get a string representation of a token or char value * @param value The token or char value */ private String getValueString(int value) { String cs; if ( grammar instanceof LexerGrammar ) { cs = charFormatter.literalChar(value); } else { TokenSymbol ts = grammar.tokenManager.getTokenSymbolAt(value); if ( ts == null ) { return ""+value; // return token type as string // antlrTool.panic("vocabulary for token type " + value + " is null"); } String tId = ts.getId(); if ( ts instanceof StringLiteralSymbol ) { // if string literal, use predefined label if any // if no predefined, try to mangle into LITERAL_xxx. // if can't mangle, use int value as last resort StringLiteralSymbol sl = (StringLiteralSymbol)ts; String label = sl.getLabel(); if ( label!=null ) { cs = label; } else { cs = mangleLiteral(tId); if (cs == null) { cs = String.valueOf(value); } } } else { cs = tId; } } return cs; } /**Is the lookahead for this alt empty? */ protected boolean lookaheadIsEmpty(Alternative alt, int maxDepth) { int depth = alt.lookaheadDepth; if ( depth == GrammarAnalyzer.NONDETERMINISTIC ) { depth = grammar.maxk; } for (int i=1; i<=depth && i<=maxDepth; i++) { BitSet p = alt.cache[i].fset; if (p.degree() != 0) { return false; } } return true; } private String lookaheadString(int k) { if (grammar instanceof TreeWalkerGrammar) { return "_t.Type"; } if (grammar instanceof LexerGrammar) { if (k == 1) { return "cached_LA1"; } if (k == 2) { return "cached_LA2"; } } return "LA(" + k + ")"; } /** Mangle a string literal into a meaningful token name. This is * only possible for literals that are all characters. 
The resulting * mangled literal name is literalsPrefix with the text of the literal * appended. * @return A string representing the mangled literal, or null if not possible. */ private String mangleLiteral(String s) { String mangled = antlrTool.literalsPrefix; for (int i = 1; i < s.length()-1; i++) { if (!Character.isLetter(s.charAt(i)) && s.charAt(i) != '_') { return null; } mangled += s.charAt(i); } if ( antlrTool.upperCaseMangledLiterals ) { mangled = mangled.toUpperCase(); } return mangled; } /** Map an identifier to it's corresponding tree-node variable. * This is context-sensitive, depending on the rule and alternative * being generated * @param idParam The identifier name to map * @return The mapped id (which may be the same as the input), or null if the mapping is invalid due to duplicates */ public String mapTreeId(String idParam, ActionTransInfo transInfo) { // if not in an action of a rule, nothing to map. if ( currentRule==null ) return idParam; boolean in_var = false; String id = idParam; if (grammar instanceof TreeWalkerGrammar) { if ( !grammar.buildAST ) { in_var = true; } // If the id ends with "_in", then map it to the input variable else if (id.length() > 3 && id.lastIndexOf("_in") == id.length()-3) { // Strip off the "_in" id = id.substring(0, id.length()-3); in_var = true; } } // Check the rule labels. If id is a label, then the output // variable is label_AST, and the input variable is plain label. for (int i = 0; i < currentRule.labeledElements.size(); i++) { AlternativeElement elt = (AlternativeElement)currentRule.labeledElements.elementAt(i); if (elt.getLabel().equals(id)) { return in_var ? id : id + "_AST"; } } // Failing that, check the id-to-variable map for the alternative. 
// If the id is in the map, then output variable is the name in the // map, and input variable is name_in String s = (String)treeVariableMap.get(id); if (s != null) { if (s == NONUNIQUE) { // There is more than one element with this id antlrTool.error("Ambiguous reference to AST element "+id+ " in rule "+currentRule.getRuleName()); return null; } else if (s.equals(currentRule.getRuleName())) { // a recursive call to the enclosing rule is // ambiguous with the rule itself. // if( in_var ) // System.out.println("returning null (rulename)"); antlrTool.error("Ambiguous reference to AST element "+id+ " in rule "+currentRule.getRuleName()); return null; } else { return in_var ? s + "_in" : s; } } // Failing that, check the rule name itself. Output variable // is rule_AST; input variable is rule_AST_in (treeparsers). if( id.equals(currentRule.getRuleName()) ) { String r = in_var ? id + "_AST_in" : id + "_AST"; if ( transInfo!=null ) { if ( !in_var ) { transInfo.refRuleRoot = r; } } return r; } else { // id does not map to anything -- return itself. return id; } } /** Given an element and the name of an associated AST variable, * create a mapping between the element "name" and the variable name. 
*/ private void mapTreeVariable(AlternativeElement e, String name) { // For tree elements, defer to the root if (e instanceof TreeElement) { mapTreeVariable( ((TreeElement)e).root, name); return; } // Determine the name of the element, if any, for mapping purposes String elName = null; // Don't map labeled items if (e.getLabel() == null) { if (e instanceof TokenRefElement) { // use the token id elName = ((TokenRefElement)e).atomText; } else if (e instanceof RuleRefElement) { // use the rule name elName = ((RuleRefElement)e).targetRule; } } // Add the element to the tree variable map if it has a name if (elName != null) { if (treeVariableMap.get(elName) != null) { // Name is already in the map -- mark it as duplicate treeVariableMap.remove(elName); treeVariableMap.put(elName, NONUNIQUE); } else { treeVariableMap.put(elName, name); } } } /** Lexically process tree-specifiers in the action. * This will replace #id and #(...) with the appropriate * function calls and/or variables. */ protected String processActionForSpecialSymbols(String actionStr, int line, RuleBlock currentRule, ActionTransInfo tInfo) { if ( actionStr==null || actionStr.length()==0 ) return null; // The action trans info tells us (at the moment) whether an // assignment was done to the rule's tree root. if (grammar==null) return actionStr; // see if we have anything to do... 
if ((grammar.buildAST && actionStr.indexOf('#') != -1) || grammar instanceof TreeWalkerGrammar || ((grammar instanceof LexerGrammar || grammar instanceof ParserGrammar) && actionStr.indexOf('$') != -1) ) { // Create a lexer to read an action and return the translated version antlr.actions.csharp.ActionLexer lexer = new antlr.actions.csharp.ActionLexer(actionStr, currentRule, this, tInfo); lexer.setLineOffset(line); lexer.setFilename(grammar.getFilename()); lexer.setTool(antlrTool); try { lexer.mACTION(true); actionStr = lexer.getTokenObject().getText(); // System.out.println("action translated: "+actionStr); // System.out.println("trans info is "+tInfo); } catch (RecognitionException ex) { lexer.reportError(ex); return actionStr; } catch (TokenStreamException tex) { antlrTool.panic("Error reading action:"+actionStr); return actionStr; } catch (CharStreamException io) { antlrTool.panic("Error reading action:"+actionStr); return actionStr; } } return actionStr; } private void setupGrammarParameters(Grammar g) { if (g instanceof ParserGrammar || g instanceof LexerGrammar || g instanceof TreeWalkerGrammar ) { /* RK: options also have to be added to Grammar.java and for options * on the file level entries have to be defined in * DefineGrammarSymbols.java and passed around via 'globals' in antlrTool.java */ if( antlrTool.nameSpace != null ) nameSpace = new CSharpNameSpace( antlrTool.nameSpace.getName() ); //genHashLines = antlrTool.genHashLines; /* let grammar level options override filelevel ones... 
*/ if( g.hasOption("namespace") ) { Token t = g.getOption("namespace"); if( t != null ) { nameSpace = new CSharpNameSpace(t.getText()); } } /* if( g.hasOption("genHashLines") ) { Token t = g.getOption("genHashLines"); if( t != null ) { String val = StringUtils.stripFrontBack(t.getText(),"\"","\""); genHashLines = val.equals("true"); } } */ } if (g instanceof ParserGrammar) { labeledElementASTType = "AST"; if ( g.hasOption("ASTLabelType") ) { Token tsuffix = g.getOption("ASTLabelType"); if ( tsuffix != null ) { String suffix = StringUtils.stripFrontBack(tsuffix.getText(), "\"", "\""); if ( suffix != null ) { usingCustomAST = true; labeledElementASTType = suffix; } } } labeledElementType = "IToken "; labeledElementInit = "null"; commonExtraArgs = ""; commonExtraParams = ""; commonLocalVars = ""; lt1Value = "LT(1)"; exceptionThrown = "RecognitionException"; throwNoViable = "throw new NoViableAltException(LT(1), getFilename());"; } else if (g instanceof LexerGrammar) { labeledElementType = "char "; labeledElementInit = "'\\0'"; commonExtraArgs = ""; commonExtraParams = "bool _createToken"; commonLocalVars = "int _ttype; IToken _token=null; int _begin=text.Length;"; lt1Value = "cached_LA1"; exceptionThrown = "RecognitionException"; throwNoViable = "throw new NoViableAltForCharException(cached_LA1, getFilename(), getLine(), getColumn());"; } else if (g instanceof TreeWalkerGrammar) { labeledElementASTType = "AST"; labeledElementType = "AST"; if ( g.hasOption("ASTLabelType") ) { Token tsuffix = g.getOption("ASTLabelType"); if ( tsuffix != null ) { String suffix = StringUtils.stripFrontBack(tsuffix.getText(), "\"", "\""); if ( suffix != null ) { usingCustomAST = true; labeledElementASTType = suffix; labeledElementType = suffix; } } } if ( !g.hasOption("ASTLabelType") ) { g.setOption("ASTLabelType", new Token(ANTLRTokenTypes.STRING_LITERAL,"AST")); } labeledElementInit = "null"; commonExtraArgs = "_t"; commonExtraParams = "AST _t"; commonLocalVars = ""; if (usingCustomAST) 
lt1Value = "(_t==ASTNULL) ? null : (" + labeledElementASTType + ")_t"; else lt1Value = "_t"; exceptionThrown = "RecognitionException"; throwNoViable = "throw new NoViableAltException(_t);"; } else { antlrTool.panic("Unknown grammar type"); } } /** This method exists so a subclass, namely VAJCodeGenerator, * can open the file in its own evil way. JavaCodeGenerator * simply opens a text file... */ public void setupOutput(String className) throws IOException { currentOutput = antlrTool.openOutputFile(className + ".cs"); } /** Helper method from Eric Smith's version of CSharpCodeGenerator.*/ private static String OctalToUnicode(String str) { // only do any conversion if the string looks like "'\003'" if ( (4 <= str.length()) && ('\'' == str.charAt(0)) && ('\\' == str.charAt(1)) && (('0' <= str.charAt(2)) && ('7' >= str.charAt(2))) && ('\'' == str.charAt(str.length()-1)) ) { // convert octal representation to decimal, then to hex Integer x = Integer.valueOf(str.substring(2, str.length()-1), 8); return "'\\x" + Integer.toHexString(x.intValue()) + "'"; } else { return str; } } /** Helper method that returns the name of the interface/class/enum type for token type constants. 
*/ public String getTokenTypesClassName() { TokenManager tm = grammar.tokenManager; return new String(tm.getName() + TokenTypesFileSuffix); } private void declareSaveIndexVariableIfNeeded() { if (saveIndexCreateLevel == 0) { println("int _saveIndex = 0;"); saveIndexCreateLevel = blockNestingLevel; } } public String[] split(String str, String sep) { StringTokenizer st = new StringTokenizer(str, sep); int count = st.countTokens(); String[] values = new String[count]; int i = 0; while(st.hasMoreTokens()) { values[i] = st.nextToken(); i++; } return values; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/CSharpNameSpace.java000077500000000000000000000024641161462365500247270ustar00rootroot00000000000000package antlr; /** * ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * Container for a C++ namespace specification. Namespaces can be * nested, so this contains a vector of all the nested names. * * @author David Wagner (JPL/Caltech) 8-12-00 * * $Id:$ */ // // ANTLR C# Code Generator by Micheal Jordan // Kunle Odutola : kunle UNDERSCORE odutola AT hotmail DOT com // Anthony Oguntimehin // // With many thanks to Eric V. Smith from the ANTLR list. 
// // HISTORY: // // 17-May-2002 kunle Original version // import java.util.Vector; import java.util.Enumeration; import java.io.PrintWriter; import java.util.StringTokenizer; public class CSharpNameSpace extends NameSpace { public CSharpNameSpace(String name) { super(name); } /** * Method to generate the required CSharp namespace declarations */ void emitDeclarations(PrintWriter out) { out.println("namespace " + getName() ); out.println("{"); } /** * Method to generate the required CSharp namespace closures */ void emitClosures(PrintWriter out) { out.println("}"); } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/CharBuffer.java000066400000000000000000000033271161462365500237750ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/CharBuffer.java#1 $ */ /**A Stream of characters fed to the lexer from a InputStream that can * be rewound via mark()/rewind() methods. *

* A dynamic array is used to buffer up all the input characters. Normally, * "k" characters are stored in the buffer. More characters may be stored during * guess mode (testing syntactic predicate), or when LT(i>k) is referenced. * Consumption of characters is deferred. In other words, reading the next * character is not done by conume(), but deferred until needed by LA or LT. *

* * @see antlr.CharQueue */ import java.io.Reader; // SAS: changed to properly read text files import java.io.IOException; // SAS: Move most functionality into InputBuffer -- just the file-specific // stuff is in here public class CharBuffer extends InputBuffer { // char source transient Reader input; /** Create a character buffer */ public CharBuffer(Reader input_) { // SAS: for proper text i/o super(); input = input_; } /** Ensure that the character buffer is sufficiently full */ public void fill(int amount) throws CharStreamException { try { syncConsume(); // Fill the buffer sufficiently to hold needed characters while (queue.nbrEntries < amount + markerOffset) { // Append the next character queue.append((char)input.read()); } } catch (IOException io) { throw new CharStreamIOException(io); } } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/CharFormatter.java000066400000000000000000000010571161462365500245250ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/CharFormatter.java#1 $ */ /** Interface used by BitSet to format elements of the set when * converting to string */ public interface CharFormatter { public String escapeChar(int c, boolean forCharLiteral); public String escapeString(String s); public String literalChar(int c); public String literalString(String s); } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/CharLiteralElement.java000066400000000000000000000014431161462365500254670ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/CharLiteralElement.java#1 $ */ class CharLiteralElement extends GrammarAtom { public CharLiteralElement(LexerGrammar g, Token t, boolean 
inverted, int autoGenType) { super(g, t, AUTO_GEN_NONE); tokenType = ANTLRLexer.tokenTypeForCharLiteral(t.getText()); g.charVocabulary.add(tokenType); line = t.getLine(); not = inverted; this.autoGenType = autoGenType; } public void generate() { grammar.generator.gen(this); } public Lookahead look(int k) { return grammar.theLLkAnalyzer.look(k, this); } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/CharQueue.java000066400000000000000000000053341161462365500236500ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/CharQueue.java#1 $ */ /** A circular buffer object used by CharBuffer */ public class CharQueue { /** Physical circular buffer of tokens */ protected char[] buffer; /** buffer.length-1 for quick modulos */ private int sizeLessOne; /** physical index of front token */ private int offset; /** number of tokens in the queue */ protected int nbrEntries; public CharQueue(int minSize) { // Find first power of 2 >= to requested size int size; if ( minSize<0 ) { init(16); // pick some value for them return; } // check for overflow if ( minSize>=(Integer.MAX_VALUE/2) ) { init(Integer.MAX_VALUE); // wow that's big. 
return; } for (size = 2; size < minSize; size *= 2) { } init(size); } /** Add token to end of the queue * @param tok The token to add */ public final void append(char tok) { if (nbrEntries == buffer.length) { expand(); } buffer[(offset + nbrEntries) & sizeLessOne] = tok; nbrEntries++; } /** Fetch a token from the queue by index * @param idx The index of the token to fetch, where zero is the token at the front of the queue */ public final char elementAt(int idx) { return buffer[(offset + idx) & sizeLessOne]; } /** Expand the token buffer by doubling its capacity */ private final void expand() { char[] newBuffer = new char[buffer.length * 2]; // Copy the contents to the new buffer // Note that this will store the first logical item in the // first physical array element. for (int i = 0; i < buffer.length; i++) { newBuffer[i] = elementAt(i); } // Re-initialize with new contents, keep old nbrEntries buffer = newBuffer; sizeLessOne = buffer.length - 1; offset = 0; } /** Initialize the queue. * @param size The initial size of the queue */ public void init(int size) { // Allocate buffer buffer = new char[size]; // Other initialization sizeLessOne = size - 1; offset = 0; nbrEntries = 0; } /** Clear the queue. Leaving the previous buffer alone. 
*/ public final void reset() { offset = 0; nbrEntries = 0; } /** Remove char from front of queue */ public final void removeFirst() { offset = (offset + 1) & sizeLessOne; nbrEntries--; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/CharRangeElement.java000066400000000000000000000027071161462365500251330ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/CharRangeElement.java#1 $ */ class CharRangeElement extends AlternativeElement { String label; protected char begin = 0; protected char end = 0; protected String beginText; protected String endText; public CharRangeElement(LexerGrammar g, Token t1, Token t2, int autoGenType) { super(g); begin = (char)ANTLRLexer.tokenTypeForCharLiteral(t1.getText()); beginText = t1.getText(); end = (char)ANTLRLexer.tokenTypeForCharLiteral(t2.getText()); endText = t2.getText(); line = t1.getLine(); // track which characters are referenced in the grammar for (int i = begin; i <= end; i++) { g.charVocabulary.add(i); } this.autoGenType = autoGenType; } public void generate() { grammar.generator.gen(this); } public String getLabel() { return label; } public Lookahead look(int k) { return grammar.theLLkAnalyzer.look(k, this); } public void setLabel(String label_) { label = label_; } public String toString() { if (label != null) return " " + label + ":" + beginText + ".." + endText; else return " " + beginText + ".." 
+ endText; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/CharScanner.java000066400000000000000000000274451161462365500241640ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/CharScanner.java#1 $ */ import java.util.Hashtable; import antlr.collections.impl.BitSet; import java.io.IOException; public abstract class CharScanner implements TokenStream { static final char NO_CHAR = 0; public static final char EOF_CHAR = (char)-1; protected ANTLRStringBuffer text; // text of current token protected boolean saveConsumedInput = true; // does consume() save characters? protected Class tokenObjectClass; // what kind of tokens to create? protected boolean caseSensitive = true; protected boolean caseSensitiveLiterals = true; protected Hashtable literals; // set by subclass /** Tab chars are handled by tab() according to this value; override * method to do anything weird with tabs. */ protected int tabsize = 8; protected Token _returnToken = null; // used to return tokens w/o using return val. // Hash string used so we don't new one every time to check literals table protected ANTLRHashString hashString; protected LexerSharedInputState inputState; /** Used during filter mode to indicate that path is desired. 
* A subsequent scan error will report an error as usual if * acceptPath=true; */ protected boolean commitToPath = false; /** Used to keep track of indentdepth for traceIn/Out */ protected int traceDepth = 0; public CharScanner() { text = new ANTLRStringBuffer(); hashString = new ANTLRHashString(this); setTokenObjectClass("antlr.CommonToken"); } public CharScanner(InputBuffer cb) { // SAS: use generic buffer this(); inputState = new LexerSharedInputState(cb); } public CharScanner(LexerSharedInputState sharedState) { this(); inputState = sharedState; } public void append(char c) { if (saveConsumedInput) { text.append(c); } } public void append(String s) { if (saveConsumedInput) { text.append(s); } } public void commit() { inputState.input.commit(); } public void consume() throws CharStreamException { if (inputState.guessing == 0) { char c = LA(1); if (caseSensitive) { append(c); } else { // use input.LA(), not LA(), to get original case // CharScanner.LA() would toLower it. append(inputState.input.LA(1)); } if (c == '\t') { tab(); } else { inputState.column++; } } inputState.input.consume(); } /** Consume chars until one matches the given char */ public void consumeUntil(int c) throws CharStreamException { while (LA(1) != EOF_CHAR && LA(1) != c) { consume(); } } /** Consume chars until one matches the given set */ public void consumeUntil(BitSet set) throws CharStreamException { while (LA(1) != EOF_CHAR && !set.member(LA(1))) { consume(); } } public boolean getCaseSensitive() { return caseSensitive; } public final boolean getCaseSensitiveLiterals() { return caseSensitiveLiterals; } public int getColumn() { return inputState.column; } public void setColumn(int c) { inputState.column = c; } public boolean getCommitToPath() { return commitToPath; } public String getFilename() { return inputState.filename; } public InputBuffer getInputBuffer() { return inputState.input; } public LexerSharedInputState getInputState() { return inputState; } public void 
setInputState(LexerSharedInputState state) { inputState = state; } public int getLine() { return inputState.line; } /** return a copy of the current text buffer */ public String getText() { return text.toString(); } public Token getTokenObject() { return _returnToken; } public char LA(int i) throws CharStreamException { if (caseSensitive) { return inputState.input.LA(i); } else { return toLower(inputState.input.LA(i)); } } protected Token makeToken(int t) { try { Token tok = (Token)tokenObjectClass.newInstance(); tok.setType(t); tok.setColumn(inputState.tokenStartColumn); tok.setLine(inputState.tokenStartLine); // tracking real start line now: tok.setLine(inputState.line); return tok; } catch (InstantiationException ie) { panic("can't instantiate token: " + tokenObjectClass); } catch (IllegalAccessException iae) { panic("Token class is not accessible" + tokenObjectClass); } return Token.badToken; } public int mark() { return inputState.input.mark(); } public void match(char c) throws MismatchedCharException, CharStreamException { if (LA(1) != c) { throw new MismatchedCharException(LA(1), c, false, this); } consume(); } public void match(BitSet b) throws MismatchedCharException, CharStreamException { if (!b.member(LA(1))) { throw new MismatchedCharException(LA(1), b, false, this); } else { consume(); } } public void match(String s) throws MismatchedCharException, CharStreamException { int len = s.length(); for (int i = 0; i < len; i++) { if (LA(1) != s.charAt(i)) { throw new MismatchedCharException(LA(1), s.charAt(i), false, this); } consume(); } } public void matchNot(char c) throws MismatchedCharException, CharStreamException { if (LA(1) == c) { throw new MismatchedCharException(LA(1), c, true, this); } consume(); } public void matchRange(char c1, char c2) throws MismatchedCharException, CharStreamException { if (LA(1) < c1 || LA(1) > c2) throw new MismatchedCharException(LA(1), c1, c2, false, this); consume(); } public void newline() { inputState.line++; 
inputState.column = 1; } /** advance the current column number by an appropriate amount * according to tab size. This method is called from consume(). */ public void tab() { int c = getColumn(); int nc = ( ((c-1)/tabsize) + 1) * tabsize + 1; // calculate tab stop setColumn( nc ); } public void setTabSize( int size ) { tabsize = size; } public int getTabSize() { return tabsize; } /** @see #panic(String) */ public void panic() { System.err.println("CharScanner: panic"); System.exit(1); } /** This method is executed by ANTLR internally when it detected an illegal * state that cannot be recovered from. * The default implementation of this method calls * {@link java.lang.System.exit(int)} and writes directly to * {@link java.lang.System.err)} , which is usually not appropriate when * a translator is embedded into a larger application. It is highly * recommended that this method be overridden to handle the error in a * way appropriate for your application (e.g. throw an unchecked * exception). 
*/ public void panic(String s) { System.err.println("CharScanner; panic: " + s); System.exit(1); } /** Parser error-reporting function can be overridden in subclass */ public void reportError(RecognitionException ex) { System.err.println(ex); } /** Parser error-reporting function can be overridden in subclass */ public void reportError(String s) { if (getFilename() == null) { System.err.println("error: " + s); } else { System.err.println(getFilename() + ": error: " + s); } } /** Parser warning-reporting function can be overridden in subclass */ public void reportWarning(String s) { if (getFilename() == null) { System.err.println("warning: " + s); } else { System.err.println(getFilename() + ": warning: " + s); } } public void resetText() { text.setLength(0); inputState.tokenStartColumn = inputState.column; inputState.tokenStartLine = inputState.line; } public void rewind(int pos) { inputState.input.rewind(pos); // RK: should not be here, it is messing up column calculation // setColumn(inputState.tokenStartColumn); } public void setCaseSensitive(boolean t) { caseSensitive = t; } public void setCommitToPath(boolean commit) { commitToPath = commit; } public void setFilename(String f) { inputState.filename = f; } public void setLine(int line) { inputState.line = line; } public void setText(String s) { resetText(); text.append(s); } public void setTokenObjectClass(String cl) { try { tokenObjectClass = Class.forName(cl); } catch (ClassNotFoundException ce) { panic("ClassNotFoundException: " + cl); } } // Test the token text against the literals table // Override this method to perform a different literals test public int testLiteralsTable(int ttype) { hashString.setBuffer(text.getBuffer(), text.length()); Integer literalsIndex = (Integer)literals.get(hashString); if (literalsIndex != null) { ttype = literalsIndex.intValue(); } return ttype; } /** Test the text passed in against the literals table * Override this method to perform a different literals test * This is used 
primarily when you want to test a portion of * a token. */ public int testLiteralsTable(String text, int ttype) { ANTLRHashString s = new ANTLRHashString(text, this); Integer literalsIndex = (Integer)literals.get(s); if (literalsIndex != null) { ttype = literalsIndex.intValue(); } return ttype; } // Override this method to get more specific case handling public char toLower(char c) { return Character.toLowerCase(c); } public void traceIndent() { for (int i = 0; i < traceDepth; i++) System.out.print(" "); } public void traceIn(String rname) throws CharStreamException { traceDepth += 1; traceIndent(); System.out.println("> lexer " + rname + "; c==" + LA(1)); } public void traceOut(String rname) throws CharStreamException { traceIndent(); System.out.println("< lexer " + rname + "; c==" + LA(1)); traceDepth -= 1; } /** This method is called by YourLexer.nextToken() when the lexer has * hit EOF condition. EOF is NOT a character. * This method is not called if EOF is reached during * syntactic predicate evaluation or during evaluation * of normal lexical rules, which presumably would be * an IOException. This traps the "normal" EOF condition. * * uponEOF() is called after the complete evaluation of * the previous token and only if your parser asks * for another token beyond that last non-EOF token. * * You might want to throw token or char stream exceptions * like: "Heh, premature eof" or a retry stream exception * ("I found the end of this file, go back to referencing file"). 
*/ public void uponEOF() throws TokenStreamException, CharStreamException { } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/CharStreamException.java000066400000000000000000000010451161462365500256710ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/CharStreamException.java#1 $ */ /** * Anything that goes wrong while generating a stream of characters */ public class CharStreamException extends ANTLRException { /** * CharStreamException constructor comment. * @param s java.lang.String */ public CharStreamException(String s) { super(s); } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/CharStreamIOException.java000066400000000000000000000010361161462365500261210ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/CharStreamIOException.java#1 $ */ import java.io.IOException; /** * Wrap an IOException in a CharStreamException */ public class CharStreamIOException extends CharStreamException { public IOException io; public CharStreamIOException(IOException io) { super(io.getMessage()); this.io = io; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/CodeGenerator.java000066400000000000000000000575651161462365500245240ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/CodeGenerator.java#1 $ */ import java.io.PrintWriter; import java.io.IOException; import java.io.FileWriter; import antlr.collections.impl.Vector; import antlr.collections.impl.BitSet; /**A generic ANTLR code generator. 
All code generators * Derive from this class. * *

* A CodeGenerator knows about a Grammar data structure and * a grammar analyzer. The Grammar is walked to generate the * appropriate code for both a parser and lexer (if present). * This interface may change slightly so that the lexer is * itself living inside of a Grammar object (in which case, * this class generates only one recognizer). The main method * to call is gen(), which initiates all code gen. * *

* The interaction of the code generator with the analyzer is * simple: each subrule block calls deterministic() before generating * code for the block. Method deterministic() sets lookahead caches * in each Alternative object. Technically, a code generator * doesn't need the grammar analyzer if all lookahead analysis * is done at runtime, but this would result in a slower parser. * *

* This class provides a set of support utilities to handle argument * list parsing and so on. * * @author Terence Parr, John Lilley * @version 2.00a * @see antlr.JavaCodeGenerator * @see antlr.DiagnosticCodeGenerator * @see antlr.LLkAnalyzer * @see antlr.Grammar * @see antlr.AlternativeElement * @see antlr.Lookahead */ public abstract class CodeGenerator { protected antlr.Tool antlrTool; /** Current tab indentation for code output */ protected int tabs = 0; /** Current output Stream */ transient protected PrintWriter currentOutput; // SAS: for proper text i/o /** The grammar for which we generate code */ protected Grammar grammar = null; /** List of all bitsets that must be dumped. These are Vectors of BitSet. */ protected Vector bitsetsUsed; /** The grammar behavior */ protected DefineGrammarSymbols behavior; /** The LLk analyzer */ protected LLkGrammarAnalyzer analyzer; /** Object used to format characters in the target language. * subclass must initialize this to the language-specific formatter */ protected CharFormatter charFormatter; /** Use option "codeGenDebug" to generate debugging output */ protected boolean DEBUG_CODE_GENERATOR = false; /** Default values for code-generation thresholds */ protected static final int DEFAULT_MAKE_SWITCH_THRESHOLD = 2; protected static final int DEFAULT_BITSET_TEST_THRESHOLD = 4; /** If there are more than 8 long words to init in a bitset, * try to optimize it; e.g., detect runs of -1L and 0L. */ protected static final int BITSET_OPTIMIZE_INIT_THRESHOLD = 8; /** This is a hint for the language-specific code generator. * A switch() or language-specific equivalent will be generated instead * of a series of if/else statements for blocks with number of alternates * greater than or equal to this number of non-predicated LL(1) alternates. 
* This is modified by the grammar option "codeGenMakeSwitchThreshold" */ protected int makeSwitchThreshold = DEFAULT_MAKE_SWITCH_THRESHOLD; /** This is a hint for the language-specific code generator. * A bitset membership test will be generated instead of an * ORed series of LA(k) comparisions for lookahead sets with * degree greater than or equal to this value. * This is modified by the grammar option "codeGenBitsetTestThreshold" */ protected int bitsetTestThreshold = DEFAULT_BITSET_TEST_THRESHOLD; private static boolean OLD_ACTION_TRANSLATOR = true; public static String TokenTypesFileSuffix = "TokenTypes"; public static String TokenTypesFileExt = ".txt"; /** Construct code generator base class */ public CodeGenerator() { } /** Output a String to the currentOutput stream. * Ignored if string is null. * @param s The string to output */ protected void _print(String s) { if (s != null) { currentOutput.print(s); } } /** Print an action without leading tabs, attempting to * preserve the current indentation level for multi-line actions * Ignored if string is null. * @param s The action string to output */ protected void _printAction(String s) { if (s == null) { return; } // Skip leading newlines, tabs and spaces int start = 0; while (start < s.length() && Character.isSpaceChar(s.charAt(start))) { start++; } // Skip leading newlines, tabs and spaces int end = s.length() - 1; while (end > start && Character.isSpaceChar(s.charAt(end))) { end--; } char c = 0; for (int i = start; i <= end;) { c = s.charAt(i); i++; boolean newline = false; switch (c) { case '\n': newline = true; break; case '\r': if (i <= end && s.charAt(i) == '\n') { i++; } newline = true; break; default: currentOutput.print(c); break; } if (newline) { currentOutput.println(); printTabs(); // Absorb leading whitespace while (i <= end && Character.isSpaceChar(s.charAt(i))) { i++; } newline = false; } } currentOutput.println(); } /** Output a String followed by newline, to the currentOutput stream. 
* Ignored if string is null. * @param s The string to output */ protected void _println(String s) { if (s != null) { currentOutput.println(s); } } /** Test if a set element array represents a contiguous range. * @param elems The array of elements representing the set, usually from BitSet.toArray(). * @return true if the elements are a contiguous range (with two or more). */ public static boolean elementsAreRange(int[] elems) { if (elems.length == 0) { return false; } int begin = elems[0]; int end = elems[elems.length - 1]; if (elems.length <= 2) { // Not enough elements for a range expression return false; } if (end - begin + 1 > elems.length) { // The set does not represent a contiguous range return false; } int v = begin + 1; for (int i = 1; i < elems.length - 1; i++) { if (v != elems[i]) { // The set does not represent a contiguous range return false; } v++; } return true; } /** Get the identifier portion of an argument-action token. * The ID of an action is assumed to be a trailing identifier. * Specific code-generators may want to override this * if the language has unusual declaration syntax. * @param t The action token * @return A string containing the text of the identifier */ protected String extractIdOfAction(Token t) { return extractIdOfAction(t.getText(), t.getLine(), t.getColumn()); } /** Get the identifier portion of an argument-action. * The ID of an action is assumed to be a trailing identifier. * Specific code-generators may want to override this * if the language has unusual declaration syntax. * @param s The action text * @param line Line used for error reporting. * @param column Line used for error reporting. * @return A string containing the text of the identifier */ protected String extractIdOfAction(String s, int line, int column) { s = removeAssignmentFromDeclaration(s); // Search back from the end for a non alphanumeric. 
That marks the // beginning of the identifier for (int i = s.length() - 2; i >= 0; i--) { // TODO: make this work for language-independent identifiers? if (!Character.isLetterOrDigit(s.charAt(i)) && s.charAt(i) != '_') { // Found end of type part return s.substring(i + 1); } } // Something is bogus, but we cannot parse the language-specific // actions any better. The compiler will have to catch the problem. antlrTool.warning("Ill-formed action", grammar.getFilename(), line, column); return ""; } /** Get the type string out of an argument-action token. * The type of an action is assumed to precede a trailing identifier * Specific code-generators may want to override this * if the language has unusual declaration syntax. * @param t The action token * @return A string containing the text of the type */ protected String extractTypeOfAction(Token t) { return extractTypeOfAction(t.getText(), t.getLine(), t.getColumn()); } /** Get the type portion of an argument-action. * The type of an action is assumed to precede a trailing identifier * Specific code-generators may want to override this * if the language has unusual declaration syntax. * @param s The action text * @param line Line used for error reporting. * @return A string containing the text of the type */ protected String extractTypeOfAction(String s, int line, int column) { s = removeAssignmentFromDeclaration(s); // Search back from the end for a non alphanumeric. That marks the // beginning of the identifier for (int i = s.length() - 2; i >= 0; i--) { // TODO: make this work for language-independent identifiers? if (!Character.isLetterOrDigit(s.charAt(i)) && s.charAt(i) != '_') { // Found end of type part return s.substring(0, i + 1); } } // Something is bogus, but we cannot parse the language-specific // actions any better. The compiler will have to catch the problem. 
antlrTool.warning("Ill-formed action", grammar.getFilename(), line, column); return ""; } /** Generate the code for all grammars */ public abstract void gen(); /** Generate code for the given grammar element. * @param action The {...} action to generate */ public abstract void gen(ActionElement action); /** Generate code for the given grammar element. * @param blk The "x|y|z|..." block to generate */ public abstract void gen(AlternativeBlock blk); /** Generate code for the given grammar element. * @param end The block-end element to generate. Block-end * elements are synthesized by the grammar parser to represent * the end of a block. */ public abstract void gen(BlockEndElement end); /** Generate code for the given grammar element. * @param atom The character literal reference to generate */ public abstract void gen(CharLiteralElement atom); /** Generate code for the given grammar element. * @param r The character-range reference to generate */ public abstract void gen(CharRangeElement r); /** Generate the code for a parser */ public abstract void gen(LexerGrammar g) throws IOException; /** Generate code for the given grammar element. * @param blk The (...)+ block to generate */ public abstract void gen(OneOrMoreBlock blk); /** Generate the code for a parser */ public abstract void gen(ParserGrammar g) throws IOException; /** Generate code for the given grammar element. * @param rr The rule-reference to generate */ public abstract void gen(RuleRefElement rr); /** Generate code for the given grammar element. * @param atom The string-literal reference to generate */ public abstract void gen(StringLiteralElement atom); /** Generate code for the given grammar element. * @param r The token-range reference to generate */ public abstract void gen(TokenRangeElement r); /** Generate code for the given grammar element. * @param atom The token-reference to generate */ public abstract void gen(TokenRefElement atom); /** Generate code for the given grammar element. 
* @param blk The tree to generate code for. */ public abstract void gen(TreeElement t); /** Generate the code for a parser */ public abstract void gen(TreeWalkerGrammar g) throws IOException; /** Generate code for the given grammar element. * @param wc The wildcard element to generate */ public abstract void gen(WildcardElement wc); /** Generate code for the given grammar element. * @param blk The (...)* block to generate */ public abstract void gen(ZeroOrMoreBlock blk); /** Generate the token types as a text file for persistence across shared lexer/parser */ protected void genTokenInterchange(TokenManager tm) throws IOException { // Open the token output Java file and set the currentOutput stream String fName = tm.getName() + TokenTypesFileSuffix + TokenTypesFileExt; currentOutput = antlrTool.openOutputFile(fName); println("// $ANTLR " + antlrTool.version + ": " + antlrTool.fileMinusPath(antlrTool.grammarFile) + " -> " + fName + "$"); tabs = 0; // Header println(tm.getName() + " // output token vocab name"); // Generate a definition for each token type Vector v = tm.getVocabulary(); for (int i = Token.MIN_USER_TYPE; i < v.size(); i++) { String s = (String)v.elementAt(i); if (DEBUG_CODE_GENERATOR) { System.out.println("gen persistence file entry for: " + s); } if (s != null && !s.startsWith("<")) { // if literal, find label if (s.startsWith("\"")) { StringLiteralSymbol sl = (StringLiteralSymbol)tm.getTokenSymbol(s); if (sl != null && sl.label != null) { print(sl.label + "="); } println(s + "=" + i); } else { print(s); // check for a paraphrase TokenSymbol ts = (TokenSymbol)tm.getTokenSymbol(s); if (ts == null) { antlrTool.warning("undefined token symbol: " + s); } else { if (ts.getParaphrase() != null) { print("(" + ts.getParaphrase() + ")"); } } println("=" + i); } } } // Close the tokens output file currentOutput.close(); currentOutput = null; } /** Process a string for an simple expression for use in xx/action.g * it is used to cast simple tokens/references to 
the right type for * the generated language. * @param str A String. */ public String processStringForASTConstructor(String str) { return str; } /** Get a string for an expression to generate creation of an AST subtree. * @param v A Vector of String, where each element is an expression in the target language yielding an AST node. */ public abstract String getASTCreateString(Vector v); /** Get a string for an expression to generate creating of an AST node * @param str The text of the arguments to the AST construction */ public abstract String getASTCreateString(GrammarAtom atom, String str); /** Given the index of a bitset in the bitset list, generate a unique name. * Specific code-generators may want to override this * if the language does not allow '_' or numerals in identifiers. * @param index The index of the bitset in the bitset list. */ protected String getBitsetName(int index) { return "_tokenSet_" + index; } public static String encodeLexerRuleName(String id) { return "m" + id; } public static String decodeLexerRuleName(String id) { if ( id==null ) { return null; } return id.substring(1,id.length()); } /** Map an identifier to it's corresponding tree-node variable. * This is context-sensitive, depending on the rule and alternative * being generated * @param id The identifier name to map * @param forInput true if the input tree node variable is to be returned, otherwise the output variable is returned. * @return The mapped id (which may be the same as the input), or null if the mapping is invalid due to duplicates */ public abstract String mapTreeId(String id, ActionTransInfo tInfo); /** Add a bitset to the list of bitsets to be generated. * if the bitset is already in the list, ignore the request. * Always adds the bitset to the end of the list, so the * caller can rely on the position of bitsets in the list. * The returned position can be used to format the bitset * name, since it is invariant. 
* @param p Bit set to mark for code generation * @param forParser true if the bitset is used for the parser, false for the lexer * @return The position of the bitset in the list. */ protected int markBitsetForGen(BitSet p) { // Is the bitset (or an identical one) already marked for gen? for (int i = 0; i < bitsetsUsed.size(); i++) { BitSet set = (BitSet)bitsetsUsed.elementAt(i); if (p.equals(set)) { // Use the identical one already stored return i; } } // Add the new bitset bitsetsUsed.appendElement(p.clone()); return bitsetsUsed.size() - 1; } /** Output tab indent followed by a String, to the currentOutput stream. * Ignored if string is null. * @param s The string to output. */ protected void print(String s) { if (s != null) { printTabs(); currentOutput.print(s); } } /** Print an action with leading tabs, attempting to * preserve the current indentation level for multi-line actions * Ignored if string is null. * @param s The action string to output */ protected void printAction(String s) { if (s != null) { printTabs(); _printAction(s); } } /** Output tab indent followed by a String followed by newline, * to the currentOutput stream. Ignored if string is null. * @param s The string to output */ protected void println(String s) { if (s != null) { printTabs(); currentOutput.println(s); } } /** Output the current tab indentation. This outputs the number of tabs * indicated by the "tabs" variable to the currentOutput stream. */ protected void printTabs() { for (int i = 1; i <= tabs; i++) { currentOutput.print("\t"); } } /** Lexically process $ and # references within the action. * This will replace #id and #(...) with the appropriate * function calls and/or variables etc... 
*/ protected abstract String processActionForSpecialSymbols(String actionStr, int line, RuleBlock currentRule, ActionTransInfo tInfo); public String getFOLLOWBitSet(String ruleName, int k) { GrammarSymbol rs = grammar.getSymbol(ruleName); if ( !(rs instanceof RuleSymbol) ) { return null; } RuleBlock blk = ((RuleSymbol)rs).getBlock(); Lookahead follow = grammar.theLLkAnalyzer.FOLLOW(k, blk.endNode); String followSetName = getBitsetName(markBitsetForGen(follow.fset)); return followSetName; } public String getFIRSTBitSet(String ruleName, int k) { GrammarSymbol rs = grammar.getSymbol(ruleName); if ( !(rs instanceof RuleSymbol) ) { return null; } RuleBlock blk = ((RuleSymbol)rs).getBlock(); Lookahead first = grammar.theLLkAnalyzer.look(k, blk); String firstSetName = getBitsetName(markBitsetForGen(first.fset)); return firstSetName; } /** * Remove the assignment portion of a declaration, if any. * @param d the declaration * @return the declaration without any assignment portion */ protected String removeAssignmentFromDeclaration(String d) { // If d contains an equal sign, then it's a declaration // with an initialization. Strip off the initialization part. 
if (d.indexOf('=') >= 0) d = d.substring(0, d.indexOf('=')).trim(); return d; } /** Set all fields back like one just created */ private void reset() { tabs = 0; // Allocate list of bitsets tagged for code generation bitsetsUsed = new Vector(); currentOutput = null; grammar = null; DEBUG_CODE_GENERATOR = false; makeSwitchThreshold = DEFAULT_MAKE_SWITCH_THRESHOLD; bitsetTestThreshold = DEFAULT_BITSET_TEST_THRESHOLD; } public static String reverseLexerRuleName(String id) { return id.substring(1, id.length()); } public void setAnalyzer(LLkGrammarAnalyzer analyzer_) { analyzer = analyzer_; } public void setBehavior(DefineGrammarSymbols behavior_) { behavior = behavior_; } /** Set a grammar for the code generator to use */ protected void setGrammar(Grammar g) { reset(); grammar = g; // Lookup make-switch threshold in the grammar generic options if (grammar.hasOption("codeGenMakeSwitchThreshold")) { try { makeSwitchThreshold = grammar.getIntegerOption("codeGenMakeSwitchThreshold"); //System.out.println("setting codeGenMakeSwitchThreshold to " + makeSwitchThreshold); } catch (NumberFormatException e) { Token tok = grammar.getOption("codeGenMakeSwitchThreshold"); antlrTool.error( "option 'codeGenMakeSwitchThreshold' must be an integer", grammar.getClassName(), tok.getLine(), tok.getColumn() ); } } // Lookup bitset-test threshold in the grammar generic options if (grammar.hasOption("codeGenBitsetTestThreshold")) { try { bitsetTestThreshold = grammar.getIntegerOption("codeGenBitsetTestThreshold"); //System.out.println("setting codeGenBitsetTestThreshold to " + bitsetTestThreshold); } catch (NumberFormatException e) { Token tok = grammar.getOption("codeGenBitsetTestThreshold"); antlrTool.error( "option 'codeGenBitsetTestThreshold' must be an integer", grammar.getClassName(), tok.getLine(), tok.getColumn() ); } } // Lookup debug code-gen in the grammar generic options if (grammar.hasOption("codeGenDebug")) { Token t = grammar.getOption("codeGenDebug"); if 
(t.getText().equals("true")) { //System.out.println("setting code-generation debug ON"); DEBUG_CODE_GENERATOR = true; } else if (t.getText().equals("false")) { //System.out.println("setting code-generation debug OFF"); DEBUG_CODE_GENERATOR = false; } else { antlrTool.error("option 'codeGenDebug' must be true or false", grammar.getClassName(), t.getLine(), t.getColumn()); } } } public void setTool(Tool tool) { antlrTool = tool; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/CommonAST.java000066400000000000000000000023321161462365500235610ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/CommonAST.java#1 $ */ import antlr.collections.AST; /** Common AST node implementation */ public class CommonAST extends BaseAST { int ttype = Token.INVALID_TYPE; String text; /** Get the token text for this node */ public String getText() { return text; } /** Get the token type for this node */ public int getType() { return ttype; } public void initialize(int t, String txt) { setType(t); setText(txt); } public void initialize(AST t) { setText(t.getText()); setType(t.getType()); } public CommonAST() { } public CommonAST(Token tok) { initialize(tok); } public void initialize(Token tok) { setText(tok.getText()); setType(tok.getType()); } /** Set the token text for this node */ public void setText(String text_) { text = text_; } /** Set the token type for this node */ public void setType(int ttype_) { ttype = ttype_; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/CommonASTWithHiddenTokens.java000066400000000000000000000024331161462365500267170ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: 
//depot/code/org.antlr/release/antlr-2.7.5/antlr/CommonASTWithHiddenTokens.java#1 $ */ import antlr.collections.AST; /** A CommonAST whose initialization copies hidden token * information from the Token used to create a node. */ public class CommonASTWithHiddenTokens extends CommonAST { protected CommonHiddenStreamToken hiddenBefore, hiddenAfter; // references to hidden tokens public CommonASTWithHiddenTokens() { super(); } public CommonASTWithHiddenTokens(Token tok) { super(tok); } public CommonHiddenStreamToken getHiddenAfter() { return hiddenAfter; } public CommonHiddenStreamToken getHiddenBefore() { return hiddenBefore; } public void initialize(AST t) { hiddenBefore = ((CommonASTWithHiddenTokens)t).getHiddenBefore(); hiddenAfter = ((CommonASTWithHiddenTokens)t).getHiddenAfter(); super.initialize(t); } public void initialize(Token tok) { CommonHiddenStreamToken t = (CommonHiddenStreamToken)tok; super.initialize(t); hiddenBefore = t.getHiddenBefore(); hiddenAfter = t.getHiddenAfter(); } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/CommonHiddenStreamToken.java000066400000000000000000000017621161462365500265100ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/CommonHiddenStreamToken.java#1 $ */ public class CommonHiddenStreamToken extends CommonToken { protected CommonHiddenStreamToken hiddenBefore; protected CommonHiddenStreamToken hiddenAfter; public CommonHiddenStreamToken() { super(); } public CommonHiddenStreamToken(int t, String txt) { super(t, txt); } public CommonHiddenStreamToken(String s) { super(s); } public CommonHiddenStreamToken getHiddenAfter() { return hiddenAfter; } public CommonHiddenStreamToken getHiddenBefore() { return hiddenBefore; } protected void setHiddenAfter(CommonHiddenStreamToken t) { hiddenAfter = t; } protected void 
setHiddenBefore(CommonHiddenStreamToken t) { hiddenBefore = t; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/CommonToken.java000066400000000000000000000021401161462365500242070ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/CommonToken.java#1 $ */ public class CommonToken extends Token { // most tokens will want line and text information protected int line; protected String text = null; protected int col; public CommonToken() { } public CommonToken(int t, String txt) { type = t; setText(txt); } public CommonToken(String s) { text = s; } public int getLine() { return line; } public String getText() { return text; } public void setLine(int l) { line = l; } public void setText(String s) { text = s; } public String toString() { return "[\"" + getText() + "\",<" + type + ">,line=" + line + ",col=" + col + "]"; } /** Return token's start column */ public int getColumn() { return col; } public void setColumn(int c) { col = c; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/CppBlockFinishingInfo.java000066400000000000000000000017611161462365500261360ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/CppBlockFinishingInfo.java#1 $ */ // C++ code generator by Pete Wells: pete@yamuna.demon.co.uk class CppBlockFinishingInfo { String postscript; // what to generate to terminate block boolean generatedSwitch;// did block finish with "default:" of switch? boolean generatedAnIf; /** When generating an if or switch, end-of-token lookahead sets * will become the else or default clause, don't generate an * error clause in this case. 
*/ boolean needAnErrorClause; public CppBlockFinishingInfo() { postscript=null; generatedSwitch=false; needAnErrorClause = true; } public CppBlockFinishingInfo(String ps, boolean genS, boolean generatedAnIf, boolean n) { postscript = ps; generatedSwitch = genS; this.generatedAnIf = generatedAnIf; needAnErrorClause = n; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/CppCharFormatter.java000066400000000000000000000053331161462365500251710ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/CppCharFormatter.java#1 $ */ // C++ code generator by Pete Wells: pete@yamuna.demon.co.uk class CppCharFormatter implements CharFormatter { /** Given a character value, return a string representing the character * that can be embedded inside a string literal or character literal * This works for Java/C/C++ code-generation and languages with compatible * special-character-escapment. * * Used internally in CppCharFormatter and in * CppCodeGenerator.converJavaToCppString. * * @param c The character of interest. * @param forCharLiteral true to escape for char literal, false for string literal * IGNORED!! */ public String escapeChar(int c, boolean forCharLiteral) { // System.out.println("CppCharFormatter.escapeChar("+c+")"); switch (c) { case '\n' : return "\\n"; case '\t' : return "\\t"; case '\r' : return "\\r"; case '\\' : return "\\\\"; case '\'' : return "\\'"; case '"' : return "\\\""; default : if ( c < ' ' || c > 126 ) { if (c > 255) { String s = Integer.toString(c,16); // put leading zeroes in front of the thing.. while( s.length() < 4 ) s = '0' + s; return "\\u" + s; } else { return "\\" + Integer.toString(c,8); } } else { return String.valueOf((char)c); } } } /** Converts a String into a representation that can be use as a literal * when surrounded by double-quotes. 
* * Used for escaping semantic predicate strings for exceptions. * * @param s The String to be changed into a literal */ public String escapeString(String s) { String retval = new String(); for (int i = 0; i < s.length(); i++) retval += escapeChar(s.charAt(i), false); return retval; } /** Given a character value, return a string representing the character * literal that can be recognized by the target language compiler. * This works for languages that use single-quotes for character literals. * @param c The character of interest. */ public String literalChar(int c) { String ret = "0x"+Integer.toString(c,16); if( c >= 0 && c <= 126 ) ret += " /* '"+escapeChar(c,true)+"' */ "; return ret; } /** Converts a String into a string literal * This works for languages that use double-quotes for string literals. * Code-generators for languages should override this method. * * Used for the generation of the tables with token names * * @param s The String to be changed into a literal */ public String literalString(String s) { return "\"" + escapeString(s) + "\""; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/CppCodeGenerator.java000066400000000000000000004402361161462365500251560ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/CppCodeGenerator.java#1 $ */ // C++ code generator by Pete Wells: pete@yamuna.demon.co.uk // #line generation contributed by: Ric Klaren import java.util.Enumeration; import java.util.Hashtable; import antlr.collections.impl.BitSet; import antlr.collections.impl.Vector; import java.io.PrintWriter; //SAS: changed for proper text file io import java.io.IOException; import java.io.FileWriter; /** Generate MyParser.cpp, MyParser.hpp, MyLexer.cpp, MyLexer.hpp * and MyParserTokenTypes.hpp */ public class CppCodeGenerator extends CodeGenerator { boolean 
DEBUG_CPP_CODE_GENERATOR = false; // non-zero if inside syntactic predicate generation protected int syntacticPredLevel = 0; // Are we generating ASTs (for parsers and tree parsers) right now? protected boolean genAST = false; // Are we saving the text consumed (for lexers) right now? protected boolean saveText = false; // Generate #line's protected boolean genHashLines = true; // Generate constructors or not protected boolean noConstructors = false; // Used to keep track of lineno in output protected int outputLine; protected String outputFile; // Grammar parameters set up to handle different grammar classes. // These are used to get instanceof tests out of code generation boolean usingCustomAST = false; String labeledElementType; String labeledElementASTType; // mostly the same as labeledElementType except in parsers String labeledElementASTInit; String labeledElementInit; String commonExtraArgs; String commonExtraParams; String commonLocalVars; String lt1Value; String exceptionThrown; String throwNoViable; // Tracks the rule being generated. Used for mapTreeId RuleBlock currentRule; // Tracks the rule or labeled subrule being generated. Used for AST generation. String currentASTResult; // Mapping between the ids used in the current alt, and the // names of variables used to represent their AST values. 
Hashtable treeVariableMap = new Hashtable(); /** Used to keep track of which AST variables have been defined in a rule * (except for the #rule_name and #rule_name_in var's */ Hashtable declaredASTVariables = new Hashtable(); // Count of unnamed generated variables int astVarNumber = 1; // Special value used to mark duplicate in treeVariableMap protected static final String NONUNIQUE = new String(); public static final int caseSizeThreshold = 127; // ascii is max private Vector semPreds; // Used to keep track of which (heterogeneous AST types are used) // which need to be set in the ASTFactory of the generated parser private Vector astTypes; private static String namespaceStd = "ANTLR_USE_NAMESPACE(std)"; private static String namespaceAntlr = "ANTLR_USE_NAMESPACE(antlr)"; private static NameSpace nameSpace = null; private static final String preIncludeCpp = "pre_include_cpp"; private static final String preIncludeHpp = "pre_include_hpp"; private static final String postIncludeCpp = "post_include_cpp"; private static final String postIncludeHpp = "post_include_hpp"; /** Create a C++ code-generator using the given Grammar. * The caller must still call setTool, setBehavior, and setAnalyzer * before generating code. */ public CppCodeGenerator() { super(); charFormatter = new CppCharFormatter(); } /** Adds a semantic predicate string to the sem pred vector These strings will be used to build an array of sem pred names when building a debugging parser. This method should only be called when the debug option is specified */ protected int addSemPred(String predicate) { semPreds.appendElement(predicate); return semPreds.size()-1; } public void exitIfError() { if (antlrTool.hasError()) { antlrTool.fatalError("Exiting due to errors."); } } protected int countLines( String s ) { int lines = 0; for( int i = 0; i < s.length(); i++ ) { if( s.charAt(i) == '\n' ) lines++; } return lines; } /** Output a String to the currentOutput stream. * Ignored if string is null. 
* @param s The string to output */ protected void _print(String s) { if (s != null) { outputLine += countLines(s); currentOutput.print(s); } } /** Print an action without leading tabs, attempting to * preserve the current indentation level for multi-line actions * Ignored if string is null. * @param s The action string to output */ protected void _printAction(String s) { if (s != null) { outputLine += countLines(s)+1; super._printAction(s); } } /** Print an action stored in a token surrounded by #line stuff */ public void printAction(Token t) { if (t != null) { genLineNo(t.getLine()); printTabs(); _printAction(processActionForSpecialSymbols(t.getText(), t.getLine(), null, null) ); genLineNo2(); } } /** Print a header action by #line stuff also process any tree construction * @param name The name of the header part */ public void printHeaderAction(String name) { Token a = (antlr.Token)behavior.headerActions.get(name); if (a != null) { genLineNo(a.getLine()); println(processActionForSpecialSymbols(a.getText(), a.getLine(), null, null) ); genLineNo2(); } } /** Output a String followed by newline, to the currentOutput stream. * Ignored if string is null. * @param s The string to output */ protected void _println(String s) { if (s != null) { outputLine += countLines(s)+1; currentOutput.println(s); } } /** Output tab indent followed by a String followed by newline, * to the currentOutput stream. Ignored if string is null. 
* @param s The string to output */ protected void println(String s) { if (s != null) { printTabs(); outputLine += countLines(s)+1; currentOutput.println(s); } } /** Generate a #line or // line depending on options */ public void genLineNo(int line) { if ( line == 0 ) { line++; } if( genHashLines ) _println("#line "+line+" \""+antlrTool.fileMinusPath(antlrTool.grammarFile)+"\""); } /** Generate a #line or // line depending on options */ public void genLineNo(GrammarElement el) { if( el != null ) genLineNo(el.getLine()); } /** Generate a #line or // line depending on options */ public void genLineNo(Token t) { if (t != null) genLineNo(t.getLine()); } /** Generate a #line or // line depending on options */ public void genLineNo2() { if( genHashLines ) { _println("#line "+(outputLine+1)+" \""+outputFile+"\""); } } /// Bound safe isDigit private boolean charIsDigit( String s, int i ) { return (i < s.length()) && Character.isDigit(s.charAt(i)); } /** Normalize a string coming from antlr's lexer. E.g. translate java * escapes to values. Check their size (multibyte) bomb out if they are * multibyte (bit crude). Then reescape to C++ style things. * Used to generate strings for match() and matchRange() * @param lit the literal string * @param isCharLiteral if it's for a character literal * (enforced to be one length) and enclosed in ' * FIXME: bombing out on mb chars. Should be done in Lexer. * FIXME: this is another horrible hack. * FIXME: life would be soooo much easier if the stuff from the lexer was * normalized in some way. */ private String convertJavaToCppString( String lit, boolean isCharLiteral ) { // System.out.println("convertJavaToCppLiteral: "+lit); String ret = new String(); String s = lit; int i = 0; int val = 0; if( isCharLiteral ) // verify & strip off quotes { if( ! lit.startsWith("'") || ! lit.endsWith("'") ) antlrTool.error("Invalid character literal: '"+lit+"'"); } else { if( ! lit.startsWith("\"") || ! 
lit.endsWith("\"") ) antlrTool.error("Invalid character string: '"+lit+"'"); } s = lit.substring(1,lit.length()-1); String prefix=""; int maxsize = 255; if( grammar instanceof LexerGrammar ) { // vocab size seems to be 1 bigger than it actually is maxsize = ((LexerGrammar)grammar).charVocabulary.size() - 1; if( maxsize > 255 ) prefix= "L"; } // System.out.println("maxsize "+maxsize+" prefix "+prefix); while ( i < s.length() ) { if( s.charAt(i) == '\\' ) { if( s.length() == i+1 ) antlrTool.error("Invalid escape in char literal: '"+lit+"' looking at '"+s.substring(i)+"'"); // deal with escaped junk switch ( s.charAt(i+1) ) { case 'a' : val = 7; i += 2; break; case 'b' : val = 8; i += 2; break; case 't' : val = 9; i += 2; break; case 'n' : val = 10; i += 2; break; case 'f' : val = 12; i += 2; break; case 'r' : val = 13; i += 2; break; case '"' : case '\'' : case '\\' : val = s.charAt(i+1); i += 2; break; case 'u' : // Unicode char \u1234 if( i+5 < s.length() ) { val = Character.digit(s.charAt(i+2), 16) * 16 * 16 * 16 + Character.digit(s.charAt(i+3), 16) * 16 * 16 + Character.digit(s.charAt(i+4), 16) * 16 + Character.digit(s.charAt(i+5), 16); i += 6; } else antlrTool.error("Invalid escape in char literal: '"+lit+"' looking at '"+s.substring(i)+"'"); break; case '0' : // \123 case '1' : case '2' : case '3' : if( charIsDigit(s, i+2) ) { if( charIsDigit(s, i+3) ) { val = (s.charAt(i+1)-'0')*8*8 + (s.charAt(i+2)-'0')*8 + (s.charAt(i+3)-'0'); i += 4; } else { val = (s.charAt(i+1)-'0')*8 + (s.charAt(i+2)-'0'); i += 3; } } else { val = s.charAt(i+1)-'0'; i += 2; } break; case '4' : case '5' : case '6' : case '7' : if ( charIsDigit(s, i+2) ) { val = (s.charAt(i+1)-'0')*8 + (s.charAt(i+2)-'0'); i += 3; } else { val = s.charAt(i+1)-'0'; i += 2; } default: antlrTool.error("Unhandled escape in char literal: '"+lit+"' looking at '"+s.substring(i)+"'"); val = 0; } } else val = s.charAt(i++); if( grammar instanceof LexerGrammar ) { if( val > maxsize ) // abort if too big { String 
offender; if( ( 0x20 <= val ) && ( val < 0x7F ) ) offender = charFormatter.escapeChar(val,true); else offender = "0x"+Integer.toString(val,16); antlrTool.error("Character out of range in "+(isCharLiteral?"char literal":"string constant")+": '"+s+"'"); antlrTool.error("Vocabulary size: "+maxsize+" Character "+offender); } } if( isCharLiteral ) { // we should be at end of char literal here.. if( i != s.length() ) antlrTool.error("Invalid char literal: '"+lit+"'"); if( maxsize <= 255 ) { if ( (val <= 255) && (val & 0x80) != 0 ) // the joys of sign extension in the support lib *cough* // actually the support lib needs to be fixed but that's a bit // hairy too. ret = "static_cast('"+charFormatter.escapeChar(val,true)+"')"; else ret = "'"+charFormatter.escapeChar(val,true)+"'"; } else { // so wchar_t is some implementation defined int like thing // so this may even lead to having 16 bit or 32 bit cases... // I smell some extra grammar options in the future :( ret = "L'"+charFormatter.escapeChar(val,true)+"'"; } } else ret += charFormatter.escapeChar(val,true); } if( !isCharLiteral ) ret = prefix+"\""+ret+"\""; return ret; } /** Generate the parser, lexer, treeparser, and token types in C++ */ public void gen() { // Do the code generation try { // Loop over all grammars Enumeration grammarIter = behavior.grammars.elements(); while (grammarIter.hasMoreElements()) { Grammar g = (Grammar)grammarIter.nextElement(); if ( g.debuggingOutput ) { antlrTool.error(g.getFilename()+": C++ mode does not support -debug"); } // Connect all the components to each other g.setGrammarAnalyzer(analyzer); g.setCodeGenerator(this); analyzer.setGrammar(g); // To get right overloading behavior across hetrogeneous grammars setupGrammarParameters(g); g.generate(); exitIfError(); } // Loop over all token managers (some of which are lexers) Enumeration tmIter = behavior.tokenManagers.elements(); while (tmIter.hasMoreElements()) { TokenManager tm = (TokenManager)tmIter.nextElement(); if 
(!tm.isReadOnly()) { // Write the token manager tokens as C++ // this must appear before genTokenInterchange so that // labels are set on string literals genTokenTypes(tm); // Write the token manager tokens as plain text genTokenInterchange(tm); } exitIfError(); } } catch (IOException e) { antlrTool.reportException(e, null); } } /** Generate code for the given grammar element. * @param blk The {...} action to generate */ public void gen(ActionElement action) { if ( DEBUG_CODE_GENERATOR || DEBUG_CPP_CODE_GENERATOR ) System.out.println("genAction("+action+")"); if ( action.isSemPred ) { genSemPred(action.actionText, action.line); } else { if ( grammar.hasSyntacticPredicate ) { println("if ( inputState->guessing==0 ) {"); tabs++; } ActionTransInfo tInfo = new ActionTransInfo(); String actionStr = processActionForSpecialSymbols(action.actionText, action.getLine(), currentRule, tInfo); if ( tInfo.refRuleRoot!=null ) { // Somebody referenced "#rule", make sure translated var is valid // assignment to #rule is left as a ref also, meaning that assignments // with no other refs like "#rule = foo();" still forces this code to be // generated (unnecessarily). println(tInfo.refRuleRoot + " = "+labeledElementASTType+"(currentAST.root);"); } // dump the translated action genLineNo(action); printAction(actionStr); genLineNo2(); if ( tInfo.assignToRoot ) { // Somebody did a "#rule=", reset internal currentAST.root println("currentAST.root = "+tInfo.refRuleRoot+";"); // reset the child pointer too to be last sibling in sibling list // now use if else in stead of x ? y : z to shut CC 4.2 up. 
println("if ( "+tInfo.refRuleRoot+"!="+labeledElementASTInit+" &&"); tabs++; println(tInfo.refRuleRoot+"->getFirstChild() != "+labeledElementASTInit+" )"); println(" currentAST.child = "+tInfo.refRuleRoot+"->getFirstChild();"); tabs--; println("else"); tabs++; println("currentAST.child = "+tInfo.refRuleRoot+";"); tabs--; println("currentAST.advanceChildToEnd();"); } if ( grammar.hasSyntacticPredicate ) { tabs--; println("}"); } } } /** Generate code for the given grammar element. * @param blk The "x|y|z|..." block to generate */ public void gen(AlternativeBlock blk) { if ( DEBUG_CODE_GENERATOR || DEBUG_CPP_CODE_GENERATOR ) System.out.println("gen("+blk+")"); println("{"); genBlockPreamble(blk); genBlockInitAction(blk); // Tell AST generation to build subrule result String saveCurrentASTResult = currentASTResult; if (blk.getLabel() != null) { currentASTResult = blk.getLabel(); } boolean ok = grammar.theLLkAnalyzer.deterministic(blk); CppBlockFinishingInfo howToFinish = genCommonBlock(blk, true); genBlockFinish(howToFinish, throwNoViable); println("}"); // Restore previous AST generation currentASTResult = saveCurrentASTResult; } /** Generate code for the given grammar element. * @param blk The block-end element to generate. Block-end * elements are synthesized by the grammar parser to represent * the end of a block. */ public void gen(BlockEndElement end) { if ( DEBUG_CODE_GENERATOR || DEBUG_CPP_CODE_GENERATOR ) System.out.println("genRuleEnd("+end+")"); } /** Generate code for the given grammar element. * Only called from lexer grammars. * @param blk The character literal reference to generate */ public void gen(CharLiteralElement atom) { if ( DEBUG_CODE_GENERATOR || DEBUG_CPP_CODE_GENERATOR ) System.out.println("genChar("+atom+")"); if ( ! 
// NOTE(review): this chunk opens mid-method — the enclosing routine (char-literal
// element generation) begins before this excerpt; its tail is reproduced unchanged.
(grammar instanceof LexerGrammar) )
	antlrTool.error("cannot ref character literals in grammar: "+atom);

if ( atom.getLabel() != null ) {
	println(atom.getLabel() + " = " + lt1Value + ";");
}

boolean oldsaveText = saveText;
saveText = saveText && atom.getAutoGenType() == GrammarElement.AUTO_GEN_NONE;

// if in lexer and ! on element, save buffer index to kill later
if ( !saveText||atom.getAutoGenType()==GrammarElement.AUTO_GEN_BANG )
	println("_saveIndex = text.length();");

print(atom.not ? "matchNot(" : "match(");
_print(convertJavaToCppString( atom.atomText, true ));
_println(" /* charlit */ );");

if ( !saveText || atom.getAutoGenType() == GrammarElement.AUTO_GEN_BANG )
	println("text.erase(_saveIndex);"); // kill text atom put in buffer

saveText = oldsaveText;
}

/** Generate code for the given grammar element.
 * Only called from lexer grammars.
 * @param blk The character-range reference to generate
 */
public void gen(CharRangeElement r) {
	if ( DEBUG_CODE_GENERATOR || DEBUG_CPP_CODE_GENERATOR )
		System.out.println("genCharRangeElement("+r.beginText+".."+r.endText+")");

	if ( ! (grammar instanceof LexerGrammar) )
		antlrTool.error("cannot ref character range in grammar: "+r);

	if ( r.getLabel() != null && syntacticPredLevel == 0) {
		println(r.getLabel() + " = " + lt1Value + ";");
	}
	// Correctly take care of saveIndex stuff...
	// Only emit _saveIndex bookkeeping when the matched text is to be
	// discarded (lexer with text saving off, or an explicit '!' suffix).
	boolean save =
		( grammar instanceof LexerGrammar &&
		  ( !saveText || r.getAutoGenType() == GrammarElement.AUTO_GEN_BANG ) );
	if (save)
		println("_saveIndex=text.length();");

	println("matchRange("+convertJavaToCppString(r.beginText,true)+
			","+convertJavaToCppString(r.endText,true)+");");

	if (save)
		println("text.erase(_saveIndex);");
}

/** Generate the lexer C++ files */
public void gen(LexerGrammar g) throws IOException {
	// If debugging, create a new sempred vector for this grammar
	if (g.debuggingOutput)
		semPreds = new Vector();

	// Large character vocabularies are only partially supported by the C++
	// runtime, so warn rather than fail.
	if( g.charVocabulary.size() > 256 )
		antlrTool.warning(g.getFilename()+": Vocabularies of this size still experimental in C++ mode (vocabulary size now: "+g.charVocabulary.size()+")");

	setGrammar(g);
	if (!(grammar instanceof LexerGrammar)) {
		antlrTool.panic("Internal error generating lexer");
	}
	genBody(g);     // emits the .cpp file (see genBody(LexerGrammar) below)
	genInclude(g);  // header generation — body not shown in this chunk
}

/** Generate code for the given grammar element.
 * @param blk The (...)+ block to generate
 */
public void gen(OneOrMoreBlock blk) {
	if ( DEBUG_CODE_GENERATOR || DEBUG_CPP_CODE_GENERATOR )
		System.out.println("gen+("+blk+")");
	String label;
	String cnt;
	println("{ // ( ... )+");
	genBlockPreamble(blk);
	// _cnt tracks the number of iterations so the generated code can enforce
	// the "at least one" semantics of (...)+ on exit.
	if ( blk.getLabel() != null ) {
		cnt = "_cnt_"+blk.getLabel();
	}
	else {
		cnt = "_cnt" + blk.ID;
	}
	println("int "+cnt+"=0;");
	if ( blk.getLabel() != null ) {
		label = blk.getLabel();
	}
	else {
		label = "_loop" + blk.ID;
	}

	println("for (;;) {");
	tabs++;
	// generate the init action for ()+ ()* inside the loop
	// this allows us to do usefull EOF checking...
	genBlockInitAction(blk);

	// Tell AST generation to build subrule result
	String saveCurrentASTResult = currentASTResult;
	if (blk.getLabel() != null) {
		currentASTResult = blk.getLabel();
	}

	boolean ok = grammar.theLLkAnalyzer.deterministic(blk);

	// generate exit test if greedy set to false
	// and an alt is ambiguous with exit branch
	// or when lookahead derived purely from end-of-file
	// Lookahead analysis stops when end-of-file is hit,
	// returning set {epsilon}.  Since {epsilon} is not
	// ambig with any real tokens, no error is reported
	// by deterministic() routines and we have to check
	// for the case where the lookahead depth didn't get
	// set to NONDETERMINISTIC (this only happens when the
	// FOLLOW contains real atoms + epsilon).
	boolean generateNonGreedyExitPath = false;
	int nonGreedyExitDepth = grammar.maxk;

	if ( !blk.greedy &&
		 blk.exitLookaheadDepth<=grammar.maxk &&
		 blk.exitCache[blk.exitLookaheadDepth].containsEpsilon() )
	{
		generateNonGreedyExitPath = true;
		nonGreedyExitDepth = blk.exitLookaheadDepth;
	}
	else if ( !blk.greedy &&
			  blk.exitLookaheadDepth==LLkGrammarAnalyzer.NONDETERMINISTIC )
	{
		generateNonGreedyExitPath = true;
	}

	// generate exit test if greedy set to false
	// and an alt is ambiguous with exit branch
	if ( generateNonGreedyExitPath ) {
		if ( DEBUG_CODE_GENERATOR || DEBUG_CPP_CODE_GENERATOR ) {
			System.out.println("nongreedy (...)+ loop; exit depth is "+
							   blk.exitLookaheadDepth);
		}
		String predictExit =
			getLookaheadTestExpression(blk.exitCache, nonGreedyExitDepth);
		println("// nongreedy exit test");
		// (...)+ requires at least one iteration before a nongreedy exit.
		println("if ( "+cnt+">=1 && "+predictExit+") goto "+label+";");
	}

	CppBlockFinishingInfo howToFinish = genCommonBlock(blk, false);
	genBlockFinish(
		howToFinish,
		"if ( "+cnt+">=1 ) { goto "+label+"; } else {" + throwNoViable + "}"
	);

	println(cnt+"++;");
	tabs--;
	println("}");
	println(label+":;");
	println("} // ( ... )+");

	// Restore previous AST generation
	currentASTResult = saveCurrentASTResult;
}

/** Generate the parser C++ file */
public void gen(ParserGrammar g) throws IOException {
	// if debugging, set up a new vector to keep track of sempred
	// strings for this grammar
	if (g.debuggingOutput)
		semPreds = new Vector();

	setGrammar(g);
	if (!(grammar instanceof ParserGrammar)) {
		antlrTool.panic("Internal error generating parser");
	}
	genBody(g);     // emits the .cpp file (see genBody(ParserGrammar) below)
	genInclude(g);  // header generation — body not shown in this chunk
}

/** Generate code for the given grammar element.
 * @param blk The rule-reference to generate
 */
public void gen(RuleRefElement rr) {
	if ( DEBUG_CODE_GENERATOR || DEBUG_CPP_CODE_GENERATOR )
		System.out.println("genRR("+rr+")");
	RuleSymbol rs = (RuleSymbol)grammar.getSymbol(rr.targetRule);
	if (rs == null || !rs.isDefined()) {
		// Is this redundant???
		antlrTool.error("Rule '" + rr.targetRule + "' is not defined",
						grammar.getFilename(),
						rr.getLine(), rr.getColumn());
		return;
	}
	if (!(rs instanceof RuleSymbol)) {
		// Is this redundant???
		antlrTool.error("'" + rr.targetRule + "' does not name a grammar rule",
						grammar.getFilename(),
						rr.getLine(), rr.getColumn());
		return;
	}

	genErrorTryForElement(rr);

	// AST value for labeled rule refs in tree walker.
	// This is not AST construction; it is just the input tree node value.
	if ( grammar instanceof TreeWalkerGrammar &&
		 rr.getLabel() != null &&
		 syntacticPredLevel == 0 )
	{
		println(rr.getLabel() + " = (_t == ASTNULL) ? "+labeledElementASTInit+" : "+lt1Value+";");
	}

	// if in lexer and ! on rule ref or alt or rule, save buffer index to
	// kill later
	if ( grammar instanceof LexerGrammar &&
		 (!saveText||rr.getAutoGenType()==GrammarElement.AUTO_GEN_BANG) )
	{
		println("_saveIndex = text.length();");
	}

	// Process return value assignment if any
	printTabs();
	if (rr.idAssign != null) {
		// Warn if the rule has no return type
		if (rs.block.returnAction == null) {
			antlrTool.warning("Rule '" + rr.targetRule + "' has no return type",
							  grammar.getFilename(),
							  rr.getLine(), rr.getColumn());
		}
		_print(rr.idAssign + "=");
	}
	else {
		// Warn about return value if any, but not inside syntactic predicate
		if ( !(grammar instanceof LexerGrammar) &&
			 syntacticPredLevel == 0 &&
			 rs.block.returnAction != null)
		{
			antlrTool.warning("Rule '" + rr.targetRule + "' returns a value",
							  grammar.getFilename(),
							  rr.getLine(), rr.getColumn());
		}
	}

	// Call the rule
	// NOTE(review): this capitalization ('GenRuleInvocation' rather than the
	// Java-conventional 'genRuleInvocation') looks like an extraction
	// artifact — verify against the upstream ANTLR source before building.
	GenRuleInvocation(rr);

	// if in lexer and ! on element or alt or rule, save buffer index to kill later
	if ( grammar instanceof LexerGrammar &&
		 (!saveText||rr.getAutoGenType()==GrammarElement.AUTO_GEN_BANG) )
	{
		println("text.erase(_saveIndex);");
	}

	// if not in a syntactic predicate
	if (syntacticPredLevel == 0) {
		boolean doNoGuessTest = (
			grammar.hasSyntacticPredicate &&
			(
				grammar.buildAST && rr.getLabel() != null ||
				(genAST && rr.getAutoGenType() == GrammarElement.AUTO_GEN_NONE)
			)
		);

		if (doNoGuessTest) {
			println("if (inputState->guessing==0) {");
			tabs++;
		}

		if (grammar.buildAST && rr.getLabel() != null) {
			// always gen variable for rule return on labeled rules
			// RK: hmm do I know here if the returnAST needs a cast ?
			println(rr.getLabel() + "_AST = returnAST;");
		}

		if (genAST) {
			switch (rr.getAutoGenType()) {
			case GrammarElement.AUTO_GEN_NONE:
				if( usingCustomAST )
					println("astFactory->addASTChild(currentAST, "+namespaceAntlr+"RefAST(returnAST));");
				else
					println("astFactory->addASTChild( currentAST, returnAST );");
				break;
			case GrammarElement.AUTO_GEN_CARET:
				// FIXME: RK: I'm not so sure this should be an error..
				// I think it might actually work and be usefull at times.
				antlrTool.error("Internal: encountered ^ after rule reference");
				break;
			default:
				break;
			}
		}

		// if a lexer and labeled, Token label defined at rule level, just set it here
		if ( grammar instanceof LexerGrammar && rr.getLabel() != null ) {
			println(rr.getLabel()+"=_returnToken;");
		}

		if (doNoGuessTest) {
			tabs--;
			println("}");
		}
	}
	genErrorCatchForElement(rr);
}

/** Generate code for the given grammar element.
 * @param blk The string-literal reference to generate
 */
public void gen(StringLiteralElement atom) {
	if ( DEBUG_CODE_GENERATOR || DEBUG_CPP_CODE_GENERATOR )
		System.out.println("genString("+atom+")");

	// Variable declarations for labeled elements
	if (atom.getLabel()!=null && syntacticPredLevel == 0) {
		println(atom.getLabel() + " = " + lt1Value + ";");
	}

	// AST
	genElementAST(atom);

	// is there a bang on the literal?
	boolean oldsaveText = saveText;
	saveText = saveText && atom.getAutoGenType()==GrammarElement.AUTO_GEN_NONE;

	// matching
	genMatch(atom);

	saveText = oldsaveText;

	// tack on tree cursor motion if doing a tree walker
	if (grammar instanceof TreeWalkerGrammar) {
		println("_t = _t->getNextSibling();");
	}
}

/** Generate code for the given grammar element.
 * @param blk The token-range reference to generate
 */
public void gen(TokenRangeElement r) {
	genErrorTryForElement(r);
	if ( r.getLabel()!=null && syntacticPredLevel == 0) {
		println(r.getLabel() + " = " + lt1Value + ";");
	}

	// AST
	genElementAST(r);

	// match
	println("matchRange("+r.beginText+","+r.endText+");");
	genErrorCatchForElement(r);
}

/** Generate code for the given grammar element.
 * @param blk The token-reference to generate
 */
public void gen(TokenRefElement atom) {
	if ( DEBUG_CODE_GENERATOR || DEBUG_CPP_CODE_GENERATOR )
		System.out.println("genTokenRef("+atom+")");
	if ( grammar instanceof LexerGrammar ) {
		antlrTool.panic("Token reference found in lexer");
	}
	genErrorTryForElement(atom);

	// Assign Token value to token label variable
	if ( atom.getLabel()!=null && syntacticPredLevel == 0) {
		println(atom.getLabel() + " = " + lt1Value + ";");
	}

	// AST
	genElementAST(atom);
	// matching
	genMatch(atom);
	genErrorCatchForElement(atom);

	// tack on tree cursor motion if doing a tree walker
	if (grammar instanceof TreeWalkerGrammar) {
		println("_t = _t->getNextSibling();");
	}
}

/** Generate code for a tree-pattern element (the #( ... ) construct). */
public void gen(TreeElement t) {
	// save AST cursor
	println(labeledElementType+" __t" + t.ID + " = _t;");

	// If there is a label on the root, then assign that to the variable
	if (t.root.getLabel() != null) {
		println(t.root.getLabel() + " = (_t == ASTNULL) ? "+labeledElementASTInit+" : _t;");
	}

	// check for invalid modifiers ! and ^ on tree element roots
	if ( t.root.getAutoGenType() == GrammarElement.AUTO_GEN_BANG ) {
		antlrTool.error("Suffixing a root node with '!' is not implemented",
						grammar.getFilename(), t.getLine(), t.getColumn());
		t.root.setAutoGenType(GrammarElement.AUTO_GEN_NONE);
	}
	if ( t.root.getAutoGenType() == GrammarElement.AUTO_GEN_CARET ) {
		antlrTool.warning("Suffixing a root node with '^' is redundant; already a root",
						  grammar.getFilename(), t.getLine(), t.getColumn());
		t.root.setAutoGenType(GrammarElement.AUTO_GEN_NONE);
	}

	// Generate AST variables
	genElementAST(t.root);
	if (grammar.buildAST) {
		// Save the AST construction state
		println(namespaceAntlr+"ASTPair __currentAST" + t.ID + " = currentAST;");
		// Make the next item added a child of the TreeElement root
		println("currentAST.root = currentAST.child;");
		println("currentAST.child = "+labeledElementASTInit+";");
	}

	// match root
	if ( t.root instanceof WildcardElement ) {
		println("if ( _t == ASTNULL ) throw "+namespaceAntlr+"MismatchedTokenException();");
	}
	else {
		genMatch(t.root);
	}
	// move to list of children
	println("_t = _t->getFirstChild();");

	// walk list of children, generating code for each
	// NOTE(review): the line below is garbled — the loop condition, loop body
	// and the AST-state/tree-cursor restore code were lost in extraction
	// (angle-bracketed text stripped).  Restore from the upstream ANTLR
	// CppCodeGenerator before compiling.
	for (int i=0; igetNextSibling();");
}

/** Generate the tree-parser C++ files */
public void gen(TreeWalkerGrammar g) throws IOException {
	setGrammar(g);
	if (!(grammar instanceof TreeWalkerGrammar)) {
		antlrTool.panic("Internal error generating tree-walker");
	}
	genBody(g);     // emits the .cpp file (see genBody(TreeWalkerGrammar) below)
	genInclude(g);  // header generation — body not shown in this chunk
}

/** Generate code for the given grammar element.
 * @param wc The wildcard element to generate
 */
public void gen(WildcardElement wc) {
	// Variable assignment for labeled elements
	if (wc.getLabel()!=null && syntacticPredLevel == 0) {
		println(wc.getLabel() + " = " + lt1Value + ";");
	}

	// AST
	genElementAST(wc);

	// Match anything but EOF
	if (grammar instanceof TreeWalkerGrammar) {
		println("if ( _t == "+labeledElementASTInit+" ) throw "+namespaceAntlr+"MismatchedTokenException();");
	}
	else if (grammar instanceof LexerGrammar) {
		// In the lexer, discard the matched text when text saving is off or
		// the element carries a '!' suffix.
		if ( grammar instanceof LexerGrammar &&
			 (!saveText||wc.getAutoGenType()==GrammarElement.AUTO_GEN_BANG) )
		{
			println("_saveIndex = text.length();");
		}
		println("matchNot(EOF/*_CHAR*/);");
		if ( grammar instanceof LexerGrammar &&
			 (!saveText||wc.getAutoGenType()==GrammarElement.AUTO_GEN_BANG) )
		{
			println("text.erase(_saveIndex);"); // kill text atom put in buffer
		}
	}
	else {
		println("matchNot(" + getValueString(Token.EOF_TYPE) + ");");
	}

	// tack on tree cursor motion if doing a tree walker
	if (grammar instanceof TreeWalkerGrammar) {
		println("_t = _t->getNextSibling();");
	}
}

/** Generate code for the given grammar element.
 * @param blk The (...)* block to generate
 */
public void gen(ZeroOrMoreBlock blk) {
	if ( DEBUG_CODE_GENERATOR || DEBUG_CPP_CODE_GENERATOR )
		System.out.println("gen*("+blk+")");
	println("{ // ( ... )*");
	genBlockPreamble(blk);
	String label;
	if ( blk.getLabel() != null ) {
		label = blk.getLabel();
	}
	else {
		label = "_loop" + blk.ID;
	}
	println("for (;;) {");
	tabs++;
	// generate the init action for ()+ ()* inside the loop
	// this allows us to do usefull EOF checking...
	genBlockInitAction(blk);

	// Tell AST generation to build subrule result
	String saveCurrentASTResult = currentASTResult;
	if (blk.getLabel() != null) {
		currentASTResult = blk.getLabel();
	}

	boolean ok = grammar.theLLkAnalyzer.deterministic(blk);

	// generate exit test if greedy set to false
	// and an alt is ambiguous with exit branch
	// or when lookahead derived purely from end-of-file
	// Lookahead analysis stops when end-of-file is hit,
	// returning set {epsilon}.  Since {epsilon} is not
	// ambig with any real tokens, no error is reported
	// by deterministic() routines and we have to check
	// for the case where the lookahead depth didn't get
	// set to NONDETERMINISTIC (this only happens when the
	// FOLLOW contains real atoms + epsilon).
	boolean generateNonGreedyExitPath = false;
	int nonGreedyExitDepth = grammar.maxk;

	if ( !blk.greedy &&
		 blk.exitLookaheadDepth<=grammar.maxk &&
		 blk.exitCache[blk.exitLookaheadDepth].containsEpsilon() )
	{
		generateNonGreedyExitPath = true;
		nonGreedyExitDepth = blk.exitLookaheadDepth;
	}
	else if ( !blk.greedy &&
			  blk.exitLookaheadDepth==LLkGrammarAnalyzer.NONDETERMINISTIC )
	{
		generateNonGreedyExitPath = true;
	}

	if ( generateNonGreedyExitPath ) {
		if ( DEBUG_CODE_GENERATOR || DEBUG_CPP_CODE_GENERATOR ) {
			System.out.println("nongreedy (...)* loop; exit depth is "+
							   blk.exitLookaheadDepth);
		}
		String predictExit =
			getLookaheadTestExpression(blk.exitCache, nonGreedyExitDepth);
		println("// nongreedy exit test");
		// (...)* may exit immediately — no iteration-count guard here,
		// unlike the (...)+ case above.
		println("if ("+predictExit+") goto "+label+";");
	}

	CppBlockFinishingInfo howToFinish = genCommonBlock(blk, false);
	genBlockFinish(howToFinish, "goto " + label + ";");

	tabs--;
	println("}");
	println(label+":;");
	println("} // ( ... )*");

	// Restore previous AST generation
	currentASTResult = saveCurrentASTResult;
}

/** Generate an alternative.
 * @param alt  The alternative to generate
 * @param blk  The block to which the alternative belongs
 */
protected void genAlt(Alternative alt, AlternativeBlock blk) {
	// Save the AST generation state, and set it to that of the alt
	boolean savegenAST = genAST;
	genAST = genAST && alt.getAutoGen();

	// NOTE(review): 'oldsaveTest' presumably means 'oldsaveText' — the name is
	// only local, so behavior is unaffected; rename when touching this code.
	boolean oldsaveTest = saveText;
	saveText = saveText && alt.getAutoGen();

	// Reset the variable name map for the alternative
	Hashtable saveMap = treeVariableMap;
	treeVariableMap = new Hashtable();

	// Generate try block around the alt for error handling
	if (alt.exceptionSpec != null) {
		println("try { // for error handling");
		tabs++;
	}

	AlternativeElement elem = alt.head;
	while ( !(elem instanceof BlockEndElement) ) {
		elem.generate(); // alt can begin with anything. Ask target to gen.
		elem = elem.next;
	}

	if ( genAST) {
		if (blk instanceof RuleBlock) {
			// Set the AST return value for the rule
			RuleBlock rblk = (RuleBlock)blk;
			if( usingCustomAST )
				println(rblk.getRuleName() + "_AST = "+labeledElementASTType+"(currentAST.root);");
			else
				println(rblk.getRuleName() + "_AST = currentAST.root;");
		}
		else if (blk.getLabel() != null) {
			// ### future: also set AST value for labeled subrules.
			// println(blk.getLabel() + "_AST = "+labeledElementASTType+"(currentAST.root);");
			antlrTool.warning("Labeled subrules are not implemented",
							  grammar.getFilename(), blk.getLine(), blk.getColumn());
		}
	}

	if (alt.exceptionSpec != null) {
		// close try block
		tabs--;
		println("}");
		genErrorHandler(alt.exceptionSpec);
	}

	genAST = savegenAST;
	saveText = oldsaveTest;

	treeVariableMap = saveMap;
}

/** Generate all the bitsets to be used in the parser or lexer
 * Generate the raw bitset data like "long _tokenSet1_data[] = {...};"
 * and the BitSet object declarations like
 * "BitSet _tokenSet1 = new BitSet(_tokenSet1_data);"
 * Note that most languages do not support object initialization inside a
 * class definition, so other code-generators may have to separate the
 * bitset declarations from the initializations (e.g., put the
 * initializations in the generated constructor instead).
 * @param bitsetList    The list of bitsets to generate.
 * @param maxVocabulary Ensure that each generated bitset can contain at
 *                      least this value.
 * @param prefix        string glued in from of bitset names used for namespace
 *                      qualifications.
 */
protected void genBitsets(
	Vector bitsetList,
	int maxVocabulary,
	String prefix
)
{
	TokenManager tm = grammar.tokenManager;

	println("");

	for (int i = 0; i < bitsetList.size(); i++) {
		BitSet p = (BitSet)bitsetList.elementAt(i);
		// Ensure that generated BitSet is large enough for vocabulary
		p.growToInclude(maxVocabulary);

		// initialization data
		println(
			"const unsigned long " + prefix + getBitsetName(i) + "_data_" +
			"[] = { " + p.toStringOfHalfWords() + " };"
		);

		// Dump the contents of the bitset in readable format...
		String t = "// ";
		for( int j = 0; j < tm.getVocabulary().size(); j++ ) {
			if ( p.member( j ) ) {
				if ( (grammar instanceof LexerGrammar) ) {
					// only dump out for pure printable ascii.
					if( ( 0x20 <= j ) && ( j < 0x7F ) )
						t += charFormatter.escapeChar(j,true)+" ";
					else
						t += "0x"+Integer.toString(j,16)+" ";
				}
				else
					t += tm.getTokenStringAt(j)+" ";

				if( t.length() > 70 ) {
					println(t);
					t = "// ";
				}
			}
		}
		// NOTE(review): reference (!=) comparison on String.  It behaves as
		// intended here only because t is reset to the *same* interned
		// literal "// " above (JLS string-literal interning); any t built by
		// concatenation is a distinct object and compares unequal.  Prefer
		// !t.equals("// ") when touching this code.
		if ( t != "// " )
			println(t);

		// BitSet object
		println(
			"const "+namespaceAntlr+"BitSet " + prefix + getBitsetName(i) + "(" +
			getBitsetName(i) + "_data_," + p.size()/32 + ");"
		);
	}
}

/** Generate the header-file declarations matching genBitsets() above:
 * the raw data array and the BitSet object, both static class members.
 */
protected void genBitsetsHeader(
	Vector bitsetList,
	int maxVocabulary
)
{
	println("");
	for (int i = 0; i < bitsetList.size(); i++) {
		BitSet p = (BitSet)bitsetList.elementAt(i);
		// Ensure that generated BitSet is large enough for vocabulary
		p.growToInclude(maxVocabulary);
		// initialization data
		println("static const unsigned long " + getBitsetName(i) + "_data_" + "[];");
		// BitSet object
		println("static const "+namespaceAntlr+"BitSet " + getBitsetName(i) + ";");
	}
}

/** Generate the finish of a block, using a combination of the info
 * returned from genCommonBlock() and the action to perform when
 * no alts were taken
 * @param howToFinish    The return of genCommonBlock()
 * @param noViableAction What to generate when no alt is taken
 */
private void genBlockFinish(CppBlockFinishingInfo howToFinish, String noViableAction)
{
	if (howToFinish.needAnErrorClause &&
		(howToFinish.generatedAnIf || howToFinish.generatedSwitch))
	{
		if ( howToFinish.generatedAnIf ) {
			println("else {");
		}
		else {
			println("{");
		}
		tabs++;
		println(noViableAction);
		tabs--;
		println("}");
	}

	if ( howToFinish.postscript!=null ) {
		println(howToFinish.postscript);
	}
}

/** Generate the initaction for a block, which may be a RuleBlock or a
 * plain AlternativeBLock.
 * @blk The block for which the preamble is to be generated.
 */
protected void genBlockInitAction( AlternativeBlock blk ) {
	// dump out init action
	if ( blk.initAction!=null ) {
		genLineNo(blk);
		printAction(processActionForSpecialSymbols(blk.initAction, blk.line,
												   currentRule, null) );
		genLineNo2();
	}
}

/** Generate the header for a block, which may be a RuleBlock or a
 * plain AlternativeBlock.  This generates any variable declarations
 * and syntactic-predicate-testing variables.
 * @blk The block for which the preamble is to be generated.
 */
protected void genBlockPreamble(AlternativeBlock blk) {
	// define labels for rule blocks.
	if ( blk instanceof RuleBlock ) {
		RuleBlock rblk = (RuleBlock)blk;
		if ( rblk.labeledElements!=null ) {
			// NOTE(review): the source is badly garbled from here on — the body
			// of this loop, the remainder of genBlockPreamble, and the opening
			// of genBody(LexerGrammar) (its signature, output-file setup, the
			// genHeader call, and the <antlr/...hpp> #include targets below)
			// were lost in extraction (angle-bracketed text stripped).  The
			// fragment resumes inside genBody(LexerGrammar).  Restore from the
			// upstream ANTLR CppCodeGenerator before compiling.
			for (int i=0; i");
			println("#include ");
			println("#include ");
			println("#include ");
			println("#include ");
			println("#include ");
			println("#include ");
			if (grammar.debuggingOutput)
				println("#include ");
			println("");
			printHeaderAction(postIncludeCpp);

			if (nameSpace != null)
				nameSpace.emitDeclarations(currentOutput);

			// Generate user-defined lexer file preamble
			printAction(grammar.preambleAction);

			// Generate lexer class definition
			String sup=null;
			if ( grammar.superClass!=null ) {
				sup = grammar.superClass;
			}
			else {
				sup = grammar.getSuperClass();
				if (sup.lastIndexOf('.') != -1)
					sup = sup.substring(sup.lastIndexOf('.')+1);
				sup = namespaceAntlr + sup;
			}

			// Emitted constructors can be suppressed; the generated code is
			// kept but wrapped in #if 0 so the user sees what was skipped.
			if( noConstructors )
			{
				println("#if 0");
				println("// constructor creation turned of with 'noConstructor' option");
			}
			//
			// Generate the constructor from InputStream
			//
			println(grammar.getClassName() + "::" + grammar.getClassName() + "(" + namespaceStd + "istream& in)");
			tabs++;
			// if debugging, wrap the input buffer in a debugger
			if (grammar.debuggingOutput)
				println(": " + sup + "(new "+namespaceAntlr+"DebuggingInputBuffer(new "+namespaceAntlr+"CharBuffer(in)),"+g.caseSensitive+")");
			else
				println(": " + sup + "(new "+namespaceAntlr+"CharBuffer(in),"+g.caseSensitive+")");
			tabs--;
			println("{");
			tabs++;

			// if debugging, set up array variables and call user-overridable
			// debugging setup method
			if ( grammar.debuggingOutput ) {
				println("setRuleNames(_ruleNames);");
				println("setSemPredNames(_semPredNames);");
				println("setupDebugging();");
			}

			// println("setCaseSensitive("+g.caseSensitive+");");
			println("initLiterals();");
			tabs--;
			println("}");
			println("");

			// Generate the constructor from InputBuffer
			println(grammar.getClassName() + "::" + grammar.getClassName() + "("+namespaceAntlr+"InputBuffer& ib)");
			tabs++;
			// if debugging, wrap the input buffer in a debugger
			if (grammar.debuggingOutput)
				println(": " + sup + "(new "+namespaceAntlr+"DebuggingInputBuffer(ib),"+g.caseSensitive+")");
			else
				println(": " + sup + "(ib,"+g.caseSensitive+")");
			tabs--;
			println("{");
			tabs++;

			// if debugging, set up array variables and call user-overridable
			// debugging setup method
			if ( grammar.debuggingOutput ) {
				println("setRuleNames(_ruleNames);");
				println("setSemPredNames(_semPredNames);");
				println("setupDebugging();");
			}

			// println("setCaseSensitive("+g.caseSensitive+");");
			println("initLiterals();");
			tabs--;
			println("}");
			println("");

			// Generate the constructor from LexerSharedInputState
			println(grammar.getClassName() + "::" + grammar.getClassName() + "(const "+namespaceAntlr+"LexerSharedInputState& state)");
			tabs++;
			println(": " + sup + "(state,"+g.caseSensitive+")");
			tabs--;
			println("{");
			tabs++;

			// if debugging, set up array variables and call user-overridable
			// debugging setup method
			if ( grammar.debuggingOutput ) {
				println("setRuleNames(_ruleNames);");
				println("setSemPredNames(_semPredNames);");
				println("setupDebugging();");
			}

			// println("setCaseSensitive("+g.caseSensitive+");");
			println("initLiterals();");
			tabs--;
			println("}");
			println("");

			if( noConstructors )
			{
				println("// constructor creation turned of with 'noConstructor' option");
				println("#endif");
			}

			// Emit initLiterals(): fills the 'literals' map (declared in the
			// CharScanner runtime base) with the grammar's string literals.
			println("void " + grammar.getClassName() + "::initLiterals()");
			println("{");
			tabs++;
			// Generate the initialization of the map
			// containing the string literals used in the lexer
			// The literals variable itself is in CharScanner
			Enumeration keys = grammar.tokenManager.getTokenSymbolKeys();
			while ( keys.hasMoreElements() ) {
				String key = (String)keys.nextElement();
				if ( key.charAt(0) != '"' ) {
					continue;
				}
				TokenSymbol sym = grammar.tokenManager.getTokenSymbol(key);
				if ( sym instanceof StringLiteralSymbol ) {
					StringLiteralSymbol s = (StringLiteralSymbol)sym;
					println("literals["+s.getId()+"] = "+s.getTokenType()+";");
				}
			}

			// Generate the setting of various generated options.
			tabs--;
			println("}");

			Enumeration ids;

			// generate the rule name array for debugging
			if (grammar.debuggingOutput) {
				println("const char* "+grammar.getClassName()+"::_ruleNames[] = {");
				tabs++;

				ids = grammar.rules.elements();
				int ruleNum=0;
				while ( ids.hasMoreElements() ) {
					GrammarSymbol sym = (GrammarSymbol) ids.nextElement();
					if ( sym instanceof RuleSymbol)
						println("\""+((RuleSymbol)sym).getId()+"\",");
				}
				println("0");
				tabs--;
				println("};");
			}

			// Generate nextToken() rule.
			// nextToken() is a synthetic lexer rule that is the implicit OR of all
			// user-defined lexer rules.
			genNextToken();

			// Generate code for each rule in the lexer
			ids = grammar.rules.elements();
			int ruleNum=0;
			while ( ids.hasMoreElements() ) {
				RuleSymbol sym = (RuleSymbol) ids.nextElement();
				// Don't generate the synthetic rules
				if (!sym.getId().equals("mnextToken")) {
					genRule(sym, false, ruleNum++, grammar.getClassName() + "::");
				}
				exitIfError();
			}

			// Generate the semantic predicate map for debugging
			if (grammar.debuggingOutput)
				genSemPredMap(grammar.getClassName() + "::");

			// Generate the bitsets used throughout the lexer
			genBitsets(bitsetsUsed, ((LexerGrammar)grammar).charVocabulary.size(), grammar.getClassName() + "::" );

			println("");
			if (nameSpace != null)
				nameSpace.emitClosures(currentOutput);

			// Close the lexer output stream
			currentOutput.close();
			currentOutput = null;
		}

public void genInitFactory( Grammar g )
{
	// Generate the method to initialize an ASTFactory when we're
	// building AST's
	String param_name = "factory ";
	if( ! g.buildAST )
		param_name = "";

	println("void "+ g.getClassName() + "::initializeASTFactory( "+namespaceAntlr+"ASTFactory& "+param_name+")");
	println("{");
	tabs++;

	if( g.buildAST )
	{
		// sort out custom AST types... synchronize token manager with token
		// specs on rules (and other stuff we were able to see from
		// action.g) (imperfect of course)
		TokenManager tm = grammar.tokenManager;
		Enumeration tokens = tm.getTokenSymbolKeys();
		while( tokens.hasMoreElements() )
		{
			String tok = (String)tokens.nextElement();
			TokenSymbol ts = tm.getTokenSymbol(tok);
			// if we have a custom type and there's not a more local override
			// of the tokentype then mark this as the type for the tokentype
			if( ts.getASTNodeType() != null )
			{
				// ensure capacity with this pseudo vector...
				astTypes.ensureCapacity(ts.getTokenType());
				String type = (String)astTypes.elementAt(ts.getTokenType());
				if( type == null )
					astTypes.setElementAt(ts.getASTNodeType(),ts.getTokenType());
				else
				{
					// give a warning over action taken if the types are unequal
					if( !
						ts.getASTNodeType().equals(type) )
					{
						antlrTool.warning("Token "+tok+" taking most specific AST type",grammar.getFilename(),1,1);
						antlrTool.warning(" using "+type+" ignoring "+ts.getASTNodeType(),grammar.getFilename(),1,1);
					}
				}
			}
		}
		// now actually write out all the registered types. (except the default
		// type.
		for( int i = 0; i < astTypes.size(); i++ )
		{
			String type = (String)astTypes.elementAt(i);
			if( type != null )
			{
				println("factory.registerFactory("+i
						+", \""+type+"\", "+type+"::factory);");
			}
		}
		println("factory.setMaxNodeType("+grammar.tokenManager.maxTokenType()+");");
	}
	tabs--;
	println("}");
}

// FIXME: and so why are we passing here a g param while inside
// we merrily use the global grammar.
/** Generate the parser .cpp file: constructors, per-rule methods,
 * token-name table, bitsets and the optional debugging sempred map.
 */
public void genBody(ParserGrammar g) throws IOException
{
	// Open the output stream for the parser and set the currentOutput
	outputFile = grammar.getClassName() + ".cpp";
	outputLine = 1;
	currentOutput = antlrTool.openOutputFile(outputFile);

	genAST = grammar.buildAST;

	tabs = 0;

	// Generate the header common to all output files.
	genHeader(outputFile);
	printHeaderAction(preIncludeCpp);

	// Generate header for the parser
	// NOTE(review): the #include targets below were lost in extraction
	// (angle-bracketed text stripped); restore the <antlr/...hpp> paths from
	// the upstream ANTLR source before compiling.
	println("#include \"" + grammar.getClassName() + ".hpp\"");
	println("#include ");
	println("#include ");
	println("#include ");

	printHeaderAction(postIncludeCpp);

	if (nameSpace != null)
		nameSpace.emitDeclarations(currentOutput);

	// Output the user-defined parser preamble
	printAction(grammar.preambleAction);

	String sup=null;
	if ( grammar.superClass!=null )
		sup = grammar.superClass;
	else {
		sup = grammar.getSuperClass();
		if (sup.lastIndexOf('.') != -1)
			sup = sup.substring(sup.lastIndexOf('.')+1);
		sup = namespaceAntlr + sup;
	}

	// set up an array of all the rule names so the debugger can
	// keep track of them only by number -- less to store in tree...
	if (grammar.debuggingOutput) {
		println("const char* "+grammar.getClassName()+"::_ruleNames[] = {");
		tabs++;

		Enumeration ids = grammar.rules.elements();
		int ruleNum=0;
		while ( ids.hasMoreElements() ) {
			GrammarSymbol sym = (GrammarSymbol) ids.nextElement();
			if ( sym instanceof RuleSymbol)
				println("\""+((RuleSymbol)sym).getId()+"\",");
		}
		println("0");
		tabs--;
		println("};");
	}

	// Generate _initialize function
	// disabled since it isn't used anymore..

	// println("void " + grammar.getClassName() + "::_initialize(void)");
	// println("{");
	// tabs++;

	// if debugging, set up arrays and call the user-overridable
	// debugging setup method
	// if ( grammar.debuggingOutput ) {
	// 	println("setRuleNames(_ruleNames);");
	// 	println("setSemPredNames(_semPredNames);");
	// 	println("setupDebugging();");
	// }
	// tabs--;
	// println("}");

	if( noConstructors )
	{
		println("#if 0");
		println("// constructor creation turned of with 'noConstructor' option");
	}

	// Generate parser class constructor from TokenBuffer
	print(grammar.getClassName() + "::" + grammar.getClassName());
	println("("+namespaceAntlr+"TokenBuffer& tokenBuf, int k)");
	println(": " + sup + "(tokenBuf,k)");
	println("{");
	// tabs++;
	// println("_initialize();");
	// tabs--;
	println("}");
	println("");

	print(grammar.getClassName() + "::" + grammar.getClassName());
	println("("+namespaceAntlr+"TokenBuffer& tokenBuf)");
	println(": " + sup + "(tokenBuf," + grammar.maxk + ")");
	println("{");
	// tabs++;
	// println("_initialize();");
	// tabs--;
	println("}");
	println("");

	// Generate parser class constructor from TokenStream
	print(grammar.getClassName() + "::" + grammar.getClassName());
	println("("+namespaceAntlr+"TokenStream& lexer, int k)");
	println(": " + sup + "(lexer,k)");
	println("{");
	// tabs++;
	// println("_initialize();");
	// tabs--;
	println("}");
	println("");

	print(grammar.getClassName() + "::" + grammar.getClassName());
	println("("+namespaceAntlr+"TokenStream& lexer)");
	println(": " + sup + "(lexer," + grammar.maxk + ")");
	println("{");
	// tabs++;
	// println("_initialize();");
	// tabs--;
	println("}");
	println("");

	print(grammar.getClassName() + "::" + grammar.getClassName());
	println("(const "+namespaceAntlr+"ParserSharedInputState& state)");
	println(": " + sup + "(state," + grammar.maxk + ")");
	println("{");
	// tabs++;
	// println("_initialize();");
	// tabs--;
	println("}");
	println("");

	if( noConstructors )
	{
		println("// constructor creation turned of with 'noConstructor' option");
		println("#endif");
	}

	astTypes = new Vector();

	// Generate code for each rule in the grammar
	Enumeration ids = grammar.rules.elements();
	int ruleNum=0;
	while ( ids.hasMoreElements() ) {
		GrammarSymbol sym = (GrammarSymbol) ids.nextElement();
		if ( sym instanceof RuleSymbol) {
			RuleSymbol rs = (RuleSymbol)sym;
			genRule(rs, rs.references.size()==0, ruleNum++, grammar.getClassName() + "::");
		}
		exitIfError();
	}

	genInitFactory( g );

	// Generate the token names
	genTokenStrings(grammar.getClassName() + "::");

	// Generate the bitsets used throughout the grammar
	genBitsets(bitsetsUsed, grammar.tokenManager.maxTokenType(), grammar.getClassName() + "::" );

	// Generate the semantic predicate map for debugging
	if (grammar.debuggingOutput)
		genSemPredMap(grammar.getClassName() + "::");

	// Close class definition
	println("");
	println("");
	if (nameSpace != null)
		nameSpace.emitClosures(currentOutput);

	// Close the parser output stream
	currentOutput.close();
	currentOutput = null;
}

/** Generate the tree-walker .cpp file (continues into the next chunk). */
public void genBody(TreeWalkerGrammar g) throws IOException
{
	// Open the output stream for the parser and set the currentOutput
	outputFile = grammar.getClassName() + ".cpp";
	outputLine = 1;
	currentOutput = antlrTool.openOutputFile(outputFile);
	//SAS: changed for proper text file io

	genAST = grammar.buildAST;
	tabs = 0;

	// Generate the header common to all output files.
	genHeader(outputFile);
	printHeaderAction(preIncludeCpp);

	// Generate header for the parser
	// NOTE(review): the #include targets below were lost in extraction
	// (angle-bracketed text stripped); restore the <antlr/...hpp> paths from
	// the upstream ANTLR source before compiling.
	println("#include \"" + grammar.getClassName() + ".hpp\"");
	println("#include ");
	println("#include ");
	println("#include ");
	println("#include ");
	println("#include ");
	println("#include ");

	printHeaderAction(postIncludeCpp);

	if (nameSpace != null)
		nameSpace.emitDeclarations(currentOutput);

	// Output the user-defined parser premamble
	printAction(grammar.preambleAction);

	// Generate parser class definition
	String sup = null;
	if ( grammar.superClass!=null ) {
		sup = grammar.superClass;
	}
	else {
		sup = grammar.getSuperClass();
		if (sup.lastIndexOf('.') != -1)
			sup = sup.substring(sup.lastIndexOf('.')+1);
		sup = namespaceAntlr + sup;
	}
	if( noConstructors )
	{
		println("#if 0");
		println("// constructor creation turned of with 'noConstructor' option");
	}

	// Generate default parser class constructor
	println(grammar.getClassName() + "::" + grammar.getClassName() + "()");
	println("\t: "+namespaceAntlr+"TreeParser() {");
	tabs++;
	// println("setTokenNames(_tokenNames);");
	tabs--;
	println("}");

	if( noConstructors )
	{
		println("// constructor creation turned of with 'noConstructor' option");
		println("#endif");
	}
	println("");

	astTypes = new Vector();

	// Generate code for each rule in the grammar
	Enumeration ids = grammar.rules.elements();
	int ruleNum=0;
	// NOTE(review): ruleNameInits is assigned but not used in the visible
	// portion of this method.
	String ruleNameInits = "";
	while ( ids.hasMoreElements() ) {
		GrammarSymbol sym = (GrammarSymbol) ids.nextElement();
		if ( sym instanceof RuleSymbol) {
			RuleSymbol rs = (RuleSymbol)sym;
			genRule(rs, rs.references.size()==0, ruleNum++, grammar.getClassName() + "::");
		}
		exitIfError();
	}

	// Generate the ASTFactory initialization function
	genInitFactory( grammar );

	// Generate the token names
	genTokenStrings(grammar.getClassName() + "::");

	// Generate the bitsets used throughout the grammar
	genBitsets(bitsetsUsed, grammar.tokenManager.maxTokenType(), grammar.getClassName() + "::" );

	// Close class definition
	println("");
	println("");

	if (nameSpace != null)
		nameSpace.emitClosures(currentOutput);

	// Close the parser output stream
	currentOutput.close();
	currentOutput = null;
}

/** Generate a series of case statements that implement a BitSet test.
 * @param p The Bitset for which cases are to be generated
 */
protected void genCases(BitSet p) {
	if ( DEBUG_CODE_GENERATOR || DEBUG_CPP_CODE_GENERATOR )
		System.out.println("genCases("+p+")");
	int[] elems;

	elems = p.toArray();
	// Wrap cases four-per-line for lexer, one-per-line for parser
	int wrap = 1; //(grammar instanceof LexerGrammar) ? 4 : 1;
	int j=1;
	boolean startOfLine = true;
	for (int i = 0; i < elems.length; i++) {
		if (j==1) {
			print("");
		}
		else {
			_print(" ");
		}
		_print("case " + getValueString(elems[i]) + ":");

		if (j==wrap) {
			_println("");
			startOfLine = true;
			j=1;
		}
		else {
			j++;
			startOfLine = false;
		}
	}
	if (!startOfLine) {
		_println("");
	}
}

/** Generate common code for a block of alternatives; return a postscript
 * that needs to be generated at the end of the block.  Other routines
 * may append else-clauses and such for error checking before the postfix
 * is generated.
 * If the grammar is a lexer, then generate alternatives in an order where
 * alternatives requiring deeper lookahead are generated first, and
 * EOF in the lookahead set reduces the depth of the lookahead.
 * @param blk The block to generate
 * @param noTestForSingle If true, then it does not generate a test for a single alternative.
 */
public CppBlockFinishingInfo genCommonBlock(
	AlternativeBlock blk,
	boolean noTestForSingle )
{
	int nIF=0;
	boolean createdLL1Switch = false;
	int closingBracesOfIFSequence = 0;
	CppBlockFinishingInfo finishingInfo = new CppBlockFinishingInfo();
	if ( DEBUG_CODE_GENERATOR || DEBUG_CPP_CODE_GENERATOR )
		System.out.println("genCommonBlk("+blk+")");

	// Save the AST generation state, and set it to that of the block
	boolean savegenAST = genAST;
	genAST = genAST && blk.getAutoGen();

	boolean oldsaveTest = saveText;
	saveText = saveText && blk.getAutoGen();

	// Is this block inverted?
	// If so, generate special-case code
	if ( blk.not &&
		analyzer.subruleCanBeInverted(blk, grammar instanceof LexerGrammar) )
	{
		Lookahead p = analyzer.look(1, blk);
		// Variable assignment for labeled elements
		if (blk.getLabel() != null && syntacticPredLevel == 0) {
			println(blk.getLabel() + " = " + lt1Value + ";");
		}

		// AST
		genElementAST(blk);

		String astArgs="";
		if (grammar instanceof TreeWalkerGrammar) {
			if( usingCustomAST )
				astArgs=namespaceAntlr+"RefAST"+"(_t),";
			else
				astArgs="_t,";
		}

		// match the bitset for the alternative
		println("match(" + astArgs + getBitsetName(markBitsetForGen(p.fset)) + ");");

		// tack on tree cursor motion if doing a tree walker
		if (grammar instanceof TreeWalkerGrammar) {
			println("_t = _t->getNextSibling();");
		}
		return finishingInfo;
	}

	// Special handling for single alt
	if (blk.getAlternatives().size() == 1) {
		Alternative alt = blk.getAlternativeAt(0);
		// Generate a warning if there is a synPred for single alt.
		if (alt.synPred != null) {
			antlrTool.warning(
				"Syntactic predicate superfluous for single alternative",
				grammar.getFilename(),
				blk.getAlternativeAt(0).synPred.getLine(),
				blk.getAlternativeAt(0).synPred.getColumn()
				);
		}
		if (noTestForSingle) {
			if (alt.semPred != null) {
				// Generate validating predicate
				genSemPred(alt.semPred, blk.line);
			}
			genAlt(alt, blk);
			return finishingInfo;
		}
	}

	// count number of simple LL(1) cases; only do switch for
	// many LL(1) cases (no preds, no end of token refs)
	// We don't care about exit paths for (...)*, (...)+
	// because we don't explicitly have a test for them
	// as an alt in the loop.
	//
	// Also, we now count how many unicode lookahead sets
	// there are--they must be moved to DEFAULT or ELSE
	// clause.
	int nLL1 = 0;
	// NOTE(review): the next run of code is garbled in this copy — the text
	// between a '<' and a '>' (the LL(1)-counting loop body and the switch
	// threshold test) appears to have been stripped, presumably by an HTML
	// sanitizer.  Verify against the upstream ANTLR CppCodeGenerator source.
	for (int i=0; i= makeSwitchThreshold ) {
		// Determine the name of the item to be compared
		String testExpr = lookaheadString(1);
		createdLL1Switch = true;
		// when parsing trees, convert null to valid tree node with NULL lookahead
		if ( grammar instanceof TreeWalkerGrammar ) {
			println("if (_t == "+labeledElementASTInit+" )");
			tabs++;
			println("_t = ASTNULL;");
			tabs--;
		}
		println("switch ( "+testExpr+") {");
		// NOTE(review): garbled span again — the case-emission loop header and
		// the start of the per-alternative analysis were stripped between
		// angle brackets; verify against upstream.
		for (int i=0; i= 1 && alt.cache[effectiveDepth].containsEpsilon() ) {
				effectiveDepth--;
			}
			// Ignore alts whose effective depth is other than the ones we
			// are generating for this iteration.
			if (effectiveDepth != altDepth) {
				if ( DEBUG_CODE_GENERATOR || DEBUG_CPP_CODE_GENERATOR )
					System.out.println("ignoring alt because effectiveDepth!=altDepth;"+effectiveDepth+"!="+altDepth);
				continue;
			}
			unpredicted = lookaheadIsEmpty(alt, effectiveDepth);
			e = getLookaheadTestExpression(alt, effectiveDepth);
		}
		else {
			unpredicted = lookaheadIsEmpty(alt, grammar.maxk);
			e = getLookaheadTestExpression(alt, grammar.maxk);
		}

		// Was it a big unicode range that forced unsuitability
		// for a case expression?
		if ( alt.cache[1].fset.degree() > caseSizeThreshold &&
			  suitableForCaseExpression(alt))
		{
			if ( nIF==0 ) {
				// generate this only for the first if the elseif's
				// are covered by this one
				if ( grammar instanceof TreeWalkerGrammar ) {
					println("if (_t == "+labeledElementASTInit+" )");
					tabs++;
					println("_t = ASTNULL;");
					tabs--;
				}
				println("if " + e + " {");
			}
			else
				println("else if " + e + " {");
		}
		else if (unpredicted &&
					alt.semPred==null &&
					alt.synPred==null)
		{
			// The alt has empty prediction set and no
			// predicate to help out.  if we have not
			// generated a previous if, just put {...} around
			// the end-of-token clause
			if ( nIF==0 ) {
				println("{");
			}
			else {
				println("else {");
			}
			finishingInfo.needAnErrorClause = false;
		}
		else {
			// check for sem and syn preds
			// Add any semantic predicate expression to the lookahead test
			if ( alt.semPred != null ) {
				// if debugging, wrap the evaluation of the predicate in a method
				//
				// translate $ and # references
				ActionTransInfo tInfo = new ActionTransInfo();
				String actionStr = processActionForSpecialSymbols(alt.semPred,
																  blk.line,
																  currentRule,
																  tInfo);
				// ignore translation info...we don't need to do anything with it.
				// call that will inform SemanticPredicateListeners of the
				// result
				if ( grammar.debuggingOutput &&
					  ((grammar instanceof ParserGrammar) || (grammar instanceof LexerGrammar))
					)
					e = "("+e+"&& fireSemanticPredicateEvaluated(antlr.debug.SemanticPredicateEvent.PREDICTING,"+ //FIXME
						addSemPred(charFormatter.escapeString(actionStr))+","+actionStr+"))";
				else
					e = "("+e+"&&("+actionStr +"))";
			}

			// Generate any syntactic predicates
			if ( nIF>0 ) {
				if ( alt.synPred != null ) {
					println("else {");
					tabs++;
					genSynPred( alt.synPred, e );
					closingBracesOfIFSequence++;
				}
				else {
					println("else if " + e + " {");
				}
			}
			else {
				if ( alt.synPred != null ) {
					genSynPred( alt.synPred, e );
				}
				else {
					// when parsing trees, convert null to valid tree node
					// with NULL lookahead.
					if ( grammar instanceof TreeWalkerGrammar ) {
						println("if (_t == "+labeledElementASTInit+" )");
						tabs++;
						println("_t = ASTNULL;");
						tabs--;
					}
					println("if " + e + " {");
				}
			}
		}

		nIF++;
		tabs++;
		genAlt(alt, blk);
		tabs--;
		println("}");
		}
	}

	// close out any syntactic-predicate else-nesting opened above
	String ps = "";
	for (int i=1; i<=closingBracesOfIFSequence; i++) {
		tabs--; // does JavaCodeGenerator need this?
		ps+="}";
	}

	// Restore the AST generation state
	genAST = savegenAST;

	// restore save text state
	saveText=oldsaveTest;

	// Return the finishing info.
	if ( createdLL1Switch ) {
		tabs--;
		finishingInfo.postscript = ps+"}";
		finishingInfo.generatedSwitch = true;
		finishingInfo.generatedAnIf = nIF>0;
		//return new CppBlockFinishingInfo(ps+"}",true,nIF>0); // close up switch statement
	}
	else {
		finishingInfo.postscript = ps;
		finishingInfo.generatedSwitch = false;
		finishingInfo.generatedAnIf = nIF>0;
		//return new CppBlockFinishingInfo(ps, false,nIF>0);
	}
	return finishingInfo;
}

/** An alternative can be emitted as a C++ case label only when it needs
 * exactly one token of lookahead, has no semantic predicate, cannot match
 * epsilon, and its lookahead set is small enough for a case expression.
 */
private static boolean suitableForCaseExpression(Alternative a) {
	return a.lookaheadDepth == 1 &&
		a.semPred == null &&
		!a.cache[1].containsEpsilon() &&
		a.cache[1].fset.degree()<=caseSizeThreshold;
}

/** Generate code to link an element reference into the AST */
private void genElementAST(AlternativeElement el) {
	// handle case where you're not building trees, but are in tree walker.
	// Just need to get labels set up.
	if ( grammar instanceof TreeWalkerGrammar &&
		!grammar.buildAST )
	{
		String elementRef;
		String astName;

		// Generate names and declarations of the AST variable(s)
		if (el.getLabel() == null) {
			elementRef = lt1Value;
			// Generate AST variables for unlabeled stuff
			astName = "tmp" + astVarNumber + "_AST";
			astVarNumber++;
			// Map the generated AST variable in the alternate
			mapTreeVariable(el, astName);
			// Generate an "input" AST variable also
			println(labeledElementASTType+" "+astName+"_in = "+elementRef+";");
		}
		return;
	}

	if (grammar.buildAST && syntacticPredLevel == 0) {
		boolean needASTDecl =
			( genAST &&
			(el.getLabel() != null ||
			 el.getAutoGenType() != GrammarElement.AUTO_GEN_BANG ));

		// RK: if we have a grammar element always generate the decl
		// since some guy can access it from an action and we can't
		// peek ahead (well not without making a mess).
		// I'd prefer taking this out.
		if( el.getAutoGenType() != GrammarElement.AUTO_GEN_BANG &&
			 (el instanceof TokenRefElement) )
			needASTDecl = true;

		boolean doNoGuessTest =
			( grammar.hasSyntacticPredicate && needASTDecl );

		String elementRef;
		String astNameBase;

		// Generate names and declarations of the AST variable(s)
		if (el.getLabel() != null) {
			// if the element is labeled use that name...
			elementRef = el.getLabel();
			astNameBase = el.getLabel();
		}
		else {
			// else generate a temporary name...
			elementRef = lt1Value;
			// Generate AST variables for unlabeled stuff
			astNameBase = "tmp" + astVarNumber;
			astVarNumber++;
		}

		// Generate the declaration if required.
		if ( needASTDecl ) {
			if ( el instanceof GrammarAtom ) {
				GrammarAtom ga = (GrammarAtom)el;
				if ( ga.getASTNodeType()!=null ) {
					genASTDeclaration( el, astNameBase, "Ref"+ga.getASTNodeType() );
					// println("Ref"+ga.getASTNodeType()+" " + astName + ";");
				}
				else {
					genASTDeclaration( el, astNameBase, labeledElementASTType );
					// println(labeledElementASTType+" " + astName + " = "+labeledElementASTInit+";");
				}
			}
			else {
				genASTDeclaration( el, astNameBase, labeledElementASTType );
				// println(labeledElementASTType+" " + astName + " = "+labeledElementASTInit+";");
			}
		}

		// for convenience..
		String astName = astNameBase + "_AST";

		// Map the generated AST variable in the alternate
		mapTreeVariable(el, astName);

		if (grammar instanceof TreeWalkerGrammar) {
			// Generate an "input" AST variable also
			println(labeledElementASTType+" " + astName + "_in = "+labeledElementASTInit+";");
		}

		// Enclose actions with !guessing
		if (doNoGuessTest) {
			println("if ( inputState->guessing == 0 ) {");
			tabs++;
		}

		// if something has a label assume it will be used
		// so we must initialize the RefAST
		if (el.getLabel() != null) {
			if ( el instanceof GrammarAtom ) {
				println(astName + " = "+
						  getASTCreateString((GrammarAtom)el,elementRef) + ";");
			}
			else {
				println(astName + " = "+
						  getASTCreateString(elementRef) + ";");
			}
		}

		// if it has no label but a declaration exists initialize it.
		if( el.getLabel() == null && needASTDecl ) {
			elementRef = lt1Value;
			if ( el instanceof GrammarAtom ) {
				println(astName + " = "+
						  getASTCreateString((GrammarAtom)el,elementRef) + ";");
			}
			else {
				println(astName + " = "+
						  getASTCreateString(elementRef) + ";");
			}
			// Map the generated AST variable in the alternate
			if (grammar instanceof TreeWalkerGrammar) {
				// set "input" AST variable also
				println(astName + "_in = " + elementRef + ";");
			}
		}

		if (genAST) {
			switch (el.getAutoGenType()) {
			case GrammarElement.AUTO_GEN_NONE:
				if( usingCustomAST ||
					 (el instanceof GrammarAtom &&
					  ((GrammarAtom)el).getASTNodeType() != null) )
					println("astFactory->addASTChild(currentAST, "+namespaceAntlr+"RefAST("+ astName + "));");
				else
					println("astFactory->addASTChild(currentAST, "+ astName + ");");
				// println("astFactory.addASTChild(currentAST, "+namespaceAntlr+"RefAST(" + astName + "));");
				break;
			case GrammarElement.AUTO_GEN_CARET:
				if( usingCustomAST ||
					 (el instanceof GrammarAtom &&
					  ((GrammarAtom)el).getASTNodeType() != null) )
					println("astFactory->makeASTRoot(currentAST, "+namespaceAntlr+"RefAST(" + astName + "));");
				else
					println("astFactory->makeASTRoot(currentAST, " + astName + ");");
				break;
			default:
				break;
			}
		}

		if (doNoGuessTest) {
			tabs--;
			println("}");
		}
	}
}

/** Close the try block and generate catch phrases
 * if the element has a labeled handler in the rule
 */
private void genErrorCatchForElement(AlternativeElement el) {
	if (el.getLabel() == null) return;
	String r = el.enclosingRuleName;
	if ( grammar instanceof LexerGrammar ) {
		r = CodeGenerator.encodeLexerRuleName(el.enclosingRuleName);
	}
	RuleSymbol rs = (RuleSymbol)grammar.getSymbol(r);
	if (rs == null) {
		antlrTool.panic("Enclosing rule not found!");
	}
	ExceptionSpec ex = rs.block.findExceptionSpec(el.getLabel());
	if (ex != null) {
		tabs--;
		println("}");
		genErrorHandler(ex);
	}
}

/** Generate the catch phrases for a user-specified error handler */
private void genErrorHandler(ExceptionSpec ex) {
	// Each ExceptionHandler in the ExceptionSpec is
	// a separate catch
	for (int i = 0; i < ex.handlers.size(); i++) {
		ExceptionHandler handler = (ExceptionHandler)ex.handlers.elementAt(i);
		// Generate catch phrase
		println("catch (" + handler.exceptionTypeAndName.getText() + ") {");
		tabs++;
		if (grammar.hasSyntacticPredicate) {
			println("if (inputState->guessing==0) {");
			tabs++;
		}

		// When not guessing, execute user handler action
		ActionTransInfo tInfo = new ActionTransInfo();
		genLineNo(handler.action);
		printAction(
			processActionForSpecialSymbols( handler.action.getText(),
													  handler.action.getLine(),
													  currentRule, tInfo )
			);
		genLineNo2();

		if (grammar.hasSyntacticPredicate) {
			tabs--;
			println("} else {");
			tabs++;
			// When guessing, rethrow exception
			println("throw;");
			tabs--;
			println("}");
		}
		// Close catch phrase
		tabs--;
		println("}");
	}
}

/** Generate a try { opening if the element has a labeled handler in the rule */
private void genErrorTryForElement(AlternativeElement el) {
	if (el.getLabel() == null) return;
	String r = el.enclosingRuleName;
	if ( grammar instanceof LexerGrammar ) {
		r = CodeGenerator.encodeLexerRuleName(el.enclosingRuleName);
	}
	RuleSymbol rs = (RuleSymbol)grammar.getSymbol(r);
	if (rs == null) {
		antlrTool.panic("Enclosing rule not found!");
	}
	ExceptionSpec ex = rs.block.findExceptionSpec(el.getLabel());
	if (ex != null) {
		println("try { // for error handling");
		tabs++;
	}
}

/** Generate a header that is common to all C++ files */
protected void genHeader(String fileName)
{
	println("/* $ANTLR "+antlrTool.version+": "+
			"\""+antlrTool.fileMinusPath(antlrTool.grammarFile)+"\""+
			" -> "+
			"\""+fileName+"\"$ */");
}

// these are unique to C++ mode
/** Generate the lexer header file (.hpp) for a lexer grammar. */
public void genInclude(LexerGrammar g) throws IOException
{
	outputFile = grammar.getClassName() + ".hpp";
	outputLine = 1;
	currentOutput = antlrTool.openOutputFile(outputFile);
	//SAS: changed for proper text file io

	genAST = false;	// no way to gen trees.
	saveText = true;	// save consumed characters.
	tabs=0;

	// Generate a guard wrapper
	println("#ifndef INC_"+grammar.getClassName()+"_hpp_");
	println("#define INC_"+grammar.getClassName()+"_hpp_");
	println("");
	printHeaderAction(preIncludeHpp);
	// NOTE(review): the targets of the #include directives below are missing
	// in this copy — angle-bracket contents (e.g. <antlr/config.hpp>) appear
	// to have been stripped by an HTML sanitizer.  Verify against upstream.
	println("#include ");

	// Generate header common to all C++ output files
	genHeader(outputFile);

	// Generate header specific to lexer header file
	println("#include ");
	println("#include ");
	println("#include ");
	println("#include \"" + grammar.tokenManager.getName() + TokenTypesFileSuffix+".hpp\"");

	// Find the name of the super class
	String sup=null;
	if ( grammar.superClass!=null ) {
		sup = grammar.superClass;
		println("\n// Include correct superclass header with a header statement for example:");
		println("// header \"post_include_hpp\" {");
		println("// #include \""+sup+".hpp\"");
		println("// }");
		println("// Or....");
		println("// header {");
		println("// #include \""+sup+".hpp\"");
		println("// }\n");
	}
	else {
		sup = grammar.getSuperClass();
		if (sup.lastIndexOf('.') != -1)
			sup = sup.substring(sup.lastIndexOf('.')+1);
		println("#include ");
		sup = namespaceAntlr + sup;
	}

	// Do not use printAction because we assume tabs==0
	printHeaderAction(postIncludeHpp);

	if (nameSpace != null)
		nameSpace.emitDeclarations(currentOutput);

	printHeaderAction("");

	// print javadoc comment if any
	if ( grammar.comment!=null ) {
		_println(grammar.comment);
	}

	// Generate lexer class definition
	print("class CUSTOM_API " + grammar.getClassName() + " : public " + sup);
	println(", public " + grammar.tokenManager.getName() + TokenTypesFileSuffix);

	Token tsuffix = (Token)grammar.options.get("classHeaderSuffix");
	if ( tsuffix != null ) {
		String suffix = StringUtils.stripFrontBack(tsuffix.getText(),"\"","\"");
		if ( suffix != null ) {
			print(", "+suffix);  // must be an interface name for Java
		}
	}
	println("{");

	// Generate user-defined lexer class members
	if (grammar.classMemberAction != null) {
		genLineNo(grammar.classMemberAction);
		print(
			processActionForSpecialSymbols(grammar.classMemberAction.getText(),
													 grammar.classMemberAction.getLine(),
													 currentRule, null)
			);
		genLineNo2();
	}

	// Generate initLiterals() method
	tabs=0;
	println("private:");
	tabs=1;
	println("void initLiterals();");

	// Generate getCaseSensitiveLiterals() method
	tabs=0;
	println("public:");
	tabs=1;
	println("bool getCaseSensitiveLiterals() const");
	println("{");
	tabs++;
	println("return "+g.caseSensitiveLiterals + ";");
	tabs--;
	println("}");

	// Make constructors public
	tabs=0;
	println("public:");
	tabs=1;

	if( noConstructors ) {
		tabs = 0;
		println("#if 0");
		println("// constructor creation turned of with 'noConstructor' option");
		tabs = 1;
	}

	// Generate the constructor from std::istream
	println(grammar.getClassName() + "(" + namespaceStd + "istream& in);");

	// Generate the constructor from InputBuffer
	println(grammar.getClassName() + "("+namespaceAntlr+"InputBuffer& ib);");

	println(grammar.getClassName() + "(const "+namespaceAntlr+"LexerSharedInputState& state);");

	if( noConstructors ) {
		tabs = 0;
		println("// constructor creation turned of with 'noConstructor' option");
		println("#endif");
		tabs = 1;
	}

	// Generate nextToken() rule.
	// nextToken() is a synthetic lexer rule that is the implicit OR of all
	// user-defined lexer rules.
	println(namespaceAntlr+"RefToken nextToken();");

	// Generate code for each rule in the lexer
	Enumeration ids = grammar.rules.elements();
	while ( ids.hasMoreElements() ) {
		RuleSymbol sym = (RuleSymbol) ids.nextElement();
		// Don't generate the synthetic rules
		if (!sym.getId().equals("mnextToken")) {
			genRuleHeader(sym, false);
		}
		exitIfError();
	}

	// Make the rest private
	tabs=0;
	println("private:");
	tabs=1;

	// generate the rule name array for debugging
	if ( grammar.debuggingOutput ) {
		println("static const char* _ruleNames[];");
	}

	// Generate the semantic predicate map for debugging
	if (grammar.debuggingOutput)
		println("static const char* _semPredNames[];");

	// Generate the bitsets used throughout the lexer
	genBitsetsHeader(bitsetsUsed, ((LexerGrammar)grammar).charVocabulary.size());

	tabs=0;
	println("};");
	println("");
	if (nameSpace != null)
		nameSpace.emitClosures(currentOutput);

	// Generate a guard wrapper
	println("#endif /*INC_"+grammar.getClassName()+"_hpp_*/");

	// Close the lexer output stream
	currentOutput.close();
	currentOutput = null;
}

/** Generate the parser header file (.hpp) for a parser grammar. */
public void genInclude(ParserGrammar g) throws IOException
{
	// Open the output stream for the parser and set the currentOutput
	outputFile = grammar.getClassName() + ".hpp";
	outputLine = 1;
	currentOutput = antlrTool.openOutputFile(outputFile);
	//SAS: changed for proper text file io

	genAST = grammar.buildAST;

	tabs = 0;

	// Generate a guard wrapper
	println("#ifndef INC_"+grammar.getClassName()+"_hpp_");
	println("#define INC_"+grammar.getClassName()+"_hpp_");
	println("");
	printHeaderAction(preIncludeHpp);
	// NOTE(review): include target missing in this copy (stripped angle-bracket
	// content); verify against upstream.
	println("#include ");

	// Generate the header common to all output files.
	genHeader(outputFile);

	// Generate header for the parser
	// NOTE(review): include targets missing in this copy (stripped
	// angle-bracket content); verify against upstream.
	println("#include ");
	println("#include ");
	println("#include \"" + grammar.tokenManager.getName() + TokenTypesFileSuffix+".hpp\"");

	// Generate parser class definition
	String sup=null;
	if ( grammar.superClass!=null ) {
		sup = grammar.superClass;
		println("\n// Include correct superclass header with a header statement for example:");
		println("// header \"post_include_hpp\" {");
		println("// #include \""+sup+".hpp\"");
		println("// }");
		println("// Or....");
		println("// header {");
		println("// #include \""+sup+".hpp\"");
		println("// }\n");
	}
	else {
		sup = grammar.getSuperClass();
		if (sup.lastIndexOf('.') != -1)
			sup = sup.substring(sup.lastIndexOf('.')+1);
		println("#include ");
		sup = namespaceAntlr + sup;
	}
	println("");

	// Do not use printAction because we assume tabs==0
	printHeaderAction(postIncludeHpp);

	if (nameSpace != null)
		nameSpace.emitDeclarations(currentOutput);

	printHeaderAction("");

	// print javadoc comment if any
	if ( grammar.comment!=null ) {
		_println(grammar.comment);
	}

	// generate the actual class definition
	print("class CUSTOM_API " + grammar.getClassName() + " : public " + sup);
	println(", public " + grammar.tokenManager.getName() + TokenTypesFileSuffix);

	Token tsuffix = (Token)grammar.options.get("classHeaderSuffix");
	if ( tsuffix != null ) {
		String suffix = StringUtils.stripFrontBack(tsuffix.getText(),"\"","\"");
		if ( suffix != null )
			print(", "+suffix);  // must be an interface name for Java
	}
	println("{");

	// set up an array of all the rule names so the debugger can
	// keep track of them only by number -- less to store in tree...
	if (grammar.debuggingOutput) {
		println("public: static const char* _ruleNames[];");
	}

	// Generate user-defined parser class members
	if (grammar.classMemberAction != null) {
		genLineNo(grammar.classMemberAction.getLine());
		print(
			processActionForSpecialSymbols(grammar.classMemberAction.getText(),
													 grammar.classMemberAction.getLine(),
													 currentRule, null)
			);
		genLineNo2();
	}
	println("public:");
	tabs = 1;
	println("void initializeASTFactory( "+namespaceAntlr+"ASTFactory& factory );");
//	println("// called from constructors");
//	println("void _initialize( void );");

	// Generate parser class constructor from TokenBuffer
	tabs=0;
	if( noConstructors ) {
		println("#if 0");
		println("// constructor creation turned of with 'noConstructor' option");
	}
	println("protected:");
	tabs=1;
	println(grammar.getClassName() + "("+namespaceAntlr+"TokenBuffer& tokenBuf, int k);");
	tabs=0;
	println("public:");
	tabs=1;
	println(grammar.getClassName() + "("+namespaceAntlr+"TokenBuffer& tokenBuf);");

	// Generate parser class constructor from TokenStream
	tabs=0;
	println("protected:");
	tabs=1;
	println(grammar.getClassName()+"("+namespaceAntlr+"TokenStream& lexer, int k);");
	tabs=0;
	println("public:");
	tabs=1;
	println(grammar.getClassName()+"("+namespaceAntlr+"TokenStream& lexer);");

	println(grammar.getClassName()+"(const "+namespaceAntlr+"ParserSharedInputState& state);");
	if( noConstructors ) {
		tabs = 0;
		println("// constructor creation turned of with 'noConstructor' option");
		println("#endif");
		tabs = 1;
	}

	println("int getNumTokens() const");
	println("{");
	tabs++;
	println("return "+grammar.getClassName()+"::NUM_TOKENS;");
	tabs--;
	println("}");
	println("const char* getTokenName( int type ) const");
	println("{");
	tabs++;
	println("if( type > getNumTokens() ) return 0;");
	println("return "+grammar.getClassName()+"::tokenNames[type];");
	tabs--;
	println("}");
	println("const char* const* getTokenNames() const");
	println("{");
	tabs++;
	println("return "+grammar.getClassName()+"::tokenNames;");
	tabs--;
	println("}");

	// Generate code for each rule in the grammar
	Enumeration ids = grammar.rules.elements();
	while ( ids.hasMoreElements() ) {
		GrammarSymbol sym = (GrammarSymbol) ids.nextElement();
		if ( sym instanceof RuleSymbol) {
			RuleSymbol rs = (RuleSymbol)sym;
			genRuleHeader(rs, rs.references.size()==0);
		}
		exitIfError();
	}

	// RK: when we are using a custom ast override Parser::getAST to return
	// the custom AST type. Ok, this does not work anymore with newer
	// compilers gcc 3.2.x and up. The reference counter is probably
	// getting in the way.
	// So now we just patch the return type back to RefAST
	tabs = 0;
	println("public:");
	tabs = 1;
	println(namespaceAntlr+"RefAST getAST()");
	println("{");
	if( usingCustomAST ) {
		tabs++;
		println("return "+namespaceAntlr+"RefAST(returnAST);");
		tabs--;
	}
	else {
		tabs++;
		println("return returnAST;");
		tabs--;
	}
	println("}");
	println("");

	tabs=0;
	println("protected:");
	tabs=1;
	println(labeledElementASTType+" returnAST;");

	// Make the rest private
	tabs=0;
	println("private:");
	tabs=1;

	// Generate the token names
	println("static const char* tokenNames[];");

	// and how many there are of them
	_println("#ifndef NO_STATIC_CONSTS");
	println("static const int NUM_TOKENS = "+grammar.tokenManager.getVocabulary().size()+";");
	_println("#else");
	println("enum {");
	println("\tNUM_TOKENS = "+grammar.tokenManager.getVocabulary().size());
	println("};");
	_println("#endif");

	// Generate the bitsets used throughout the grammar
	genBitsetsHeader(bitsetsUsed, grammar.tokenManager.maxTokenType());

	// Generate the semantic predicate map for debugging
	if (grammar.debuggingOutput)
		println("static const char* _semPredNames[];");

	// Close class definition
	tabs=0;
	println("};");
	println("");
	if (nameSpace != null)
		nameSpace.emitClosures(currentOutput);

	// Generate a guard wrapper
	println("#endif /*INC_"+grammar.getClassName()+"_hpp_*/");

	// Close the parser output stream
	currentOutput.close();
	currentOutput = null;
}

/** Generate the tree-walker header file (.hpp) for a tree grammar. */
public void genInclude(TreeWalkerGrammar g)
	throws IOException
{
	// Open the output stream for the parser and set the currentOutput
	outputFile = grammar.getClassName() + ".hpp";
	outputLine = 1;
	currentOutput = antlrTool.openOutputFile(outputFile);
	//SAS: changed for proper text file io

	genAST = grammar.buildAST;
	tabs = 0;

	// Generate a guard wrapper
	println("#ifndef INC_"+grammar.getClassName()+"_hpp_");
	println("#define INC_"+grammar.getClassName()+"_hpp_");
	println("");
	printHeaderAction(preIncludeHpp);
	// NOTE(review): include target missing in this copy (stripped
	// angle-bracket content); verify against upstream.
	println("#include ");
	println("#include \"" + grammar.tokenManager.getName() + TokenTypesFileSuffix+".hpp\"");

	// Generate the header common to all output files.
	genHeader(outputFile);

	// Find the name of the super class
	String sup=null;
	if ( grammar.superClass!=null ) {
		sup = grammar.superClass;
		println("\n// Include correct superclass header with a header statement for example:");
		println("// header \"post_include_hpp\" {");
		println("// #include \""+sup+".hpp\"");
		println("// }");
		println("// Or....");
		println("// header {");
		println("// #include \""+sup+".hpp\"");
		println("// }\n");
	}
	else {
		sup = grammar.getSuperClass();
		if (sup.lastIndexOf('.') != -1)
			sup = sup.substring(sup.lastIndexOf('.')+1);
		println("#include ");
		sup = namespaceAntlr + sup;
	}
	println("");

	// Generate header for the parser
	//
	// Do not use printAction because we assume tabs==0
	printHeaderAction(postIncludeHpp);

	if (nameSpace != null)
		nameSpace.emitDeclarations(currentOutput);

	printHeaderAction("");

	// print javadoc comment if any
	if ( grammar.comment!=null ) {
		_println(grammar.comment);
	}

	// Generate parser class definition
	print("class CUSTOM_API " + grammar.getClassName() + " : public "+sup);
	println(", public " + grammar.tokenManager.getName() + TokenTypesFileSuffix);

	Token tsuffix = (Token)grammar.options.get("classHeaderSuffix");
	if ( tsuffix != null ) {
		String suffix = StringUtils.stripFrontBack(tsuffix.getText(),"\"","\"");
		if ( suffix != null ) {
			print(", "+suffix);  // must be an interface name for Java
		}
	}
	println("{");

	// Generate user-defined parser class members
	if (grammar.classMemberAction != null) {
		genLineNo(grammar.classMemberAction.getLine());
		print(
			processActionForSpecialSymbols(grammar.classMemberAction.getText(),
													 grammar.classMemberAction.getLine(),
													 currentRule, null)
			);
		genLineNo2();
	}

	// Generate default parser class constructor
	tabs=0;
	println("public:");

	if( noConstructors ) {
		println("#if 0");
		println("// constructor creation turned of with 'noConstructor' option");
	}
	tabs=1;
	println(grammar.getClassName() + "();");
	if( noConstructors ) {
		tabs = 0;
		println("#endif");
		tabs = 1;
	}

	// Generate declaration for the initializeFactory method
	println("static void initializeASTFactory( "+namespaceAntlr+"ASTFactory& factory );");

	println("int getNumTokens() const");
	println("{");
	tabs++;
	println("return "+grammar.getClassName()+"::NUM_TOKENS;");
	tabs--;
	println("}");
	println("const char* getTokenName( int type ) const");
	println("{");
	tabs++;
	println("if( type > getNumTokens() ) return 0;");
	println("return "+grammar.getClassName()+"::tokenNames[type];");
	tabs--;
	println("}");
	println("const char* const* getTokenNames() const");
	println("{");
	tabs++;
	println("return "+grammar.getClassName()+"::tokenNames;");
	tabs--;
	println("}");

	// Generate code for each rule in the grammar
	Enumeration ids = grammar.rules.elements();
	// NOTE(review): ruleNameInits appears to be written but never read within
	// this method as visible here — likely dead code; confirm before removing.
	String ruleNameInits = "";
	while ( ids.hasMoreElements() ) {
		GrammarSymbol sym = (GrammarSymbol) ids.nextElement();
		if ( sym instanceof RuleSymbol) {
			RuleSymbol rs = (RuleSymbol)sym;
			genRuleHeader(rs, rs.references.size()==0);
		}
		exitIfError();
	}

	tabs = 0;
	println("public:");
	tabs = 1;
	println(namespaceAntlr+"RefAST getAST()");
	println("{");
	if( usingCustomAST ) {
		tabs++;
		println("return "+namespaceAntlr+"RefAST(returnAST);");
		tabs--;
	}
	else {
		tabs++;
		println("return returnAST;");
		tabs--;
	}
	println("}");
	println("");

	tabs=0;
	println("protected:");
	tabs=1;
	println(labeledElementASTType+" returnAST;");
	println(labeledElementASTType+" _retTree;");

	// Make the rest private
	tabs=0;
	println("private:");
	tabs=1;

	// Generate the token names
	println("static const char* tokenNames[];");

	// and how many there are of them
	_println("#ifndef NO_STATIC_CONSTS");
	println("static const int NUM_TOKENS = "+grammar.tokenManager.getVocabulary().size()+";");
	_println("#else");
	println("enum {");
	println("\tNUM_TOKENS = "+grammar.tokenManager.getVocabulary().size());
	println("};");
	_println("#endif");

	// Generate the bitsets used throughout the grammar
	genBitsetsHeader(bitsetsUsed, grammar.tokenManager.maxTokenType());

	// Close class definition
	tabs=0;
	println("};");
	println("");
	if (nameSpace != null)
		nameSpace.emitClosures(currentOutput);

	// Generate a guard wrapper
	println("#endif /*INC_"+grammar.getClassName()+"_hpp_*/");

	// Close the parser output stream
	currentOutput.close();
	currentOutput = null;
}

/// for convenience
protected void genASTDeclaration( AlternativeElement el )
{
	genASTDeclaration( el, labeledElementASTType );
}

/// for convenience
protected void genASTDeclaration( AlternativeElement el, String node_type )
{
	genASTDeclaration( el, el.getLabel(), node_type );
}

/// Generate (if not already done) a declaration for the AST for el.
protected void genASTDeclaration( AlternativeElement el, String var_name, String node_type )
{
	// already declared?
if( declaredASTVariables.contains(el) ) return; String init = labeledElementASTInit; if (el instanceof GrammarAtom && ((GrammarAtom)el).getASTNodeType() != null ) init = "Ref"+((GrammarAtom)el).getASTNodeType()+"("+labeledElementASTInit+")"; // emit code println(node_type+" " + var_name + "_AST = "+init+";"); // mark as declared declaredASTVariables.put(el, el); } private void genLiteralsTest() { println("_ttype = testLiteralsTable(_ttype);"); } private void genLiteralsTestForPartialToken() { println("_ttype = testLiteralsTable(text.substr(_begin, text.length()-_begin),_ttype);"); } protected void genMatch(BitSet b) { } protected void genMatch(GrammarAtom atom) { if ( atom instanceof StringLiteralElement ) { if ( grammar instanceof LexerGrammar ) { genMatchUsingAtomText(atom); } else { genMatchUsingAtomTokenType(atom); } } else if ( atom instanceof CharLiteralElement ) { // Lexer case is handled in the gen( CharLiteralElement x ) antlrTool.error("cannot ref character literals in grammar: "+atom); } else if ( atom instanceof TokenRefElement ) { genMatchUsingAtomTokenType(atom); } else if (atom instanceof WildcardElement) { gen((WildcardElement)atom); } } protected void genMatchUsingAtomText(GrammarAtom atom) { // match() for trees needs the _t cursor String astArgs=""; if (grammar instanceof TreeWalkerGrammar) { if( usingCustomAST ) astArgs=namespaceAntlr+"RefAST"+"(_t),"; else astArgs="_t,"; } // if in lexer and ! on element, save buffer index to kill later if ( grammar instanceof LexerGrammar && (!saveText||atom.getAutoGenType()==GrammarElement.AUTO_GEN_BANG) ) { println("_saveIndex = text.length();"); } print(atom.not ? 
"matchNot(" : "match("); _print(astArgs); // print out what to match if (atom.atomText.equals("EOF")) { // horrible hack to handle EOF case _print(namespaceAntlr+"Token::EOF_TYPE"); } else { if( grammar instanceof LexerGrammar ) // lexer needs special handling { String cppstring = convertJavaToCppString( atom.atomText, false ); _print(cppstring); } else _print(atom.atomText); } _println(");"); if ( grammar instanceof LexerGrammar && (!saveText||atom.getAutoGenType()==GrammarElement.AUTO_GEN_BANG) ) { println("text.erase(_saveIndex);"); // kill text atom put in buffer } } protected void genMatchUsingAtomTokenType(GrammarAtom atom) { // match() for trees needs the _t cursor String astArgs=""; if (grammar instanceof TreeWalkerGrammar) { if( usingCustomAST ) astArgs=namespaceAntlr+"RefAST"+"(_t),"; else astArgs="_t,"; } // If the literal can be mangled, generate the symbolic constant instead String s = astArgs + getValueString(atom.getType()); // matching println( (atom.not ? "matchNot(" : "match(") + s + ");"); } /** Generate the nextToken() rule. * nextToken() is a synthetic lexer rule that is the implicit OR of all * user-defined lexer rules. * @param RuleBlock */ public void genNextToken() { // Are there any public rules? If not, then just generate a // fake nextToken(). 
boolean hasPublicRules = false; for (int i = 0; i < grammar.rules.size(); i++) { RuleSymbol rs = (RuleSymbol)grammar.rules.elementAt(i); if ( rs.isDefined() && rs.access.equals("public") ) { hasPublicRules = true; break; } } if (!hasPublicRules) { println(""); println(namespaceAntlr+"RefToken "+grammar.getClassName()+"::nextToken() { return "+namespaceAntlr+"RefToken(new "+namespaceAntlr+"CommonToken("+namespaceAntlr+"Token::EOF_TYPE, \"\")); }"); println(""); return; } // Create the synthesized nextToken() rule RuleBlock nextTokenBlk = MakeGrammar.createNextTokenRule(grammar, grammar.rules, "nextToken"); // Define the nextToken rule symbol RuleSymbol nextTokenRs = new RuleSymbol("mnextToken"); nextTokenRs.setDefined(); nextTokenRs.setBlock(nextTokenBlk); nextTokenRs.access = "private"; grammar.define(nextTokenRs); // Analyze the nextToken rule boolean ok = grammar.theLLkAnalyzer.deterministic(nextTokenBlk); // Generate the next token rule String filterRule=null; if ( ((LexerGrammar)grammar).filterMode ) { filterRule = ((LexerGrammar)grammar).filterRule; } println(""); println(namespaceAntlr+"RefToken "+grammar.getClassName()+"::nextToken()"); println("{"); tabs++; println(namespaceAntlr+"RefToken theRetToken;"); println("for (;;) {"); tabs++; println(namespaceAntlr+"RefToken theRetToken;"); println("int _ttype = "+namespaceAntlr+"Token::INVALID_TYPE;"); if ( ((LexerGrammar)grammar).filterMode ) { println("setCommitToPath(false);"); if ( filterRule!=null ) { // Here's a good place to ensure that the filter rule actually exists if ( !grammar.isDefined(CodeGenerator.encodeLexerRuleName(filterRule)) ) { grammar.antlrTool.error("Filter rule "+filterRule+" does not exist in this lexer"); } else { RuleSymbol rs = (RuleSymbol)grammar.getSymbol(CodeGenerator.encodeLexerRuleName(filterRule)); if ( !rs.isDefined() ) { grammar.antlrTool.error("Filter rule "+filterRule+" does not exist in this lexer"); } else if ( rs.access.equals("public") ) { grammar.antlrTool.error("Filter 
rule "+filterRule+" must be protected"); } } println("int _m;"); println("_m = mark();"); } } println("resetText();"); // Generate try around whole thing to trap scanner errors println("try { // for lexical and char stream error handling"); tabs++; // Test for public lexical rules with empty paths for (int i=0; igetType();"); if ( ((LexerGrammar)grammar).getTestLiterals()) { genLiteralsTest(); } // return token created by rule reference in switch println("_returnToken->setType(_ttype);"); println("return _returnToken;"); // Close try block tabs--; println("}"); println("catch ("+namespaceAntlr+"RecognitionException& e) {"); tabs++; if ( ((LexerGrammar)grammar).filterMode ) { if ( filterRule==null ) { println("if ( !getCommitToPath() ) {"); tabs++; println("consume();"); println("goto tryAgain;"); tabs--; println("}"); } else { println("if ( !getCommitToPath() ) {"); tabs++; println("rewind(_m);"); println("resetText();"); println("try {m"+filterRule+"(false);}"); println("catch("+namespaceAntlr+"RecognitionException& ee) {"); println(" // horrendous failure: error in filter rule"); println(" reportError(ee);"); println(" consume();"); println("}"); // println("goto tryAgain;"); tabs--; println("}"); println("else"); } } if ( nextTokenBlk.getDefaultErrorHandler() ) { println("{"); tabs++; println("reportError(e);"); println("consume();"); tabs--; println("}"); } else { // pass on to invoking routine tabs++; println("throw "+namespaceAntlr+"TokenStreamRecognitionException(e);"); tabs--; } // close CharStreamException try tabs--; println("}"); println("catch ("+namespaceAntlr+"CharStreamIOException& csie) {"); println("\tthrow "+namespaceAntlr+"TokenStreamIOException(csie.io);"); println("}"); println("catch ("+namespaceAntlr+"CharStreamException& cse) {"); println("\tthrow "+namespaceAntlr+"TokenStreamException(cse.getMessage());"); println("}"); // close for-loop _println("tryAgain:;"); tabs--; println("}"); // close method nextToken tabs--; println("}"); 
println(""); } /** Gen a named rule block. * ASTs are generated for each element of an alternative unless * the rule or the alternative have a '!' modifier. * * If an alternative defeats the default tree construction, it * must set _AST to the root of the returned AST. * * Each alternative that does automatic tree construction, builds * up root and child list pointers in an ASTPair structure. * * A rule finishes by setting the returnAST variable from the * ASTPair. * * @param rule The name of the rule to generate * @param startSymbol true if the rule is a start symbol (i.e., not referenced elsewhere) */ public void genRule(RuleSymbol s, boolean startSymbol, int ruleNum, String prefix) { // tabs=1; // JavaCodeGenerator needs this if ( DEBUG_CODE_GENERATOR || DEBUG_CPP_CODE_GENERATOR ) System.out.println("genRule("+ s.getId() +")"); if ( !s.isDefined() ) { antlrTool.error("undefined rule: "+ s.getId()); return; } // Generate rule return type, name, arguments RuleBlock rblk = s.getBlock(); currentRule = rblk; currentASTResult = s.getId(); // clear list of declared ast variables.. 
declaredASTVariables.clear(); // Save the AST generation state, and set it to that of the rule boolean savegenAST = genAST; genAST = genAST && rblk.getAutoGen(); // boolean oldsaveTest = saveText; saveText = rblk.getAutoGen(); // print javadoc comment if any if ( s.comment!=null ) { _println(s.comment); } // Gen method return type (note lexer return action set at rule creation) if (rblk.returnAction != null) { // Has specified return value _print(extractTypeOfAction(rblk.returnAction, rblk.getLine(), rblk.getColumn()) + " "); } else { // No specified return value _print("void "); } // Gen method name _print(prefix + s.getId() + "("); // Additional rule parameters common to all rules for this grammar _print(commonExtraParams); if (commonExtraParams.length() != 0 && rblk.argAction != null ) { _print(","); } // Gen arguments if (rblk.argAction != null) { // Has specified arguments _println(""); // FIXME: make argAction also a token? Hmmmmm // genLineNo(rblk); tabs++; // Process arguments for default arguments // newer gcc's don't accept these in two places (header/cpp) // // Old appraoch with StringBuffer gave trouble with gcj. // // RK: Actually this breaks with string default arguments containing // a comma's or equal signs. Then again the old StringBuffer method // suffered from the same. String oldarg = rblk.argAction; String newarg = ""; String comma = ""; int eqpos = oldarg.indexOf( '=' ); if( eqpos != -1 ) { int cmpos = 0; while( cmpos != -1 && eqpos != -1 ) { newarg = newarg + comma + oldarg.substring( 0, eqpos ).trim(); comma = ", "; cmpos = oldarg.indexOf( ',', eqpos ); if( cmpos != -1 ) { // cut off part we just handled oldarg = oldarg.substring( cmpos+1 ).trim(); eqpos = oldarg.indexOf( '=' ); if( eqpos == -1 ) newarg = newarg+comma+oldarg; } } } else newarg = oldarg; println( newarg ); // println(rblk.argAction); tabs--; print(") "); // genLineNo2(); // gcc gives error on the brace... 
hope it works for the others too } else { // No specified arguments _print(") "); } _println("{"); tabs++; if (grammar.traceRules) { if ( grammar instanceof TreeWalkerGrammar ) { if ( usingCustomAST ) println("Tracer traceInOut(this,\""+ s.getId() +"\","+namespaceAntlr+"RefAST"+"(_t));"); else println("Tracer traceInOut(this,\""+ s.getId() +"\",_t);"); } else { println("Tracer traceInOut(this, \""+ s.getId() +"\");"); } } // Convert return action to variable declaration if (rblk.returnAction != null) { genLineNo(rblk); println(rblk.returnAction + ";"); genLineNo2(); } // print out definitions needed by rules for various grammar types if (!commonLocalVars.equals("")) println(commonLocalVars); if ( grammar instanceof LexerGrammar ) { // RK: why is this here? It seems not supported in the rest of the // tool. // lexer rule default return value is the rule's token name // This is a horrible hack to support the built-in EOF lexer rule. if (s.getId().equals("mEOF")) println("_ttype = "+namespaceAntlr+"Token::EOF_TYPE;"); else println("_ttype = "+ s.getId().substring(1)+";"); println(namespaceStd+"string::size_type _saveIndex;"); // used for element! (so we can kill text matched for element) /* println("boolean old_saveConsumedInput=saveConsumedInput;"); if ( !rblk.getAutoGen() ) { // turn off "save input" if ! 
on rule println("saveConsumedInput=false;"); } */ } // if debugging, write code to mark entry to the rule if ( grammar.debuggingOutput) if (grammar instanceof ParserGrammar) println("fireEnterRule(" + ruleNum + ",0);"); else if (grammar instanceof LexerGrammar) println("fireEnterRule(" + ruleNum + ",_ttype);"); // Generate trace code if desired // if ( grammar.debuggingOutput || grammar.traceRules) { // println("try { // debugging"); // tabs++; // } // Initialize AST variables if (grammar instanceof TreeWalkerGrammar) { // "Input" value for rule // println(labeledElementASTType+" " + s.getId() + "_AST_in = "+labeledElementASTType+"(_t);"); println(labeledElementASTType+" " + s.getId() + "_AST_in = (_t == ASTNULL) ? "+labeledElementASTInit+" : _t;"); } if (grammar.buildAST) { // Parser member used to pass AST returns from rule invocations println("returnAST = "+labeledElementASTInit+";"); // Tracks AST construction println(namespaceAntlr+"ASTPair currentAST;"); // = new ASTPair();"); // User-settable return value for rule. 
println(labeledElementASTType+" " + s.getId() + "_AST = "+labeledElementASTInit+";"); } genBlockPreamble(rblk); genBlockInitAction(rblk); println(""); // Search for an unlabeled exception specification attached to the rule ExceptionSpec unlabeledUserSpec = rblk.findExceptionSpec(""); // Generate try block around the entire rule for error handling if (unlabeledUserSpec != null || rblk.getDefaultErrorHandler() ) { println("try { // for error handling"); tabs++; } // Generate the alternatives if ( rblk.alternatives.size()==1 ) { // One alternative -- use simple form Alternative alt = rblk.getAlternativeAt(0); String pred = alt.semPred; if ( pred!=null ) genSemPred(pred, currentRule.line); if (alt.synPred != null) { antlrTool.warning( "Syntactic predicate ignored for single alternative", grammar.getFilename(), alt.synPred.getLine(), alt.synPred.getColumn() ); } genAlt(alt, rblk); } else { // Multiple alternatives -- generate complex form boolean ok = grammar.theLLkAnalyzer.deterministic(rblk); CppBlockFinishingInfo howToFinish = genCommonBlock(rblk, false); genBlockFinish(howToFinish, throwNoViable); } // Generate catch phrase for error handling if (unlabeledUserSpec != null || rblk.getDefaultErrorHandler() ) { // Close the try block tabs--; println("}"); } // Generate user-defined or default catch phrases if (unlabeledUserSpec != null) { genErrorHandler(unlabeledUserSpec); } else if (rblk.getDefaultErrorHandler()) { // Generate default catch phrase println("catch (" + exceptionThrown + "& ex) {"); tabs++; // Generate code to handle error if not guessing if (grammar.hasSyntacticPredicate) { println("if( inputState->guessing == 0 ) {"); tabs++; } println("reportError(ex);"); if ( !(grammar instanceof TreeWalkerGrammar) ) { // Generate code to consume until token in k==1 follow set Lookahead follow = grammar.theLLkAnalyzer.FOLLOW(1, rblk.endNode); String followSetName = getBitsetName(markBitsetForGen(follow.fset)); println("recover(ex," + followSetName + ");"); } else { 
// Just consume one token println("if ( _t != "+labeledElementASTInit+" )"); tabs++; println("_t = _t->getNextSibling();"); tabs--; } if (grammar.hasSyntacticPredicate) { tabs--; // When guessing, rethrow exception println("} else {"); tabs++; println("throw;"); tabs--; println("}"); } // Close catch phrase tabs--; println("}"); } // Squirrel away the AST "return" value if (grammar.buildAST) { println("returnAST = " + s.getId() + "_AST;"); } // Set return tree value for tree walkers if ( grammar instanceof TreeWalkerGrammar ) { println("_retTree = _t;"); } // Generate literals test for lexer rules so marked if (rblk.getTestLiterals()) { if ( s.access.equals("protected") ) { genLiteralsTestForPartialToken(); } else { genLiteralsTest(); } } // if doing a lexer rule, dump code to create token if necessary if ( grammar instanceof LexerGrammar ) { println("if ( _createToken && _token=="+namespaceAntlr+"nullToken && _ttype!="+namespaceAntlr+"Token::SKIP ) {"); println(" _token = makeToken(_ttype);"); println(" _token->setText(text.substr(_begin, text.length()-_begin));"); println("}"); println("_returnToken = _token;"); // It should be easy for an optimizing compiler to realize this does nothing // but it avoids the warning about the variable being unused. 
println("_saveIndex=0;");
}

// Gen the return statement if there is one (lexer has hard-wired return action)
if (rblk.returnAction != null) {
    println("return " + extractIdOfAction(rblk.returnAction, rblk.getLine(), rblk.getColumn()) + ";");
}

// if ( grammar.debuggingOutput || grammar.traceRules) {
////     tabs--;
////     println("} finally { // debugging");
////     tabs++;
//
//     // Generate trace code if desired
//     if ( grammar.debuggingOutput)
//         if (grammar instanceof ParserGrammar)
//             println("fireExitRule(" + ruleNum + ",0);");
//         else if (grammar instanceof LexerGrammar)
//             println("fireExitRule(" + ruleNum + ",_ttype);");
//
////     if (grammar.traceRules) {
////         if ( grammar instanceof TreeWalkerGrammar ) {
////             println("traceOut(\""+ s.getId() +"\",_t);");
////         }
////         else {
////             println("traceOut(\""+ s.getId() +"\");");
////         }
////     }
////
////     tabs--;
////     println("}");
// }
tabs--;
println("}");
println("");

// Restore the AST generation state
genAST = savegenAST;

// restore char save state
// saveText = oldsaveTest;
}

/** Emit the declaration (prototype) for one rule into the generated
 * C++ header: access specifier, return type, name and parameter list.
 * The matching definition is generated separately by genRule.
 * @param s The rule symbol to declare
 * @param startSymbol unused here; indicates whether this is the start rule
 */
public void genRuleHeader(RuleSymbol s, boolean startSymbol) {
    tabs=1;
    if ( DEBUG_CODE_GENERATOR || DEBUG_CPP_CODE_GENERATOR ) System.out.println("genRuleHeader("+ s.getId() +")");
    if ( !s.isDefined() ) {
        antlrTool.error("undefined rule: "+ s.getId());
        return;
    }

    // Generate rule return type, name, arguments
    RuleBlock rblk = s.getBlock();

    currentRule = rblk;
    currentASTResult = s.getId();

    // Save the AST generation state, and set it to that of the rule
    boolean savegenAST = genAST;
    genAST = genAST && rblk.getAutoGen();

    // boolean oldsaveTest = saveText;
    saveText = rblk.getAutoGen();

    // Gen method access
    print(s.access + ": ");

    // Gen method return type (note lexer return action set at rule creation)
    if (rblk.returnAction != null) {
        // Has specified return value
        _print(extractTypeOfAction(rblk.returnAction, rblk.getLine(), rblk.getColumn()) + " ");
    }
    else {
        // No specified return value
        _print("void ");
    }

    // Gen method name
    _print(s.getId() + "(");

    // Additional rule parameters common to all rules for this grammar
    _print(commonExtraParams);
    if (commonExtraParams.length() != 0 && rblk.argAction != null ) {
        _print(",");
    }

    // Gen arguments
    if (rblk.argAction != null) {
        // Has specified arguments
        _println("");
        tabs++;
        println(rblk.argAction);
        tabs--;
        print(")");
    }
    else {
        // No specified arguments
        _print(")");
    }
    _println(";");

    tabs--;

    // Restore the AST generation state
    genAST = savegenAST;

    // restore char save state
    // saveText = oldsaveTest;
}

/** Emit a call to another rule from within the current rule's body,
 * including the common extra arguments and any user-supplied argument
 * action. (Continues on the next physical line of the dump.)
 */
private void GenRuleInvocation(RuleRefElement rr) {
    // dump rule name
    _print(rr.targetRule + "(");

    // lexers must tell rule if it should set _returnToken
    if ( grammar instanceof LexerGrammar ) {
        // if labeled, could access Token, so tell rule to create
        if ( rr.getLabel() != null ) {
            _print("true");
        }
        else {
            _print("false");
        }
        if (commonExtraArgs.length() != 0 || rr.args!=null ) {
            _print(",");
        }
    }

    // Extra arguments common to all rules for this grammar
    _print(commonExtraArgs);
    if (commonExtraArgs.length() != 0 && rr.args!=null ) {
        _print(",");
    }

    // Process arguments to method, if any
    RuleSymbol rs = (RuleSymbol)grammar.getSymbol(rr.targetRule);
    if (rr.args != null) {
        // When not guessing, execute user arg action
        ActionTransInfo tInfo = new ActionTransInfo();
        // FIXME: fix line number passed to processActionForTreeSpecifiers here..
        // this one might be a bit off..
String args = processActionForSpecialSymbols(rr.args, rr.line, currentRule, tInfo);
if ( tInfo.assignToRoot || tInfo.refRuleRoot!=null ) {
    antlrTool.error("Arguments of rule reference '" + rr.targetRule +
                    "' cannot set or ref #"+
                    currentRule.getRuleName()+" on line "+rr.getLine());
}
_print(args);

// Warn if the rule accepts no arguments
if (rs.block.argAction == null) {
    antlrTool.warning("Rule '" + rr.targetRule + "' accepts no arguments",
                      grammar.getFilename(),
                      rr.getLine(), rr.getColumn());
}
}
else {
    // For C++, no warning if rule has parameters, because there may be default
    // values for all of the parameters
    //if (rs.block.argAction != null) {
    //    tool.warning("Missing parameters on reference to rule "+rr.targetRule, rr.getLine());
    //}
}
_println(");");

// move down to the first child while parsing
if ( grammar instanceof TreeWalkerGrammar ) {
    println("_t = _retTree;");
}
}

/** Emit a validating semantic-predicate check into the generated C++:
 * an "if (!(pred)) throw SemanticException(...)" statement. $/# references
 * in the predicate are translated first.
 * @param pred The predicate source text from the grammar
 * @param line The grammar line of the predicate, used for action translation
 */
protected void genSemPred(String pred, int line) {
    // translate $ and # references
    ActionTransInfo tInfo = new ActionTransInfo();
    pred = processActionForSpecialSymbols(pred, line, currentRule, tInfo);
    // ignore translation info...we don't need to do anything with it.
    String escapedPred = charFormatter.escapeString(pred);

    // if debugging, wrap the semantic predicate evaluation in a method
    // that can tell SemanticPredicateListeners the result
    if (grammar.debuggingOutput && ((grammar instanceof ParserGrammar) || (grammar instanceof LexerGrammar)))
        pred = "fireSemanticPredicateEvaluated(antlr.debug.SemanticPredicateEvent.VALIDATING," //FIXME
            + addSemPred(escapedPred) + "," + pred + ")";
    println("if (!(" + pred + "))");
    tabs++;
    println("throw "+namespaceAntlr+"SemanticException(\"" + escapedPred + "\");");
    tabs--;
}

/** Write an array of Strings which are the semantic predicate
 * expressions.
The debugger will reference them by number only
 */
protected void genSemPredMap(String prefix) {
    Enumeration e = semPreds.elements();
    println("const char* " + prefix + "_semPredNames[] = {");
    tabs++;
    while(e.hasMoreElements())
        println("\""+e.nextElement()+"\",");
    println("0");
    tabs--;
    println("};");
}

/** Emit the C++ scaffolding for a syntactic predicate: save input state,
 * speculatively parse the predicate block under inputState->guessing,
 * record success/failure in a synPredMatched&lt;ID&gt; flag, and restore the
 * input state afterwards. The caller-visible result is the generated
 * "if ( synPredMatched&lt;ID&gt; ) {" test this method ends with.
 * @param blk The syntactic predicate block
 * @param lookaheadExpr The lookahead test guarding the speculative parse
 */
protected void genSynPred(SynPredBlock blk, String lookaheadExpr) {
    if ( DEBUG_CODE_GENERATOR || DEBUG_CPP_CODE_GENERATOR ) System.out.println("gen=>("+blk+")");

    // Dump synpred result variable
    println("bool synPredMatched" + blk.ID + " = false;");
    // Gen normal lookahead test
    println("if (" + lookaheadExpr + ") {");
    tabs++;

    // Save input state
    if ( grammar instanceof TreeWalkerGrammar ) {
        println(labeledElementType + " __t" + blk.ID + " = _t;");
    }
    else {
        println("int _m" + blk.ID + " = mark();");
    }

    // Once inside the try, assume synpred works unless exception caught
    println("synPredMatched" + blk.ID + " = true;");
    println("inputState->guessing++;");

    // if debugging, tell listeners that a synpred has started
    if (grammar.debuggingOutput && ((grammar instanceof ParserGrammar) ||
                                    (grammar instanceof LexerGrammar))) {
        println("fireSyntacticPredicateStarted();");
    }

    syntacticPredLevel++;
    println("try {");
    tabs++;
    gen((AlternativeBlock)blk);		// gen code to test predicate
    tabs--;
    //println("System.out.println(\"pred "+blk+" succeeded\");");
    println("}");
    println("catch (" + exceptionThrown + "& pe) {");
    tabs++;
    println("synPredMatched"+blk.ID+" = false;");
    //println("System.out.println(\"pred "+blk+" failed\");");
    tabs--;
    println("}");

    // Restore input state
    if ( grammar instanceof TreeWalkerGrammar ) {
        println("_t = __t"+blk.ID+";");
    }
    else {
        println("rewind(_m"+blk.ID+");");
    }

    println("inputState->guessing--;");

    // if debugging, tell listeners how the synpred turned out
    if (grammar.debuggingOutput && ((grammar instanceof ParserGrammar) ||
                                    (grammar instanceof LexerGrammar))) {
        println("if (synPredMatched" + blk.ID +")");
        println("  fireSyntacticPredicateSucceeded();");
        println("else");
        println("  fireSyntacticPredicateFailed();");
    }

    syntacticPredLevel--;
    tabs--;

    // Close lookahead test
    println("}");

    // Test synpred result
    println("if ( synPredMatched"+blk.ID+" ) {");
}

/** Generate a static array containing the names of the tokens,
 * indexed by the token type values.  This static array is used
 * to format error messages so that the token identifers or literal
 * strings are displayed instead of the token numbers.
 *
 * If a lexical rule has a paraphrase, use it rather than the
 * token label.
 */
public void genTokenStrings(String prefix) {
    // Generate a string for each token.  This creates a static
    // array of Strings indexed by token type.
    //
    println("");
    println("const char* " + prefix + "tokenNames[] = {");
    tabs++;

    // Walk the token vocabulary and generate a Vector of strings
    // from the tokens.
    Vector v = grammar.tokenManager.getVocabulary();
    for (int i = 0; i < v.size(); i++) {
        String s = (String)v.elementAt(i);
        if (s == null) {
            s = "<"+String.valueOf(i)+">";
        }
        if ( !s.startsWith("\"") && !s.startsWith("<") ) {
            TokenSymbol ts = (TokenSymbol)grammar.tokenManager.getTokenSymbol(s);
            if ( ts!=null && ts.getParaphrase()!=null ) {
                s = StringUtils.stripFrontBack(ts.getParaphrase(), "\"", "\"");
            }
        }
        print(charFormatter.literalString(s));
        _println(",");
    }
    println("0");

    // Close the string array initializer
    tabs--;
    println("};");
}

/** Generate the token types C++ file */
protected void genTokenTypes(TokenManager tm) throws IOException {
    // Open the token output header file and set the currentOutput stream
    outputFile = tm.getName() + TokenTypesFileSuffix+".hpp";
    outputLine = 1;
    currentOutput = antlrTool.openOutputFile(outputFile);
    //SAS: changed for proper text file io

    tabs = 0;

    // Generate a guard wrapper
    println("#ifndef INC_"+tm.getName()+TokenTypesFileSuffix+"_hpp_");
    println("#define INC_"+tm.getName()+TokenTypesFileSuffix+"_hpp_");
    println("");

    if (nameSpace != null)
        nameSpace.emitDeclarations(currentOutput);

    // Generate the header common to all C++ files
    genHeader(outputFile);

    // Encapsulate the definitions in an interface.  This can be done
    // because they are all constants.
    println("");
    println("#ifndef CUSTOM_API");
    println("# define CUSTOM_API");
    println("#endif");
    println("");
    // In the case that the .hpp is included from C source (flexLexer!)
    // we just turn things into a plain enum
    println("#ifdef __cplusplus");
    println("struct CUSTOM_API " + tm.getName() + TokenTypesFileSuffix+" {");
    println("#endif");
    tabs++;
    println("enum {");
    tabs++;

    // Generate a definition for each token type
    Vector v = tm.getVocabulary();

    // Do special tokens manually
    println("EOF_ = " + Token.EOF_TYPE + ",");

    // Move the other special token to the end, so we can solve
    // the superfluous comma problem easily
    for (int i = Token.MIN_USER_TYPE; i < v.size(); i++) {
        String s = (String)v.elementAt(i);
        if (s != null) {
            if ( s.startsWith("\"") ) {
                // a string literal
                StringLiteralSymbol sl = (StringLiteralSymbol)tm.getTokenSymbol(s);
                if ( sl==null ) {
                    antlrTool.panic("String literal "+s+" not in symbol table");
                }
                else if ( sl.label != null ) {
                    println(sl.label + " = " + i + ",");
                }
                else {
                    String mangledName = mangleLiteral(s);
                    if (mangledName != null) {
                        // We were able to create a meaningful mangled token name
                        println(mangledName + " = " + i + ",");
                        // if no label specified, make the label equal to the mangled name
                        sl.label = mangledName;
                    }
                    else {
                        println("// " + s + " = " + i);
                    }
                }
            }
            else if ( !s.startsWith("<") ) {
                println(s + " = " + i + ",");
            }
        }
    }

    // Moved from above
    println("NULL_TREE_LOOKAHEAD = " + Token.NULL_TREE_LOOKAHEAD);

    // Close the enum
    tabs--;
    println("};");

    // Close the interface
    tabs--;
    println("#ifdef __cplusplus");
    println("};");
    println("#endif");

    if (nameSpace != null)
        nameSpace.emitClosures(currentOutput);

    // Generate a guard wrapper
    println("#endif /*INC_"+tm.getName()+TokenTypesFileSuffix+"_hpp_*/");

    // Close the tokens output file
    currentOutput.close();
    currentOutput = null;
    exitIfError();
}
/** Process a string for an simple expression for use in xx/action.g
 * it is used to cast simple tokens/references to the right type for
 * the generated language. Basically called for every element in
 * the vector to getASTCreateString(vector V)
 * @param str A String.
 */
public String processStringForASTConstructor( String str ) {
    // Only tree walkers / parsers using a custom AST type need the cast;
    // defined tokens already yield the right type.
    if( usingCustomAST &&
        ((grammar instanceof TreeWalkerGrammar) ||
         (grammar instanceof ParserGrammar))  &&
        !(grammar.tokenManager.tokenDefined(str) ) )
    {
        // System.out.println("processStringForASTConstructor: "+str+" with cast");
        return namespaceAntlr+"RefAST("+str+")";
    }
    else {
        // System.out.println("processStringForASTConstructor: "+str);
        return str;
    }
}

/** Get a string for an expression to generate creation of an AST subtree.
 * @param v A Vector of String, where each element is an expression
 * in the target language yielding an AST node.
 */
public String getASTCreateString(Vector v) {
    if (v.size() == 0) {
        return "";
    }
    StringBuffer buf = new StringBuffer();
    // the labeledElementASTType here can probably be a cast or nothing
    // in the case of ! usingCustomAST
    buf.append(labeledElementASTType+
               "(astFactory->make((new "+namespaceAntlr+
               "ASTArray("+v.size()+"))");
    for (int i = 0; i < v.size(); i++) {
        buf.append("->add("+ v.elementAt(i) + ")");
    }
    buf.append("))");
    return buf.toString();
}

/** Get a string for an expression to generate creating of an AST node
 * @param str The arguments to the AST constructor
 */
public String getASTCreateString(GrammarAtom atom, String str) {
    if ( atom!=null && atom.getASTNodeType() != null ) {
        // this atom is using a heterogeneous AST type. (and maybe a local
        // override we can't see at the TokenManager level)
        // make note of the factory needed to generate it..
        // later this is inserted into the initializeFactory method.
astTypes.ensureCapacity(atom.getType());
        // astTypes records, per token type, the AST node class the factory
        // must construct for it (used when initializing the AST factory).
        String type = (String)astTypes.elementAt(atom.getType());
        if( type == null )
            astTypes.setElementAt(atom.getASTNodeType(),atom.getType());
        else {
            // give a warning over action taken if the types are unequal
            if( ! atom.getASTNodeType().equals(type) ) {
                antlrTool.warning("Attempt to redefine AST type for "+atom.getText(),grammar.getFilename(),atom.getLine(),atom.getColumn());
                antlrTool.warning(" from \""+type+"\" to \""+atom.getASTNodeType()+"\" sticking to \""+type+"\"",grammar.getFilename(),atom.getLine(),atom.getColumn());
            }
            else
                astTypes.setElementAt(atom.getASTNodeType(),atom.getType());
        }
        // after above init the factory knows what to generate...
        return "astFactory->create("+str+")";
    }
    else {
        // FIXME: This is *SO* ugly! but it will have to do for now...
        // 2.7.2 will have better I hope
        // this is due to the usage of getASTCreateString from inside
        // actions/cpp/action.g
        boolean is_constructor = false;
        if( str.indexOf(',') != -1 )
            is_constructor = grammar.tokenManager.tokenDefined(str.substring(0,str.indexOf(',')));
        // System.out.println("getAstCreateString(as): "+str+" "+grammar.tokenManager.tokenDefined(str));
        if( usingCustomAST &&
            (grammar instanceof TreeWalkerGrammar) &&
            !(grammar.tokenManager.tokenDefined(str) ) &&
            !
is_constructor )
            return "astFactory->create("+namespaceAntlr+"RefAST("+str+"))";
        else
            return "astFactory->create("+str+")";
    }
}

/** Get a string for an expression to generate creating of an AST node
 * @param str The arguments to the AST constructor
 */
public String getASTCreateString(String str) {
    // System.out.println("getAstCreateString(str): "+str+" "+grammar.tokenManager.tokenDefined(str));
    if( usingCustomAST )
        return labeledElementASTType+"(astFactory->create("+namespaceAntlr+"RefAST("+str+")))";
    else
        return "astFactory->create("+str+")";
}

/** Build the C++ boolean expression testing k levels of lookahead:
 * one parenthesized term per depth, joined by "&amp;&amp;".
 * @param look Per-depth lookahead sets (index 1..k used)
 * @param k How many lookahead depths to test
 */
protected String getLookaheadTestExpression(Lookahead[] look, int k) {
    StringBuffer e = new StringBuffer(100);
    boolean first = true;

    e.append("(");
    for (int i = 1; i <= k; i++) {
        BitSet p = look[i].fset;
        if (!first) {
            e.append(") && (");
        }
        first = false;

        // Syn preds can yield (epsilon) lookahead.
        // There is no way to predict what that token would be.  Just
        // allow anything instead.
        if (look[i].containsEpsilon()) {
            e.append("true");
        }
        else {
            e.append(getLookaheadTestTerm(i, p));
        }
    }
    e.append(")");

    return e.toString();
}

/** Generate a lookahead test expression for an alternate.  This
 * will be a series of tests joined by '&&' and enclosed by '()',
 * the number of such tests being determined by the depth of the lookahead.
 */
protected String getLookaheadTestExpression(Alternative alt, int maxDepth) {
    int depth = alt.lookaheadDepth;
    if ( depth == GrammarAnalyzer.NONDETERMINISTIC ) {
        // if the decision is nondeterministic, do the best we can: LL(k)
        // any predicates that are around will be generated later.
        depth = grammar.maxk;
    }

    if ( maxDepth==0 ) {
        // empty lookahead can result from alt with sem pred
        // that can see end of token.  E.g., A : {pred}? ('a')? ;
        return "true";
    }

/*
boolean first = true;
for (int i=1; i<=depth && i<=maxDepth; i++) {
    BitSet p = alt.cache[i].fset;
    if (!first) {
        e.append(") && (");
    }
    first = false;

    // Syn preds can yield (epsilon) lookahead.
    // There is no way to predict what that token would be.
Just
    // allow anything instead.
    if ( alt.cache[i].containsEpsilon() ) {
        e.append("true");
    }
    else {
        e.append(getLookaheadTestTerm(i, p));
    }
}
e.append(")");
*/
    return "(" + getLookaheadTestExpression(alt.cache,depth) + ")";
}

/**Generate a depth==1 lookahead test expression given the BitSet.
 * This may be one of:
 * 1) a series of 'x==X||' tests
 * 2) a range test using >= && <= where possible,
 * 3) a bitset membership test for complex comparisons
 * @param k The lookahead level
 * @param p The lookahead set for level k
 */
protected String getLookaheadTestTerm(int k, BitSet p) {
    // Determine the name of the item to be compared
    String ts = lookaheadString(k);

    // Generate a range expression if possible
    int[] elems = p.toArray();
    if (elementsAreRange(elems)) {
        return getRangeExpression(k, elems);
    }

    // Generate a bitset membership test if possible
    StringBuffer e;
    int degree = p.degree();
    if ( degree == 0 ) {
        return "true";
    }
    if (degree >= bitsetTestThreshold) {
        int bitsetIdx = markBitsetForGen(p);
        return getBitsetName(bitsetIdx) + ".member(" + ts + ")";
    }

    // Otherwise, generate the long-winded series of "x==X||" tests
    e = new StringBuffer();
    for (int i = 0; i < elems.length; i++) {
        // Get the compared-to item (token or character value)
        String cs = getValueString(elems[i]);

        // Generate the element comparison
        if( i > 0 ) e.append(" || ");
        e.append(ts);
        e.append(" == ");
        e.append(cs);
    }
    return e.toString();
}

/** Return an expression for testing a contiguous range of elements
 * @param k The lookahead level
 * @param elems The elements representing the set, usually from BitSet.toArray().
 * @return String containing test expression.
 */
public String getRangeExpression(int k, int[] elems) {
    if (!elementsAreRange(elems)) {
        antlrTool.panic("getRangeExpression called with non-range");
    }
    int begin = elems[0];
    int end = elems[elems.length-1];
    return "(" + lookaheadString(k) + " >= " + getValueString(begin) + " && " +
        lookaheadString(k) + " <= " + getValueString(end) + ")";
}

/** getValueString: get a string representation of a token or char value
 * @param value The token or char value
 */
private String getValueString(int value) {
    String cs;
    if ( grammar instanceof LexerGrammar ) {
        cs = charFormatter.literalChar(value);
    }
    else {
        TokenSymbol ts = grammar.tokenManager.getTokenSymbolAt(value);
        if ( ts == null ) {
            return ""+value; // return token type as string
            // tool.panic("vocabulary for token type " + value + " is null");
        }
        String tId = ts.getId();
        if ( ts instanceof StringLiteralSymbol ) {
            // if string literal, use predefined label if any
            // if no predefined, try to mangle into LITERAL_xxx.
            // if can't mangle, use int value as last resort
            StringLiteralSymbol sl = (StringLiteralSymbol)ts;
            String label = sl.getLabel();
            if ( label!=null ) {
                cs = label;
            }
            else {
                cs = mangleLiteral(tId);
                if (cs == null) {
                    cs = String.valueOf(value);
                }
            }
        }
        else {
            if ( tId.equals("EOF") )
                cs = namespaceAntlr+"Token::EOF_TYPE";
            else
                cs = tId;
        }
    }
    return cs;
}

/**Is the lookahead for this alt empty? */
protected boolean lookaheadIsEmpty(Alternative alt, int maxDepth) {
    int depth = alt.lookaheadDepth;
    if ( depth == GrammarAnalyzer.NONDETERMINISTIC ) {
        depth = grammar.maxk;
    }
    for (int i=1; i<=depth && i<=maxDepth; i++) {
        BitSet p = alt.cache[i].fset;
        if (p.degree() != 0) {
            return false;
        }
    }
    return true;
}

/** The C++ expression naming the k-th lookahead item: tree walkers test
 * the current node's type, parsers/lexers use LA(k). */
private String lookaheadString(int k) {
    if (grammar instanceof TreeWalkerGrammar) {
        return "_t->getType()";
    }
    return "LA(" + k + ")";
}

/** Mangle a string literal into a meaningful token name.  This is
 * only possible for literals that are all characters.
The resulting
 * mangled literal name is literalsPrefix with the text of the literal
 * appended.
 * @return A string representing the mangled literal, or null if not possible.
 */
private String mangleLiteral(String s) {
    String mangled = antlrTool.literalsPrefix;
    // indices 1..length-2 skip the literal's surrounding quote characters
    for (int i = 1; i < s.length()-1; i++) {
        if (!Character.isLetter(s.charAt(i)) &&
            s.charAt(i) != '_') {
            return null;
        }
        mangled += s.charAt(i);
    }
    if ( antlrTool.upperCaseMangledLiterals ) {
        mangled = mangled.toUpperCase();
    }
    return mangled;
}

/** Map an identifier to its corresponding tree-node variable.
 * This is context-sensitive, depending on the rule and alternative
 * being generated
 * @param idParam The identifier name to map
 * @param transInfo Receives translation results; refRuleRoot is set when
 *                  the rule's own AST root is referenced (non-input form)
 * @return The mapped id (which may be the same as the input), or null if the mapping is invalid due to duplicates
 */
public String mapTreeId(String idParam, ActionTransInfo transInfo) {
    // if not in an action of a rule, nothing to map.
    if ( currentRule==null ) return idParam;

    // System.out.print("mapTreeId: "+idParam+" "+currentRule.getRuleName()+" ");
    boolean in_var = false;
    String id = idParam;
    if (grammar instanceof TreeWalkerGrammar) {
        // RK: hmmm this seems odd. If buildAST is false it translates
        // #rulename_in to 'rulename_in' else to 'rulename_AST_in' which indeed
        // exists. disabling for now.. and hope it doesn't blow up somewhere.
        if ( !grammar.buildAST ) {
            in_var = true;
            // System.out.println("in_var1");
        }
        // If the id ends with "_in", then map it to the input variable
        // else
        if (id.length() > 3 && id.lastIndexOf("_in") == id.length()-3) {
            // Strip off the "_in"
            id = id.substring(0, id.length()-3);
            in_var = true;
            // System.out.println("in_var2");
        }
    }
    // System.out.print(in_var+"\t");

    // Check the rule labels.  If id is a label, then the output
    // variable is label_AST, and the input variable is plain label.
    for (int i = 0; i < currentRule.labeledElements.size(); i++) {
        AlternativeElement elt = (AlternativeElement)currentRule.labeledElements.elementAt(i);
        if (elt.getLabel().equals(id)) {
            // if( in_var )
            //	System.out.println("returning (vec) "+(in_var ? id : id + "_AST"));
            return in_var ? id : id + "_AST";
        }
    }

    // Failing that, check the id-to-variable map for the alternative.
    // If the id is in the map, then output variable is the name in the
    // map, and input variable is name_in
    String s = (String)treeVariableMap.get(id);
    if (s != null) {
        if (s == NONUNIQUE) {
            // if( in_var )
            //	System.out.println("returning null (nonunique)");
            // There is more than one element with this id
            antlrTool.error("Ambiguous reference to AST element "+id+
                            " in rule "+currentRule.getRuleName());
            return null;
        }
        else if (s.equals(currentRule.getRuleName())) {
            // a recursive call to the enclosing rule is
            // ambiguous with the rule itself.
            // if( in_var )
            //	System.out.println("returning null (rulename)");
            antlrTool.error("Ambiguous reference to AST element "+id+
                            " in rule "+currentRule.getRuleName());
            return null;
        }
        else {
            // if( in_var )
            //	System.out.println("returning "+(in_var?s+"_in":s));
            return in_var ? s + "_in" : s;
        }
    }

    // System.out.println("Last check: "+id+" == "+currentRule.getRuleName());
    // Failing that, check the rule name itself.  Output variable
    // is rule_AST; input variable is rule_AST_in (treeparsers).
    if( id.equals(currentRule.getRuleName()) ) {
        String r = in_var ? id + "_AST_in" : id + "_AST";
        if ( transInfo!=null ) {
            if ( !in_var ) {
                transInfo.refRuleRoot = r;
            }
        }
        // if( in_var )
        //	System.out.println("returning (r) "+r);
        return r;
    }
    else {
        // if( in_var )
        //	System.out.println("returning (last) "+id);
        // id does not map to anything -- return itself.
        return id;
    }
}

/** Given an element and the name of an associated AST variable,
 * create a mapping between the element "name" and the variable name.
private void mapTreeVariable(AlternativeElement e, String name) {
    // For tree elements, defer to the root
    if (e instanceof TreeElement) {
        mapTreeVariable( ((TreeElement)e).root, name);
        return;
    }

    // Determine the name of the element, if any, for mapping purposes
    String elName = null;

    // Don't map labeled items
    if (e.getLabel() == null) {
        if (e instanceof TokenRefElement) {
            // use the token id
            elName = ((TokenRefElement)e).atomText;
        }
        else if (e instanceof RuleRefElement) {
            // use the rule name
            elName = ((RuleRefElement)e).targetRule;
        }
    }

    // Add the element to the tree variable map if it has a name
    if (elName != null) {
        if (treeVariableMap.get(elName) != null) {
            // Name is already in the map -- mark it as duplicate
            treeVariableMap.remove(elName);
            treeVariableMap.put(elName, NONUNIQUE);
        }
        else {
            treeVariableMap.put(elName, name);
        }
    }
}

/** Lexically process tree-specifiers in the action.
 *  This will replace #id and #(...) with the appropriate
 *  function calls and/or variables.
 */
protected String processActionForSpecialSymbols(String actionStr, int line,
                                                RuleBlock currentRule,
                                                ActionTransInfo tInfo) {
    if ( actionStr==null || actionStr.length()==0 )
        return null;

    // The action trans info tells us (at the moment) whether an
    // assignment was done to the rule's tree root.
    if (grammar==null)
        return actionStr;

    if ((grammar.buildAST && actionStr.indexOf('#') != -1) ||
        grammar instanceof TreeWalkerGrammar ||
        ((grammar instanceof LexerGrammar ||
          grammar instanceof ParserGrammar)
         && actionStr.indexOf('$') != -1) )
    {
        // Create a lexer to read an action and return the translated version
        antlr.actions.cpp.ActionLexer lexer =
            new antlr.actions.cpp.ActionLexer(actionStr, currentRule, this, tInfo);
        lexer.setLineOffset(line);
        lexer.setFilename(grammar.getFilename());
        lexer.setTool(antlrTool);

        try {
            lexer.mACTION(true);
            actionStr = lexer.getTokenObject().getText();
            // System.out.println("action translated: "+actionStr);
            // System.out.println("trans info is "+tInfo);
        }
        catch (RecognitionException ex) {
            lexer.reportError(ex);
            return actionStr;
        }
        catch (TokenStreamException tex) {
            antlrTool.panic("Error reading action:"+actionStr);
            return actionStr;
        }
        catch (CharStreamException io) {
            antlrTool.panic("Error reading action:"+actionStr);
            return actionStr;
        }
    }
    return actionStr;
}

/** Strip quotes from a namespace option value and ensure it ends in "::". */
private String fixNameSpaceOption( String ns ) {
    ns = StringUtils.stripFrontBack(ns,"\"","\"");
    if( ns.length() > 2 &&
        !ns.substring(ns.length()-2, ns.length()).equals("::") )
        ns += "::";
    return ns;
}

/** Initialize the per-grammar code-generation parameters (namespaces,
 * labeled element types/initializers, extra rule arguments, exception
 * classes) from the tool-level defaults and grammar-level options.
 */
private void setupGrammarParameters(Grammar g) {
    if (g instanceof ParserGrammar ||
        g instanceof LexerGrammar  ||
        g instanceof TreeWalkerGrammar )
    {
        /* RK: options also have to be added to Grammar.java and for options
         * on the file level entries have to be defined in
         * DefineGrammarSymbols.java and passed around via 'globals' in
         * antlrTool.java
         */
        if( antlrTool.nameSpace != null )
            nameSpace = antlrTool.nameSpace;

        if( antlrTool.namespaceStd != null )
            namespaceStd = fixNameSpaceOption(antlrTool.namespaceStd);

        if( antlrTool.namespaceAntlr != null )
            namespaceAntlr = fixNameSpaceOption(antlrTool.namespaceAntlr);

        genHashLines = antlrTool.genHashLines;

        /* let grammar level options override filelevel ones...
         */
        if( g.hasOption("namespace") ) {
            Token t = g.getOption("namespace");
            if( t != null ) {
                nameSpace = new NameSpace(t.getText());
            }
        }
        if( g.hasOption("namespaceAntlr") ) {
            Token t = g.getOption("namespaceAntlr");
            if( t != null ) {
                String ns = StringUtils.stripFrontBack(t.getText(),"\"","\"");
                if ( ns != null ) {
                    if( ns.length() > 2 &&
                        !ns.substring(ns.length()-2, ns.length()).equals("::") )
                        ns += "::";
                    namespaceAntlr = ns;
                }
            }
        }
        if( g.hasOption("namespaceStd") ) {
            Token t = g.getOption("namespaceStd");
            if( t != null ) {
                String ns = StringUtils.stripFrontBack(t.getText(),"\"","\"");
                if ( ns != null ) {
                    if( ns.length() > 2 &&
                        !ns.substring(ns.length()-2, ns.length()).equals("::") )
                        ns += "::";
                    namespaceStd = ns;
                }
            }
        }
        if( g.hasOption("genHashLines") ) {
            Token t = g.getOption("genHashLines");
            if( t != null ) {
                String val = StringUtils.stripFrontBack(t.getText(),"\"","\"");
                genHashLines = val.equals("true");
            }
        }
        noConstructors = antlrTool.noConstructors;	// get the default
        if( g.hasOption("noConstructors") ) {
            Token t = g.getOption("noConstructors");
            // NOTE(review): unlike the options above, t is dereferenced after
            // the null check's scope — if t is null, t.getText() below throws
            // NPE. Looks like a missing guard; verify against upstream ANTLR.
            if( (t != null) && !(t.getText().equals("true") || t.getText().equals("false")))
                antlrTool.error("noConstructors option must be true or false", antlrTool.getGrammarFile(), t.getLine(), t.getColumn());
            noConstructors = t.getText().equals("true");
        }
    }
    if (g instanceof ParserGrammar) {
        labeledElementASTType = namespaceAntlr+"RefAST";
        labeledElementASTInit = namespaceAntlr+"nullAST";
        if ( g.hasOption("ASTLabelType") ) {
            Token tsuffix = g.getOption("ASTLabelType");
            if ( tsuffix != null ) {
                String suffix = StringUtils.stripFrontBack(tsuffix.getText(),"\"","\"");
                if ( suffix != null ) {
                    usingCustomAST = true;
                    labeledElementASTType = suffix;
                    labeledElementASTInit = suffix+"("+namespaceAntlr+"nullAST)";
                }
            }
        }
        labeledElementType = namespaceAntlr+"RefToken ";
        labeledElementInit = namespaceAntlr+"nullToken";
        commonExtraArgs = "";
        commonExtraParams = "";
        commonLocalVars = "";
        lt1Value = "LT(1)";
        exceptionThrown = namespaceAntlr+"RecognitionException";
        throwNoViable = "throw "+namespaceAntlr+"NoViableAltException(LT(1), getFilename());";
    }
    else if (g instanceof LexerGrammar) {
        labeledElementType = "char ";
        labeledElementInit = "'\\0'";
        commonExtraArgs = "";
        commonExtraParams = "bool _createToken";
        commonLocalVars = "int _ttype; "+namespaceAntlr+"RefToken _token; "+namespaceStd+"string::size_type _begin = text.length();";
        lt1Value = "LA(1)";
        exceptionThrown = namespaceAntlr+"RecognitionException";
        throwNoViable = "throw "+namespaceAntlr+"NoViableAltForCharException(LA(1), getFilename(), getLine(), getColumn());";
    }
    else if (g instanceof TreeWalkerGrammar) {
        labeledElementInit = namespaceAntlr+"nullAST";
        labeledElementASTInit = namespaceAntlr+"nullAST";
        labeledElementASTType = namespaceAntlr+"RefAST";
        labeledElementType = namespaceAntlr+"RefAST";
        commonExtraParams = namespaceAntlr+"RefAST _t";
        throwNoViable = "throw "+namespaceAntlr+"NoViableAltException(_t);";
        lt1Value = "_t";
        if ( g.hasOption("ASTLabelType") ) {
            Token tsuffix = g.getOption("ASTLabelType");
            if ( tsuffix != null ) {
                String suffix = StringUtils.stripFrontBack(tsuffix.getText(),"\"","\"");
                if ( suffix != null ) {
                    usingCustomAST = true;
                    labeledElementASTType = suffix;
                    labeledElementType = suffix;
                    labeledElementInit = suffix+"("+namespaceAntlr+"nullAST)";
                    labeledElementASTInit = labeledElementInit;
                    commonExtraParams = suffix+" _t";
                    throwNoViable = "throw "+namespaceAntlr+"NoViableAltException("+namespaceAntlr+"RefAST(_t));";
                    lt1Value = "_t";
                }
            }
        }
        if ( !g.hasOption("ASTLabelType") ) {
            g.setOption("ASTLabelType", new Token(ANTLRTokenTypes.STRING_LITERAL,namespaceAntlr+"RefAST"));
        }
        commonExtraArgs = "_t";
        commonLocalVars = "";
        exceptionThrown = namespaceAntlr+"RecognitionException";
    }
    else {
        antlrTool.panic("Unknown grammar type");
    }
}
}
nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/DefaultFileLineFormatter.java000066400000000000000000000014651161462365500266470ustar00rootroot00000000000000package antlr;
/* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/DefaultFileLineFormatter.java#1 $ */ public class DefaultFileLineFormatter extends FileLineFormatter { public String getFormatString(String fileName, int line, int column) { StringBuffer buf = new StringBuffer(); if (fileName != null) buf.append(fileName + ":"); if (line != -1) { if (fileName == null) buf.append("line "); buf.append(line); if (column != -1) buf.append(":" + column); buf.append(":"); } buf.append(" "); return buf.toString(); } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/DefaultToolErrorHandler.java000066400000000000000000000104221161462365500265120ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/DefaultToolErrorHandler.java#1 $ */ import antlr.collections.impl.BitSet; class DefaultToolErrorHandler implements ToolErrorHandler { DefaultToolErrorHandler(antlr.Tool tool) { antlrTool = tool; } private final antlr.Tool antlrTool; CharFormatter javaCharFormatter = new JavaCharFormatter(); /** Dump token/character sets to a string array suitable for * {@link antlr.Tool.warning(String[], String, int, int) * @param output The array that will contain the token/character set dump, * one element per k (lookahead) value * @param outputStartIndex The index into output that the * dump should start at. 
* @param lexicalAnalysis true for lexical rule * @param depth The depth of the ambiguity * @param sets An array of bitsets containing the ambiguities */ private void dumpSets(String[] output, int outputStartIndex, Grammar grammar, boolean lexicalAnalysis, int depth, Lookahead[] sets) { StringBuffer line = new StringBuffer(100); for (int i = 1; i <= depth; i++) { line.append("k==").append(i).append(':'); if (lexicalAnalysis) { String bits = sets[i].fset.toStringWithRanges(",", javaCharFormatter); if (sets[i].containsEpsilon()) { line.append(""); if (bits.length() > 0) { line.append(','); } } line.append(bits); } else { line.append(sets[i].fset.toString(",", grammar.tokenManager.getVocabulary())); } output[outputStartIndex++] = line.toString(); line.setLength(0); } } /** Issue a warning about ambiguity between a alternates * @param blk The block being analyzed * @param lexicalAnalysis true for lexical rule * @param depth The depth of the ambiguity * @param sets An array of bitsets containing the ambiguities * @param altIdx1 The zero-based index of the first ambiguous alternative * @param altIdx2 The zero-based index of the second ambiguous alternative */ public void warnAltAmbiguity(Grammar grammar, AlternativeBlock blk, boolean lexicalAnalysis, int depth, Lookahead[] sets, int altIdx1, int altIdx2) { final StringBuffer line = new StringBuffer(100); if (blk instanceof RuleBlock && ((RuleBlock)blk).isLexerAutoGenRule()) { Alternative ai = blk.getAlternativeAt(altIdx1); Alternative aj = blk.getAlternativeAt(altIdx2); RuleRefElement rri = (RuleRefElement)ai.head; RuleRefElement rrj = (RuleRefElement)aj.head; String ri = CodeGenerator.reverseLexerRuleName(rri.targetRule); String rj = CodeGenerator.reverseLexerRuleName(rrj.targetRule); line.append("lexical nondeterminism between rules "); line.append(ri).append(" and ").append(rj).append(" upon"); } else { if (lexicalAnalysis) { line.append("lexical "); } line.append("nondeterminism between alts "); line.append(altIdx1 + 
1).append(" and "); line.append(altIdx2 + 1).append(" of block upon"); } final String [] output = new String [depth + 1];; output[0] = line.toString(); dumpSets(output, 1, grammar, lexicalAnalysis, depth, sets); antlrTool.warning(output, grammar.getFilename(), blk.getLine(), blk.getColumn()); } /** Issue a warning about ambiguity between an alternate and exit path. * @param blk The block being analyzed * @param lexicalAnalysis true for lexical rule * @param depth The depth of the ambiguity * @param sets An array of bitsets containing the ambiguities * @param altIdx The zero-based index of the ambiguous alternative */ public void warnAltExitAmbiguity(Grammar grammar, BlockWithImpliedExitPath blk, boolean lexicalAnalysis, int depth, Lookahead[] sets, int altIdx ) { String [] output = new String[depth + 2]; output[0] = (lexicalAnalysis ? "lexical " : "") + "nondeterminism upon"; dumpSets(output, 1, grammar, lexicalAnalysis, depth, sets); output[depth + 1] = "between alt " + (altIdx + 1) + " and exit branch of block"; antlrTool.warning(output, grammar.getFilename(), blk.getLine(), blk.getColumn()); } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/DefineGrammarSymbols.java000066400000000000000000001000351161462365500260320ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/DefineGrammarSymbols.java#1 $ */ import java.util.Hashtable; import antlr.collections.impl.BitSet; /**DefineGrammarSymbols is a behavior for the ANTLRParser that adds all * the token and rule symbols to the grammar symbol table. * * Token types are assigned to token symbols in this class also. * The token type for a token is done in the order seen (lexically). 
*/ public class DefineGrammarSymbols implements ANTLRGrammarParseBehavior { // Contains all of the defined parser and lexer Grammar's indexed by name protected Hashtable grammars = new Hashtable(); // Contains all the TokenManagers indexed by name protected Hashtable tokenManagers = new Hashtable(); // Current grammar (parser or lexer) protected Grammar grammar; // The tool under which this is invoked protected Tool tool; // The grammar analyzer object LLkAnalyzer analyzer; // The command-line arguments passed to the tool. // This allows each grammar to parse the arguments as it is created String[] args; // Name for default token manager does not match any valid name static final String DEFAULT_TOKENMANAGER_NAME = "*default"; // Header actions apply to all parsers unless redefined // Contains all of the header actions indexed by name protected Hashtable headerActions = new Hashtable(); // Place where preamble is stored until a grammar is defined Token thePreambleAction = new CommonToken(Token.INVALID_TYPE, ""); // init to empty token // The target language String language = "Java"; protected int numLexers = 0; protected int numParsers = 0; protected int numTreeParsers = 0; public DefineGrammarSymbols(Tool tool_, String[] args_, LLkAnalyzer analyzer_) { tool = tool_; args = args_; analyzer = analyzer_; } public void _refStringLiteral(Token lit, Token label, int autoGenType, boolean lastInRule) { if (!(grammar instanceof LexerGrammar)) { // String literals are treated like tokens except by the lexer String str = lit.getText(); if (grammar.tokenManager.getTokenSymbol(str) != null) { // string symbol is already defined return; } StringLiteralSymbol sl = new StringLiteralSymbol(str); int tt = grammar.tokenManager.nextTokenType(); sl.setTokenType(tt); grammar.tokenManager.define(sl); } } /** Reference a token */ public void _refToken(Token assignId, Token t, Token label, Token args, boolean inverted, int autoGenType, boolean lastInRule) { String id = t.getText(); if 
(!grammar.tokenManager.tokenDefined(id)) { /* // RK: dish out a warning if the token was not defined before. tool.warning("Token '" + id + "' defined outside tokens section", tool.grammarFile, t.getLine(), t.getColumn()); */ int tt = grammar.tokenManager.nextTokenType(); TokenSymbol ts = new TokenSymbol(id); ts.setTokenType(tt); grammar.tokenManager.define(ts); } } /** Abort the processing of a grammar due to syntax errors */ public void abortGrammar() { if (grammar != null && grammar.getClassName() != null) { grammars.remove(grammar.getClassName()); } grammar = null; } public void beginAlt(boolean doAST_) { } public void beginChildList() { } // Exception handling public void beginExceptionGroup() { } public void beginExceptionSpec(Token label) { } public void beginSubRule(Token label, Token start, boolean not) { } public void beginTree(Token tok) throws SemanticException { } /** Define a lexer or parser rule */ public void defineRuleName(Token r, String access, boolean ruleAutoGen, String docComment) throws SemanticException { String id = r.getText(); // if ( Character.isUpperCase(id.charAt(0)) ) { if (r.type == ANTLRTokenTypes.TOKEN_REF) { // lexer rule id = CodeGenerator.encodeLexerRuleName(id); // make sure we define it as token identifier also if (!grammar.tokenManager.tokenDefined(r.getText())) { int tt = grammar.tokenManager.nextTokenType(); TokenSymbol ts = new TokenSymbol(r.getText()); ts.setTokenType(tt); grammar.tokenManager.define(ts); } } RuleSymbol rs; if (grammar.isDefined(id)) { // symbol seen before? rs = (RuleSymbol)grammar.getSymbol(id); // rule just referenced or has it been defined yet? if (rs.isDefined()) { tool.error("redefinition of rule " + id, grammar.getFilename(), r.getLine(), r.getColumn()); } } else { rs = new RuleSymbol(id); grammar.define(rs); } rs.setDefined(); rs.access = access; rs.comment = docComment; } /** Define a token from tokens {...}. * Must be label and literal or just label or just a literal. 
*/ public void defineToken(Token tokname, Token tokliteral) { String name = null; String literal = null; if (tokname != null) { name = tokname.getText(); } if (tokliteral != null) { literal = tokliteral.getText(); } // System.out.println("defining " + name + " with literal " + literal); // if (literal != null) { StringLiteralSymbol sl = (StringLiteralSymbol)grammar.tokenManager.getTokenSymbol(literal); if (sl != null) { // This literal is known already. // If the literal has no label already, but we can provide // one here, then no problem, just map the label to the literal // and don't change anything else. // Otherwise, labels conflict: error. if (name == null || sl.getLabel() != null) { tool.warning("Redefinition of literal in tokens {...}: " + literal, grammar.getFilename(), tokliteral.getLine(), tokliteral.getColumn()); return; } else if (name != null) { // The literal had no label, but new def does. Set it. sl.setLabel(name); // Also, map the label to the literal. grammar.tokenManager.mapToTokenSymbol(name, sl); } } // if they provide a name/label and that name/label already // exists, just hook this literal onto old token. if (name != null) { TokenSymbol ts = (TokenSymbol)grammar.tokenManager.getTokenSymbol(name); if (ts != null) { // watch out that the label is not more than just a token. // If it already has a literal attached, then: conflict. if (ts instanceof StringLiteralSymbol) { tool.warning("Redefinition of token in tokens {...}: " + name, grammar.getFilename(), tokliteral.getLine(), tokliteral.getColumn()); return; } // a simple token symbol such as DECL is defined // must convert it to a StringLiteralSymbol with a // label by co-opting token type and killing old // TokenSymbol. Kill mapping and entry in vector // of token manager. // First, claim token type. 
int ttype = ts.getTokenType(); // now, create string literal with label sl = new StringLiteralSymbol(literal); sl.setTokenType(ttype); sl.setLabel(name); // redefine this critter as a string literal grammar.tokenManager.define(sl); // make sure the label can be used also. grammar.tokenManager.mapToTokenSymbol(name, sl); return; } // here, literal was labeled but not by a known token symbol. } sl = new StringLiteralSymbol(literal); int tt = grammar.tokenManager.nextTokenType(); sl.setTokenType(tt); sl.setLabel(name); grammar.tokenManager.define(sl); if (name != null) { // make the label point at token symbol too grammar.tokenManager.mapToTokenSymbol(name, sl); } } // create a token in the token manager not a literal else { if (grammar.tokenManager.tokenDefined(name)) { tool.warning("Redefinition of token in tokens {...}: " + name, grammar.getFilename(), tokname.getLine(), tokname.getColumn()); return; } int tt = grammar.tokenManager.nextTokenType(); TokenSymbol ts = new TokenSymbol(name); ts.setTokenType(tt); grammar.tokenManager.define(ts); } } public void endAlt() { } public void endChildList() { } public void endExceptionGroup() { } public void endExceptionSpec() { } public void endGrammar() { } /** Called after the optional options section, to compensate for * options that may not have been set. * This method is bigger than it needs to be, but is much more * clear if I delineate all the cases. */ public void endOptions() { // NO VOCAB OPTIONS if (grammar.exportVocab == null && grammar.importVocab == null) { grammar.exportVocab = grammar.getClassName(); // Can we get initial vocab from default shared vocab? 
if (tokenManagers.containsKey(DEFAULT_TOKENMANAGER_NAME)) { // Use the already-defined token manager grammar.exportVocab = DEFAULT_TOKENMANAGER_NAME; TokenManager tm = (TokenManager)tokenManagers.get(DEFAULT_TOKENMANAGER_NAME); // System.out.println("No tokenVocabulary for '" + grammar.getClassName() + "', using default '" + tm.getName() + "'"); grammar.setTokenManager(tm); return; } // no shared vocab for file, make new one // System.out.println("No exportVocab for '" + grammar.getClassName() + "', creating default '" + grammar.exportVocab + "'"); TokenManager tm = new SimpleTokenManager(grammar.exportVocab, tool); grammar.setTokenManager(tm); // Add the token manager to the list of token managers tokenManagers.put(grammar.exportVocab, tm); // no default vocab, so make this the default vocab tokenManagers.put(DEFAULT_TOKENMANAGER_NAME, tm); return; } // NO OUTPUT, BUT HAS INPUT VOCAB if (grammar.exportVocab == null && grammar.importVocab != null) { grammar.exportVocab = grammar.getClassName(); // first make sure input!=output if (grammar.importVocab.equals(grammar.exportVocab)) { tool.warning("Grammar " + grammar.getClassName() + " cannot have importVocab same as default output vocab (grammar name); ignored."); // kill importVocab option and try again: use default vocab grammar.importVocab = null; endOptions(); return; } // check to see if the vocab is already in memory // (defined by another grammar in the file). Not normal situation. if (tokenManagers.containsKey(grammar.importVocab)) { // make a copy since we'll be generating a new output vocab // and we don't want to affect this one. Set the name to // the default output vocab==classname. 
TokenManager tm = (TokenManager)tokenManagers.get(grammar.importVocab); // System.out.println("Duping importVocab of " + grammar.importVocab); TokenManager dup = (TokenManager)tm.clone(); dup.setName(grammar.exportVocab); // System.out.println("Setting name to " + grammar.exportVocab); dup.setReadOnly(false); grammar.setTokenManager(dup); tokenManagers.put(grammar.exportVocab, dup); return; } // System.out.println("reading in vocab "+grammar.importVocab); // Must be a file, go get it. ImportVocabTokenManager tm = new ImportVocabTokenManager(grammar, grammar.importVocab + CodeGenerator.TokenTypesFileSuffix + CodeGenerator.TokenTypesFileExt, grammar.exportVocab, tool); tm.setReadOnly(false); // since renamed, can write out // Add this token manager to the list so its tokens will be generated tokenManagers.put(grammar.exportVocab, tm); // System.out.println("vocab renamed to default output vocab of "+tm.getName()); // Assign the token manager to this grammar. grammar.setTokenManager(tm); // set default vocab if none if (!tokenManagers.containsKey(DEFAULT_TOKENMANAGER_NAME)) { tokenManagers.put(DEFAULT_TOKENMANAGER_NAME, tm); } return; } // OUTPUT VOCAB, BUT NO INPUT VOCAB if (grammar.exportVocab != null && grammar.importVocab == null) { // share with previous vocab if it exists if (tokenManagers.containsKey(grammar.exportVocab)) { // Use the already-defined token manager TokenManager tm = (TokenManager)tokenManagers.get(grammar.exportVocab); // System.out.println("Sharing exportVocab of " + grammar.exportVocab); grammar.setTokenManager(tm); return; } // create new output vocab // System.out.println("Creating exportVocab " + grammar.exportVocab); TokenManager tm = new SimpleTokenManager(grammar.exportVocab, tool); grammar.setTokenManager(tm); // Add the token manager to the list of token managers tokenManagers.put(grammar.exportVocab, tm); // set default vocab if none if (!tokenManagers.containsKey(DEFAULT_TOKENMANAGER_NAME)) { 
tokenManagers.put(DEFAULT_TOKENMANAGER_NAME, tm); } return; } // BOTH INPUT AND OUTPUT VOCAB if (grammar.exportVocab != null && grammar.importVocab != null) { // don't want input==output if (grammar.importVocab.equals(grammar.exportVocab)) { tool.error("exportVocab of " + grammar.exportVocab + " same as importVocab; probably not what you want"); } // does the input vocab already exist in memory? if (tokenManagers.containsKey(grammar.importVocab)) { // make a copy since we'll be generating a new output vocab // and we don't want to affect this one. TokenManager tm = (TokenManager)tokenManagers.get(grammar.importVocab); // System.out.println("Duping importVocab of " + grammar.importVocab); TokenManager dup = (TokenManager)tm.clone(); dup.setName(grammar.exportVocab); // System.out.println("Setting name to " + grammar.exportVocab); dup.setReadOnly(false); grammar.setTokenManager(dup); tokenManagers.put(grammar.exportVocab, dup); return; } // Must be a file, go get it. ImportVocabTokenManager tm = new ImportVocabTokenManager(grammar, grammar.importVocab + CodeGenerator.TokenTypesFileSuffix + CodeGenerator.TokenTypesFileExt, grammar.exportVocab, tool); tm.setReadOnly(false); // write it out as we've changed name // Add this token manager to the list so its tokens will be generated tokenManagers.put(grammar.exportVocab, tm); // Assign the token manager to this grammar. 
grammar.setTokenManager(tm); // set default vocab if none if (!tokenManagers.containsKey(DEFAULT_TOKENMANAGER_NAME)) { tokenManagers.put(DEFAULT_TOKENMANAGER_NAME, tm); } return; } } public void endRule(String r) { } public void endSubRule() { } public void endTree() { } public void hasError() { } public void noASTSubRule() { } public void oneOrMoreSubRule() { } public void optionalSubRule() { } public void setUserExceptions(String thr) { } public void refAction(Token action) { } public void refArgAction(Token action) { } public void refCharLiteral(Token lit, Token label, boolean inverted, int autoGenType, boolean lastInRule) { } public void refCharRange(Token t1, Token t2, Token label, int autoGenType, boolean lastInRule) { } public void refElementOption(Token option, Token value) { } public void refTokensSpecElementOption(Token tok, Token option, Token value) { } public void refExceptionHandler(Token exTypeAndName, Token action) { } // Header action applies to all parsers and lexers. public void refHeaderAction(Token name, Token act) { String key; if (name == null) key = ""; else key = StringUtils.stripFrontBack(name.getText(), "\"", "\""); // FIXME: depending on the mode the inserted header actions should // be checked for sanity. 
if (headerActions.containsKey(key)) { if (key.equals("")) tool.error(act.getLine() + ": header action already defined"); else tool.error(act.getLine() + ": header action '" + key + "' already defined"); } headerActions.put(key, act); } public String getHeaderAction(String name) { Token t = (Token)headerActions.get(name); if (t == null) { return ""; } return t.getText(); } public void refInitAction(Token action) { } public void refMemberAction(Token act) { } public void refPreambleAction(Token act) { thePreambleAction = act; } public void refReturnAction(Token returnAction) { } public void refRule(Token idAssign, Token r, Token label, Token args, int autoGenType) { String id = r.getText(); // if ( Character.isUpperCase(id.charAt(0)) ) { // lexer rule? if (r.type == ANTLRTokenTypes.TOKEN_REF) { // lexer rule? id = CodeGenerator.encodeLexerRuleName(id); } if (!grammar.isDefined(id)) { grammar.define(new RuleSymbol(id)); } } public void refSemPred(Token pred) { } public void refStringLiteral(Token lit, Token label, int autoGenType, boolean lastInRule) { _refStringLiteral(lit, label, autoGenType, lastInRule); } /** Reference a token */ public void refToken(Token assignId, Token t, Token label, Token args, boolean inverted, int autoGenType, boolean lastInRule) { _refToken(assignId, t, label, args, inverted, autoGenType, lastInRule); } public void refTokenRange(Token t1, Token t2, Token label, int autoGenType, boolean lastInRule) { // ensure that the DefineGrammarSymbols methods are called; otherwise a range addes more // token refs to the alternative by calling MakeGrammar.refToken etc... 
if (t1.getText().charAt(0) == '"') { refStringLiteral(t1, null, GrammarElement.AUTO_GEN_NONE, lastInRule); } else { _refToken(null, t1, null, null, false, GrammarElement.AUTO_GEN_NONE, lastInRule); } if (t2.getText().charAt(0) == '"') { _refStringLiteral(t2, null, GrammarElement.AUTO_GEN_NONE, lastInRule); } else { _refToken(null, t2, null, null, false, GrammarElement.AUTO_GEN_NONE, lastInRule); } } public void refTreeSpecifier(Token treeSpec) { } public void refWildcard(Token t, Token label, int autoGenType) { } /** Get ready to process a new grammar */ public void reset() { grammar = null; } public void setArgOfRuleRef(Token argaction) { } /** Set the character vocabulary for a lexer */ public void setCharVocabulary(BitSet b) { // grammar should enforce that this is only called for lexer ((LexerGrammar)grammar).setCharVocabulary(b); } /** setFileOption: Associate an option value with a key. * This applies to options for an entire grammar file. * @param key The token containing the option name * @param value The token containing the option value. 
*/ public void setFileOption(Token key, Token value, String filename) { if (key.getText().equals("language")) { if (value.getType() == ANTLRParser.STRING_LITERAL) { language = StringUtils.stripBack(StringUtils.stripFront(value.getText(), '"'), '"'); } else if (value.getType() == ANTLRParser.TOKEN_REF || value.getType() == ANTLRParser.RULE_REF) { language = value.getText(); } else { tool.error("language option must be string or identifier", filename, value.getLine(), value.getColumn()); } } else if (key.getText().equals("mangleLiteralPrefix")) { if (value.getType() == ANTLRParser.STRING_LITERAL) { tool.literalsPrefix = StringUtils.stripFrontBack(value.getText(), "\"", "\""); } else { tool.error("mangleLiteralPrefix option must be string", filename, value.getLine(), value.getColumn()); } } else if (key.getText().equals("upperCaseMangledLiterals")) { if (value.getText().equals("true")) { tool.upperCaseMangledLiterals = true; } else if (value.getText().equals("false")) { tool.upperCaseMangledLiterals = false; } else { grammar.antlrTool.error("Value for upperCaseMangledLiterals must be true or false", filename, key.getLine(), key.getColumn()); } } else if ( key.getText().equals("namespaceStd") || key.getText().equals("namespaceAntlr") || key.getText().equals("genHashLines") ) { if (!language.equals("Cpp")) { tool.error(key.getText() + " option only valid for C++", filename, key.getLine(), key.getColumn()); } else { if (key.getText().equals("noConstructors")) { if (!(value.getText().equals("true") || value.getText().equals("false"))) tool.error("noConstructors option must be true or false", filename, value.getLine(), value.getColumn()); tool.noConstructors = value.getText().equals("true"); } else if (key.getText().equals("genHashLines")) { if (!(value.getText().equals("true") || value.getText().equals("false"))) tool.error("genHashLines option must be true or false", filename, value.getLine(), value.getColumn()); tool.genHashLines = value.getText().equals("true"); } else 
{ if (value.getType() != ANTLRParser.STRING_LITERAL) { tool.error(key.getText() + " option must be a string", filename, value.getLine(), value.getColumn()); } else { if (key.getText().equals("namespaceStd")) tool.namespaceStd = value.getText(); else if (key.getText().equals("namespaceAntlr")) tool.namespaceAntlr = value.getText(); } } } } else if ( key.getText().equals("namespace") ) { if ( !language.equals("Cpp") && !language.equals("CSharp") && !language.equals("Boo")) { tool.error(key.getText() + " option only valid for C++ and C# (a.k.a CSharp)", filename, key.getLine(), key.getColumn()); } else { if (value.getType() != ANTLRParser.STRING_LITERAL) { tool.error(key.getText() + " option must be a string", filename, value.getLine(), value.getColumn()); } else { if (key.getText().equals("namespace")) tool.setNameSpace(value.getText()); } } } else { tool.error("Invalid file-level option: " + key.getText(), filename, key.getLine(), value.getColumn()); } } /** setGrammarOption: Associate an option value with a key. * This function forwards to Grammar.setOption for some options. * @param key The token containing the option name * @param value The token containing the option value. */ public void setGrammarOption(Token key, Token value) { if (key.getText().equals("tokdef") || key.getText().equals("tokenVocabulary")) { tool.error("tokdef/tokenVocabulary options are invalid >= ANTLR 2.6.0.\n" + " Use importVocab/exportVocab instead. Please see the documentation.\n" + " The previous options were so heinous that Terence changed the whole\n" + " vocabulary mechanism; it was better to change the names rather than\n" + " subtly change the functionality of the known options. 
Sorry!", grammar.getFilename(), value.getLine(), value.getColumn()); } else if (key.getText().equals("literal") && grammar instanceof LexerGrammar) { tool.error("the literal option is invalid >= ANTLR 2.6.0.\n" + " Use the \"tokens {...}\" mechanism instead.", grammar.getFilename(), value.getLine(), value.getColumn()); } else if (key.getText().equals("exportVocab")) { // Set the token manager associated with the parser if (value.getType() == ANTLRParser.RULE_REF || value.getType() == ANTLRParser.TOKEN_REF) { grammar.exportVocab = value.getText(); } else { tool.error("exportVocab must be an identifier", grammar.getFilename(), value.getLine(), value.getColumn()); } } else if (key.getText().equals("importVocab")) { if (value.getType() == ANTLRParser.RULE_REF || value.getType() == ANTLRParser.TOKEN_REF) { grammar.importVocab = value.getText(); } else { tool.error("importVocab must be an identifier", grammar.getFilename(), value.getLine(), value.getColumn()); } } else { // Forward all unrecognized options to the grammar grammar.setOption(key.getText(), value); } } public void setRuleOption(Token key, Token value) { } public void setSubruleOption(Token key, Token value) { } /** Start a new lexer */ public void startLexer(String file, Token name, String superClass, String doc) { if (numLexers > 0) { tool.panic("You may only have one lexer per grammar file: class " + name.getText()); } numLexers++; reset(); //System.out.println("Processing lexer '" + name.getText() + "'"); // Does the lexer already exist? 
Grammar g = (Grammar)grammars.get(name); if (g != null) { if (!(g instanceof LexerGrammar)) { tool.panic("'" + name.getText() + "' is already defined as a non-lexer"); } else { tool.panic("Lexer '" + name.getText() + "' is already defined"); } } else { // Create a new lexer grammar LexerGrammar lg = new LexerGrammar(name.getText(), tool, superClass); lg.comment = doc; lg.processArguments(args); lg.setFilename(file); grammars.put(lg.getClassName(), lg); // Use any preamble action lg.preambleAction = thePreambleAction; thePreambleAction = new CommonToken(Token.INVALID_TYPE, ""); // This is now the current grammar grammar = lg; } } /** Start a new parser */ public void startParser(String file, Token name, String superClass, String doc) { if (numParsers > 0) { tool.panic("You may only have one parser per grammar file: class " + name.getText()); } numParsers++; reset(); //System.out.println("Processing parser '" + name.getText() + "'"); // Is this grammar already defined? Grammar g = (Grammar)grammars.get(name); if (g != null) { if (!(g instanceof ParserGrammar)) { tool.panic("'" + name.getText() + "' is already defined as a non-parser"); } else { tool.panic("Parser '" + name.getText() + "' is already defined"); } } else { // Create a new grammar grammar = new ParserGrammar(name.getText(), tool, superClass); grammar.comment = doc; grammar.processArguments(args); grammar.setFilename(file); grammars.put(grammar.getClassName(), grammar); // Use any preamble action grammar.preambleAction = thePreambleAction; thePreambleAction = new CommonToken(Token.INVALID_TYPE, ""); } } /** Start a new tree-walker */ public void startTreeWalker(String file, Token name, String superClass, String doc) { if (numTreeParsers > 0) { tool.panic("You may only have one tree parser per grammar file: class " + name.getText()); } numTreeParsers++; reset(); //System.out.println("Processing tree-walker '" + name.getText() + "'"); // Is this grammar already defined? 
Grammar g = (Grammar)grammars.get(name); if (g != null) { if (!(g instanceof TreeWalkerGrammar)) { tool.panic("'" + name.getText() + "' is already defined as a non-tree-walker"); } else { tool.panic("Tree-walker '" + name.getText() + "' is already defined"); } } else { // Create a new grammar grammar = new TreeWalkerGrammar(name.getText(), tool, superClass); grammar.comment = doc; grammar.processArguments(args); grammar.setFilename(file); grammars.put(grammar.getClassName(), grammar); // Use any preamble action grammar.preambleAction = thePreambleAction; thePreambleAction = new CommonToken(Token.INVALID_TYPE, ""); } } public void synPred() { } public void zeroOrMoreSubRule() { } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/DiagnosticCodeGenerator.java000066400000000000000000000774621161462365500265270ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/DiagnosticCodeGenerator.java#1 $ */ import java.util.Enumeration; import antlr.collections.impl.BitSet; import antlr.collections.impl.Vector; import java.io.PrintWriter; //SAS: changed for proper text file io import java.io.IOException; import java.io.FileWriter; /**Generate MyParser.txt, MyLexer.txt and MyParserTokenTypes.txt */ public class DiagnosticCodeGenerator extends CodeGenerator { /** non-zero if inside syntactic predicate generation */ protected int syntacticPredLevel = 0; /** true during lexer generation, false during parser generation */ protected boolean doingLexRules = false; /** Create a Diagnostic code-generator using the given Grammar * The caller must still call setTool, setBehavior, and setAnalyzer * before generating code. 
*/ public DiagnosticCodeGenerator() { super(); charFormatter = new JavaCharFormatter(); } /**Generate the parser, lexer, and token types documentation */ public void gen() { // Do the code generation try { // Loop over all grammars Enumeration grammarIter = behavior.grammars.elements(); while (grammarIter.hasMoreElements()) { Grammar g = (Grammar)grammarIter.nextElement(); // Connect all the components to each other g.setGrammarAnalyzer(analyzer); g.setCodeGenerator(this); analyzer.setGrammar(g); // To get right overloading behavior across hetrogeneous grammars g.generate(); if (antlrTool.hasError()) { antlrTool.panic("Exiting due to errors."); } } // Loop over all token managers (some of which are lexers) Enumeration tmIter = behavior.tokenManagers.elements(); while (tmIter.hasMoreElements()) { TokenManager tm = (TokenManager)tmIter.nextElement(); if (!tm.isReadOnly()) { // Write the token manager tokens as Java genTokenTypes(tm); } } } catch (IOException e) { antlrTool.reportException(e, null); } } /** Generate code for the given grammar element. * @param blk The {...} action to generate */ public void gen(ActionElement action) { if (action.isSemPred) { // handled elsewhere } else { print("ACTION: "); _printAction(action.actionText); } } /** Generate code for the given grammar element. * @param blk The "x|y|z|..." block to generate */ public void gen(AlternativeBlock blk) { println("Start of alternative block."); tabs++; genBlockPreamble(blk); boolean ok = grammar.theLLkAnalyzer.deterministic(blk); if (!ok) { println("Warning: This alternative block is non-deterministic"); } genCommonBlock(blk); tabs--; } /** Generate code for the given grammar element. * @param blk The block-end element to generate. Block-end * elements are synthesized by the grammar parser to represent * the end of a block. */ public void gen(BlockEndElement end) { // no-op } /** Generate code for the given grammar element. 
* @param blk The character literal reference to generate */ public void gen(CharLiteralElement atom) { print("Match character "); if (atom.not) { _print("NOT "); } _print(atom.atomText); if (atom.label != null) { _print(", label=" + atom.label); } _println(""); } /** Generate code for the given grammar element. * @param blk The character-range reference to generate */ public void gen(CharRangeElement r) { print("Match character range: " + r.beginText + ".." + r.endText); if (r.label != null) { _print(", label = " + r.label); } _println(""); } /** Generate the lexer TXT file */ public void gen(LexerGrammar g) throws IOException { setGrammar(g); antlrTool.reportProgress("Generating " + grammar.getClassName() + TokenTypesFileExt); currentOutput = antlrTool.openOutputFile(grammar.getClassName() + TokenTypesFileExt); //SAS: changed for proper text file io tabs = 0; doingLexRules = true; // Generate header common to all TXT output files genHeader(); // Output the user-defined lexer premamble println(""); println("*** Lexer Preamble Action."); println("This action will appear before the declaration of your lexer class:"); tabs++; println(grammar.preambleAction.getText()); tabs--; println("*** End of Lexer Preamble Action"); // Generate lexer class definition println(""); println("*** Your lexer class is called '" + grammar.getClassName() + "' and is a subclass of '" + grammar.getSuperClass() + "'."); // Generate user-defined parser class members println(""); println("*** User-defined lexer class members:"); println("These are the member declarations that you defined for your class:"); tabs++; printAction(grammar.classMemberAction.getText()); tabs--; println("*** End of user-defined lexer class members"); // Generate string literals println(""); println("*** String literals used in the parser"); println("The following string literals were used in the parser."); println("An actual code generator would arrange to place these literals"); println("into a table in the generated 
lexer, so that actions in the"); println("generated lexer could match token text against the literals."); println("String literals used in the lexer are not listed here, as they"); println("are incorporated into the mainstream lexer processing."); tabs++; // Enumerate all of the symbols and look for string literal symbols Enumeration ids = grammar.getSymbols(); while (ids.hasMoreElements()) { GrammarSymbol sym = (GrammarSymbol)ids.nextElement(); // Only processing string literals -- reject other symbol entries if (sym instanceof StringLiteralSymbol) { StringLiteralSymbol s = (StringLiteralSymbol)sym; println(s.getId() + " = " + s.getTokenType()); } } tabs--; println("*** End of string literals used by the parser"); // Generate nextToken() rule. // nextToken() is a synthetic lexer rule that is the implicit OR of all // user-defined lexer rules. genNextToken(); // Generate code for each rule in the lexer println(""); println("*** User-defined Lexer rules:"); tabs++; ids = grammar.rules.elements(); while (ids.hasMoreElements()) { RuleSymbol rs = (RuleSymbol)ids.nextElement(); if (!rs.id.equals("mnextToken")) { genRule(rs); } } tabs--; println(""); println("*** End User-defined Lexer rules:"); // Close the lexer output file currentOutput.close(); currentOutput = null; doingLexRules = false; } /** Generate code for the given grammar element. 
* @param blk The (...)+ block to generate */ public void gen(OneOrMoreBlock blk) { println("Start ONE-OR-MORE (...)+ block:"); tabs++; genBlockPreamble(blk); boolean ok = grammar.theLLkAnalyzer.deterministic(blk); if (!ok) { println("Warning: This one-or-more block is non-deterministic"); } genCommonBlock(blk); tabs--; println("End ONE-OR-MORE block."); } /** Generate the parser TXT file */ public void gen(ParserGrammar g) throws IOException { setGrammar(g); // Open the output stream for the parser and set the currentOutput antlrTool.reportProgress("Generating " + grammar.getClassName() + TokenTypesFileExt); currentOutput = antlrTool.openOutputFile(grammar.getClassName() + TokenTypesFileExt); //SAS: changed for proper text file io tabs = 0; // Generate the header common to all output files. genHeader(); // Output the user-defined parser premamble println(""); println("*** Parser Preamble Action."); println("This action will appear before the declaration of your parser class:"); tabs++; println(grammar.preambleAction.getText()); tabs--; println("*** End of Parser Preamble Action"); // Generate parser class definition println(""); println("*** Your parser class is called '" + grammar.getClassName() + "' and is a subclass of '" + grammar.getSuperClass() + "'."); // Generate user-defined parser class members println(""); println("*** User-defined parser class members:"); println("These are the member declarations that you defined for your class:"); tabs++; printAction(grammar.classMemberAction.getText()); tabs--; println("*** End of user-defined parser class members"); // Generate code for each rule in the grammar println(""); println("*** Parser rules:"); tabs++; // Enumerate the parser rules Enumeration rules = grammar.rules.elements(); while (rules.hasMoreElements()) { println(""); // Get the rules from the list and downcast it to proper type GrammarSymbol sym = (GrammarSymbol)rules.nextElement(); // Only process parser rules if (sym instanceof RuleSymbol) { 
genRule((RuleSymbol)sym); } } tabs--; println(""); println("*** End of parser rules"); println(""); println("*** End of parser"); // Close the parser output stream currentOutput.close(); currentOutput = null; } /** Generate code for the given grammar element. * @param blk The rule-reference to generate */ public void gen(RuleRefElement rr) { RuleSymbol rs = (RuleSymbol)grammar.getSymbol(rr.targetRule); // Generate the actual rule description print("Rule Reference: " + rr.targetRule); if (rr.idAssign != null) { _print(", assigned to '" + rr.idAssign + "'"); } if (rr.args != null) { _print(", arguments = " + rr.args); } _println(""); // Perform diagnostics if (rs == null || !rs.isDefined()) { println("Rule '" + rr.targetRule + "' is referenced, but that rule is not defined."); println("\tPerhaps the rule is misspelled, or you forgot to define it."); return; } if (!(rs instanceof RuleSymbol)) { // Should this ever happen?? println("Rule '" + rr.targetRule + "' is referenced, but that is not a grammar rule."); return; } if (rr.idAssign != null) { // Warn if the rule has no return type if (rs.block.returnAction == null) { println("Error: You assigned from Rule '" + rr.targetRule + "', but that rule has no return type."); } } else { // Warn about return value if any, but not inside syntactic predicate if (!(grammar instanceof LexerGrammar) && syntacticPredLevel == 0 && rs.block.returnAction != null) { println("Warning: Rule '" + rr.targetRule + "' returns a value"); } } if (rr.args != null && rs.block.argAction == null) { println("Error: Rule '" + rr.targetRule + "' accepts no arguments."); } } /** Generate code for the given grammar element. * @param blk The string-literal reference to generate */ public void gen(StringLiteralElement atom) { print("Match string literal "); _print(atom.atomText); if (atom.label != null) { _print(", label=" + atom.label); } _println(""); } /** Generate code for the given grammar element. 
* @param blk The token-range reference to generate */ public void gen(TokenRangeElement r) { print("Match token range: " + r.beginText + ".." + r.endText); if (r.label != null) { _print(", label = " + r.label); } _println(""); } /** Generate code for the given grammar element. * @param blk The token-reference to generate */ public void gen(TokenRefElement atom) { print("Match token "); if (atom.not) { _print("NOT "); } _print(atom.atomText); if (atom.label != null) { _print(", label=" + atom.label); } _println(""); } public void gen(TreeElement t) { print("Tree reference: " + t); } /** Generate the tree-walker TXT file */ public void gen(TreeWalkerGrammar g) throws IOException { setGrammar(g); // Open the output stream for the parser and set the currentOutput antlrTool.reportProgress("Generating " + grammar.getClassName() + TokenTypesFileExt); currentOutput = antlrTool.openOutputFile(grammar.getClassName() + TokenTypesFileExt); //SAS: changed for proper text file io tabs = 0; // Generate the header common to all output files. 
genHeader(); // Output the user-defined parser premamble println(""); println("*** Tree-walker Preamble Action."); println("This action will appear before the declaration of your tree-walker class:"); tabs++; println(grammar.preambleAction.getText()); tabs--; println("*** End of tree-walker Preamble Action"); // Generate tree-walker class definition println(""); println("*** Your tree-walker class is called '" + grammar.getClassName() + "' and is a subclass of '" + grammar.getSuperClass() + "'."); // Generate user-defined tree-walker class members println(""); println("*** User-defined tree-walker class members:"); println("These are the member declarations that you defined for your class:"); tabs++; printAction(grammar.classMemberAction.getText()); tabs--; println("*** End of user-defined tree-walker class members"); // Generate code for each rule in the grammar println(""); println("*** tree-walker rules:"); tabs++; // Enumerate the tree-walker rules Enumeration rules = grammar.rules.elements(); while (rules.hasMoreElements()) { println(""); // Get the rules from the list and downcast it to proper type GrammarSymbol sym = (GrammarSymbol)rules.nextElement(); // Only process tree-walker rules if (sym instanceof RuleSymbol) { genRule((RuleSymbol)sym); } } tabs--; println(""); println("*** End of tree-walker rules"); println(""); println("*** End of tree-walker"); // Close the tree-walker output stream currentOutput.close(); currentOutput = null; } /** Generate a wildcard element */ public void gen(WildcardElement wc) { print("Match wildcard"); if (wc.getLabel() != null) { _print(", label = " + wc.getLabel()); } _println(""); } /** Generate code for the given grammar element. 
* @param blk The (...)* block to generate */ public void gen(ZeroOrMoreBlock blk) { println("Start ZERO-OR-MORE (...)+ block:"); tabs++; genBlockPreamble(blk); boolean ok = grammar.theLLkAnalyzer.deterministic(blk); if (!ok) { println("Warning: This zero-or-more block is non-deterministic"); } genCommonBlock(blk); tabs--; println("End ZERO-OR-MORE block."); } protected void genAlt(Alternative alt) { for ( AlternativeElement elem = alt.head; !(elem instanceof BlockEndElement); elem = elem.next ) { elem.generate(); } if (alt.getTreeSpecifier() != null) { println("AST will be built as: " + alt.getTreeSpecifier().getText()); } } /** Generate the header for a block, which may be a RuleBlock or a * plain AlternativeBLock. This generates any variable declarations, * init-actions, and syntactic-predicate-testing variables. * @blk The block for which the preamble is to be generated. */ protected void genBlockPreamble(AlternativeBlock blk) { // dump out init action if (blk.initAction != null) { printAction("Init action: " + blk.initAction); } } /**Generate common code for a block of alternatives; return a postscript * that needs to be generated at the end of the block. Other routines * may append else-clauses and such for error checking before the postfix * is generated. 
*/ public void genCommonBlock(AlternativeBlock blk) { boolean singleAlt = (blk.alternatives.size() == 1); println("Start of an alternative block."); tabs++; println("The lookahead set for this block is:"); tabs++; genLookaheadSetForBlock(blk); tabs--; if (singleAlt) { println("This block has a single alternative"); if (blk.getAlternativeAt(0).synPred != null) { // Generate a warning if there is one alt and it has a synPred println("Warning: you specified a syntactic predicate for this alternative,"); println("and it is the only alternative of a block and will be ignored."); } } else { println("This block has multiple alternatives:"); tabs++; } for (int i = 0; i < blk.alternatives.size(); i++) { Alternative alt = blk.getAlternativeAt(i); AlternativeElement elem = alt.head; // Print lookahead set for alternate println(""); if (i != 0) { print("Otherwise, "); } else { print(""); } _println("Alternate(" + (i + 1) + ") will be taken IF:"); println("The lookahead set: "); tabs++; genLookaheadSetForAlt(alt); tabs--; if (alt.semPred != null || alt.synPred != null) { print("is matched, AND "); } else { println("is matched."); } // Dump semantic predicates if (alt.semPred != null) { _println("the semantic predicate:"); tabs++; println(alt.semPred); if (alt.synPred != null) { print("is true, AND "); } else { println("is true."); } } // Dump syntactic predicate if (alt.synPred != null) { _println("the syntactic predicate:"); tabs++; genSynPred(alt.synPred); tabs--; println("is matched."); } // Dump the alternative genAlt(alt); } println(""); println("OTHERWISE, a NoViableAlt exception will be thrown"); println(""); if (!singleAlt) { tabs--; println("End of alternatives"); } tabs--; println("End of alternative block."); } /** Generate a textual representation of the follow set * for a block. 
* @param blk The rule block of interest */ public void genFollowSetForRuleBlock(RuleBlock blk) { Lookahead follow = grammar.theLLkAnalyzer.FOLLOW(1, blk.endNode); printSet(grammar.maxk, 1, follow); } /** Generate a header that is common to all TXT files */ protected void genHeader() { println("ANTLR-generated file resulting from grammar " + antlrTool.grammarFile); println("Diagnostic output"); println(""); println("Terence Parr, MageLang Institute"); println("with John Lilley, Empathy Software"); println("ANTLR Version " + antlrTool.version + "; 1989-2005"); println(""); println("*** Header Action."); println("This action will appear at the top of all generated files."); tabs++; printAction(behavior.getHeaderAction("")); tabs--; println("*** End of Header Action"); println(""); } /**Generate the lookahead set for an alternate. */ protected void genLookaheadSetForAlt(Alternative alt) { if (doingLexRules && alt.cache[1].containsEpsilon()) { println("MATCHES ALL"); return; } int depth = alt.lookaheadDepth; if (depth == GrammarAnalyzer.NONDETERMINISTIC) { // if the decision is nondeterministic, do the best we can: LL(k) // any predicates that are around will be generated later. depth = grammar.maxk; } for (int i = 1; i <= depth; i++) { Lookahead lookahead = alt.cache[i]; printSet(depth, i, lookahead); } } /** Generate a textual representation of the lookahead set * for a block. * @param blk The block of interest */ public void genLookaheadSetForBlock(AlternativeBlock blk) { // Find the maximal lookahead depth over all alternatives int depth = 0; for (int i = 0; i < blk.alternatives.size(); i++) { Alternative alt = blk.getAlternativeAt(i); if (alt.lookaheadDepth == GrammarAnalyzer.NONDETERMINISTIC) { depth = grammar.maxk; break; } else if (depth < alt.lookaheadDepth) { depth = alt.lookaheadDepth; } } for (int i = 1; i <= depth; i++) { Lookahead lookahead = grammar.theLLkAnalyzer.look(i, blk); printSet(depth, i, lookahead); } } /** Generate the nextToken rule. 
* nextToken is a synthetic lexer rule that is the implicit OR of all * user-defined lexer rules. */ public void genNextToken() { println(""); println("*** Lexer nextToken rule:"); println("The lexer nextToken rule is synthesized from all of the user-defined"); println("lexer rules. It logically consists of one big alternative block with"); println("each user-defined rule being an alternative."); println(""); // Create the synthesized rule block for nextToken consisting // of an alternate block containing all the user-defined lexer rules. RuleBlock blk = MakeGrammar.createNextTokenRule(grammar, grammar.rules, "nextToken"); // Define the nextToken rule symbol RuleSymbol nextTokenRs = new RuleSymbol("mnextToken"); nextTokenRs.setDefined(); nextTokenRs.setBlock(blk); nextTokenRs.access = "private"; grammar.define(nextTokenRs); // Analyze the synthesized block if (!grammar.theLLkAnalyzer.deterministic(blk)) { println("The grammar analyzer has determined that the synthesized"); println("nextToken rule is non-deterministic (i.e., it has ambiguities)"); println("This means that there is some overlap of the character"); println("lookahead for two or more of your lexer rules."); } genCommonBlock(blk); println("*** End of nextToken lexer rule."); } /** Generate code for a named rule block * @param s The RuleSymbol describing the rule to generate */ public void genRule(RuleSymbol s) { println(""); String ruleType = (doingLexRules ? 
"Lexer" : "Parser"); println("*** " + ruleType + " Rule: " + s.getId()); if (!s.isDefined()) { println("This rule is undefined."); println("This means that the rule was referenced somewhere in the grammar,"); println("but a definition for the rule was not encountered."); println("It is also possible that syntax errors during the parse of"); println("your grammar file prevented correct processing of the rule."); println("*** End " + ruleType + " Rule: " + s.getId()); return; } tabs++; if (s.access.length() != 0) { println("Access: " + s.access); } // Get rule return type and arguments RuleBlock rblk = s.getBlock(); // Gen method return value(s) if (rblk.returnAction != null) { println("Return value(s): " + rblk.returnAction); if (doingLexRules) { println("Error: you specified return value(s) for a lexical rule."); println("\tLexical rules have an implicit return type of 'int'."); } } else { if (doingLexRules) { println("Return value: lexical rule returns an implicit token type"); } else { println("Return value: none"); } } // Gen arguments if (rblk.argAction != null) { println("Arguments: " + rblk.argAction); } // Dump any init-action genBlockPreamble(rblk); // Analyze the rule boolean ok = grammar.theLLkAnalyzer.deterministic(rblk); if (!ok) { println("Error: This rule is non-deterministic"); } // Dump the alternates of the rule genCommonBlock(rblk); // Search for an unlabeled exception specification attached to the rule ExceptionSpec unlabeledUserSpec = rblk.findExceptionSpec(""); // Generate user-defined or default catch phrases if (unlabeledUserSpec != null) { println("You specified error-handler(s) for this rule:"); tabs++; for (int i = 0; i < unlabeledUserSpec.handlers.size(); i++) { if (i != 0) { println(""); } ExceptionHandler handler = (ExceptionHandler)unlabeledUserSpec.handlers.elementAt(i); println("Error-handler(" + (i + 1) + ") catches [" + handler.exceptionTypeAndName.getText() + "] and executes:"); printAction(handler.action.getText()); } tabs--; 
println("End error-handlers."); } else if (!doingLexRules) { println("Default error-handling will be generated, which catches all"); println("parser exceptions and consumes tokens until the follow-set is seen."); } // Dump the follow set // Doesn't seem to work for lexical rules... if (!doingLexRules) { println("The follow set for this rule is:"); tabs++; genFollowSetForRuleBlock(rblk); tabs--; } tabs--; println("*** End " + ruleType + " Rule: " + s.getId()); } /** Generate the syntactic predicate. This basically generates * the alternative block, buts tracks if we are inside a synPred * @param blk The syntactic predicate block */ protected void genSynPred(SynPredBlock blk) { syntacticPredLevel++; gen((AlternativeBlock)blk); syntacticPredLevel--; } /** Generate the token types TXT file */ protected void genTokenTypes(TokenManager tm) throws IOException { // Open the token output TXT file and set the currentOutput stream antlrTool.reportProgress("Generating " + tm.getName() + TokenTypesFileSuffix + TokenTypesFileExt); currentOutput = antlrTool.openOutputFile(tm.getName() + TokenTypesFileSuffix + TokenTypesFileExt); //SAS: changed for proper text file io tabs = 0; // Generate the header common to all diagnostic files genHeader(); // Generate a string for each token. This creates a static // array of Strings indexed by token type. println(""); println("*** Tokens used by the parser"); println("This is a list of the token numeric values and the corresponding"); println("token identifiers. Some tokens are literals, and because of that"); println("they have no identifiers. 
Literals are double-quoted."); tabs++; // Enumerate all the valid token types Vector v = tm.getVocabulary(); for (int i = Token.MIN_USER_TYPE; i < v.size(); i++) { String s = (String)v.elementAt(i); if (s != null) { println(s + " = " + i); } } // Close the interface tabs--; println("*** End of tokens used by the parser"); // Close the tokens output file currentOutput.close(); currentOutput = null; } /** Get a string for an expression to generate creation of an AST subtree. * @param v A Vector of String, where each element is an expression in the target language yielding an AST node. */ public String getASTCreateString(Vector v) { return "***Create an AST from a vector here***" + System.getProperty("line.separator"); } /** Get a string for an expression to generate creating of an AST node * @param str The arguments to the AST constructor */ public String getASTCreateString(GrammarAtom atom, String str) { return "[" + str + "]"; } /// unused. protected String processActionForSpecialSymbols(String actionStr, int line, RuleBlock currentRule, ActionTransInfo tInfo) { return actionStr; } /** Map an identifier to it's corresponding tree-node variable. * This is context-sensitive, depending on the rule and alternative * being generated * @param id The identifier name to map * @param forInput true if the input tree node variable is to be returned, otherwise the output variable is returned. */ public String mapTreeId(String id, ActionTransInfo tInfo) { return id; } /** Format a lookahead or follow set. 
* @param depth The depth of the entire lookahead/follow * @param k The lookahead level to print * @param lookahead The lookahead/follow set to print */ public void printSet(int depth, int k, Lookahead lookahead) { int numCols = 5; int[] elems = lookahead.fset.toArray(); if (depth != 1) { print("k==" + k + ": {"); } else { print("{ "); } if (elems.length > numCols) { _println(""); tabs++; print(""); } int column = 0; for (int i = 0; i < elems.length; i++) { column++; if (column > numCols) { _println(""); print(""); column = 0; } if (doingLexRules) { _print(charFormatter.literalChar(elems[i])); } else { _print((String)grammar.tokenManager.getVocabulary().elementAt(elems[i])); } if (i != elems.length - 1) { _print(", "); } } if (elems.length > numCols) { _println(""); tabs--; print(""); } _println(" }"); } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/DocBookCodeGenerator.java000066400000000000000000000650471161462365500257570ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id:$ */ /** TODO: strip comments from javadoc entries */ import java.util.Enumeration; import antlr.collections.impl.BitSet; import antlr.collections.impl.Vector; import java.io.PrintWriter; //SAS: changed for proper text file io import java.io.IOException; import java.io.FileWriter; /**Generate P.sgml, a cross-linked representation of P with or without actions */ public class DocBookCodeGenerator extends CodeGenerator { /** non-zero if inside syntactic predicate generation */ protected int syntacticPredLevel = 0; /** true during lexer generation, false during parser generation */ protected boolean doingLexRules = false; protected boolean firstElementInAlt; protected AlternativeElement prevAltElem = null; // what was generated last? 
/** Create a Diagnostic code-generator using the given Grammar * The caller must still call setTool, setBehavior, and setAnalyzer * before generating code. */ public DocBookCodeGenerator() { super(); charFormatter = new JavaCharFormatter(); } /** Encode a string for printing in a HTML document.. * e.g. encode '<' '>' and similar stuff * @param s the string to encode */ static String HTMLEncode(String s) { StringBuffer buf = new StringBuffer(); for (int i = 0, len = s.length(); i < len; i++) { char c = s.charAt(i); if (c == '&') buf.append("&"); else if (c == '\"') buf.append("""); else if (c == '\'') buf.append("'"); else if (c == '<') buf.append("<"); else if (c == '>') buf.append(">"); else buf.append(c); } return buf.toString(); } /** Encode a string for printing in a HTML document.. * e.g. encode '<' '>' and similar stuff * @param s the string to encode */ static String QuoteForId(String s) { StringBuffer buf = new StringBuffer(); for (int i = 0, len = s.length(); i < len; i++) { char c = s.charAt(i); if (c == '_') buf.append("."); else buf.append(c); } return buf.toString(); } public void gen() { // Do the code generation try { // Loop over all grammars Enumeration grammarIter = behavior.grammars.elements(); while (grammarIter.hasMoreElements()) { Grammar g = (Grammar)grammarIter.nextElement(); // Connect all the components to each other /* g.setGrammarAnalyzer(analyzer); analyzer.setGrammar(g); */ g.setCodeGenerator(this); // To get right overloading behavior across hetrogeneous grammars g.generate(); if (antlrTool.hasError()) { antlrTool.fatalError("Exiting due to errors."); } } } catch (IOException e) { antlrTool.reportException(e, null); } } /** Generate code for the given grammar element. * @param blk The {...} action to generate */ public void gen(ActionElement action) { // no-op } /** Generate code for the given grammar element. * @param blk The "x|y|z|..." 
block to generate */ public void gen(AlternativeBlock blk) { genGenericBlock(blk, ""); } /** Generate code for the given grammar element. * @param blk The block-end element to generate. Block-end * elements are synthesized by the grammar parser to represent * the end of a block. */ public void gen(BlockEndElement end) { // no-op } /** Generate code for the given grammar element. * @param blk The character literal reference to generate */ public void gen(CharLiteralElement atom) { if (atom.not) { _print("~"); } _print(HTMLEncode(atom.atomText) + " "); } /** Generate code for the given grammar element. * @param blk The character-range reference to generate */ public void gen(CharRangeElement r) { print(r.beginText + ".." + r.endText + " "); } /** Generate the lexer HTML file */ public void gen(LexerGrammar g) throws IOException { setGrammar(g); antlrTool.reportProgress("Generating " + grammar.getClassName() + ".sgml"); currentOutput = antlrTool.openOutputFile(grammar.getClassName() + ".sgml"); tabs = 0; doingLexRules = true; // Generate header common to all TXT output files genHeader(); // Output the user-defined lexer premamble // RK: guess not.. 
// println(grammar.preambleAction.getText()); // Generate lexer class definition println(""); // print javadoc comment if any if (grammar.comment != null) { _println(HTMLEncode(grammar.comment)); } println("Definition of lexer " + grammar.getClassName() + ", which is a subclass of " + grammar.getSuperClass() + "."); // Generate user-defined parser class members // printAction(grammar.classMemberAction.getText()); /* // Generate string literals println(""); println("*** String literals used in the parser"); println("The following string literals were used in the parser."); println("An actual code generator would arrange to place these literals"); println("into a table in the generated lexer, so that actions in the"); println("generated lexer could match token text against the literals."); println("String literals used in the lexer are not listed here, as they"); println("are incorporated into the mainstream lexer processing."); tabs++; // Enumerate all of the symbols and look for string literal symbols Enumeration ids = grammar.getSymbols(); while ( ids.hasMoreElements() ) { GrammarSymbol sym = (GrammarSymbol)ids.nextElement(); // Only processing string literals -- reject other symbol entries if ( sym instanceof StringLiteralSymbol ) { StringLiteralSymbol s = (StringLiteralSymbol)sym; println(s.getId() + " = " + s.getTokenType()); } } tabs--; println("*** End of string literals used by the parser"); */ // Generate nextToken() rule. // nextToken() is a synthetic lexer rule that is the implicit OR of all // user-defined lexer rules. genNextToken(); // Generate code for each rule in the lexer Enumeration ids = grammar.rules.elements(); while (ids.hasMoreElements()) { RuleSymbol rs = (RuleSymbol)ids.nextElement(); if (!rs.id.equals("mnextToken")) { genRule(rs); } } // Close the lexer output file currentOutput.close(); currentOutput = null; doingLexRules = false; } /** Generate code for the given grammar element. 
* @param blk The (...)+ block to generate */ public void gen(OneOrMoreBlock blk) { genGenericBlock(blk, "+"); } /** Generate the parser HTML file */ public void gen(ParserGrammar g) throws IOException { setGrammar(g); // Open the output stream for the parser and set the currentOutput antlrTool.reportProgress("Generating " + grammar.getClassName() + ".sgml"); currentOutput = antlrTool.openOutputFile(grammar.getClassName() + ".sgml"); tabs = 0; // Generate the header common to all output files. genHeader(); // Generate parser class definition println(""); // print javadoc comment if any if (grammar.comment != null) { _println(HTMLEncode(grammar.comment)); } println("Definition of parser " + grammar.getClassName() + ", which is a subclass of " + grammar.getSuperClass() + "."); // Enumerate the parser rules Enumeration rules = grammar.rules.elements(); while (rules.hasMoreElements()) { println(""); // Get the rules from the list and downcast it to proper type GrammarSymbol sym = (GrammarSymbol)rules.nextElement(); // Only process parser rules if (sym instanceof RuleSymbol) { genRule((RuleSymbol)sym); } } tabs--; println(""); genTail(); // Close the parser output stream currentOutput.close(); currentOutput = null; } /** Generate code for the given grammar element. * @param blk The rule-reference to generate */ public void gen(RuleRefElement rr) { RuleSymbol rs = (RuleSymbol)grammar.getSymbol(rr.targetRule); // Generate the actual rule description _print(""); _print(rr.targetRule); _print(""); // RK: Leave out args.. // if (rr.args != null) { // _print("["+rr.args+"]"); // } _print(" "); } /** Generate code for the given grammar element. * @param blk The string-literal reference to generate */ public void gen(StringLiteralElement atom) { if (atom.not) { _print("~"); } _print(HTMLEncode(atom.atomText)); _print(" "); } /** Generate code for the given grammar element. 
* @param blk The token-range reference to generate */ public void gen(TokenRangeElement r) { print(r.beginText + ".." + r.endText + " "); } /** Generate code for the given grammar element. * @param blk The token-reference to generate */ public void gen(TokenRefElement atom) { if (atom.not) { _print("~"); } _print(atom.atomText); _print(" "); } public void gen(TreeElement t) { print(t + " "); } /** Generate the tree-walker TXT file */ public void gen(TreeWalkerGrammar g) throws IOException { setGrammar(g); // Open the output stream for the parser and set the currentOutput antlrTool.reportProgress("Generating " + grammar.getClassName() + ".sgml"); currentOutput = antlrTool.openOutputFile(grammar.getClassName() + ".sgml"); //SAS: changed for proper text file io tabs = 0; // Generate the header common to all output files. genHeader(); // Output the user-defined parser premamble println(""); // println("*** Tree-walker Preamble Action."); // println("This action will appear before the declaration of your tree-walker class:"); // tabs++; // println(grammar.preambleAction.getText()); // tabs--; // println("*** End of tree-walker Preamble Action"); // Generate tree-walker class definition println(""); // print javadoc comment if any if (grammar.comment != null) { _println(HTMLEncode(grammar.comment)); } println("Definition of tree parser " + grammar.getClassName() + ", which is a subclass of " + grammar.getSuperClass() + "."); // Generate user-defined tree-walker class members // println(""); // println("*** User-defined tree-walker class members:"); // println("These are the member declarations that you defined for your class:"); // tabs++; // printAction(grammar.classMemberAction.getText()); // tabs--; // println("*** End of user-defined tree-walker class members"); // Generate code for each rule in the grammar println(""); // println("*** tree-walker rules:"); tabs++; // Enumerate the tree-walker rules Enumeration rules = grammar.rules.elements(); while 
(rules.hasMoreElements()) { println(""); // Get the rules from the list and downcast it to proper type GrammarSymbol sym = (GrammarSymbol)rules.nextElement(); // Only process tree-walker rules if (sym instanceof RuleSymbol) { genRule((RuleSymbol)sym); } } tabs--; println(""); // println("*** End of tree-walker rules"); // println(""); // println("*** End of tree-walker"); // Close the tree-walker output stream currentOutput.close(); currentOutput = null; } /** Generate a wildcard element */ public void gen(WildcardElement wc) { /* if ( wc.getLabel()!=null ) { _print(wc.getLabel()+"="); } */ _print(". "); } /** Generate code for the given grammar element. * @param blk The (...)* block to generate */ public void gen(ZeroOrMoreBlock blk) { genGenericBlock(blk, "*"); } protected void genAlt(Alternative alt) { if (alt.getTreeSpecifier() != null) { _print(alt.getTreeSpecifier().getText()); } prevAltElem = null; for (AlternativeElement elem = alt.head; !(elem instanceof BlockEndElement); elem = elem.next) { elem.generate(); firstElementInAlt = false; prevAltElem = elem; } } /** Generate the header for a block, which may be a RuleBlock or a * plain AlternativeBLock. This generates any variable declarations, * init-actions, and syntactic-predicate-testing variables. * @blk The block for which the preamble is to be generated. */ // protected void genBlockPreamble(AlternativeBlock blk) { // RK: don't dump out init actions // dump out init action // if ( blk.initAction!=null ) { // printAction("{" + blk.initAction + "}"); // } // } /** Generate common code for a block of alternatives; return a postscript * that needs to be generated at the end of the block. Other routines * may append else-clauses and such for error checking before the postfix * is generated. 
*/ public void genCommonBlock(AlternativeBlock blk) { if (blk.alternatives.size() > 1) println(""); for (int i = 0; i < blk.alternatives.size(); i++) { Alternative alt = blk.getAlternativeAt(i); AlternativeElement elem = alt.head; if (blk.alternatives.size() > 1) print(""); // dump alt operator | if (i > 0 && blk.alternatives.size() > 1) { _print("| "); } // Dump the alternative, starting with predicates // boolean save = firstElementInAlt; firstElementInAlt = true; tabs++; // in case we do a newline in alt, increase the tab indent genAlt(alt); tabs--; firstElementInAlt = save; if (blk.alternatives.size() > 1) _println(""); } if (blk.alternatives.size() > 1) println(""); } /** Generate a textual representation of the follow set * for a block. * @param blk The rule block of interest */ public void genFollowSetForRuleBlock(RuleBlock blk) { Lookahead follow = grammar.theLLkAnalyzer.FOLLOW(1, blk.endNode); printSet(grammar.maxk, 1, follow); } protected void genGenericBlock(AlternativeBlock blk, String blkOp) { if (blk.alternatives.size() > 1) { // make sure we start on a new line _println(""); if (!firstElementInAlt) { // only do newline if the last element wasn't a multi-line block //if ( prevAltElem==null || // !(prevAltElem instanceof AlternativeBlock) || // ((AlternativeBlock)prevAltElem).alternatives.size()==1 ) //{ _println("("); //} //else //{ // _print("("); //} // _println(""); // print("(\t"); } else { _print("("); } } else { _print("( "); } // RK: don't dump init actions // genBlockPreamble(blk); genCommonBlock(blk); if (blk.alternatives.size() > 1) { _println(""); print(")" + blkOp + " "); // if not last element of alt, need newline & to indent if (!(blk.next instanceof BlockEndElement)) { _println(""); print(""); } } else { _print(")" + blkOp + " "); } } /** Generate a header that is common to all TXT files */ protected void genHeader() { println(""); println(""); println(""); println(""); println("Grammar " + grammar.getClassName() + ""); println(" "); 
println(" "); println(" "); println(" "); println(" "); println("

"); println(" "); println("
"); println(" "); println(" "); println(" "); println(" "); println(" Generated by ANTLR" + antlrTool.version); println(" from " + antlrTool.grammarFile); println(" "); println(" "); println(" "); println(" "); println(" "); println(" "); println(" "); println(""); println(""); println(""); } /**Generate the lookahead set for an alternate. */ protected void genLookaheadSetForAlt(Alternative alt) { if (doingLexRules && alt.cache[1].containsEpsilon()) { println("MATCHES ALL"); return; } int depth = alt.lookaheadDepth; if (depth == GrammarAnalyzer.NONDETERMINISTIC) { // if the decision is nondeterministic, do the best we can: LL(k) // any predicates that are around will be generated later. depth = grammar.maxk; } for (int i = 1; i <= depth; i++) { Lookahead lookahead = alt.cache[i]; printSet(depth, i, lookahead); } } /** Generate a textual representation of the lookahead set * for a block. * @param blk The block of interest */ public void genLookaheadSetForBlock(AlternativeBlock blk) { // Find the maximal lookahead depth over all alternatives int depth = 0; for (int i = 0; i < blk.alternatives.size(); i++) { Alternative alt = blk.getAlternativeAt(i); if (alt.lookaheadDepth == GrammarAnalyzer.NONDETERMINISTIC) { depth = grammar.maxk; break; } else if (depth < alt.lookaheadDepth) { depth = alt.lookaheadDepth; } } for (int i = 1; i <= depth; i++) { Lookahead lookahead = grammar.theLLkAnalyzer.look(i, blk); printSet(depth, i, lookahead); } } /** Generate the nextToken rule. * nextToken is a synthetic lexer rule that is the implicit OR of all * user-defined lexer rules. */ public void genNextToken() { println(""); println("/** Lexer nextToken rule:"); println(" * The lexer nextToken rule is synthesized from all of the user-defined"); println(" * lexer rules. 
It logically consists of one big alternative block with"); println(" * each user-defined rule being an alternative."); println(" */"); // Create the synthesized rule block for nextToken consisting // of an alternate block containing all the user-defined lexer rules. RuleBlock blk = MakeGrammar.createNextTokenRule(grammar, grammar.rules, "nextToken"); // Define the nextToken rule symbol RuleSymbol nextTokenRs = new RuleSymbol("mnextToken"); nextTokenRs.setDefined(); nextTokenRs.setBlock(blk); nextTokenRs.access = "private"; grammar.define(nextTokenRs); /* // Analyze the synthesized block if (!grammar.theLLkAnalyzer.deterministic(blk)) { println("The grammar analyzer has determined that the synthesized"); println("nextToken rule is non-deterministic (i.e., it has ambiguities)"); println("This means that there is some overlap of the character"); println("lookahead for two or more of your lexer rules."); } */ genCommonBlock(blk); } /** Generate code for a named rule block * @param s The RuleSymbol describing the rule to generate */ public void genRule(RuleSymbol s) { if (s == null || !s.isDefined()) return; // undefined rule println(""); if (s.access.length() != 0) { if (!s.access.equals("public")) { _print("" + s.access + " "); } } println("
"); println("" + s.getId() + ""); if (s.comment != null) { _println("" + HTMLEncode(s.comment) + ""); } println(""); // Get rule return type and arguments RuleBlock rblk = s.getBlock(); // RK: for HTML output not of much value... // Gen method return value(s) // if (rblk.returnAction != null) { // _print("["+rblk.returnAction+"]"); // } // Gen arguments // if (rblk.argAction != null) // { // _print(" returns [" + rblk.argAction+"]"); // } _println(""); print(s.getId() + ":\t"); tabs++; // Dump any init-action // genBlockPreamble(rblk); // Dump the alternates of the rule genCommonBlock(rblk); _println(""); // println(";"); tabs--; _println(""); _println("
"); } /** Generate the syntactic predicate. This basically generates * the alternative block, buts tracks if we are inside a synPred * @param blk The syntactic predicate block */ protected void genSynPred(SynPredBlock blk) { // no op } public void genTail() { println("
"); println(""); } /** Generate the token types TXT file */ protected void genTokenTypes(TokenManager tm) throws IOException { // Open the token output TXT file and set the currentOutput stream antlrTool.reportProgress("Generating " + tm.getName() + TokenTypesFileSuffix + TokenTypesFileExt); currentOutput = antlrTool.openOutputFile(tm.getName() + TokenTypesFileSuffix + TokenTypesFileExt); //SAS: changed for proper text file io tabs = 0; // Generate the header common to all diagnostic files genHeader(); // Generate a string for each token. This creates a static // array of Strings indexed by token type. println(""); println("*** Tokens used by the parser"); println("This is a list of the token numeric values and the corresponding"); println("token identifiers. Some tokens are literals, and because of that"); println("they have no identifiers. Literals are double-quoted."); tabs++; // Enumerate all the valid token types Vector v = tm.getVocabulary(); for (int i = Token.MIN_USER_TYPE; i < v.size(); i++) { String s = (String)v.elementAt(i); if (s != null) { println(s + " = " + i); } } // Close the interface tabs--; println("*** End of tokens used by the parser"); // Close the tokens output file currentOutput.close(); currentOutput = null; } /// unused. protected String processActionForSpecialSymbols(String actionStr, int line, RuleBlock currentRule, ActionTransInfo tInfo) { return actionStr; } /** Get a string for an expression to generate creation of an AST subtree. * @param v A Vector of String, where each element is an expression in the target language yielding an AST node. */ public String getASTCreateString(Vector v) { return null; } /** Get a string for an expression to generate creating of an AST node * @param str The arguments to the AST constructor */ public String getASTCreateString(GrammarAtom atom, String str) { return null; } /** Map an identifier to it's corresponding tree-node variable. 
* This is context-sensitive, depending on the rule and alternative * being generated * @param id The identifier name to map * @param forInput true if the input tree node variable is to be returned, otherwise the output variable is returned. */ public String mapTreeId(String id, ActionTransInfo tInfo) { return id; } /** Format a lookahead or follow set. * @param depth The depth of the entire lookahead/follow * @param k The lookahead level to print * @param lookahead The lookahead/follow set to print */ public void printSet(int depth, int k, Lookahead lookahead) { int numCols = 5; int[] elems = lookahead.fset.toArray(); if (depth != 1) { print("k==" + k + ": {"); } else { print("{ "); } if (elems.length > numCols) { _println(""); tabs++; print(""); } int column = 0; for (int i = 0; i < elems.length; i++) { column++; if (column > numCols) { _println(""); print(""); column = 0; } if (doingLexRules) { _print(charFormatter.literalChar(elems[i])); } else { _print((String)grammar.tokenManager.getVocabulary().elementAt(elems[i])); } if (i != elems.length - 1) { _print(", "); } } if (elems.length > numCols) { _println(""); tabs--; print(""); } _println(" }"); } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/DumpASTVisitor.java000066400000000000000000000033061161462365500246200ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/DumpASTVisitor.java#1 $ */ import java.io.*; import antlr.collections.AST; /** Simple class to dump the contents of an AST to the output */ public class DumpASTVisitor implements ASTVisitor { protected int level = 0; private void tabs() { for (int i = 0; i < level; i++) { System.out.print(" "); } } public void visit(AST node) { // Flatten this level of the tree if it has no children boolean flatten = /*true*/ false; AST node2; for (node2 = node; node2 != 
null; node2 = node2.getNextSibling()) { if (node2.getFirstChild() != null) { flatten = false; break; } } for (node2 = node; node2 != null; node2 = node2.getNextSibling()) { if (!flatten || node2 == node) { tabs(); } if (node2.getText() == null) { System.out.print("nil"); } else { System.out.print(node2.getText()); } System.out.print(" [" + node2.getType() + "] "); if (flatten) { System.out.print(" "); } else { System.out.println(""); } if (node2.getFirstChild() != null) { level++; visit(node2.getFirstChild()); level--; } } if (flatten) { System.out.println(""); } } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/ExceptionHandler.java000066400000000000000000000012331161462365500252140ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/ExceptionHandler.java#1 $ */ class ExceptionHandler { // Type of the ANTLR exception class to catch and the variable decl protected Token exceptionTypeAndName; // The action to be executed when the exception is caught protected Token action; public ExceptionHandler(Token exceptionTypeAndName_, Token action_) { exceptionTypeAndName = exceptionTypeAndName_; action = action_; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/ExceptionSpec.java000066400000000000000000000013421161462365500245320ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/ExceptionSpec.java#1 $ */ import antlr.collections.impl.Vector; class ExceptionSpec { // Non-null if this refers to a labeled rule // Use a token instead of a string to get the line information protected Token label; // List of ExceptionHandler (catch phrases) protected Vector handlers; public ExceptionSpec(Token 
label_) { label = label_; handlers = new Vector(); } public void addHandler(ExceptionHandler handler) { handlers.appendElement(handler); } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/FileCopyException.java000066400000000000000000000005641161462365500253570ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/FileCopyException.java#1 $ */ class FileCopyException extends java.io.IOException { public FileCopyException(String msg) { super(msg); } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/FileLineFormatter.java000066400000000000000000000014311161462365500253330ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/FileLineFormatter.java#1 $ */ public abstract class FileLineFormatter { private static FileLineFormatter formatter = new DefaultFileLineFormatter(); public static FileLineFormatter getFormatter() { return formatter; } public static void setFormatter(FileLineFormatter f) { formatter = f; } /** @param fileName the file that should appear in the prefix. 
(or null) * @param line the line (or -1) * @param column the column (or -1) */ public abstract String getFormatString(String fileName, int line, int column); } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/Grammar.java000066400000000000000000000226171161462365500233570ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/Grammar.java#1 $ */ import java.util.Hashtable; import java.util.Enumeration; import java.io.IOException; import antlr.collections.impl.BitSet; import antlr.collections.impl.Vector; /**A Grammar holds a set of rules (which are stored * in a symbol table). Most of the time a grammar * needs a code generator and an LLkAnalyzer too. */ public abstract class Grammar { protected Tool antlrTool; protected CodeGenerator generator; protected LLkGrammarAnalyzer theLLkAnalyzer; protected Hashtable symbols; protected boolean buildAST = false; protected boolean analyzerDebug = false; protected boolean interactive = false; protected String superClass = null; /** The token manager associated with the grammar, if any. // The token manager is responsible for maintaining the set of valid tokens, and // is conceptually shared between the lexer and parser. This may be either a // LexerGrammar or a ImportVocabTokenManager. */ protected TokenManager tokenManager; /** The name of the export vocabulary...used to generate the output * token types interchange file. */ protected String exportVocab = null; /** The name of the import vocabulary. 
"Initial conditions" */ protected String importVocab = null; // Mapping from String keys to Token option values protected Hashtable options; // Vector of RuleSymbol entries protected Vector rules; protected Token preambleAction = new CommonToken(Token.INVALID_TYPE, ""); protected String className = null; protected String fileName = null; protected Token classMemberAction = new CommonToken(Token.INVALID_TYPE, ""); protected boolean hasSyntacticPredicate = false; protected boolean hasUserErrorHandling = false; // max lookahead that can be attempted for this parser. protected int maxk = 1; // options protected boolean traceRules = false; protected boolean debuggingOutput = false; protected boolean defaultErrorHandler = true; protected String comment = null; // javadoc comment public Grammar(String className_, Tool tool_, String superClass) { className = className_; antlrTool = tool_; symbols = new Hashtable(); options = new Hashtable(); rules = new Vector(100); this.superClass = superClass; } /** Define a rule */ public void define(RuleSymbol rs) { rules.appendElement(rs); // add the symbol to the rules hash table symbols.put(rs.getId(), rs); } /** Top-level call to generate the code for this grammar */ public abstract void generate() throws IOException; protected String getClassName() { return className; } /* Does this grammar have a default error handler? */ public boolean getDefaultErrorHandler() { return defaultErrorHandler; } public String getFilename() { return fileName; } /** Get an integer option. Given the name of the option find its * associated integer value. If the associated value is not an integer or * is not in the table, then throw an exception of type NumberFormatException. * @param key The name of the option * @return The value associated with the key. 
*/ public int getIntegerOption(String key) throws NumberFormatException { Token t = (Token)options.get(key); if (t == null || t.getType() != ANTLRTokenTypes.INT) { throw new NumberFormatException(); } else { return Integer.parseInt(t.getText()); } } /** Get an option. Given the name of the option find its associated value. * @param key The name of the option * @return The value associated with the key, or null if the key has not been set. */ public Token getOption(String key) { return (Token)options.get(key); } // Get name of class from which generated parser/lexer inherits protected abstract String getSuperClass(); public GrammarSymbol getSymbol(String s) { return (GrammarSymbol)symbols.get(s); } public Enumeration getSymbols() { return symbols.elements(); } /** Check the existence of an option in the table * @param key The name of the option * @return true if the option is in the table */ public boolean hasOption(String key) { return options.containsKey(key); } /** Is a rule symbol defined? (not used for tokens) */ public boolean isDefined(String s) { return symbols.containsKey(s); } /**Process command line arguments. Implemented in subclasses */ public abstract void processArguments(String[] args); public void setCodeGenerator(CodeGenerator gen) { generator = gen; } public void setFilename(String s) { fileName = s; } public void setGrammarAnalyzer(LLkGrammarAnalyzer a) { theLLkAnalyzer = a; } /** Set a generic option. * This associates a generic option key with a Token value. * No validation is performed by this method, although users of the value * (code generation and/or analysis) may require certain formats. * The value is stored as a token so that the location of an error * can be reported. * @param key The name of the option. * @param value The value to associate with the key. 
* @return true if the option was a valid generic grammar option, false o/w */ public boolean setOption(String key, Token value) { options.put(key, value); String s = value.getText(); int i; if (key.equals("k")) { try { maxk = getIntegerOption("k"); if ( maxk<=0 ) { antlrTool.error("option 'k' must be greater than 0 (was " + value.getText() + ")", getFilename(), value.getLine(), value.getColumn()); maxk = 1; } } catch (NumberFormatException e) { antlrTool.error("option 'k' must be an integer (was " + value.getText() + ")", getFilename(), value.getLine(), value.getColumn()); } return true; } if (key.equals("codeGenMakeSwitchThreshold")) { try { i = getIntegerOption("codeGenMakeSwitchThreshold"); } catch (NumberFormatException e) { antlrTool.error("option 'codeGenMakeSwitchThreshold' must be an integer", getFilename(), value.getLine(), value.getColumn()); } return true; } if (key.equals("codeGenBitsetTestThreshold")) { try { i = getIntegerOption("codeGenBitsetTestThreshold"); } catch (NumberFormatException e) { antlrTool.error("option 'codeGenBitsetTestThreshold' must be an integer", getFilename(), value.getLine(), value.getColumn()); } return true; } if (key.equals("defaultErrorHandler")) { if (s.equals("true")) { defaultErrorHandler = true; } else if (s.equals("false")) { defaultErrorHandler = false; } else { antlrTool.error("Value for defaultErrorHandler must be true or false", getFilename(), value.getLine(), value.getColumn()); } return true; } if (key.equals("analyzerDebug")) { if (s.equals("true")) { analyzerDebug = true; } else if (s.equals("false")) { analyzerDebug = false; } else { antlrTool.error("option 'analyzerDebug' must be true or false", getFilename(), value.getLine(), value.getColumn()); } return true; } if (key.equals("codeGenDebug")) { if (s.equals("true")) { analyzerDebug = true; } else if (s.equals("false")) { analyzerDebug = false; } else { antlrTool.error("option 'codeGenDebug' must be true or false", getFilename(), value.getLine(), 
value.getColumn()); } return true; } if (key.equals("classHeaderSuffix")) { return true; } if (key.equals("classHeaderPrefix")) { return true; } if (key.equals("namespaceAntlr")) { return true; } if (key.equals("namespaceStd")) { return true; } if (key.equals("genHashLines")) { return true; } if (key.equals("noConstructors")) { return true; } return false; } public void setTokenManager(TokenManager tokenManager_) { tokenManager = tokenManager_; } /** Print out the grammar without actions */ public String toString() { StringBuffer buf = new StringBuffer(20000); Enumeration ids = rules.elements(); while (ids.hasMoreElements()) { RuleSymbol rs = (RuleSymbol)ids.nextElement(); if (!rs.id.equals("mnextToken")) { buf.append(rs.getBlock().toString()); buf.append("\n\n"); } } return buf.toString(); } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/GrammarAnalyzer.java000066400000000000000000000030021161462365500250500ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/GrammarAnalyzer.java#1 $ */ /**A GrammarAnalyzer computes lookahead from Grammar (which contains * a grammar symbol table) and can then answer questions about the * grammar. * * To access the RuleBlock for a rule name, the grammar symbol table * is consulted. * * There should be no distinction between static & dynamic analysis. * In other words, some of the easy analysis can be done statically * and then the part that is hard statically can be deferred to * parse-time. Interestingly, computing LL(k) for k>1 lookahead * statically is O(|T|^k) where T is the grammar vocabulary, but, * is O(k) at run-time (ignoring the large constant associated with * the size of the grammar). 
In English, the difference can be * described as "find the set of all possible k-sequences of input" * versus "does this specific k-sequence match?". */ public interface GrammarAnalyzer { /**The epsilon token type is an imaginary type used * during analysis. It indicates an incomplete look() computation. * Must be kept consistent with Token constants to be between * MIN_USER_TYPE and INVALID_TYPE. */ // public static final int EPSILON_TYPE = 2; public static final int NONDETERMINISTIC = Integer.MAX_VALUE; // lookahead depth public static final int LOOKAHEAD_DEPTH_INIT = -1; } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/GrammarAtom.java000066400000000000000000000034051161462365500241720ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/GrammarAtom.java#1 $ */ /** A GrammarAtom is either a token ref, a character ref, or string. * The analysis doesn't care. */ abstract class GrammarAtom extends AlternativeElement { protected String label; protected String atomText; protected int tokenType = Token.INVALID_TYPE; protected boolean not = false; // ~T or ~'c' or ~"foo" /** Set to type of AST node to create during parse. Defaults to what is * set in the TokenSymbol. 
*/ protected String ASTNodeType = null; public GrammarAtom(Grammar g, Token t, int autoGenType) { super(g, t, autoGenType); atomText = t.getText(); } public String getLabel() { return label; } public String getText() { return atomText; } public int getType() { return tokenType; } public void setLabel(String label_) { label = label_; } public String getASTNodeType() { return ASTNodeType; } public void setASTNodeType(String type) { ASTNodeType = type; } public void setOption(Token option, Token value) { if (option.getText().equals("AST")) { setASTNodeType(value.getText()); } else { grammar.antlrTool.error("Invalid element option:" + option.getText(), grammar.getFilename(), option.getLine(), option.getColumn()); } } public String toString() { String s = " "; if (label != null) s += label + ":"; if (not) s += "~"; return s + atomText; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/GrammarElement.java000066400000000000000000000031051161462365500246600ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/GrammarElement.java#1 $ */ /**A GrammarElement is a generic node in our * data structure that holds a grammar in memory. * This data structure can be used for static * analysis or for dynamic analysis (during parsing). * Every node must know which grammar owns it, how * to generate code, and how to do analysis. */ abstract class GrammarElement { public static final int AUTO_GEN_NONE = 1; public static final int AUTO_GEN_CARET = 2; public static final int AUTO_GEN_BANG = 3; /* * Note that Java does static argument type matching to * determine which function to execute on the receiver. * Here, that implies that we cannot simply say * grammar.generator.gen(this) in GrammarElement or * only CodeGenerator.gen(GrammarElement ge) would * ever be called. 
*/ protected Grammar grammar; protected int line; protected int column; public GrammarElement(Grammar g) { grammar = g; line = -1; column = -1; } public GrammarElement(Grammar g, Token start) { grammar = g; line = start.getLine(); column = start.getColumn(); } public void generate() { } public int getLine() { return line; } public int getColumn() { return column; } public Lookahead look(int k) { return null; } public abstract String toString(); } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/GrammarSymbol.java000066400000000000000000000011371161462365500245370ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/GrammarSymbol.java#1 $ */ /**A GrammarSymbol is a generic symbol that can be * added to the symbol table for a grammar. */ abstract class GrammarSymbol { protected String id; public GrammarSymbol() { } public GrammarSymbol(String s) { id = s; } public String getId() { return id; } public void setId(String s) { id = s; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/HTMLCodeGenerator.java000066400000000000000000000635701161462365500252020ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/HTMLCodeGenerator.java#1 $ */ import java.util.Enumeration; import antlr.collections.impl.BitSet; import antlr.collections.impl.Vector; import java.io.PrintWriter; //SAS: changed for proper text file io import java.io.IOException; import java.io.FileWriter; /**Generate P.html, a cross-linked representation of P with or without actions */ public class HTMLCodeGenerator extends CodeGenerator { /** non-zero if inside syntactic predicate generation */ protected int syntacticPredLevel = 0; 
/** true during lexer generation, false during parser generation */ protected boolean doingLexRules = false; protected boolean firstElementInAlt; protected AlternativeElement prevAltElem = null; // what was generated last? /** Create a Diagnostic code-generator using the given Grammar * The caller must still call setTool, setBehavior, and setAnalyzer * before generating code. */ public HTMLCodeGenerator() { super(); charFormatter = new JavaCharFormatter(); } /** Encode a string for printing in a HTML document.. * e.g. encode '<' '>' and similar stuff * @param s the string to encode */ static String HTMLEncode(String s) { StringBuffer buf = new StringBuffer(); for (int i = 0, len = s.length(); i < len; i++) { char c = s.charAt(i); if (c == '&') buf.append("&"); else if (c == '\"') buf.append("""); else if (c == '\'') buf.append("'"); else if (c == '<') buf.append("<"); else if (c == '>') buf.append(">"); else buf.append(c); } return buf.toString(); } public void gen() { // Do the code generation try { // Loop over all grammars Enumeration grammarIter = behavior.grammars.elements(); while (grammarIter.hasMoreElements()) { Grammar g = (Grammar)grammarIter.nextElement(); // Connect all the components to each other /* g.setGrammarAnalyzer(analyzer); analyzer.setGrammar(g); */ g.setCodeGenerator(this); // To get right overloading behavior across hetrogeneous grammars g.generate(); if (antlrTool.hasError()) { antlrTool.fatalError("Exiting due to errors."); } } } catch (IOException e) { antlrTool.reportException(e, null); } } /** Generate code for the given grammar element. * @param blk The {...} action to generate */ public void gen(ActionElement action) { // no-op } /** Generate code for the given grammar element. * @param blk The "x|y|z|..." block to generate */ public void gen(AlternativeBlock blk) { genGenericBlock(blk, ""); } /** Generate code for the given grammar element. * @param blk The block-end element to generate. 
Block-end * elements are synthesized by the grammar parser to represent * the end of a block. */ public void gen(BlockEndElement end) { // no-op } /** Generate code for the given grammar element. * @param blk The character literal reference to generate */ public void gen(CharLiteralElement atom) { if (atom.not) { _print("~"); } _print(HTMLEncode(atom.atomText) + " "); } /** Generate code for the given grammar element. * @param blk The character-range reference to generate */ public void gen(CharRangeElement r) { print(r.beginText + ".." + r.endText + " "); } /** Generate the lexer HTML file */ public void gen(LexerGrammar g) throws IOException { setGrammar(g); antlrTool.reportProgress("Generating " + grammar.getClassName() + ".html"); currentOutput = antlrTool.openOutputFile(grammar.getClassName() + ".html"); //SAS: changed for proper text file io tabs = 0; doingLexRules = true; // Generate header common to all TXT output files genHeader(); // Output the user-defined lexer premamble // RK: guess not.. 
// println(grammar.preambleAction.getText()); // Generate lexer class definition println(""); // print javadoc comment if any if (grammar.comment != null) { _println(HTMLEncode(grammar.comment)); } println("Definition of lexer " + grammar.getClassName() + ", which is a subclass of " + grammar.getSuperClass() + "."); // Generate user-defined parser class members // printAction(grammar.classMemberAction.getText()); /* // Generate string literals println(""); println("*** String literals used in the parser"); println("The following string literals were used in the parser."); println("An actual code generator would arrange to place these literals"); println("into a table in the generated lexer, so that actions in the"); println("generated lexer could match token text against the literals."); println("String literals used in the lexer are not listed here, as they"); println("are incorporated into the mainstream lexer processing."); tabs++; // Enumerate all of the symbols and look for string literal symbols Enumeration ids = grammar.getSymbols(); while ( ids.hasMoreElements() ) { GrammarSymbol sym = (GrammarSymbol)ids.nextElement(); // Only processing string literals -- reject other symbol entries if ( sym instanceof StringLiteralSymbol ) { StringLiteralSymbol s = (StringLiteralSymbol)sym; println(s.getId() + " = " + s.getTokenType()); } } tabs--; println("*** End of string literals used by the parser"); */ // Generate nextToken() rule. // nextToken() is a synthetic lexer rule that is the implicit OR of all // user-defined lexer rules. genNextToken(); // Generate code for each rule in the lexer Enumeration ids = grammar.rules.elements(); while (ids.hasMoreElements()) { RuleSymbol rs = (RuleSymbol)ids.nextElement(); if (!rs.id.equals("mnextToken")) { genRule(rs); } } // Close the lexer output file currentOutput.close(); currentOutput = null; doingLexRules = false; } /** Generate code for the given grammar element. 
* @param blk The (...)+ block to generate */ public void gen(OneOrMoreBlock blk) { genGenericBlock(blk, "+"); } /** Generate the parser HTML file */ public void gen(ParserGrammar g) throws IOException { setGrammar(g); // Open the output stream for the parser and set the currentOutput antlrTool.reportProgress("Generating " + grammar.getClassName() + ".html"); currentOutput = antlrTool.openOutputFile(grammar.getClassName() + ".html"); tabs = 0; // Generate the header common to all output files. genHeader(); // Generate parser class definition println(""); // print javadoc comment if any if (grammar.comment != null) { _println(HTMLEncode(grammar.comment)); } println("Definition of parser " + grammar.getClassName() + ", which is a subclass of " + grammar.getSuperClass() + "."); // Enumerate the parser rules Enumeration rules = grammar.rules.elements(); while (rules.hasMoreElements()) { println(""); // Get the rules from the list and downcast it to proper type GrammarSymbol sym = (GrammarSymbol)rules.nextElement(); // Only process parser rules if (sym instanceof RuleSymbol) { genRule((RuleSymbol)sym); } } tabs--; println(""); genTail(); // Close the parser output stream currentOutput.close(); currentOutput = null; } /** Generate code for the given grammar element. * @param blk The rule-reference to generate */ public void gen(RuleRefElement rr) { RuleSymbol rs = (RuleSymbol)grammar.getSymbol(rr.targetRule); // Generate the actual rule description _print(""); _print(rr.targetRule); _print(""); // RK: Leave out args.. // if (rr.args != null) { // _print("["+rr.args+"]"); // } _print(" "); } /** Generate code for the given grammar element. * @param blk The string-literal reference to generate */ public void gen(StringLiteralElement atom) { if (atom.not) { _print("~"); } _print(HTMLEncode(atom.atomText)); _print(" "); } /** Generate code for the given grammar element. 
* @param blk The token-range reference to generate */ public void gen(TokenRangeElement r) { print(r.beginText + ".." + r.endText + " "); } /** Generate code for the given grammar element. * @param blk The token-reference to generate */ public void gen(TokenRefElement atom) { if (atom.not) { _print("~"); } _print(atom.atomText); _print(" "); } public void gen(TreeElement t) { print(t + " "); } /** Generate the tree-walker TXT file */ public void gen(TreeWalkerGrammar g) throws IOException { setGrammar(g); // Open the output stream for the parser and set the currentOutput antlrTool.reportProgress("Generating " + grammar.getClassName() + ".html"); currentOutput = antlrTool.openOutputFile(grammar.getClassName() + ".html"); //SAS: changed for proper text file io tabs = 0; // Generate the header common to all output files. genHeader(); // Output the user-defined parser premamble println(""); // println("*** Tree-walker Preamble Action."); // println("This action will appear before the declaration of your tree-walker class:"); // tabs++; // println(grammar.preambleAction.getText()); // tabs--; // println("*** End of tree-walker Preamble Action"); // Generate tree-walker class definition println(""); // print javadoc comment if any if (grammar.comment != null) { _println(HTMLEncode(grammar.comment)); } println("Definition of tree parser " + grammar.getClassName() + ", which is a subclass of " + grammar.getSuperClass() + "."); // Generate user-defined tree-walker class members // println(""); // println("*** User-defined tree-walker class members:"); // println("These are the member declarations that you defined for your class:"); // tabs++; // printAction(grammar.classMemberAction.getText()); // tabs--; // println("*** End of user-defined tree-walker class members"); // Generate code for each rule in the grammar println(""); // println("*** tree-walker rules:"); tabs++; // Enumerate the tree-walker rules Enumeration rules = grammar.rules.elements(); while 
(rules.hasMoreElements()) { println(""); // Get the rules from the list and downcast it to proper type GrammarSymbol sym = (GrammarSymbol)rules.nextElement(); // Only process tree-walker rules if (sym instanceof RuleSymbol) { genRule((RuleSymbol)sym); } } tabs--; println(""); // println("*** End of tree-walker rules"); // println(""); // println("*** End of tree-walker"); // Close the tree-walker output stream currentOutput.close(); currentOutput = null; } /** Generate a wildcard element */ public void gen(WildcardElement wc) { /* if ( wc.getLabel()!=null ) { _print(wc.getLabel()+"="); } */ _print(". "); } /** Generate code for the given grammar element. * @param blk The (...)* block to generate */ public void gen(ZeroOrMoreBlock blk) { genGenericBlock(blk, "*"); } protected void genAlt(Alternative alt) { if (alt.getTreeSpecifier() != null) { _print(alt.getTreeSpecifier().getText()); } prevAltElem = null; for (AlternativeElement elem = alt.head; !(elem instanceof BlockEndElement); elem = elem.next) { elem.generate(); firstElementInAlt = false; prevAltElem = elem; } } /** Generate the header for a block, which may be a RuleBlock or a * plain AlternativeBLock. This generates any variable declarations, * init-actions, and syntactic-predicate-testing variables. * @blk The block for which the preamble is to be generated. */ // protected void genBlockPreamble(AlternativeBlock blk) { // RK: don't dump out init actions // dump out init action // if ( blk.initAction!=null ) { // printAction("{" + blk.initAction + "}"); // } // } /**Generate common code for a block of alternatives; return a postscript * that needs to be generated at the end of the block. Other routines * may append else-clauses and such for error checking before the postfix * is generated. 
*/ public void genCommonBlock(AlternativeBlock blk) { for (int i = 0; i < blk.alternatives.size(); i++) { Alternative alt = blk.getAlternativeAt(i); AlternativeElement elem = alt.head; // dump alt operator | if (i > 0 && blk.alternatives.size() > 1) { _println(""); print("|\t"); } // Dump the alternative, starting with predicates // boolean save = firstElementInAlt; firstElementInAlt = true; tabs++; // in case we do a newline in alt, increase the tab indent // RK: don't dump semantic/syntactic predicates // only obscures grammar. // // Dump semantic predicates // // if (alt.semPred != null) { // println("{" + alt.semPred + "}?"); // } // Dump syntactic predicate // if (alt.synPred != null) { // genSynPred(alt.synPred); // } genAlt(alt); tabs--; firstElementInAlt = save; } } /** Generate a textual representation of the follow set * for a block. * @param blk The rule block of interest */ public void genFollowSetForRuleBlock(RuleBlock blk) { Lookahead follow = grammar.theLLkAnalyzer.FOLLOW(1, blk.endNode); printSet(grammar.maxk, 1, follow); } protected void genGenericBlock(AlternativeBlock blk, String blkOp) { if (blk.alternatives.size() > 1) { // make sure we start on a new line if (!firstElementInAlt) { // only do newline if the last element wasn't a multi-line block if (prevAltElem == null || !(prevAltElem instanceof AlternativeBlock) || ((AlternativeBlock)prevAltElem).alternatives.size() == 1) { _println(""); print("(\t"); } else { _print("(\t"); } // _println(""); // print("(\t"); } else { _print("(\t"); } } else { _print("( "); } // RK: don't dump init actions // genBlockPreamble(blk); genCommonBlock(blk); if (blk.alternatives.size() > 1) { _println(""); print(")" + blkOp + " "); // if not last element of alt, need newline & to indent if (!(blk.next instanceof BlockEndElement)) { _println(""); print(""); } } else { _print(")" + blkOp + " "); } } /** Generate a header that is common to all TXT files */ protected void genHeader() { println(""); println(""); 
println(""); println("Grammar " + antlrTool.grammarFile + ""); println(""); println(""); println(""); println(""); println(""); println(""); println("
"); println("Grammar " + grammar.getClassName() + "
"); println("ANTLR-generated HTML file from " + antlrTool.grammarFile); println("

"); println("Terence Parr, MageLang Institute"); println("
ANTLR Version " + antlrTool.version + "; 1989-2005"); println("

"); println("
");
        // RK: see no reason for printing include files and stuff...
//		tabs++;
//		printAction(behavior.getHeaderAction(""));
//		tabs--;
    }

    /**Generate the lookahead set for an alternate. */
    protected void genLookaheadSetForAlt(Alternative alt) {
        if (doingLexRules && alt.cache[1].containsEpsilon()) {
            println("MATCHES ALL");
            return;
        }
        int depth = alt.lookaheadDepth;
        if (depth == GrammarAnalyzer.NONDETERMINISTIC) {
            // if the decision is nondeterministic, do the best we can: LL(k)
            // any predicates that are around will be generated later.
            depth = grammar.maxk;
        }
        for (int i = 1; i <= depth; i++) {
            Lookahead lookahead = alt.cache[i];
            printSet(depth, i, lookahead);
        }
    }

    /** Print a textual representation of the lookahead set for a block.
     *  The printed depth is the maximum lookahead depth over the block's
     *  alternatives; a single nondeterministic alternative forces the
     *  grammar-wide maximum k.
     * @param blk  The block of interest
     */
    public void genLookaheadSetForBlock(AlternativeBlock blk) {
        // Determine the deepest lookahead required by any alternative.
        int depth = 0;
        for (int altIdx = 0; altIdx < blk.alternatives.size(); altIdx++) {
            Alternative alt = blk.getAlternativeAt(altIdx);
            if (alt.lookaheadDepth == GrammarAnalyzer.NONDETERMINISTIC) {
                depth = grammar.maxk;
                break;
            }
            if (depth < alt.lookaheadDepth) {
                depth = alt.lookaheadDepth;
            }
        }

        // Emit one lookahead set per level, from k==1 up to the chosen depth.
        for (int k = 1; k <= depth; k++) {
            printSet(depth, k, grammar.theLLkAnalyzer.look(k, blk));
        }
    }

    /** Generate the synthetic nextToken lexer rule.
     *  nextToken is the implicit OR of every user-defined lexer rule, so it
     *  is synthesized here as one big alternative block and rendered via
     *  genCommonBlock().
     */
    public void genNextToken() {
        println("");
        println("/** Lexer nextToken rule:");
        println(" *  The lexer nextToken rule is synthesized from all of the user-defined");
        println(" *  lexer rules.  It logically consists of one big alternative block with");
        println(" *  each user-defined rule being an alternative.");
        println(" */");

        // Build the synthetic rule block: one alternative per user lexer rule.
        RuleBlock nextTokenBlk =
            MakeGrammar.createNextTokenRule(grammar, grammar.rules, "nextToken");

        // Register the synthesized rule with the grammar under its mangled name.
        RuleSymbol ruleSym = new RuleSymbol("mnextToken");
        ruleSym.setDefined();
        ruleSym.setBlock(nextTokenBlk);
        ruleSym.access = "private";
        grammar.define(ruleSym);

        // NOTE(review): determinism analysis of the synthesized block is
        // deliberately skipped for this diagnostic output format.

        genCommonBlock(nextTokenBlk);
    }

    /** Render a single named rule: its comment (HTML-encoded), its access
     *  modifier (unless it is the default "public"), its name, and then the
     *  alternatives of its block terminated by ";".
     * @param s The RuleSymbol describing the rule to generate
     */
    public void genRule(RuleSymbol s) {
        // Nothing to emit for missing or merely-referenced rules.
        if (s == null || !s.isDefined()) {
            return;
        }
        println("");
        if (s.comment != null) {
            _println(HTMLEncode(s.comment));
        }
        // Show the access modifier only when it differs from the default.
        if (s.access.length() != 0 && !s.access.equals("public")) {
            _print(s.access + " ");
        }
        _print("");
        _print(s.getId());
        _print("");

        RuleBlock rblk = s.getBlock();

        // NOTE(review): rule return values, arguments and init-actions are
        // deliberately omitted from this output format.
        _println("");
        tabs++;
        print(":\t");

        // Dump the alternatives of the rule.
        genCommonBlock(rblk);

        _println("");
        println(";");
        tabs--;
    }

    /** Generate the syntactic predicate.  This basically generates
     * the alternative block, but tracks that we are inside a synPred
     * via the syntacticPredLevel counter.
     * @param blk  The syntactic predicate block
     */
    protected void genSynPred(SynPredBlock blk) {
        syntacticPredLevel++;
        genGenericBlock(blk, " =>"); // render the block followed by the "=>" operator
        syntacticPredLevel--;
    }

    public void genTail() {
        println("
"); println(""); println(""); } /** Generate the token types TXT file */ protected void genTokenTypes(TokenManager tm) throws IOException { // Open the token output TXT file and set the currentOutput stream antlrTool.reportProgress("Generating " + tm.getName() + TokenTypesFileSuffix + TokenTypesFileExt); currentOutput = antlrTool.openOutputFile(tm.getName() + TokenTypesFileSuffix + TokenTypesFileExt); //SAS: changed for proper text file io tabs = 0; // Generate the header common to all diagnostic files genHeader(); // Generate a string for each token. This creates a static // array of Strings indexed by token type. println(""); println("*** Tokens used by the parser"); println("This is a list of the token numeric values and the corresponding"); println("token identifiers. Some tokens are literals, and because of that"); println("they have no identifiers. Literals are double-quoted."); tabs++; // Enumerate all the valid token types Vector v = tm.getVocabulary(); for (int i = Token.MIN_USER_TYPE; i < v.size(); i++) { String s = (String)v.elementAt(i); if (s != null) { println(s + " = " + i); } } // Close the interface tabs--; println("*** End of tokens used by the parser"); // Close the tokens output file currentOutput.close(); currentOutput = null; } /** Get a string for an expression to generate creation of an AST subtree. * @param v A Vector of String, where each element is an expression in the target language yielding an AST node. */ public String getASTCreateString(Vector v) { return null; } /** Get a string for an expression to generate creating of an AST node * @param str The arguments to the AST constructor */ public String getASTCreateString(GrammarAtom atom, String str) { return null; } /** Map an identifier to it's corresponding tree-node variable. 
* This is context-sensitive, depending on the rule and alternative * being generated * @param id The identifier name to map * @param forInput true if the input tree node variable is to be returned, otherwise the output variable is returned. */ public String mapTreeId(String id, ActionTransInfo tInfo) { return id; } /// unused. protected String processActionForSpecialSymbols(String actionStr, int line, RuleBlock currentRule, ActionTransInfo tInfo) { return actionStr; } /** Format a lookahead or follow set. * @param depth The depth of the entire lookahead/follow * @param k The lookahead level to print * @param lookahead The lookahead/follow set to print */ public void printSet(int depth, int k, Lookahead lookahead) { int numCols = 5; int[] elems = lookahead.fset.toArray(); if (depth != 1) { print("k==" + k + ": {"); } else { print("{ "); } if (elems.length > numCols) { _println(""); tabs++; print(""); } int column = 0; for (int i = 0; i < elems.length; i++) { column++; if (column > numCols) { _println(""); print(""); column = 0; } if (doingLexRules) { _print(charFormatter.literalChar(elems[i])); } else { _print((String)grammar.tokenManager.getVocabulary().elementAt(elems[i])); } if (i != elems.length - 1) { _print(", "); } } if (elems.length > numCols) { _println(""); tabs--; print(""); } _println(" }"); } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/ImportVocabTokenManager.java000066400000000000000000000063431161462365500265100ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/ImportVocabTokenManager.java#1 $ */ import java.io.*; import java.util.Hashtable; import java.util.Enumeration; import antlr.collections.impl.Vector; /** Static implementation of the TokenManager, used for importVocab option */ class ImportVocabTokenManager extends SimpleTokenManager implements 
Cloneable { private String filename; protected Grammar grammar; // FIXME: it would be nice if the path to the original grammar file was // also searched. ImportVocabTokenManager(Grammar grammar, String filename_, String name_, Tool tool_) { // initialize super(name_, tool_); this.grammar = grammar; this.filename = filename_; // Figure out exactly where the file lives. Check $PWD first, // and then search in -o . // File grammarFile = new File(filename); if (!grammarFile.exists()) { grammarFile = new File(antlrTool.getOutputDirectory(), filename); if (!grammarFile.exists()) { antlrTool.panic("Cannot find importVocab file '" + filename + "'"); } } setReadOnly(true); // Read a file with lines of the form ID=number try { Reader fileIn = new BufferedReader(new FileReader(grammarFile)); ANTLRTokdefLexer tokdefLexer = new ANTLRTokdefLexer(fileIn); ANTLRTokdefParser tokdefParser = new ANTLRTokdefParser(tokdefLexer); tokdefParser.setTool(antlrTool); tokdefParser.setFilename(filename); tokdefParser.file(this); } catch (FileNotFoundException fnf) { antlrTool.panic("Cannot find importVocab file '" + filename + "'"); } catch (RecognitionException ex) { antlrTool.panic("Error parsing importVocab file '" + filename + "': " + ex.toString()); } catch (TokenStreamException ex) { antlrTool.panic("Error reading importVocab file '" + filename + "'"); } } public Object clone() { ImportVocabTokenManager tm; tm = (ImportVocabTokenManager)super.clone(); tm.filename = this.filename; tm.grammar = this.grammar; return tm; } /** define a token. */ public void define(TokenSymbol ts) { super.define(ts); } /** define a token. Intended for use only when reading the importVocab file. */ public void define(String s, int ttype) { TokenSymbol ts = null; if (s.startsWith("\"")) { ts = new StringLiteralSymbol(s); } else { ts = new TokenSymbol(s); } ts.setTokenType(ttype); super.define(ts); maxToken = (ttype + 1) > maxToken ? 
(ttype + 1) : maxToken; // record maximum token type } /** importVocab token manager is read-only if output would be same as input */ public boolean isReadOnly() { return readOnly; } /** Get the next unused token type. */ public int nextTokenType() { return super.nextTokenType(); } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/InputBuffer.java000066400000000000000000000075141161462365500242210ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/InputBuffer.java#1 $ */ // SAS: Added this class to genericise the input buffers for scanners // This allows a scanner to use a binary (FileInputStream) or // text (FileReader) stream of data; the generated scanner // subclass will define the input stream // There are two subclasses to this: CharBuffer and ByteBuffer import java.io.IOException; /**A Stream of characters fed to the lexer from a InputStream that can * be rewound via mark()/rewind() methods. *

* A dynamic array is used to buffer up all the input characters. Normally, * "k" characters are stored in the buffer. More characters may be stored during * guess mode (testing syntactic predicate), or when LT(i>k) is referenced. * Consumption of characters is deferred. In other words, reading the next * character is not done by conume(), but deferred until needed by LA or LT. *

* * @see antlr.CharQueue */ public abstract class InputBuffer { // Number of active markers protected int nMarkers = 0; // Additional offset used when markers are active protected int markerOffset = 0; // Number of calls to consume() since last LA() or LT() call protected int numToConsume = 0; // Circular queue protected CharQueue queue; /** Create an input buffer */ public InputBuffer() { queue = new CharQueue(1); } /** This method updates the state of the input buffer so that * the text matched since the most recent mark() is no longer * held by the buffer. So, you either do a mark/rewind for * failed predicate or mark/commit to keep on parsing without * rewinding the input. */ public void commit() { nMarkers--; } /** Mark another character for deferred consumption */ public void consume() { numToConsume++; } /** Ensure that the input buffer is sufficiently full */ public abstract void fill(int amount) throws CharStreamException; public String getLAChars() { StringBuffer la = new StringBuffer(); for (int i = markerOffset; i < queue.nbrEntries; i++) la.append(queue.elementAt(i)); return la.toString(); } public String getMarkedChars() { StringBuffer marked = new StringBuffer(); for (int i = 0; i < markerOffset; i++) marked.append(queue.elementAt(i)); return marked.toString(); } public boolean isMarked() { return (nMarkers != 0); } /** Get a lookahead character */ public char LA(int i) throws CharStreamException { fill(i); return queue.elementAt(markerOffset + i - 1); } /**Return an integer marker that can be used to rewind the buffer to * its current state. */ public int mark() { syncConsume(); nMarkers++; return markerOffset; } /**Rewind the character buffer to a marker. 
* @param mark Marker returned previously from mark() */ public void rewind(int mark) { syncConsume(); markerOffset = mark; nMarkers--; } /** Reset the input buffer */ public void reset() { nMarkers = 0; markerOffset = 0; numToConsume = 0; queue.reset(); } /** Sync up deferred consumption */ protected void syncConsume() { while (numToConsume > 0) { if (nMarkers > 0) { // guess mode -- leave leading characters and bump offset. markerOffset++; } else { // normal mode -- remove first character queue.removeFirst(); } numToConsume--; } } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/JavaBlockFinishingInfo.java000066400000000000000000000020341161462365500262670ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/JavaBlockFinishingInfo.java#1 $ */ class JavaBlockFinishingInfo { String postscript; // what to generate to terminate block boolean generatedSwitch;// did block finish with "default:" of switch? boolean generatedAnIf; /** When generating an if or switch, end-of-token lookahead sets * will become the else or default clause, don't generate an * error clause in this case. 
*/ boolean needAnErrorClause; public JavaBlockFinishingInfo() { postscript = null; generatedSwitch = generatedSwitch = false; needAnErrorClause = true; } public JavaBlockFinishingInfo(String ps, boolean genS, boolean generatedAnIf, boolean n) { postscript = ps; generatedSwitch = genS; this.generatedAnIf = generatedAnIf; needAnErrorClause = n; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/JavaCharFormatter.java000066400000000000000000000062551161462365500253340ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/JavaCharFormatter.java#1 $ */ class JavaCharFormatter implements CharFormatter { /** Given a character value, return a string representing the character * that can be embedded inside a string literal or character literal * This works for Java/C/C++ code-generation and languages with compatible * special-character-escapment. * Code-generators for languages should override this method. * @param c The character of interest. * @param forCharLiteral true to escape for char literal, false for string literal */ public String escapeChar(int c, boolean forCharLiteral) { switch (c) { // case GrammarAnalyzer.EPSILON_TYPE : return ""; case '\n': return "\\n"; case '\t': return "\\t"; case '\r': return "\\r"; case '\\': return "\\\\"; case '\'': return forCharLiteral ? "\\'" : "'"; case '"': return forCharLiteral ? 
"\"" : "\\\""; default : if (c < ' ' || c > 126) { if ((0x0000 <= c) && (c <= 0x000F)) { return "\\u000" + Integer.toString(c, 16); } else if ((0x0010 <= c) && (c <= 0x00FF)) { return "\\u00" + Integer.toString(c, 16); } else if ((0x0100 <= c) && (c <= 0x0FFF)) { return "\\u0" + Integer.toString(c, 16); } else { return "\\u" + Integer.toString(c, 16); } } else { return String.valueOf((char)c); } } } /** Converts a String into a representation that can be use as a literal * when surrounded by double-quotes. * @param s The String to be changed into a literal */ public String escapeString(String s) { String retval = new String(); for (int i = 0; i < s.length(); i++) { retval += escapeChar(s.charAt(i), false); } return retval; } /** Given a character value, return a string representing the character * literal that can be recognized by the target language compiler. * This works for languages that use single-quotes for character literals. * Code-generators for languages should override this method. * @param c The character of interest. */ public String literalChar(int c) { return "'" + escapeChar(c, true) + "'"; } /** Converts a String into a string literal * This works for languages that use double-quotes for string literals. * Code-generators for languages should override this method. 
* @param s The String to be changed into a literal */ public String literalString(String s) { return "\"" + escapeString(s) + "\""; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/JavaCodeGenerator.java000066400000000000000000004050161161462365500253120ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/JavaCodeGenerator.java#1 $ */ import java.util.Enumeration; import java.util.Hashtable; import antlr.collections.impl.BitSet; import antlr.collections.impl.Vector; import java.io.PrintWriter; //SAS: changed for proper text file io import java.io.IOException; import java.io.FileWriter; /**Generate MyParser.java, MyLexer.java and MyParserTokenTypes.java */ public class JavaCodeGenerator extends CodeGenerator { // non-zero if inside syntactic predicate generation protected int syntacticPredLevel = 0; // Are we generating ASTs (for parsers and tree parsers) right now? protected boolean genAST = false; // Are we saving the text consumed (for lexers) right now? protected boolean saveText = false; // Grammar parameters set up to handle different grammar classes. // These are used to get instanceof tests out of code generation String labeledElementType; String labeledElementASTType; String labeledElementInit; String commonExtraArgs; String commonExtraParams; String commonLocalVars; String lt1Value; String exceptionThrown; String throwNoViable; /** Tracks the rule being generated. Used for mapTreeId */ RuleBlock currentRule; /** Tracks the rule or labeled subrule being generated. Used for AST generation. */ String currentASTResult; /** Mapping between the ids used in the current alt, and the * names of variables used to represent their AST values. 
*/ Hashtable treeVariableMap = new Hashtable(); /** Used to keep track of which AST variables have been defined in a rule * (except for the #rule_name and #rule_name_in var's */ Hashtable declaredASTVariables = new Hashtable(); /* Count of unnamed generated variables */ int astVarNumber = 1; /** Special value used to mark duplicate in treeVariableMap */ protected static final String NONUNIQUE = new String(); public static final int caseSizeThreshold = 127; // ascii is max private Vector semPreds; /** Create a Java code-generator using the given Grammar. * The caller must still call setTool, setBehavior, and setAnalyzer * before generating code. */ public JavaCodeGenerator() { super(); charFormatter = new JavaCharFormatter(); } /** Adds a semantic predicate string to the sem pred vector These strings will be used to build an array of sem pred names when building a debugging parser. This method should only be called when the debug option is specified */ protected int addSemPred(String predicate) { semPreds.appendElement(predicate); return semPreds.size() - 1; } public void exitIfError() { if (antlrTool.hasError()) { antlrTool.fatalError("Exiting due to errors."); } } /**Generate the parser, lexer, treeparser, and token types in Java */ public void gen() { // Do the code generation try { // Loop over all grammars Enumeration grammarIter = behavior.grammars.elements(); while (grammarIter.hasMoreElements()) { Grammar g = (Grammar)grammarIter.nextElement(); // Connect all the components to each other g.setGrammarAnalyzer(analyzer); g.setCodeGenerator(this); analyzer.setGrammar(g); // To get right overloading behavior across hetrogeneous grammars setupGrammarParameters(g); g.generate(); // print out the grammar with lookahead sets (and FOLLOWs) // System.out.print(g.toString()); exitIfError(); } // Loop over all token managers (some of which are lexers) Enumeration tmIter = behavior.tokenManagers.elements(); while (tmIter.hasMoreElements()) { TokenManager tm = 
(TokenManager)tmIter.nextElement(); if (!tm.isReadOnly()) { // Write the token manager tokens as Java // this must appear before genTokenInterchange so that // labels are set on string literals genTokenTypes(tm); // Write the token manager tokens as plain text genTokenInterchange(tm); } exitIfError(); } } catch (IOException e) { antlrTool.reportException(e, null); } } /** Generate code for the given grammar element. * @param blk The {...} action to generate */ public void gen(ActionElement action) { if (DEBUG_CODE_GENERATOR) System.out.println("genAction(" + action + ")"); if (action.isSemPred) { genSemPred(action.actionText, action.line); } else { if (grammar.hasSyntacticPredicate) { println("if ( inputState.guessing==0 ) {"); tabs++; } // get the name of the followSet for the current rule so that we // can replace $FOLLOW in the .g file. ActionTransInfo tInfo = new ActionTransInfo(); String actionStr = processActionForSpecialSymbols(action.actionText, action.getLine(), currentRule, tInfo); if (tInfo.refRuleRoot != null) { // Somebody referenced "#rule", make sure translated var is valid // assignment to #rule is left as a ref also, meaning that assignments // with no other refs like "#rule = foo();" still forces this code to be // generated (unnecessarily). println(tInfo.refRuleRoot + " = (" + labeledElementASTType + ")currentAST.root;"); } // dump the translated action printAction(actionStr); if (tInfo.assignToRoot) { // Somebody did a "#rule=", reset internal currentAST.root println("currentAST.root = " + tInfo.refRuleRoot + ";"); // reset the child pointer too to be last sibling in sibling list println("currentAST.child = " + tInfo.refRuleRoot + "!=null &&" + tInfo.refRuleRoot + ".getFirstChild()!=null ?"); tabs++; println(tInfo.refRuleRoot + ".getFirstChild() : " + tInfo.refRuleRoot + ";"); tabs--; println("currentAST.advanceChildToEnd();"); } if (grammar.hasSyntacticPredicate) { tabs--; println("}"); } } } /** Generate code for the given grammar element. 
* @param blk The "x|y|z|..." block to generate */ public void gen(AlternativeBlock blk) { if (DEBUG_CODE_GENERATOR) System.out.println("gen(" + blk + ")"); println("{"); genBlockPreamble(blk); genBlockInitAction(blk); // Tell AST generation to build subrule result String saveCurrentASTResult = currentASTResult; if (blk.getLabel() != null) { currentASTResult = blk.getLabel(); } boolean ok = grammar.theLLkAnalyzer.deterministic(blk); JavaBlockFinishingInfo howToFinish = genCommonBlock(blk, true); genBlockFinish(howToFinish, throwNoViable); println("}"); // Restore previous AST generation currentASTResult = saveCurrentASTResult; } /** Generate code for the given grammar element. * @param blk The block-end element to generate. Block-end * elements are synthesized by the grammar parser to represent * the end of a block. */ public void gen(BlockEndElement end) { if (DEBUG_CODE_GENERATOR) System.out.println("genRuleEnd(" + end + ")"); } /** Generate code for the given grammar element. * @param blk The character literal reference to generate */ public void gen(CharLiteralElement atom) { if (DEBUG_CODE_GENERATOR) System.out.println("genChar(" + atom + ")"); if (atom.getLabel() != null) { println(atom.getLabel() + " = " + lt1Value + ";"); } boolean oldsaveText = saveText; saveText = saveText && atom.getAutoGenType() == GrammarElement.AUTO_GEN_NONE; genMatch(atom); saveText = oldsaveText; } /** Generate code for the given grammar element. 
* @param blk The character-range reference to generate */ public void gen(CharRangeElement r) { if (r.getLabel() != null && syntacticPredLevel == 0) { println(r.getLabel() + " = " + lt1Value + ";"); } boolean flag = ( grammar instanceof LexerGrammar && ( !saveText || r.getAutoGenType() == GrammarElement.AUTO_GEN_BANG ) ); if (flag) { println("_saveIndex=text.length();"); } println("matchRange(" + r.beginText + "," + r.endText + ");"); if (flag) { println("text.setLength(_saveIndex);"); } } /** Generate the lexer Java file */ public void gen(LexerGrammar g) throws IOException { // If debugging, create a new sempred vector for this grammar if (g.debuggingOutput) semPreds = new Vector(); setGrammar(g); if (!(grammar instanceof LexerGrammar)) { antlrTool.panic("Internal error generating lexer"); } // SAS: moved output creation to method so a subclass can change // how the output is generated (for VAJ interface) setupOutput(grammar.getClassName()); genAST = false; // no way to gen trees. saveText = true; // save consumed characters. tabs = 0; // Generate header common to all Java output files genHeader(); // Do not use printAction because we assume tabs==0 println(behavior.getHeaderAction("")); // Generate header specific to lexer Java file // println("import java.io.FileInputStream;"); println("import java.io.InputStream;"); println("import antlr.TokenStreamException;"); println("import antlr.TokenStreamIOException;"); println("import antlr.TokenStreamRecognitionException;"); println("import antlr.CharStreamException;"); println("import antlr.CharStreamIOException;"); println("import antlr.ANTLRException;"); println("import java.io.Reader;"); println("import java.util.Hashtable;"); println("import antlr." 
+ grammar.getSuperClass() + ";"); println("import antlr.InputBuffer;"); println("import antlr.ByteBuffer;"); println("import antlr.CharBuffer;"); println("import antlr.Token;"); println("import antlr.CommonToken;"); println("import antlr.RecognitionException;"); println("import antlr.NoViableAltForCharException;"); println("import antlr.MismatchedCharException;"); println("import antlr.TokenStream;"); println("import antlr.ANTLRHashString;"); println("import antlr.LexerSharedInputState;"); println("import antlr.collections.impl.BitSet;"); println("import antlr.SemanticException;"); // Generate user-defined lexer file preamble println(grammar.preambleAction.getText()); // Generate lexer class definition String sup = null; if (grammar.superClass != null) { sup = grammar.superClass; } else { sup = "antlr." + grammar.getSuperClass(); } // print javadoc comment if any if (grammar.comment != null) { _println(grammar.comment); } // get prefix (replaces "public" and lets user specify) String prefix = "public"; Token tprefix = (Token)grammar.options.get("classHeaderPrefix"); if (tprefix != null) { String p = StringUtils.stripFrontBack(tprefix.getText(), "\"", "\""); if (p != null) { prefix = p; } } print(prefix+" "); print("class " + grammar.getClassName() + " extends " + sup); println(" implements " + grammar.tokenManager.getName() + TokenTypesFileSuffix + ", TokenStream"); Token tsuffix = (Token)grammar.options.get("classHeaderSuffix"); if (tsuffix != null) { String suffix = StringUtils.stripFrontBack(tsuffix.getText(), "\"", "\""); if (suffix != null) { print(", " + suffix); // must be an interface name for Java } } println(" {"); // Generate user-defined lexer class members print( processActionForSpecialSymbols(grammar.classMemberAction.getText(), grammar.classMemberAction.getLine(), currentRule, null) ); // // Generate the constructor from InputStream, which in turn // calls the ByteBuffer constructor // println("public " + grammar.getClassName() + "(InputStream in) 
{"); tabs++; println("this(new ByteBuffer(in));"); tabs--; println("}"); // // Generate the constructor from Reader, which in turn // calls the CharBuffer constructor // println("public " + grammar.getClassName() + "(Reader in) {"); tabs++; println("this(new CharBuffer(in));"); tabs--; println("}"); println("public " + grammar.getClassName() + "(InputBuffer ib) {"); tabs++; // if debugging, wrap the input buffer in a debugger if (grammar.debuggingOutput) println("this(new LexerSharedInputState(new antlr.debug.DebuggingInputBuffer(ib)));"); else println("this(new LexerSharedInputState(ib));"); tabs--; println("}"); // // Generate the constructor from InputBuffer (char or byte) // println("public " + grammar.getClassName() + "(LexerSharedInputState state) {"); tabs++; println("super(state);"); // if debugging, set up array variables and call user-overridable // debugging setup method if (grammar.debuggingOutput) { println(" ruleNames = _ruleNames;"); println(" semPredNames = _semPredNames;"); println(" setupDebugging();"); } // Generate the setting of various generated options. // These need to be before the literals since ANTLRHashString depends on // the casesensitive stuff. 
println("caseSensitiveLiterals = " + g.caseSensitiveLiterals + ";"); println("setCaseSensitive(" + g.caseSensitive + ");"); // Generate the initialization of a hashtable // containing the string literals used in the lexer // The literals variable itself is in CharScanner println("literals = new Hashtable();"); Enumeration keys = grammar.tokenManager.getTokenSymbolKeys(); while (keys.hasMoreElements()) { String key = (String)keys.nextElement(); if (key.charAt(0) != '"') { continue; } TokenSymbol sym = grammar.tokenManager.getTokenSymbol(key); if (sym instanceof StringLiteralSymbol) { StringLiteralSymbol s = (StringLiteralSymbol)sym; println("literals.put(new ANTLRHashString(" + s.getId() + ", this), new Integer(" + s.getTokenType() + "));"); } } tabs--; Enumeration ids; println("}"); // generate the rule name array for debugging if (grammar.debuggingOutput) { println("private static final String _ruleNames[] = {"); ids = grammar.rules.elements(); int ruleNum = 0; while (ids.hasMoreElements()) { GrammarSymbol sym = (GrammarSymbol)ids.nextElement(); if (sym instanceof RuleSymbol) println(" \"" + ((RuleSymbol)sym).getId() + "\","); } println("};"); } // Generate nextToken() rule. // nextToken() is a synthetic lexer rule that is the implicit OR of all // user-defined lexer rules. genNextToken(); // Generate code for each rule in the lexer ids = grammar.rules.elements(); int ruleNum = 0; while (ids.hasMoreElements()) { RuleSymbol sym = (RuleSymbol)ids.nextElement(); // Don't generate the synthetic rules if (!sym.getId().equals("mnextToken")) { genRule(sym, false, ruleNum++); } exitIfError(); } // Generate the semantic predicate map for debugging if (grammar.debuggingOutput) genSemPredMap(); // Generate the bitsets used throughout the lexer genBitsets(bitsetsUsed, ((LexerGrammar)grammar).charVocabulary.size()); println(""); println("}"); // Close the lexer output stream currentOutput.close(); currentOutput = null; } /** Generate code for the given grammar element. 
* @param blk The (...)+ block to generate */ public void gen(OneOrMoreBlock blk) { if (DEBUG_CODE_GENERATOR) System.out.println("gen+(" + blk + ")"); String label; String cnt; println("{"); genBlockPreamble(blk); if (blk.getLabel() != null) { cnt = "_cnt_" + blk.getLabel(); } else { cnt = "_cnt" + blk.ID; } println("int " + cnt + "=0;"); if (blk.getLabel() != null) { label = blk.getLabel(); } else { label = "_loop" + blk.ID; } println(label + ":"); println("do {"); tabs++; // generate the init action for ()+ ()* inside the loop // this allows us to do usefull EOF checking... genBlockInitAction(blk); // Tell AST generation to build subrule result String saveCurrentASTResult = currentASTResult; if (blk.getLabel() != null) { currentASTResult = blk.getLabel(); } boolean ok = grammar.theLLkAnalyzer.deterministic(blk); // generate exit test if greedy set to false // and an alt is ambiguous with exit branch // or when lookahead derived purely from end-of-file // Lookahead analysis stops when end-of-file is hit, // returning set {epsilon}. Since {epsilon} is not // ambig with any real tokens, no error is reported // by deterministic() routines and we have to check // for the case where the lookahead depth didn't get // set to NONDETERMINISTIC (this only happens when the // FOLLOW contains real atoms + epsilon). 
boolean generateNonGreedyExitPath = false; int nonGreedyExitDepth = grammar.maxk; if (!blk.greedy && blk.exitLookaheadDepth <= grammar.maxk && blk.exitCache[blk.exitLookaheadDepth].containsEpsilon()) { generateNonGreedyExitPath = true; nonGreedyExitDepth = blk.exitLookaheadDepth; } else if (!blk.greedy && blk.exitLookaheadDepth == LLkGrammarAnalyzer.NONDETERMINISTIC) { generateNonGreedyExitPath = true; } // generate exit test if greedy set to false // and an alt is ambiguous with exit branch if (generateNonGreedyExitPath) { if (DEBUG_CODE_GENERATOR) { System.out.println("nongreedy (...)+ loop; exit depth is " + blk.exitLookaheadDepth); } String predictExit = getLookaheadTestExpression(blk.exitCache, nonGreedyExitDepth); println("// nongreedy exit test"); println("if ( " + cnt + ">=1 && " + predictExit + ") break " + label + ";"); } JavaBlockFinishingInfo howToFinish = genCommonBlock(blk, false); genBlockFinish( howToFinish, "if ( " + cnt + ">=1 ) { break " + label + "; } else {" + throwNoViable + "}" ); println(cnt + "++;"); tabs--; println("} while (true);"); println("}"); // Restore previous AST generation currentASTResult = saveCurrentASTResult; } /** Generate the parser Java file */ public void gen(ParserGrammar g) throws IOException { // if debugging, set up a new vector to keep track of sempred // strings for this grammar if (g.debuggingOutput) semPreds = new Vector(); setGrammar(g); if (!(grammar instanceof ParserGrammar)) { antlrTool.panic("Internal error generating parser"); } // Open the output stream for the parser and set the currentOutput // SAS: moved file setup so subclass could do it (for VAJ interface) setupOutput(grammar.getClassName()); genAST = grammar.buildAST; tabs = 0; // Generate the header common to all output files. 
genHeader(); // Do not use printAction because we assume tabs==0 println(behavior.getHeaderAction("")); // Generate header for the parser println("import antlr.TokenBuffer;"); println("import antlr.TokenStreamException;"); println("import antlr.TokenStreamIOException;"); println("import antlr.ANTLRException;"); println("import antlr." + grammar.getSuperClass() + ";"); println("import antlr.Token;"); println("import antlr.TokenStream;"); println("import antlr.RecognitionException;"); println("import antlr.NoViableAltException;"); println("import antlr.MismatchedTokenException;"); println("import antlr.SemanticException;"); println("import antlr.ParserSharedInputState;"); println("import antlr.collections.impl.BitSet;"); if ( genAST ) { println("import antlr.collections.AST;"); println("import java.util.Hashtable;"); println("import antlr.ASTFactory;"); println("import antlr.ASTPair;"); println("import antlr.collections.impl.ASTArray;"); } // Output the user-defined parser preamble println(grammar.preambleAction.getText()); // Generate parser class definition String sup = null; if (grammar.superClass != null) sup = grammar.superClass; else sup = "antlr." 
+ grammar.getSuperClass(); // print javadoc comment if any if (grammar.comment != null) { _println(grammar.comment); } // get prefix (replaces "public" and lets user specify) String prefix = "public"; Token tprefix = (Token)grammar.options.get("classHeaderPrefix"); if (tprefix != null) { String p = StringUtils.stripFrontBack(tprefix.getText(), "\"", "\""); if (p != null) { prefix = p; } } print(prefix+" "); print("class " + grammar.getClassName() + " extends " + sup); println(" implements " + grammar.tokenManager.getName() + TokenTypesFileSuffix); Token tsuffix = (Token)grammar.options.get("classHeaderSuffix"); if (tsuffix != null) { String suffix = StringUtils.stripFrontBack(tsuffix.getText(), "\"", "\""); if (suffix != null) print(", " + suffix); // must be an interface name for Java } println(" {"); // set up an array of all the rule names so the debugger can // keep track of them only by number -- less to store in tree... if (grammar.debuggingOutput) { println("private static final String _ruleNames[] = {"); Enumeration ids = grammar.rules.elements(); int ruleNum = 0; while (ids.hasMoreElements()) { GrammarSymbol sym = (GrammarSymbol)ids.nextElement(); if (sym instanceof RuleSymbol) println(" \"" + ((RuleSymbol)sym).getId() + "\","); } println("};"); } // Generate user-defined parser class members print( processActionForSpecialSymbols(grammar.classMemberAction.getText(), grammar.classMemberAction.getLine(), currentRule, null) ); // Generate parser class constructor from TokenBuffer println(""); println("protected " + grammar.getClassName() + "(TokenBuffer tokenBuf, int k) {"); println(" super(tokenBuf,k);"); println(" tokenNames = _tokenNames;"); // if debugging, set up arrays and call the user-overridable // debugging setup method if (grammar.debuggingOutput) { println(" ruleNames = _ruleNames;"); println(" semPredNames = _semPredNames;"); println(" setupDebugging(tokenBuf);"); } if ( grammar.buildAST ) { println(" buildTokenTypeASTClassMap();"); println(" 
astFactory = new ASTFactory(getTokenTypeToASTClassMap());"); } println("}"); println(""); println("public " + grammar.getClassName() + "(TokenBuffer tokenBuf) {"); println(" this(tokenBuf," + grammar.maxk + ");"); println("}"); println(""); // Generate parser class constructor from TokenStream println("protected " + grammar.getClassName() + "(TokenStream lexer, int k) {"); println(" super(lexer,k);"); println(" tokenNames = _tokenNames;"); // if debugging, set up arrays and call the user-overridable // debugging setup method if (grammar.debuggingOutput) { println(" ruleNames = _ruleNames;"); println(" semPredNames = _semPredNames;"); println(" setupDebugging(lexer);"); } if ( grammar.buildAST ) { println(" buildTokenTypeASTClassMap();"); println(" astFactory = new ASTFactory(getTokenTypeToASTClassMap());"); } println("}"); println(""); println("public " + grammar.getClassName() + "(TokenStream lexer) {"); println(" this(lexer," + grammar.maxk + ");"); println("}"); println(""); println("public " + grammar.getClassName() + "(ParserSharedInputState state) {"); println(" super(state," + grammar.maxk + ");"); println(" tokenNames = _tokenNames;"); if ( grammar.buildAST ) { println(" buildTokenTypeASTClassMap();"); println(" astFactory = new ASTFactory(getTokenTypeToASTClassMap());"); } println("}"); println(""); // Generate code for each rule in the grammar Enumeration ids = grammar.rules.elements(); int ruleNum = 0; while (ids.hasMoreElements()) { GrammarSymbol sym = (GrammarSymbol)ids.nextElement(); if (sym instanceof RuleSymbol) { RuleSymbol rs = (RuleSymbol)sym; genRule(rs, rs.references.size() == 0, ruleNum++); } exitIfError(); } // Generate the token names genTokenStrings(); if ( grammar.buildAST ) { genTokenASTNodeMap(); } // Generate the bitsets used throughout the grammar genBitsets(bitsetsUsed, grammar.tokenManager.maxTokenType()); // Generate the semantic predicate map for debugging if (grammar.debuggingOutput) genSemPredMap(); // Close class definition 
println(""); println("}"); // Close the parser output stream currentOutput.close(); currentOutput = null; } /** Generate code for the given grammar element. * @param blk The rule-reference to generate */ public void gen(RuleRefElement rr) { if (DEBUG_CODE_GENERATOR) System.out.println("genRR(" + rr + ")"); RuleSymbol rs = (RuleSymbol)grammar.getSymbol(rr.targetRule); if (rs == null || !rs.isDefined()) { // Is this redundant??? antlrTool.error("Rule '" + rr.targetRule + "' is not defined", grammar.getFilename(), rr.getLine(), rr.getColumn()); return; } if (!(rs instanceof RuleSymbol)) { // Is this redundant??? antlrTool.error("'" + rr.targetRule + "' does not name a grammar rule", grammar.getFilename(), rr.getLine(), rr.getColumn()); return; } genErrorTryForElement(rr); // AST value for labeled rule refs in tree walker. // This is not AST construction; it is just the input tree node value. if (grammar instanceof TreeWalkerGrammar && rr.getLabel() != null && syntacticPredLevel == 0) { println(rr.getLabel() + " = _t==ASTNULL ? null : " + lt1Value + ";"); } // if in lexer and ! on rule ref or alt or rule, save buffer index to kill later if (grammar instanceof LexerGrammar && (!saveText || rr.getAutoGenType() == GrammarElement.AUTO_GEN_BANG)) { println("_saveIndex=text.length();"); } // Process return value assignment if any printTabs(); if (rr.idAssign != null) { // Warn if the rule has no return type if (rs.block.returnAction == null) { antlrTool.warning("Rule '" + rr.targetRule + "' has no return type", grammar.getFilename(), rr.getLine(), rr.getColumn()); } _print(rr.idAssign + "="); } else { // Warn about return value if any, but not inside syntactic predicate if (!(grammar instanceof LexerGrammar) && syntacticPredLevel == 0 && rs.block.returnAction != null) { antlrTool.warning("Rule '" + rr.targetRule + "' returns a value", grammar.getFilename(), rr.getLine(), rr.getColumn()); } } // Call the rule GenRuleInvocation(rr); // if in lexer and ! 
on element or alt or rule, save buffer index to kill later if (grammar instanceof LexerGrammar && (!saveText || rr.getAutoGenType() == GrammarElement.AUTO_GEN_BANG)) { println("text.setLength(_saveIndex);"); } // if not in a syntactic predicate if (syntacticPredLevel == 0) { boolean doNoGuessTest = ( grammar.hasSyntacticPredicate && ( grammar.buildAST && rr.getLabel() != null || (genAST && rr.getAutoGenType() == GrammarElement.AUTO_GEN_NONE) ) ); if (doNoGuessTest) { // println("if (inputState.guessing==0) {"); // tabs++; } if (grammar.buildAST && rr.getLabel() != null) { // always gen variable for rule return on labeled rules println(rr.getLabel() + "_AST = (" + labeledElementASTType + ")returnAST;"); } if (genAST) { switch (rr.getAutoGenType()) { case GrammarElement.AUTO_GEN_NONE: // println("theASTFactory.addASTChild(currentAST, returnAST);"); println("astFactory.addASTChild(currentAST, returnAST);"); break; case GrammarElement.AUTO_GEN_CARET: antlrTool.error("Internal: encountered ^ after rule reference"); break; default: break; } } // if a lexer and labeled, Token label defined at rule level, just set it here if (grammar instanceof LexerGrammar && rr.getLabel() != null) { println(rr.getLabel() + "=_returnToken;"); } if (doNoGuessTest) { // tabs--; // println("}"); } } genErrorCatchForElement(rr); } /** Generate code for the given grammar element. * @param blk The string-literal reference to generate */ public void gen(StringLiteralElement atom) { if (DEBUG_CODE_GENERATOR) System.out.println("genString(" + atom + ")"); // Variable declarations for labeled elements if (atom.getLabel() != null && syntacticPredLevel == 0) { println(atom.getLabel() + " = " + lt1Value + ";"); } // AST genElementAST(atom); // is there a bang on the literal? 
boolean oldsaveText = saveText; saveText = saveText && atom.getAutoGenType() == GrammarElement.AUTO_GEN_NONE; // matching genMatch(atom); saveText = oldsaveText; // tack on tree cursor motion if doing a tree walker if (grammar instanceof TreeWalkerGrammar) { println("_t = _t.getNextSibling();"); } } /** Generate code for the given grammar element. * @param blk The token-range reference to generate */ public void gen(TokenRangeElement r) { genErrorTryForElement(r); if (r.getLabel() != null && syntacticPredLevel == 0) { println(r.getLabel() + " = " + lt1Value + ";"); } // AST genElementAST(r); // match println("matchRange(" + r.beginText + "," + r.endText + ");"); genErrorCatchForElement(r); } /** Generate code for the given grammar element. * @param blk The token-reference to generate */ public void gen(TokenRefElement atom) { if (DEBUG_CODE_GENERATOR) System.out.println("genTokenRef(" + atom + ")"); if (grammar instanceof LexerGrammar) { antlrTool.panic("Token reference found in lexer"); } genErrorTryForElement(atom); // Assign Token value to token label variable if (atom.getLabel() != null && syntacticPredLevel == 0) { println(atom.getLabel() + " = " + lt1Value + ";"); } // AST genElementAST(atom); // matching genMatch(atom); genErrorCatchForElement(atom); // tack on tree cursor motion if doing a tree walker if (grammar instanceof TreeWalkerGrammar) { println("_t = _t.getNextSibling();"); } } public void gen(TreeElement t) { // save AST cursor println("AST __t" + t.ID + " = _t;"); // If there is a label on the root, then assign that to the variable if (t.root.getLabel() != null) { println(t.root.getLabel() + " = _t==ASTNULL ? null :(" + labeledElementASTType + ")_t;"); } // check for invalid modifiers ! and ^ on tree element roots if ( t.root.getAutoGenType() == GrammarElement.AUTO_GEN_BANG ) { antlrTool.error("Suffixing a root node with '!' 
is not implemented", grammar.getFilename(), t.getLine(), t.getColumn()); t.root.setAutoGenType(GrammarElement.AUTO_GEN_NONE); } if ( t.root.getAutoGenType() == GrammarElement.AUTO_GEN_CARET ) { antlrTool.warning("Suffixing a root node with '^' is redundant; already a root", grammar.getFilename(), t.getLine(), t.getColumn()); t.root.setAutoGenType(GrammarElement.AUTO_GEN_NONE); } // Generate AST variables genElementAST(t.root); if (grammar.buildAST) { // Save the AST construction state println("ASTPair __currentAST" + t.ID + " = currentAST.copy();"); // Make the next item added a child of the TreeElement root println("currentAST.root = currentAST.child;"); println("currentAST.child = null;"); } // match root if ( t.root instanceof WildcardElement ) { println("if ( _t==null ) throw new MismatchedTokenException();"); } else { genMatch(t.root); } // move to list of children println("_t = _t.getFirstChild();"); // walk list of children, generating code for each for (int i = 0; i < t.getAlternatives().size(); i++) { Alternative a = t.getAlternativeAt(i); AlternativeElement e = a.head; while (e != null) { e.generate(); e = e.next; } } if (grammar.buildAST) { // restore the AST construction state to that just after the // tree root was added println("currentAST = __currentAST" + t.ID + ";"); } // restore AST cursor println("_t = __t" + t.ID + ";"); // move cursor to sibling of tree just parsed println("_t = _t.getNextSibling();"); } /** Generate the tree-parser Java file */ public void gen(TreeWalkerGrammar g) throws IOException { // SAS: debugging stuff removed for now... setGrammar(g); if (!(grammar instanceof TreeWalkerGrammar)) { antlrTool.panic("Internal error generating tree-walker"); } // Open the output stream for the parser and set the currentOutput // SAS: move file open to method so subclass can override it // (mainly for VAJ interface) setupOutput(grammar.getClassName()); genAST = grammar.buildAST; tabs = 0; // Generate the header common to all output files. 
genHeader(); // Do not use printAction because we assume tabs==0 println(behavior.getHeaderAction("")); // Generate header for the parser println("import antlr." + grammar.getSuperClass() + ";"); println("import antlr.Token;"); println("import antlr.collections.AST;"); println("import antlr.RecognitionException;"); println("import antlr.ANTLRException;"); println("import antlr.NoViableAltException;"); println("import antlr.MismatchedTokenException;"); println("import antlr.SemanticException;"); println("import antlr.collections.impl.BitSet;"); println("import antlr.ASTPair;"); println("import antlr.collections.impl.ASTArray;"); // Output the user-defined parser premamble println(grammar.preambleAction.getText()); // Generate parser class definition String sup = null; if (grammar.superClass != null) { sup = grammar.superClass; } else { sup = "antlr." + grammar.getSuperClass(); } println(""); // print javadoc comment if any if (grammar.comment != null) { _println(grammar.comment); } // get prefix (replaces "public" and lets user specify) String prefix = "public"; Token tprefix = (Token)grammar.options.get("classHeaderPrefix"); if (tprefix != null) { String p = StringUtils.stripFrontBack(tprefix.getText(), "\"", "\""); if (p != null) { prefix = p; } } print(prefix+" "); print("class " + grammar.getClassName() + " extends " + sup); println(" implements " + grammar.tokenManager.getName() + TokenTypesFileSuffix); Token tsuffix = (Token)grammar.options.get("classHeaderSuffix"); if (tsuffix != null) { String suffix = StringUtils.stripFrontBack(tsuffix.getText(), "\"", "\""); if (suffix != null) { print(", " + suffix); // must be an interface name for Java } } println(" {"); // Generate user-defined parser class members print( processActionForSpecialSymbols(grammar.classMemberAction.getText(), grammar.classMemberAction.getLine(), currentRule, null) ); // Generate default parser class constructor println("public " + grammar.getClassName() + "() {"); tabs++; 
println("tokenNames = _tokenNames;"); tabs--; println("}"); println(""); // Generate code for each rule in the grammar Enumeration ids = grammar.rules.elements(); int ruleNum = 0; String ruleNameInits = ""; while (ids.hasMoreElements()) { GrammarSymbol sym = (GrammarSymbol)ids.nextElement(); if (sym instanceof RuleSymbol) { RuleSymbol rs = (RuleSymbol)sym; genRule(rs, rs.references.size() == 0, ruleNum++); } exitIfError(); } // Generate the token names genTokenStrings(); // Generate the bitsets used throughout the grammar genBitsets(bitsetsUsed, grammar.tokenManager.maxTokenType()); // Close class definition println("}"); println(""); // Close the parser output stream currentOutput.close(); currentOutput = null; } /** Generate code for the given grammar element. * @param wc The wildcard element to generate */ public void gen(WildcardElement wc) { // Variable assignment for labeled elements if (wc.getLabel() != null && syntacticPredLevel == 0) { println(wc.getLabel() + " = " + lt1Value + ";"); } // AST genElementAST(wc); // Match anything but EOF if (grammar instanceof TreeWalkerGrammar) { println("if ( _t==null ) throw new MismatchedTokenException();"); } else if (grammar instanceof LexerGrammar) { if (grammar instanceof LexerGrammar && (!saveText || wc.getAutoGenType() == GrammarElement.AUTO_GEN_BANG)) { println("_saveIndex=text.length();"); } println("matchNot(EOF_CHAR);"); if (grammar instanceof LexerGrammar && (!saveText || wc.getAutoGenType() == GrammarElement.AUTO_GEN_BANG)) { println("text.setLength(_saveIndex);"); // kill text atom put in buffer } } else { println("matchNot(" + getValueString(Token.EOF_TYPE) + ");"); } // tack on tree cursor motion if doing a tree walker if (grammar instanceof TreeWalkerGrammar) { println("_t = _t.getNextSibling();"); } } /** Generate code for the given grammar element. 
* @param blk The (...)* block to generate */ public void gen(ZeroOrMoreBlock blk) { if (DEBUG_CODE_GENERATOR) System.out.println("gen*(" + blk + ")"); println("{"); genBlockPreamble(blk); String label; if (blk.getLabel() != null) { label = blk.getLabel(); } else { label = "_loop" + blk.ID; } println(label + ":"); println("do {"); tabs++; // generate the init action for ()* inside the loop // this allows us to do usefull EOF checking... genBlockInitAction(blk); // Tell AST generation to build subrule result String saveCurrentASTResult = currentASTResult; if (blk.getLabel() != null) { currentASTResult = blk.getLabel(); } boolean ok = grammar.theLLkAnalyzer.deterministic(blk); // generate exit test if greedy set to false // and an alt is ambiguous with exit branch // or when lookahead derived purely from end-of-file // Lookahead analysis stops when end-of-file is hit, // returning set {epsilon}. Since {epsilon} is not // ambig with any real tokens, no error is reported // by deterministic() routines and we have to check // for the case where the lookahead depth didn't get // set to NONDETERMINISTIC (this only happens when the // FOLLOW contains real atoms + epsilon). 
boolean generateNonGreedyExitPath = false; int nonGreedyExitDepth = grammar.maxk; if (!blk.greedy && blk.exitLookaheadDepth <= grammar.maxk && blk.exitCache[blk.exitLookaheadDepth].containsEpsilon()) { generateNonGreedyExitPath = true; nonGreedyExitDepth = blk.exitLookaheadDepth; } else if (!blk.greedy && blk.exitLookaheadDepth == LLkGrammarAnalyzer.NONDETERMINISTIC) { generateNonGreedyExitPath = true; } if (generateNonGreedyExitPath) { if (DEBUG_CODE_GENERATOR) { System.out.println("nongreedy (...)* loop; exit depth is " + blk.exitLookaheadDepth); } String predictExit = getLookaheadTestExpression(blk.exitCache, nonGreedyExitDepth); println("// nongreedy exit test"); println("if (" + predictExit + ") break " + label + ";"); } JavaBlockFinishingInfo howToFinish = genCommonBlock(blk, false); genBlockFinish(howToFinish, "break " + label + ";"); tabs--; println("} while (true);"); println("}"); // Restore previous AST generation currentASTResult = saveCurrentASTResult; } /** Generate an alternative. * @param alt The alternative to generate * @param blk The block to which the alternative belongs */ protected void genAlt(Alternative alt, AlternativeBlock blk) { // Save the AST generation state, and set it to that of the alt boolean savegenAST = genAST; genAST = genAST && alt.getAutoGen(); boolean oldsaveTest = saveText; saveText = saveText && alt.getAutoGen(); // Reset the variable name map for the alternative Hashtable saveMap = treeVariableMap; treeVariableMap = new Hashtable(); // Generate try block around the alt for error handling if (alt.exceptionSpec != null) { println("try { // for error handling"); tabs++; } AlternativeElement elem = alt.head; while (!(elem instanceof BlockEndElement)) { elem.generate(); // alt can begin with anything. Ask target to gen. 
elem = elem.next; } if (genAST) { if (blk instanceof RuleBlock) { // Set the AST return value for the rule RuleBlock rblk = (RuleBlock)blk; if (grammar.hasSyntacticPredicate) { // println("if ( inputState.guessing==0 ) {"); // tabs++; } println(rblk.getRuleName() + "_AST = (" + labeledElementASTType + ")currentAST.root;"); if (grammar.hasSyntacticPredicate) { // --tabs; // println("}"); } } else if (blk.getLabel() != null) { // ### future: also set AST value for labeled subrules. // println(blk.getLabel() + "_AST = ("+labeledElementASTType+")currentAST.root;"); antlrTool.warning("Labeled subrules not yet supported", grammar.getFilename(), blk.getLine(), blk.getColumn()); } } if (alt.exceptionSpec != null) { // close try block tabs--; println("}"); genErrorHandler(alt.exceptionSpec); } genAST = savegenAST; saveText = oldsaveTest; treeVariableMap = saveMap; } /** Generate all the bitsets to be used in the parser or lexer * Generate the raw bitset data like "long _tokenSet1_data[] = {...};" * and the BitSet object declarations like "BitSet _tokenSet1 = new BitSet(_tokenSet1_data);" * Note that most languages do not support object initialization inside a * class definition, so other code-generators may have to separate the * bitset declarations from the initializations (e.g., put the initializations * in the generated constructor instead). * @param bitsetList The list of bitsets to generate. * @param maxVocabulary Ensure that each generated bitset can contain at least this value. 
*/ protected void genBitsets(Vector bitsetList, int maxVocabulary ) { println(""); for (int i = 0; i < bitsetList.size(); i++) { BitSet p = (BitSet)bitsetList.elementAt(i); // Ensure that generated BitSet is large enough for vocabulary p.growToInclude(maxVocabulary); genBitSet(p, i); } } /** Do something simple like: * private static final long[] mk_tokenSet_0() { * long[] data = { -2305839160922996736L, 63L, 16777216L, 0L, 0L, 0L }; * return data; * } * public static final BitSet _tokenSet_0 = new BitSet(mk_tokenSet_0()); * * Or, for large bitsets, optimize init so ranges are collapsed into loops. * This is most useful for lexers using unicode. */ private void genBitSet(BitSet p, int id) { // initialization data println( "private static final long[] mk" + getBitsetName(id) + "() {" ); int n = p.lengthInLongWords(); if ( n= makeSwitchThreshold) { // Determine the name of the item to be compared String testExpr = lookaheadString(1); createdLL1Switch = true; // when parsing trees, convert null to valid tree node with NULL lookahead if (grammar instanceof TreeWalkerGrammar) { println("if (_t==null) _t=ASTNULL;"); } println("switch ( " + testExpr + ") {"); for (int i = 0; i < blk.alternatives.size(); i++) { Alternative alt = blk.getAlternativeAt(i); // ignore any non-LL(1) alts, predicated alts, // or end-of-token alts for case expressions if (!suitableForCaseExpression(alt)) { continue; } Lookahead p = alt.cache[1]; if (p.fset.degree() == 0 && !p.containsEpsilon()) { antlrTool.warning("Alternate omitted due to empty prediction set", grammar.getFilename(), alt.head.getLine(), alt.head.getColumn()); } else { genCases(p.fset); println("{"); tabs++; genAlt(alt, blk); println("break;"); tabs--; println("}"); } } println("default:"); tabs++; } // do non-LL(1) and nondeterministic cases This is tricky in // the lexer, because of cases like: STAR : '*' ; ASSIGN_STAR // : "*="; Since nextToken is generated without a loop, then // the STAR will have end-of-token as it's 
lookahead set for // LA(2). So, we must generate the alternatives containing // trailing end-of-token in their lookahead sets *after* the // alternatives without end-of-token. This implements the // usual lexer convention that longer matches come before // shorter ones, e.g. "*=" matches ASSIGN_STAR not STAR // // For non-lexer grammars, this does not sort the alternates // by depth Note that alts whose lookahead is purely // end-of-token at k=1 end up as default or else clauses. int startDepth = (grammar instanceof LexerGrammar) ? grammar.maxk : 0; for (int altDepth = startDepth; altDepth >= 0; altDepth--) { if (DEBUG_CODE_GENERATOR) System.out.println("checking depth " + altDepth); for (int i = 0; i < blk.alternatives.size(); i++) { Alternative alt = blk.getAlternativeAt(i); if (DEBUG_CODE_GENERATOR) System.out.println("genAlt: " + i); // if we made a switch above, ignore what we already took care // of. Specifically, LL(1) alts with no preds // that do not have end-of-token in their prediction set // and that are not giant unicode sets. if (createdLL1Switch && suitableForCaseExpression(alt)) { if (DEBUG_CODE_GENERATOR) System.out.println("ignoring alt because it was in the switch"); continue; } String e; boolean unpredicted = false; if (grammar instanceof LexerGrammar) { // Calculate the "effective depth" of the alt, // which is the max depth at which // cache[depth]!=end-of-token int effectiveDepth = alt.lookaheadDepth; if (effectiveDepth == GrammarAnalyzer.NONDETERMINISTIC) { // use maximum lookahead effectiveDepth = grammar.maxk; } while (effectiveDepth >= 1 && alt.cache[effectiveDepth].containsEpsilon()) { effectiveDepth--; } // Ignore alts whose effective depth is other than // the ones we are generating for this iteration. 
if (effectiveDepth != altDepth) { if (DEBUG_CODE_GENERATOR) System.out.println("ignoring alt because effectiveDepth!=altDepth;" + effectiveDepth + "!=" + altDepth); continue; } unpredicted = lookaheadIsEmpty(alt, effectiveDepth); e = getLookaheadTestExpression(alt, effectiveDepth); } else { unpredicted = lookaheadIsEmpty(alt, grammar.maxk); e = getLookaheadTestExpression(alt, grammar.maxk); } // Was it a big unicode range that forced unsuitability // for a case expression? if (alt.cache[1].fset.degree() > caseSizeThreshold && suitableForCaseExpression(alt)) { if (nIF == 0) { println("if " + e + " {"); } else { println("else if " + e + " {"); } } else if (unpredicted && alt.semPred == null && alt.synPred == null) { // The alt has empty prediction set and no // predicate to help out. if we have not // generated a previous if, just put {...} around // the end-of-token clause if (nIF == 0) { println("{"); } else { println("else {"); } finishingInfo.needAnErrorClause = false; } else { // check for sem and syn preds // Add any semantic predicate expression to the // lookahead test if (alt.semPred != null) { // if debugging, wrap the evaluation of the // predicate in a method translate $ and # // references ActionTransInfo tInfo = new ActionTransInfo(); String actionStr = processActionForSpecialSymbols(alt.semPred, blk.line, currentRule, tInfo); // ignore translation info...we don't need to // do anything with it. 
call that will inform // SemanticPredicateListeners of the result if (((grammar instanceof ParserGrammar) || (grammar instanceof LexerGrammar)) && grammar.debuggingOutput) { e = "(" + e + "&& fireSemanticPredicateEvaluated(antlr.debug.SemanticPredicateEvent.PREDICTING," + addSemPred(charFormatter.escapeString(actionStr)) + "," + actionStr + "))"; } else { e = "(" + e + "&&(" + actionStr + "))"; } } // Generate any syntactic predicates if (nIF > 0) { if (alt.synPred != null) { println("else {"); tabs++; genSynPred(alt.synPred, e); closingBracesOfIFSequence++; } else { println("else if " + e + " {"); } } else { if (alt.synPred != null) { genSynPred(alt.synPred, e); } else { // when parsing trees, convert null to // valid tree node with NULL lookahead. if (grammar instanceof TreeWalkerGrammar) { println("if (_t==null) _t=ASTNULL;"); } println("if " + e + " {"); } } } nIF++; tabs++; genAlt(alt, blk); tabs--; println("}"); } } String ps = ""; for (int i = 1; i <= closingBracesOfIFSequence; i++) { ps += "}"; } // Restore the AST generation state genAST = savegenAST; // restore save text state saveText = oldsaveTest; // Return the finishing info. if (createdLL1Switch) { tabs--; finishingInfo.postscript = ps + "}"; finishingInfo.generatedSwitch = true; finishingInfo.generatedAnIf = nIF > 0; //return new JavaBlockFinishingInfo(ps+"}",true,nIF>0); // close up switch statement } else { finishingInfo.postscript = ps; finishingInfo.generatedSwitch = false; finishingInfo.generatedAnIf = nIF > 0; // return new JavaBlockFinishingInfo(ps, false,nIF>0); } return finishingInfo; } private static boolean suitableForCaseExpression(Alternative a) { return a.lookaheadDepth == 1 && a.semPred == null && !a.cache[1].containsEpsilon() && a.cache[1].fset.degree() <= caseSizeThreshold; } /** Generate code to link an element reference into the AST */ private void genElementAST(AlternativeElement el) { // handle case where you're not building trees, but are in tree walker. 
// (genElementAST continued) Just need to get labels set up.
if (grammar instanceof TreeWalkerGrammar && !grammar.buildAST) {
    String elementRef;
    String astName;
    // Generate names and declarations of the AST variable(s)
    if (el.getLabel() == null) {
        elementRef = lt1Value;
        // Generate AST variables for unlabeled stuff
        astName = "tmp" + astVarNumber + "_AST";
        astVarNumber++;
        // Map the generated AST variable in the alternate
        mapTreeVariable(el, astName);
        // Generate an "input" AST variable also
        println(labeledElementASTType + " " + astName + "_in = " + elementRef + ";");
    }
    return;
}
if (grammar.buildAST && syntacticPredLevel == 0) {
    // declare an AST variable when tree building is on and the element is
    // labeled or not excluded by '!' (AUTO_GEN_BANG)
    boolean needASTDecl = (genAST && (el.getLabel() != null || el.getAutoGenType() != GrammarElement.AUTO_GEN_BANG ) );
    // RK: if we have a grammar element always generate the decl
    // since some guy can access it from an action and we can't
    // peek ahead (well not without making a mess).
    // I'd prefer taking this out.
    if (el.getAutoGenType() != GrammarElement.AUTO_GEN_BANG && (el instanceof TokenRefElement)) {
        needASTDecl = true;
    }
    boolean doNoGuessTest = (grammar.hasSyntacticPredicate && needASTDecl);
    String elementRef;
    String astNameBase;
    // Generate names and declarations of the AST variable(s)
    if (el.getLabel() != null) {
        elementRef = el.getLabel();
        astNameBase = el.getLabel();
    }
    else {
        elementRef = lt1Value;
        // Generate AST variables for unlabeled stuff
        astNameBase = "tmp" + astVarNumber;
        ; // NOTE(review): stray empty statement in original; harmless
        astVarNumber++;
    }
    // Generate the declaration if required.
    if (needASTDecl) {
        // Generate the declaration
        if (el instanceof GrammarAtom) {
            GrammarAtom ga = (GrammarAtom)el;
            if (ga.getASTNodeType() != null) {
                // atom carries a heterogeneous AST node type
                genASTDeclaration(el, astNameBase, ga.getASTNodeType());
                // println(ga.getASTNodeType()+" " + astName+" = null;");
            }
            else {
                genASTDeclaration(el, astNameBase, labeledElementASTType);
                // println(labeledElementASTType+" " + astName + " = null;");
            }
        }
        else {
            genASTDeclaration(el, astNameBase, labeledElementASTType);
            // println(labeledElementASTType+" " + astName + " = null;");
        }
    }
    // for convenience..
    String astName = astNameBase + "_AST";
    // Map the generated AST variable in the alternate
    mapTreeVariable(el, astName);
    if (grammar instanceof TreeWalkerGrammar) {
        // Generate an "input" AST variable also
        println(labeledElementASTType + " " + astName + "_in = null;");
    }
    // Enclose actions with !guessing
    if (doNoGuessTest) {
        // println("if (inputState.guessing==0) {");
        // tabs++;
    }
    // if something has a label assume it will be used
    // so we must initialize the RefAST
    if (el.getLabel() != null) {
        if (el instanceof GrammarAtom) {
            println(astName + " = " + getASTCreateString((GrammarAtom)el, elementRef) + ";");
        }
        else {
            println(astName + " = " + getASTCreateString(elementRef) + ";");
        }
    }
    // if it has no label but a declaration exists initialize it.
    if (el.getLabel() == null && needASTDecl) {
        elementRef = lt1Value;
        if (el instanceof GrammarAtom) {
            println(astName + " = " + getASTCreateString((GrammarAtom)el, elementRef) + ";");
        }
        else {
            println(astName + " = " + getASTCreateString(elementRef) + ";");
        }
        // Map the generated AST variable in the alternate
        if (grammar instanceof TreeWalkerGrammar) {
            // set "input" AST variable also
            println(astName + "_in = " + elementRef + ";");
        }
    }
    if (genAST) {
        // hook the node into the tree being built: child by default, root for '^'
        switch (el.getAutoGenType()) {
            case GrammarElement.AUTO_GEN_NONE:
                println("astFactory.addASTChild(currentAST, " + astName + ");");
                break;
            case GrammarElement.AUTO_GEN_CARET:
                println("astFactory.makeASTRoot(currentAST, " + astName + ");");
                break;
            default:
                break;
        }
    }
    if (doNoGuessTest) {
        // tabs--;
        // println("}");
    }
}
}

/** Close the try block and generate catch phrases
 *  if the element has a labeled handler in the rule
 */
private void genErrorCatchForElement(AlternativeElement el) {
    if (el.getLabel() == null) return;
    String r = el.enclosingRuleName;
    if (grammar instanceof LexerGrammar) {
        r = CodeGenerator.encodeLexerRuleName(el.enclosingRuleName);
    }
    RuleSymbol rs = (RuleSymbol)grammar.getSymbol(r);
    if (rs == null) {
        antlrTool.panic("Enclosing rule not found!");
    }
    ExceptionSpec ex = rs.block.findExceptionSpec(el.getLabel());
    if (ex != null) {
        tabs--;
        println("}");
        genErrorHandler(ex);
    }
}

/** Generate the catch phrases for a user-specified error handler */
private void genErrorHandler(ExceptionSpec ex) {
    // Each ExceptionHandler in the ExceptionSpec is a separate catch
    for (int i = 0; i < ex.handlers.size(); i++) {
        ExceptionHandler handler = (ExceptionHandler)ex.handlers.elementAt(i);
        // Generate catch phrase
        println("catch (" + handler.exceptionTypeAndName.getText() + ") {");
        tabs++;
        if (grammar.hasSyntacticPredicate) {
            println("if (inputState.guessing==0) {");
            tabs++;
        }
        // When not guessing, execute user handler action
        ActionTransInfo tInfo = new ActionTransInfo();
        printAction(
processActionForSpecialSymbols(handler.action.getText(), handler.action.getLine(), currentRule, tInfo) ); if (grammar.hasSyntacticPredicate) { tabs--; println("} else {"); tabs++; // When guessing, rethrow exception println( "throw " + extractIdOfAction(handler.exceptionTypeAndName) + ";" ); tabs--; println("}"); } // Close catch phrase tabs--; println("}"); } } /** Generate a try { opening if the element has a labeled handler in the rule */ private void genErrorTryForElement(AlternativeElement el) { if (el.getLabel() == null) return; String r = el.enclosingRuleName; if (grammar instanceof LexerGrammar) { r = CodeGenerator.encodeLexerRuleName(el.enclosingRuleName); } RuleSymbol rs = (RuleSymbol)grammar.getSymbol(r); if (rs == null) { antlrTool.panic("Enclosing rule not found!"); } ExceptionSpec ex = rs.block.findExceptionSpec(el.getLabel()); if (ex != null) { println("try { // for error handling"); tabs++; } } protected void genASTDeclaration(AlternativeElement el) { genASTDeclaration(el, labeledElementASTType); } protected void genASTDeclaration(AlternativeElement el, String node_type) { genASTDeclaration(el, el.getLabel(), node_type); } protected void genASTDeclaration(AlternativeElement el, String var_name, String node_type) { // already declared? 
if (declaredASTVariables.contains(el)) return; // emit code println(node_type + " " + var_name + "_AST = null;"); // mark as declared declaredASTVariables.put(el,el); } /** Generate a header that is common to all Java files */ protected void genHeader() { println("// $ANTLR " + Tool.version + ": " + "\"" + antlrTool.fileMinusPath(antlrTool.grammarFile) + "\"" + " -> " + "\"" + grammar.getClassName() + ".java\"$"); } private void genLiteralsTest() { println("_ttype = testLiteralsTable(_ttype);"); } private void genLiteralsTestForPartialToken() { println("_ttype = testLiteralsTable(new String(text.getBuffer(),_begin,text.length()-_begin),_ttype);"); } protected void genMatch(BitSet b) { } protected void genMatch(GrammarAtom atom) { if (atom instanceof StringLiteralElement) { if (grammar instanceof LexerGrammar) { genMatchUsingAtomText(atom); } else { genMatchUsingAtomTokenType(atom); } } else if (atom instanceof CharLiteralElement) { if (grammar instanceof LexerGrammar) { genMatchUsingAtomText(atom); } else { antlrTool.error("cannot ref character literals in grammar: " + atom); } } else if (atom instanceof TokenRefElement) { genMatchUsingAtomText(atom); } else if (atom instanceof WildcardElement) { gen((WildcardElement)atom); } } protected void genMatchUsingAtomText(GrammarAtom atom) { // match() for trees needs the _t cursor String astArgs = ""; if (grammar instanceof TreeWalkerGrammar) { astArgs = "_t,"; } // if in lexer and ! on element, save buffer index to kill later if (grammar instanceof LexerGrammar && (!saveText || atom.getAutoGenType() == GrammarElement.AUTO_GEN_BANG)) { println("_saveIndex=text.length();"); } print(atom.not ? 
"matchNot(" : "match("); _print(astArgs); // print out what to match if (atom.atomText.equals("EOF")) { // horrible hack to handle EOF case _print("Token.EOF_TYPE"); } else { _print(atom.atomText); } _println(");"); if (grammar instanceof LexerGrammar && (!saveText || atom.getAutoGenType() == GrammarElement.AUTO_GEN_BANG)) { println("text.setLength(_saveIndex);"); // kill text atom put in buffer } } protected void genMatchUsingAtomTokenType(GrammarAtom atom) { // match() for trees needs the _t cursor String astArgs = ""; if (grammar instanceof TreeWalkerGrammar) { astArgs = "_t,"; } // If the literal can be mangled, generate the symbolic constant instead String mangledName = null; String s = astArgs + getValueString(atom.getType()); // matching println((atom.not ? "matchNot(" : "match(") + s + ");"); } /** Generate the nextToken() rule. nextToken() is a synthetic * lexer rule that is the implicit OR of all user-defined * lexer rules. */ public void genNextToken() { // Are there any public rules? If not, then just generate a // fake nextToken(). 
boolean hasPublicRules = false;
    for (int i = 0; i < grammar.rules.size(); i++) {
        RuleSymbol rs = (RuleSymbol)grammar.rules.elementAt(i);
        if (rs.isDefined() && rs.access.equals("public")) {
            hasPublicRules = true;
            break;
        }
    }
    if (!hasPublicRules) {
        // no public rules: emit a stub nextToken() that only reports EOF
        println("");
        println("public Token nextToken() throws TokenStreamException {");
        println("\ttry {uponEOF();}");
        println("\tcatch(CharStreamIOException csioe) {");
        println("\t\tthrow new TokenStreamIOException(csioe.io);");
        println("\t}");
        println("\tcatch(CharStreamException cse) {");
        println("\t\tthrow new TokenStreamException(cse.getMessage());");
        println("\t}");
        println("\treturn new CommonToken(Token.EOF_TYPE, \"\");");
        println("}");
        println("");
        return;
    }
    // Create the synthesized nextToken() rule
    RuleBlock nextTokenBlk = MakeGrammar.createNextTokenRule(grammar, grammar.rules, "nextToken");
    // Define the nextToken rule symbol
    RuleSymbol nextTokenRs = new RuleSymbol("mnextToken");
    nextTokenRs.setDefined();
    nextTokenRs.setBlock(nextTokenBlk);
    nextTokenRs.access = "private";
    grammar.define(nextTokenRs);
    // Analyze the nextToken rule
    boolean ok = grammar.theLLkAnalyzer.deterministic(nextTokenBlk);
    // Generate the next token rule
    String filterRule = null;
    if (((LexerGrammar)grammar).filterMode) {
        filterRule = ((LexerGrammar)grammar).filterRule;
    }
    println("");
    println("public Token nextToken() throws TokenStreamException {");
    tabs++;
    println("Token theRetToken=null;");
    _println("tryAgain:");
    println("for (;;) {");
    tabs++;
    println("Token _token = null;");
    println("int _ttype = Token.INVALID_TYPE;");
    if (((LexerGrammar)grammar).filterMode) {
        println("setCommitToPath(false);");
        if (filterRule != null) {
            // Here's a good place to ensure that the filter rule actually exists
            if (!grammar.isDefined(CodeGenerator.encodeLexerRuleName(filterRule))) {
                grammar.antlrTool.error("Filter rule " + filterRule + " does not exist in this lexer");
            }
            else {
                RuleSymbol rs = (RuleSymbol)grammar.getSymbol(CodeGenerator.encodeLexerRuleName(filterRule));
                if (!rs.isDefined()) {
                    grammar.antlrTool.error("Filter rule " + filterRule + " does not exist in this lexer");
                }
                else if (rs.access.equals("public")) {
                    grammar.antlrTool.error("Filter rule " + filterRule + " must be protected");
                }
            }
            println("int _m;");
            println("_m = mark();");
        }
    }
    println("resetText();");
    println("try {   // for char stream error handling");
    tabs++;
    // Generate try around whole thing to trap scanner errors
    println("try {   // for lexical error handling");
    tabs++;
    // Test for public lexical rules with empty paths
    for (int i = 0; i < nextTokenBlk.getAlternatives().size(); i++) {
        Alternative a = nextTokenBlk.getAlternativeAt(i);
        if (a.cache[1].containsEpsilon()) {
            //String r = a.head.toString();
            RuleRefElement rr = (RuleRefElement)a.head;
            String r = CodeGenerator.decodeLexerRuleName(rr.targetRule);
            antlrTool.warning("public lexical rule "+r+" is optional (can match \"nothing\")");
        }
    }
    // Generate the block
    String newline = System.getProperty("line.separator");
    JavaBlockFinishingInfo howToFinish = genCommonBlock(nextTokenBlk, false);
    String errFinish = "if (LA(1)==EOF_CHAR) {uponEOF(); _returnToken = makeToken(Token.EOF_TYPE);}";
    errFinish += newline + "\t\t\t\t";
    if (((LexerGrammar)grammar).filterMode) {
        if (filterRule == null) {
            // no filter rule: silently eat one char and retry
            errFinish += "else {consume(); continue tryAgain;}";
        }
        else {
            errFinish += "else {" + newline +
                "\t\t\t\t\tcommit();" + newline +
                "\t\t\t\t\ttry {m" + filterRule + "(false);}" + newline +
                "\t\t\t\t\tcatch(RecognitionException e) {" + newline +
                "\t\t\t\t\t // catastrophic failure" + newline +
                "\t\t\t\t\t reportError(e);" + newline +
                "\t\t\t\t\t consume();" + newline +
                "\t\t\t\t\t}" + newline +
                "\t\t\t\t\tcontinue tryAgain;" + newline +
                "\t\t\t\t}";
        }
    }
    else {
        errFinish += "else {" + throwNoViable + "}";
    }
    genBlockFinish(howToFinish, errFinish);
    // at this point a valid token has been matched, undo "mark" that was done
    if (((LexerGrammar)grammar).filterMode && filterRule != null) {
        println("commit();");
    }
    // Generate literals test if desired
    // make sure _ttype is set first; note _returnToken must be
    // non-null as the rule was required to create it.
    println("if ( _returnToken==null ) continue tryAgain; // found SKIP token");
    println("_ttype = _returnToken.getType();");
    if (((LexerGrammar)grammar).getTestLiterals()) {
        genLiteralsTest();
    }
    // return token created by rule reference in switch
    println("_returnToken.setType(_ttype);");
    println("return _returnToken;");
    // Close try block
    tabs--;
    println("}");
    println("catch (RecognitionException e) {");
    tabs++;
    if (((LexerGrammar)grammar).filterMode) {
        if (filterRule == null) {
            println("if ( !getCommitToPath() ) {consume(); continue tryAgain;}");
        }
        else {
            println("if ( !getCommitToPath() ) {");
            tabs++;
            println("rewind(_m);");
            println("resetText();");
            println("try {m" + filterRule + "(false);}");
            println("catch(RecognitionException ee) {");
            println("	// horrendous failure: error in filter rule");
            println("	reportError(ee);");
            println("	consume();");
            println("}");
            println("continue tryAgain;");
            tabs--;
            println("}");
        }
    }
    if (nextTokenBlk.getDefaultErrorHandler()) {
        println("reportError(e);");
        println("consume();");
    }
    else {
        // pass on to invoking routine
        println("throw new TokenStreamRecognitionException(e);");
    }
    tabs--;
    println("}");
    // close CharStreamException try
    tabs--;
    println("}");
    println("catch (CharStreamException cse) {");
    println("	if ( cse instanceof CharStreamIOException ) {");
    println("		throw new TokenStreamIOException(((CharStreamIOException)cse).io);");
    println("	}");
    println("	else {");
    println("		throw new TokenStreamException(cse.getMessage());");
    println("	}");
    println("}");
    // close for-loop
    tabs--;
    println("}");
    // close method nextToken
    tabs--;
    println("}");
    println("");
}

/** Gen a named rule block.
 * ASTs are generated for each element of an alternative unless
 * the rule or the alternative have a '!' modifier.
*
 * If an alternative defeats the default tree construction, it
 * must set _AST to the root of the returned AST.
 *
 * Each alternative that does automatic tree construction, builds
 * up root and child list pointers in an ASTPair structure.
 *
 * A rule finishes by setting the returnAST variable from the
 * ASTPair.
 *
 * @param rule The name of the rule to generate
 * @param startSymbol true if the rule is a start symbol (i.e., not referenced elsewhere)
 */
public void genRule(RuleSymbol s, boolean startSymbol, int ruleNum) {
    tabs = 1;
    if (DEBUG_CODE_GENERATOR) System.out.println("genRule(" + s.getId() + ")");
    if (!s.isDefined()) {
        antlrTool.error("undefined rule: " + s.getId());
        return;
    }
    // Generate rule return type, name, arguments
    RuleBlock rblk = s.getBlock();
    currentRule = rblk;
    currentASTResult = s.getId();
    // clear list of declared ast variables..
    declaredASTVariables.clear();
    // Save the AST generation state, and set it to that of the rule
    boolean savegenAST = genAST;
    genAST = genAST && rblk.getAutoGen();
    // boolean oldsaveTest = saveText;
    saveText = rblk.getAutoGen();
    // print javadoc comment if any
    if (s.comment != null) {
        _println(s.comment);
    }
    // Gen method access and final qualifier
    print(s.access + " final ");
    // Gen method return type (note lexer return action set at rule creation)
    if (rblk.returnAction != null) {
        // Has specified return value
        _print(extractTypeOfAction(rblk.returnAction, rblk.getLine(), rblk.getColumn()) + " ");
    }
    else {
        // No specified return value
        _print("void ");
    }
    // Gen method name
    _print(s.getId() + "(");
    // Additional rule parameters common to all rules for this grammar
    _print(commonExtraParams);
    if (commonExtraParams.length() != 0 && rblk.argAction != null) {
        _print(",");
    }
    // Gen arguments
    if (rblk.argAction != null) {
        // Has specified arguments
        _println("");
        tabs++;
        println(rblk.argAction);
        tabs--;
        print(")");
    }
    else {
        // No specified arguments
        _print(")");
    }
    // Gen throws clause and open curly
    _print(" throws " + exceptionThrown);
    if (grammar instanceof ParserGrammar) {
        _print(", TokenStreamException");
    }
    else if (grammar instanceof LexerGrammar) {
        _print(", CharStreamException, TokenStreamException");
    }
    // Add user-defined exceptions unless lexer (for now)
    if (rblk.throwsSpec != null) {
        if (grammar instanceof LexerGrammar) {
            antlrTool.error("user-defined throws spec not allowed (yet) for lexer rule " + rblk.ruleName);
        }
        else {
            _print(", " + rblk.throwsSpec);
        }
    }
    _println(" {");
    tabs++;
    // Convert return action to variable declaration
    if (rblk.returnAction != null)
        println(rblk.returnAction + ";");
    // print out definitions needed by rules for various grammar types
    println(commonLocalVars);
    if (grammar.traceRules) {
        if (grammar instanceof TreeWalkerGrammar) {
            println("traceIn(\"" + s.getId() + "\",_t);");
        }
        else {
            println("traceIn(\"" + s.getId() + "\");");
        }
    }
    if (grammar instanceof LexerGrammar) {
        // lexer rule default return value is the rule's token name
        // This is a horrible hack to support the built-in EOF lexer rule.
        if (s.getId().equals("mEOF"))
            println("_ttype = Token.EOF_TYPE;");
        else
            println("_ttype = " + s.getId().substring(1) + ";");
        println("int _saveIndex;"); // used for element! (so we can kill text matched for element)
        /* println("boolean old_saveConsumedInput=saveConsumedInput;");
           if ( !rblk.getAutoGen() ) {      // turn off "save input" if ! on rule
           println("saveConsumedInput=false;");
           }
         */
    }
    // if debugging, write code to mark entry to the rule
    if (grammar.debuggingOutput)
        if (grammar instanceof ParserGrammar)
            println("fireEnterRule(" + ruleNum + ",0);");
        else if (grammar instanceof LexerGrammar)
            println("fireEnterRule(" + ruleNum + ",_ttype);");
    // Generate trace code if desired
    if (grammar.debuggingOutput || grammar.traceRules) {
        println("try { // debugging");
        tabs++;
    }
    // Initialize AST variables
    if (grammar instanceof TreeWalkerGrammar) {
        // "Input" value for rule
        println(labeledElementASTType + " " + s.getId() + "_AST_in = (_t == ASTNULL) ? null : (" + labeledElementASTType + ")_t;");
    }
    if (grammar.buildAST) {
        // Parser member used to pass AST returns from rule invocations
        println("returnAST = null;");
        // Tracks AST construction
        // println("ASTPair currentAST = (inputState.guessing==0) ? new ASTPair() : null;");
        println("ASTPair currentAST = new ASTPair();");
        // User-settable return value for rule.
        println(labeledElementASTType + " " + s.getId() + "_AST = null;");
    }
    genBlockPreamble(rblk);
    genBlockInitAction(rblk);
    println("");
    // Search for an unlabeled exception specification attached to the rule
    ExceptionSpec unlabeledUserSpec = rblk.findExceptionSpec("");
    // Generate try block around the entire rule for error handling
    if (unlabeledUserSpec != null || rblk.getDefaultErrorHandler()) {
        println("try {      // for error handling");
        tabs++;
    }
    // Generate the alternatives
    if (rblk.alternatives.size() == 1) {
        // One alternative -- use simple form
        Alternative alt = rblk.getAlternativeAt(0);
        String pred = alt.semPred;
        if (pred != null)
            genSemPred(pred, currentRule.line);
        if (alt.synPred != null) {
            antlrTool.warning(
                "Syntactic predicate ignored for single alternative",
                grammar.getFilename(),
                alt.synPred.getLine(),
                alt.synPred.getColumn()
            );
        }
        genAlt(alt, rblk);
    }
    else {
        // Multiple alternatives -- generate complex form
        boolean ok = grammar.theLLkAnalyzer.deterministic(rblk);
        JavaBlockFinishingInfo howToFinish = genCommonBlock(rblk, false);
        genBlockFinish(howToFinish, throwNoViable);
    }
    // Generate catch phrase for error handling
    if (unlabeledUserSpec != null || rblk.getDefaultErrorHandler()) {
        // Close the try block
        tabs--;
        println("}");
    }
    // Generate user-defined or default catch phrases
    if (unlabeledUserSpec != null) {
        genErrorHandler(unlabeledUserSpec);
    }
    else if (rblk.getDefaultErrorHandler()) {
        // Generate default catch phrase
        println("catch (" + exceptionThrown + " ex) {");
        tabs++;
        // Generate code to handle error if not guessing
        if (grammar.hasSyntacticPredicate) {
            println("if (inputState.guessing==0) {");
            tabs++;
        }
        println("reportError(ex);");
        if (!(grammar instanceof TreeWalkerGrammar)) {
            // Generate code to consume until token in k==1 follow set
            Lookahead follow = grammar.theLLkAnalyzer.FOLLOW(1, rblk.endNode);
            String followSetName = getBitsetName(markBitsetForGen(follow.fset));
            println("recover(ex," + followSetName + ");");
        }
        else {
            // Just consume one token
            println("if (_t!=null) {_t = _t.getNextSibling();}");
        }
        if (grammar.hasSyntacticPredicate) {
            tabs--;
            // When guessing, rethrow exception
            println("} else {");
            println("	throw ex;");
            println("}");
        }
        // Close catch phrase
        tabs--;
        println("}");
    }
    // Squirrel away the AST "return" value
    if (grammar.buildAST) {
        println("returnAST = " + s.getId() + "_AST;");
    }
    // Set return tree value for tree walkers
    if (grammar instanceof TreeWalkerGrammar) {
        println("_retTree = _t;");
    }
    // Generate literals test for lexer rules so marked
    if (rblk.getTestLiterals()) {
        if (s.access.equals("protected")) {
            genLiteralsTestForPartialToken();
        }
        else {
            genLiteralsTest();
        }
    }
    // if doing a lexer rule, dump code to create token if necessary
    if (grammar instanceof LexerGrammar) {
        println("if ( _createToken && _token==null && _ttype!=Token.SKIP ) {");
        println("	_token = makeToken(_ttype);");
        println("	_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));");
        println("}");
        println("_returnToken = _token;");
    }
    // Gen the return statement if there is one (lexer has hard-wired return action)
    if (rblk.returnAction != null) {
        println("return " + extractIdOfAction(rblk.returnAction, rblk.getLine(), rblk.getColumn()) + ";");
    }
    if (grammar.debuggingOutput || grammar.traceRules) {
        tabs--;
        println("} finally { // debugging");
        tabs++;
        // If debugging, generate calls to mark exit of rule
        if (grammar.debuggingOutput)
            if (grammar instanceof ParserGrammar)
                println("fireExitRule(" + ruleNum + ",0);");
            else if (grammar instanceof LexerGrammar)
                println("fireExitRule(" + ruleNum + ",_ttype);");
        if (grammar.traceRules) {
            if (grammar instanceof TreeWalkerGrammar) {
                println("traceOut(\"" + s.getId() + "\",_t);");
            }
            else {
                println("traceOut(\"" + s.getId() + "\");");
            }
        }
        tabs--;
        println("}");
    }
    tabs--;
    println("}");
    println("");
    // Restore the AST generation state
    genAST = savegenAST;
    // restore char save state
    // saveText = oldsaveTest;
}

/** Emit the call to a referenced rule, threading through the extra
 *  arguments the grammar type requires (lexer token-creation flag,
 *  common extra args, user args).
 */
private void GenRuleInvocation(RuleRefElement rr) {
    // dump rule name
    _print(rr.targetRule + "(");
    // lexers must tell rule if it should set _returnToken
    if (grammar instanceof LexerGrammar) {
        // if labeled, could access Token, so tell rule to create
        if (rr.getLabel() != null) {
            _print("true");
        }
        else {
            _print("false");
        }
        if (commonExtraArgs.length() != 0 || rr.args != null) {
            _print(",");
        }
    }
    // Extra arguments common to all rules for this grammar
    _print(commonExtraArgs);
    if (commonExtraArgs.length() != 0 && rr.args != null) {
        _print(",");
    }
    // Process arguments to method, if any
    RuleSymbol rs = (RuleSymbol)grammar.getSymbol(rr.targetRule);
    if (rr.args != null) {
        // When not guessing, execute user arg action
        ActionTransInfo tInfo = new ActionTransInfo();
        String args = processActionForSpecialSymbols(rr.args, 0, currentRule, tInfo);
        if (tInfo.assignToRoot || tInfo.refRuleRoot != null) {
            antlrTool.error("Arguments of rule reference '" + rr.targetRule + "' cannot set or ref #" + currentRule.getRuleName(), grammar.getFilename(), rr.getLine(), rr.getColumn());
        }
        _print(args);
        // Warn if the rule accepts no arguments
        if (rs.block.argAction == null) {
            antlrTool.warning("Rule '" + rr.targetRule + "' accepts no arguments", grammar.getFilename(), rr.getLine(), rr.getColumn());
        }
    }
    else {
        // For C++, no warning if rule has parameters, because there may be default
        // values for all of the parameters
        if (rs.block.argAction != null) {
            antlrTool.warning("Missing parameters on reference to rule " + rr.targetRule, grammar.getFilename(), rr.getLine(), rr.getColumn());
        }
    }
    _println(");");
    // move down to the first child while parsing
    if (grammar instanceof TreeWalkerGrammar) {
println("_t = _retTree;"); } } protected void genSemPred(String pred, int line) { // translate $ and # references ActionTransInfo tInfo = new ActionTransInfo(); pred = processActionForSpecialSymbols(pred, line, currentRule, tInfo); // ignore translation info...we don't need to do anything with it. String escapedPred = charFormatter.escapeString(pred); // if debugging, wrap the semantic predicate evaluation in a method // that can tell SemanticPredicateListeners the result if (grammar.debuggingOutput && ((grammar instanceof ParserGrammar) || (grammar instanceof LexerGrammar))) pred = "fireSemanticPredicateEvaluated(antlr.debug.SemanticPredicateEvent.VALIDATING," + addSemPred(escapedPred) + "," + pred + ")"; println("if (!(" + pred + "))"); println(" throw new SemanticException(\"" + escapedPred + "\");"); } /** Write an array of Strings which are the semantic predicate * expressions. The debugger will reference them by number only */ protected void genSemPredMap() { Enumeration e = semPreds.elements(); println("private String _semPredNames[] = {"); while (e.hasMoreElements()) println("\"" + e.nextElement() + "\","); println("};"); } protected void genSynPred(SynPredBlock blk, String lookaheadExpr) { if (DEBUG_CODE_GENERATOR) System.out.println("gen=>(" + blk + ")"); // Dump synpred result variable println("boolean synPredMatched" + blk.ID + " = false;"); // Gen normal lookahead test println("if (" + lookaheadExpr + ") {"); tabs++; // Save input state if (grammar instanceof TreeWalkerGrammar) { println("AST __t" + blk.ID + " = _t;"); } else { println("int _m" + blk.ID + " = mark();"); } // Once inside the try, assume synpred works unless exception caught println("synPredMatched" + blk.ID + " = true;"); println("inputState.guessing++;"); // if debugging, tell listeners that a synpred has started if (grammar.debuggingOutput && ((grammar instanceof ParserGrammar) || (grammar instanceof LexerGrammar))) { println("fireSyntacticPredicateStarted();"); } 
syntacticPredLevel++;
    println("try {");
    tabs++;
    gen((AlternativeBlock)blk); // gen code to test predicate
    tabs--;
    //println("System.out.println(\"pred "+blk+" succeeded\");");
    println("}");
    println("catch (" + exceptionThrown + " pe) {");
    tabs++;
    println("synPredMatched" + blk.ID + " = false;");
    //println("System.out.println(\"pred "+blk+" failed\");");
    tabs--;
    println("}");
    // Restore input state
    if (grammar instanceof TreeWalkerGrammar) {
        println("_t = __t" + blk.ID + ";");
    }
    else {
        println("rewind(_m" + blk.ID + ");");
    }
    println("inputState.guessing--;");
    // if debugging, tell listeners how the synpred turned out
    if (grammar.debuggingOutput && ((grammar instanceof ParserGrammar) || (grammar instanceof LexerGrammar))) {
        println("if (synPredMatched" + blk.ID + ")");
        println("	fireSyntacticPredicateSucceeded();");
        println("else");
        println("	fireSyntacticPredicateFailed();");
    }
    syntacticPredLevel--;
    tabs--;
    // Close lookahead test
    println("}");
    // Test synred result
    println("if ( synPredMatched" + blk.ID + " ) {");
}

/** Generate a static array containing the names of the tokens,
 * indexed by the token type values.  This static array is used
 * to format error messages so that the token identifers or literal
 * strings are displayed instead of the token numbers.
 *
 * If a lexical rule has a paraphrase, use it rather than the
 * token label.
 */
public void genTokenStrings() {
    // Generate a string for each token.  This creates a static
    // array of Strings indexed by token type.
    println("");
    println("public static final String[] _tokenNames = {");
    tabs++;
    // Walk the token vocabulary and generate a Vector of strings
    // from the tokens.
    Vector v = grammar.tokenManager.getVocabulary();
    for (int i = 0; i < v.size(); i++) {
        String s = (String)v.elementAt(i);
        if (s == null) {
            // gap in the vocabulary: emit a placeholder like <3>
            s = "<" + String.valueOf(i) + ">";
        }
        if (!s.startsWith("\"") && !s.startsWith("<")) {
            TokenSymbol ts = (TokenSymbol)grammar.tokenManager.getTokenSymbol(s);
            if (ts != null && ts.getParaphrase() != null) {
                s = StringUtils.stripFrontBack(ts.getParaphrase(), "\"", "\"");
            }
        }
        print(charFormatter.literalString(s));
        if (i != v.size() - 1) {
            _print(",");
        }
        _println("");
    }
    // Close the string array initailizer
    tabs--;
    println("};");
}

/** Create and set Integer token type objects that map
 *  to Java Class objects (which AST node to create).
 */
protected void genTokenASTNodeMap() {
    println("");
    println("protected void buildTokenTypeASTClassMap() {");
    // Generate a map.put("T","TNode") for each token
    // if heterogeneous node known for that token T.
    tabs++;
    boolean generatedNewHashtable = false;
    int n = 0;
    // Walk the token vocabulary and generate puts.
    Vector v = grammar.tokenManager.getVocabulary();
    for (int i = 0; i < v.size(); i++) {
        String s = (String)v.elementAt(i);
        if (s != null) {
            TokenSymbol ts = grammar.tokenManager.getTokenSymbol(s);
            if (ts != null && ts.getASTNodeType() != null) {
                n++;
                if ( !generatedNewHashtable ) {
                    // only generate if we are going to add a mapping
                    println("tokenTypeToASTClassMap = new Hashtable();");
                    generatedNewHashtable = true;
                }
                println("tokenTypeToASTClassMap.put(new Integer("+ts.getTokenType()+"), "+ ts.getASTNodeType()+".class);");
            }
        }
    }
    if ( n==0 ) {
        println("tokenTypeToASTClassMap=null;");
    }
    tabs--;
    // NOTE(review): emits "};" — the trailing ';' after the generated method's
    // closing brace is a stray empty declaration in the generated file; left
    // unchanged because altering it changes generator output.
    println("};");
}

/** Generate the token types Java file */
protected void genTokenTypes(TokenManager tm) throws IOException {
    // Open the token output Java file and set the currentOutput stream
    // SAS: file open was moved to a method so a subclass can override
    //      This was mainly for the VAJ interface
    setupOutput(tm.getName() + TokenTypesFileSuffix);
    tabs = 0;
    // Generate the header common to all Java files
genHeader();
    // Do not use printAction because we assume tabs==0
    println(behavior.getHeaderAction(""));
    // Encapsulate the definitions in an interface.  This can be done
    // because they are all constants.
    println("public interface " + tm.getName() + TokenTypesFileSuffix + " {");
    tabs++;
    // Generate a definition for each token type
    Vector v = tm.getVocabulary();
    // Do special tokens manually
    println("int EOF = " + Token.EOF_TYPE + ";");
    println("int NULL_TREE_LOOKAHEAD = " + Token.NULL_TREE_LOOKAHEAD + ";");
    for (int i = Token.MIN_USER_TYPE; i < v.size(); i++) {
        String s = (String)v.elementAt(i);
        if (s != null) {
            if (s.startsWith("\"")) {
                // a string literal
                StringLiteralSymbol sl = (StringLiteralSymbol)tm.getTokenSymbol(s);
                if (sl == null) {
                    antlrTool.panic("String literal " + s + " not in symbol table");
                }
                else if (sl.label != null) {
                    println("int " + sl.label + " = " + i + ";");
                }
                else {
                    String mangledName = mangleLiteral(s);
                    if (mangledName != null) {
                        // We were able to create a meaningful mangled token name
                        println("int " + mangledName + " = " + i + ";");
                        // if no label specified, make the label equal to the mangled name
                        sl.label = mangledName;
                    }
                    else {
                        // unmangleable literal: leave a comment instead of a constant
                        println("// " + s + " = " + i);
                    }
                }
            }
            else if (!s.startsWith("<")) {
                println("int " + s + " = " + i + ";");
            }
        }
    }
    // Close the interface
    tabs--;
    println("}");
    // Close the tokens output file
    currentOutput.close();
    currentOutput = null;
    exitIfError();
}

/** Get a string for an expression to generate creation of an AST subtree.
 * @param v A Vector of String, where each element is an expression in the target language yielding an AST node.
 */
public String getASTCreateString(Vector v) {
    if (v.size() == 0) {
        return "";
    }
    StringBuffer buf = new StringBuffer();
    // build: (ASTType)astFactory.make( (new ASTArray(n)).add(e1).add(e2)... )
    buf.append("(" + labeledElementASTType + ")astFactory.make( (new ASTArray(" + v.size() + "))");
    for (int i = 0; i < v.size(); i++) {
        buf.append(".add(" + v.elementAt(i) + ")");
    }
    buf.append(")");
    return buf.toString();
}

/** Get a string for an expression to generate creating of an AST node
 * @param atom The grammar node for which you are creating the node
 * @param str The arguments to the AST constructor
 */
public String getASTCreateString(GrammarAtom atom, String astCtorArgs) {
    //System.out.println("getASTCreateString("+atom+","+astCtorArgs+")");
    if (atom != null && atom.getASTNodeType() != null) {
        // they specified a type either on the reference or in tokens{} section
        return "("+atom.getASTNodeType()+")"+ "astFactory.create("+astCtorArgs+",\""+atom.getASTNodeType()+"\")";
    }
    else {
        // must be an action or something since not referencing an atom
        return getASTCreateString(astCtorArgs);
    }
}

/** Get a string for an expression to generate creating of an AST node.
 * Parse the first (possibly only) argument looking for the token type.
 * If the token type is a valid token symbol, ask for it's AST node type
 * and add to the end if only 2 arguments.  The forms are #[T], #[T,"t"],
 * and as of 2.7.2 #[T,"t",ASTclassname].
 *
 * @param str The arguments to the AST constructor
 */
public String getASTCreateString(String astCtorArgs) {
    //System.out.println("AST CTOR: "+astCtorArgs);
    if ( astCtorArgs==null ) {
        astCtorArgs = "";
    }
    int nCommas = 0;
    // NOTE(review): the text from here to the end of this method is garbled in
    // this copy — everything between "i<..." below and a later "<end-of-syn-pred>"
    // comment was stripped (angle-bracket/HTML mangling), splicing the tail of
    // getLookaheadTestExpression(Lookahead[],int) onto this method.  Preserved
    // byte-for-byte; restore from a pristine ANTLR 2.7.x JavaCodeGenerator.java.
    for (int i=0; i (epsilon) lookahead.
    // There is no way to predict what that token would be.  Just
    // allow anything instead.
    if (look[i].containsEpsilon()) {
        e.append("true");
    }
    else {
        e.append(getLookaheadTestTerm(i, p));
    }
}
e.append(")");
return e.toString();
}

/**Generate a lookahead test expression for an alternate.
This * will be a series of tests joined by '&&' and enclosed by '()', * the number of such tests being determined by the depth of the lookahead. */ protected String getLookaheadTestExpression(Alternative alt, int maxDepth) { int depth = alt.lookaheadDepth; if (depth == GrammarAnalyzer.NONDETERMINISTIC) { // if the decision is nondeterministic, do the best we can: LL(k) // any predicates that are around will be generated later. depth = grammar.maxk; } if (maxDepth == 0) { // empty lookahead can result from alt with sem pred // that can see end of token. E.g., A : {pred}? ('a')? ; return "( true )"; } return "(" + getLookaheadTestExpression(alt.cache, depth) + ")"; } /**Generate a depth==1 lookahead test expression given the BitSet. * This may be one of: * 1) a series of 'x==X||' tests * 2) a range test using >= && <= where possible, * 3) a bitset membership test for complex comparisons * @param k The lookahead level * @param p The lookahead set for level k */ protected String getLookaheadTestTerm(int k, BitSet p) { // Determine the name of the item to be compared String ts = lookaheadString(k); // Generate a range expression if possible int[] elems = p.toArray(); if (elementsAreRange(elems)) { return getRangeExpression(k, elems); } // Generate a bitset membership test if possible StringBuffer e; int degree = p.degree(); if (degree == 0) { return "true"; } if (degree >= bitsetTestThreshold) { int bitsetIdx = markBitsetForGen(p); return getBitsetName(bitsetIdx) + ".member(" + ts + ")"; } // Otherwise, generate the long-winded series of "x==X||" tests e = new StringBuffer(); for (int i = 0; i < elems.length; i++) { // Get the compared-to item (token or character value) String cs = getValueString(elems[i]); // Generate the element comparison if (i > 0) e.append("||"); e.append(ts); e.append("=="); e.append(cs); } return e.toString(); } /** Return an expression for testing a contiguous renage of elements * @param k The lookahead level * @param elems The elements 
representing the set, usually from BitSet.toArray(). * @return String containing test expression. */ public String getRangeExpression(int k, int[] elems) { if (!elementsAreRange(elems)) { antlrTool.panic("getRangeExpression called with non-range"); } int begin = elems[0]; int end = elems[elems.length - 1]; return "(" + lookaheadString(k) + " >= " + getValueString(begin) + " && " + lookaheadString(k) + " <= " + getValueString(end) + ")"; } /** getValueString: get a string representation of a token or char value * @param value The token or char value */ private String getValueString(int value) { String cs; if (grammar instanceof LexerGrammar) { cs = charFormatter.literalChar(value); } else { TokenSymbol ts = grammar.tokenManager.getTokenSymbolAt(value); if (ts == null) { return "" + value; // return token type as string // tool.panic("vocabulary for token type " + value + " is null"); } String tId = ts.getId(); if (ts instanceof StringLiteralSymbol) { // if string literal, use predefined label if any // if no predefined, try to mangle into LITERAL_xxx. // if can't mangle, use int value as last resort StringLiteralSymbol sl = (StringLiteralSymbol)ts; String label = sl.getLabel(); if (label != null) { cs = label; } else { cs = mangleLiteral(tId); if (cs == null) { cs = String.valueOf(value); } } } else { cs = tId; } } return cs; } /**Is the lookahead for this alt empty? */ protected boolean lookaheadIsEmpty(Alternative alt, int maxDepth) { int depth = alt.lookaheadDepth; if (depth == GrammarAnalyzer.NONDETERMINISTIC) { depth = grammar.maxk; } for (int i = 1; i <= depth && i <= maxDepth; i++) { BitSet p = alt.cache[i].fset; if (p.degree() != 0) { return false; } } return true; } private String lookaheadString(int k) { if (grammar instanceof TreeWalkerGrammar) { return "_t.getType()"; } return "LA(" + k + ")"; } /** Mangle a string literal into a meaningful token name. This is * only possible for literals that are all characters. 
The resulting * mangled literal name is literalsPrefix with the text of the literal * appended. * @return A string representing the mangled literal, or null if not possible. */ private String mangleLiteral(String s) { String mangled = antlrTool.literalsPrefix; for (int i = 1; i < s.length() - 1; i++) { if (!Character.isLetter(s.charAt(i)) && s.charAt(i) != '_') { return null; } mangled += s.charAt(i); } if (antlrTool.upperCaseMangledLiterals) { mangled = mangled.toUpperCase(); } return mangled; } /** Map an identifier to it's corresponding tree-node variable. * This is context-sensitive, depending on the rule and alternative * being generated * @param idParam The identifier name to map * @return The mapped id (which may be the same as the input), or null if the mapping is invalid due to duplicates */ public String mapTreeId(String idParam, ActionTransInfo transInfo) { // if not in an action of a rule, nothing to map. if (currentRule == null) return idParam; boolean in_var = false; String id = idParam; if (grammar instanceof TreeWalkerGrammar) { if (!grammar.buildAST) { in_var = true; } // If the id ends with "_in", then map it to the input variable else if (id.length() > 3 && id.lastIndexOf("_in") == id.length() - 3) { // Strip off the "_in" id = id.substring(0, id.length() - 3); in_var = true; } } // Check the rule labels. If id is a label, then the output // variable is label_AST, and the input variable is plain label. for (int i = 0; i < currentRule.labeledElements.size(); i++) { AlternativeElement elt = (AlternativeElement)currentRule.labeledElements.elementAt(i); if (elt.getLabel().equals(id)) { return in_var ? id : id + "_AST"; } } // Failing that, check the id-to-variable map for the alternative. 
// If the id is in the map, then output variable is the name in the // map, and input variable is name_in String s = (String)treeVariableMap.get(id); if (s != null) { if (s == NONUNIQUE) { // There is more than one element with this id antlrTool.error("Ambiguous reference to AST element "+id+ " in rule "+currentRule.getRuleName()); return null; } else if (s.equals(currentRule.getRuleName())) { // a recursive call to the enclosing rule is // ambiguous with the rule itself. antlrTool.error("Ambiguous reference to AST element "+id+ " in rule "+currentRule.getRuleName()); return null; } else { return in_var ? s + "_in" : s; } } // Failing that, check the rule name itself. Output variable // is rule_AST; input variable is rule_AST_in (treeparsers). if (id.equals(currentRule.getRuleName())) { String r = in_var ? id + "_AST_in" : id + "_AST"; if (transInfo != null) { if (!in_var) { transInfo.refRuleRoot = r; } } return r; } else { // id does not map to anything -- return itself. return id; } } /** Given an element and the name of an associated AST variable, * create a mapping between the element "name" and the variable name. 
*/ private void mapTreeVariable(AlternativeElement e, String name) { // For tree elements, defer to the root if (e instanceof TreeElement) { mapTreeVariable(((TreeElement)e).root, name); return; } // Determine the name of the element, if any, for mapping purposes String elName = null; // Don't map labeled items if (e.getLabel() == null) { if (e instanceof TokenRefElement) { // use the token id elName = ((TokenRefElement)e).atomText; } else if (e instanceof RuleRefElement) { // use the rule name elName = ((RuleRefElement)e).targetRule; } } // Add the element to the tree variable map if it has a name if (elName != null) { if (treeVariableMap.get(elName) != null) { // Name is already in the map -- mark it as duplicate treeVariableMap.remove(elName); treeVariableMap.put(elName, NONUNIQUE); } else { treeVariableMap.put(elName, name); } } } /** Lexically process $var and tree-specifiers in the action. * This will replace #id and #(...) with the appropriate * function calls and/or variables etc... */ protected String processActionForSpecialSymbols(String actionStr, int line, RuleBlock currentRule, ActionTransInfo tInfo) { if (actionStr == null || actionStr.length() == 0) return null; // The action trans info tells us (at the moment) whether an // assignment was done to the rule's tree root. if (grammar == null) return actionStr; // see if we have anything to do... 
if ((grammar.buildAST && actionStr.indexOf('#') != -1) || grammar instanceof TreeWalkerGrammar || ((grammar instanceof LexerGrammar || grammar instanceof ParserGrammar) && actionStr.indexOf('$') != -1)) { // Create a lexer to read an action and return the translated version antlr.actions.java.ActionLexer lexer = new antlr.actions.java.ActionLexer(actionStr, currentRule, this, tInfo); lexer.setLineOffset(line); lexer.setFilename(grammar.getFilename()); lexer.setTool(antlrTool); try { lexer.mACTION(true); actionStr = lexer.getTokenObject().getText(); // System.out.println("action translated: "+actionStr); // System.out.println("trans info is "+tInfo); } catch (RecognitionException ex) { lexer.reportError(ex); return actionStr; } catch (TokenStreamException tex) { antlrTool.panic("Error reading action:" + actionStr); return actionStr; } catch (CharStreamException io) { antlrTool.panic("Error reading action:" + actionStr); return actionStr; } } return actionStr; } private void setupGrammarParameters(Grammar g) { if (g instanceof ParserGrammar) { labeledElementASTType = "AST"; if (g.hasOption("ASTLabelType")) { Token tsuffix = g.getOption("ASTLabelType"); if (tsuffix != null) { String suffix = StringUtils.stripFrontBack(tsuffix.getText(), "\"", "\""); if (suffix != null) { labeledElementASTType = suffix; } } } labeledElementType = "Token "; labeledElementInit = "null"; commonExtraArgs = ""; commonExtraParams = ""; commonLocalVars = ""; lt1Value = "LT(1)"; exceptionThrown = "RecognitionException"; throwNoViable = "throw new NoViableAltException(LT(1), getFilename());"; } else if (g instanceof LexerGrammar) { labeledElementType = "char "; labeledElementInit = "'\\0'"; commonExtraArgs = ""; commonExtraParams = "boolean _createToken"; commonLocalVars = "int _ttype; Token _token=null; int _begin=text.length();"; lt1Value = "LA(1)"; exceptionThrown = "RecognitionException"; throwNoViable = "throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), 
getColumn());"; } else if (g instanceof TreeWalkerGrammar) { labeledElementASTType = "AST"; labeledElementType = "AST"; if (g.hasOption("ASTLabelType")) { Token tsuffix = g.getOption("ASTLabelType"); if (tsuffix != null) { String suffix = StringUtils.stripFrontBack(tsuffix.getText(), "\"", "\""); if (suffix != null) { labeledElementASTType = suffix; labeledElementType = suffix; } } } if (!g.hasOption("ASTLabelType")) { g.setOption("ASTLabelType", new Token(ANTLRTokenTypes.STRING_LITERAL, "AST")); } labeledElementInit = "null"; commonExtraArgs = "_t"; commonExtraParams = "AST _t"; commonLocalVars = ""; lt1Value = "(" + labeledElementASTType + ")_t"; exceptionThrown = "RecognitionException"; throwNoViable = "throw new NoViableAltException(_t);"; } else { antlrTool.panic("Unknown grammar type"); } } /** This method exists so a subclass, namely VAJCodeGenerator, * can open the file in its own evil way. JavaCodeGenerator * simply opens a text file... */ public void setupOutput(String className) throws IOException { currentOutput = antlrTool.openOutputFile(className + ".java"); } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/LLkAnalyzer.java000066400000000000000000001341331161462365500241560ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/LLkAnalyzer.java#1 $ */ import antlr.collections.impl.BitSet; import antlr.collections.impl.Vector; /**A linear-approximate LL(k) grammar analzyer. * * All lookahead elements are sets of token types. 
* * @author Terence Parr, John Lilley * @see antlr.Grammar * @see antlr.Lookahead */ public class LLkAnalyzer implements LLkGrammarAnalyzer { // Set "analyzerDebug" to true public boolean DEBUG_ANALYZER = false; private AlternativeBlock currentBlock; protected Tool tool = null; protected Grammar grammar = null; // True if analyzing a lexical grammar protected boolean lexicalAnalysis = false; // Used for formatting bit sets in default (Java) format CharFormatter charFormatter = new JavaCharFormatter(); /** Create an LLk analyzer */ public LLkAnalyzer(Tool tool_) { tool = tool_; } /** Return true if someone used the '.' wildcard default idiom. * Either #(. children) or '.' as an alt by itself. */ protected boolean altUsesWildcardDefault(Alternative alt) { AlternativeElement head = alt.head; // if element is #(. blah) then check to see if el is root if (head instanceof TreeElement && ((TreeElement)head).root instanceof WildcardElement) { return true; } if (head instanceof WildcardElement && head.next instanceof BlockEndElement) { return true; } return false; } /**Is this block of alternatives LL(k)? Fill in alternative cache for this block. * @return true if the block is deterministic */ public boolean deterministic(AlternativeBlock blk) { /** The lookahead depth for this decision */ int k = 1; // start at k=1 if (DEBUG_ANALYZER) System.out.println("deterministic(" + blk + ")"); boolean det = true; int nalts = blk.alternatives.size(); AlternativeBlock saveCurrentBlock = currentBlock; Alternative wildcardAlt = null; currentBlock = blk; /* don't allow nongreedy (...) blocks */ if (blk.greedy == false && !(blk instanceof OneOrMoreBlock) && !(blk instanceof ZeroOrMoreBlock)) { tool.warning("Being nongreedy only makes sense for (...)+ and (...)*", grammar.getFilename(), blk.getLine(), blk.getColumn()); } // SPECIAL CASE: only one alternative. We don't need to check the // determinism, but other code expects the lookahead cache to be // set for the single alt. 
if (nalts == 1) { AlternativeElement e = blk.getAlternativeAt(0).head; currentBlock.alti = 0; blk.getAlternativeAt(0).cache[1] = e.look(1); blk.getAlternativeAt(0).lookaheadDepth = 1; // set lookahead to LL(1) currentBlock = saveCurrentBlock; return true; // always deterministic for one alt } outer: for (int i = 0; i < nalts - 1; i++) { currentBlock.alti = i; currentBlock.analysisAlt = i; // which alt are we analyzing? currentBlock.altj = i + 1; // reset this alt. Haven't computed yet, // but we need the alt number. inner: // compare against other alternatives with lookahead depth k for (int j = i + 1; j < nalts; j++) { currentBlock.altj = j; if (DEBUG_ANALYZER) System.out.println("comparing " + i + " against alt " + j); currentBlock.analysisAlt = j; // which alt are we analyzing? k = 1; // always attempt minimum lookahead possible. // check to see if there is a lookahead depth that distinguishes // between alternatives i and j. Lookahead[] r = new Lookahead[grammar.maxk + 1]; boolean haveAmbiguity; do { haveAmbiguity = false; if (DEBUG_ANALYZER) System.out.println("checking depth " + k + "<=" + grammar.maxk); Lookahead p,q; p = getAltLookahead(blk, i, k); q = getAltLookahead(blk, j, k); // compare LOOK(alt i) with LOOK(alt j). Is there an intersection? // Lookahead must be disjoint. 
if (DEBUG_ANALYZER) System.out.println("p is " + p.toString(",", charFormatter, grammar)); if (DEBUG_ANALYZER) System.out.println("q is " + q.toString(",", charFormatter, grammar)); // r[i] = p.fset.and(q.fset); r[k] = p.intersection(q); if (DEBUG_ANALYZER) System.out.println("intersection at depth " + k + " is " + r[k].toString()); if (!r[k].nil()) { haveAmbiguity = true; k++; } // go until no more lookahead to use or no intersection } while (haveAmbiguity && k <= grammar.maxk); Alternative ai = blk.getAlternativeAt(i); Alternative aj = blk.getAlternativeAt(j); if (haveAmbiguity) { det = false; ai.lookaheadDepth = NONDETERMINISTIC; aj.lookaheadDepth = NONDETERMINISTIC; /* if ith alt starts with a syntactic predicate, computing the * lookahead is still done for code generation, but messages * should not be generated when comparing against alt j. * Alternatives with syn preds that are unnecessary do * not result in syn pred try-blocks. */ if (ai.synPred != null) { if (DEBUG_ANALYZER) { System.out.println("alt " + i + " has a syn pred"); } // The alt with the (...)=> block is nondeterministic for sure. // If the (...)=> conflicts with alt j, j is nondeterministic. // This prevents alt j from being in any switch statements. // move on to next alternative=>no possible ambiguity! // continue inner; } /* if ith alt starts with a semantic predicate, computing the * lookahead is still done for code generation, but messages * should not be generated when comparing against alt j. */ else if (ai.semPred != null) { if (DEBUG_ANALYZER) { System.out.println("alt " + i + " has a sem pred"); } } /* if jth alt is exactly the wildcard or wildcard root of tree, * then remove elements from alt i lookahead from alt j's lookahead. * Don't do an ambiguity warning. 
*/ else if (altUsesWildcardDefault(aj)) { // System.out.println("removing pred sets"); // removeCompetingPredictionSetsFromWildcard(aj.cache, aj.head, grammar.maxk); wildcardAlt = aj; } /* If the user specified warnWhenFollowAmbig=false, then we * can turn off this warning IFF one of the alts is empty; * that is, it points immediately at the end block. */ else if (!blk.warnWhenFollowAmbig && (ai.head instanceof BlockEndElement || aj.head instanceof BlockEndElement)) { // System.out.println("ai.head pts to "+ai.head.getClass()); // System.out.println("aj.head pts to "+aj.head.getClass()); } /* If they have the generateAmbigWarnings option off for the block * then don't generate a warning. */ else if (!blk.generateAmbigWarnings) { } /* If greedy=true and *one* empty alt shut off warning. */ else if (blk.greedySet && blk.greedy && ((ai.head instanceof BlockEndElement && !(aj.head instanceof BlockEndElement)) || (aj.head instanceof BlockEndElement && !(ai.head instanceof BlockEndElement)))) { // System.out.println("greedy set to true; one alt empty"); } /* We have no choice, but to report a nondetermism */ else { tool.errorHandler.warnAltAmbiguity( grammar, blk, // the block lexicalAnalysis, // true if lexical grammar.maxk, // depth of ambiguity r, // set of linear ambiguities i, // first ambiguous alternative j // second ambiguous alternative ); } } else { // a lookahead depth, k, was found where i and j do not conflict ai.lookaheadDepth = Math.max(ai.lookaheadDepth, k); aj.lookaheadDepth = Math.max(aj.lookaheadDepth, k); } } } // finished with block. // If had wildcard default clause idiom, remove competing lookahead /* if ( wildcardAlt!=null ) { removeCompetingPredictionSetsFromWildcard(wildcardAlt.cache, wildcardAlt.head, grammar.maxk); } */ currentBlock = saveCurrentBlock; return det; } /**Is (...)+ block LL(1)? Fill in alternative cache for this block. 
* @return true if the block is deterministic */ public boolean deterministic(OneOrMoreBlock blk) { if (DEBUG_ANALYZER) System.out.println("deterministic(...)+(" + blk + ")"); AlternativeBlock saveCurrentBlock = currentBlock; currentBlock = blk; boolean blkOk = deterministic((AlternativeBlock)blk); // block has been checked, now check that what follows does not conflict // with the lookahead of the (...)+ block. boolean det = deterministicImpliedPath(blk); currentBlock = saveCurrentBlock; return det && blkOk; } /**Is (...)* block LL(1)? Fill in alternative cache for this block. * @return true if the block is deterministic */ public boolean deterministic(ZeroOrMoreBlock blk) { if (DEBUG_ANALYZER) System.out.println("deterministic(...)*(" + blk + ")"); AlternativeBlock saveCurrentBlock = currentBlock; currentBlock = blk; boolean blkOk = deterministic((AlternativeBlock)blk); // block has been checked, now check that what follows does not conflict // with the lookahead of the (...)* block. boolean det = deterministicImpliedPath(blk); currentBlock = saveCurrentBlock; return det && blkOk; } /**Is this (...)* or (...)+ block LL(k)? 
* @return true if the block is deterministic */ public boolean deterministicImpliedPath(BlockWithImpliedExitPath blk) { /** The lookahead depth for this decision considering implied exit path */ int k; boolean det = true; Vector alts = blk.getAlternatives(); int nalts = alts.size(); currentBlock.altj = -1; // comparing against implicit optional/exit alt if (DEBUG_ANALYZER) System.out.println("deterministicImpliedPath"); for (int i = 0; i < nalts; i++) { // check follow against all alts Alternative alt = blk.getAlternativeAt(i); if (alt.head instanceof BlockEndElement) { tool.warning("empty alternative makes no sense in (...)* or (...)+", grammar.getFilename(), blk.getLine(), blk.getColumn()); } k = 1; // assume eac alt is LL(1) with exit branch // check to see if there is a lookahead depth that distinguishes // between alternative i and the exit branch. Lookahead[] r = new Lookahead[grammar.maxk + 1]; boolean haveAmbiguity; do { haveAmbiguity = false; if (DEBUG_ANALYZER) System.out.println("checking depth " + k + "<=" + grammar.maxk); Lookahead p; Lookahead follow = blk.next.look(k); blk.exitCache[k] = follow; currentBlock.alti = i; p = getAltLookahead(blk, i, k); if (DEBUG_ANALYZER) System.out.println("follow is " + follow.toString(",", charFormatter, grammar)); if (DEBUG_ANALYZER) System.out.println("p is " + p.toString(",", charFormatter, grammar)); //r[k] = follow.fset.and(p.fset); r[k] = follow.intersection(p); if (DEBUG_ANALYZER) System.out.println("intersection at depth " + k + " is " + r[k]); if (!r[k].nil()) { haveAmbiguity = true; k++; } // go until no more lookahead to use or no intersection } while (haveAmbiguity && k <= grammar.maxk); if (haveAmbiguity) { det = false; alt.lookaheadDepth = NONDETERMINISTIC; blk.exitLookaheadDepth = NONDETERMINISTIC; Alternative ambigAlt = blk.getAlternativeAt(currentBlock.alti); /* If the user specified warnWhenFollowAmbig=false, then we * can turn off this warning. 
*/ if (!blk.warnWhenFollowAmbig) { } /* If they have the generateAmbigWarnings option off for the block * then don't generate a warning. */ else if (!blk.generateAmbigWarnings) { } /* If greedy=true and alt not empty, shut off warning */ else if (blk.greedy == true && blk.greedySet && !(ambigAlt.head instanceof BlockEndElement)) { if (DEBUG_ANALYZER) System.out.println("greedy loop"); } /* If greedy=false then shut off warning...will have * to add "if FOLLOW break" * block during code gen to compensate for removal of warning. */ else if (blk.greedy == false && !(ambigAlt.head instanceof BlockEndElement)) { if (DEBUG_ANALYZER) System.out.println("nongreedy loop"); // if FOLLOW not single k-string (|set[k]| can // be > 1 actually) then must warn them that // loop may terminate incorrectly. // For example, ('a'..'d')+ ("ad"|"cb") if (!lookaheadEquivForApproxAndFullAnalysis(blk.exitCache, grammar.maxk)) { tool.warning(new String[]{ "nongreedy block may exit incorrectly due", "\tto limitations of linear approximate lookahead (first k-1 sets", "\tin lookahead not singleton)."}, grammar.getFilename(), blk.getLine(), blk.getColumn()); } } // no choice but to generate a warning else { tool.errorHandler.warnAltExitAmbiguity( grammar, blk, // the block lexicalAnalysis, // true if lexical grammar.maxk, // depth of ambiguity r, // set of linear ambiguities i // ambiguous alternative ); } } else { alt.lookaheadDepth = Math.max(alt.lookaheadDepth, k); blk.exitLookaheadDepth = Math.max(blk.exitLookaheadDepth, k); } } return det; } /**Compute the lookahead set of whatever follows references to * the rule associated witht the FOLLOW block. */ public Lookahead FOLLOW(int k, RuleEndElement end) { // what rule are we trying to compute FOLLOW of? 
RuleBlock rb = (RuleBlock)end.block; // rule name is different in lexer String rule; if (lexicalAnalysis) { rule = CodeGenerator.encodeLexerRuleName(rb.getRuleName()); } else { rule = rb.getRuleName(); } if (DEBUG_ANALYZER) System.out.println("FOLLOW(" + k + "," + rule + ")"); // are we in the midst of computing this FOLLOW already? if (end.lock[k]) { if (DEBUG_ANALYZER) System.out.println("FOLLOW cycle to " + rule); return new Lookahead(rule); } // Check to see if there is cached value if (end.cache[k] != null) { if (DEBUG_ANALYZER) { System.out.println("cache entry FOLLOW(" + k + ") for " + rule + ": " + end.cache[k].toString(",", charFormatter, grammar)); } // if the cache is a complete computation then simply return entry if (end.cache[k].cycle == null) { return (Lookahead)end.cache[k].clone(); } // A cache entry exists, but it is a reference to a cyclic computation. RuleSymbol rs = (RuleSymbol)grammar.getSymbol(end.cache[k].cycle); RuleEndElement re = rs.getBlock().endNode; // The other entry may not exist because it is still being // computed when this cycle cache entry was found here. if (re.cache[k] == null) { // return the cycle...that's all we can do at the moment. return (Lookahead)end.cache[k].clone(); } else { if (DEBUG_ANALYZER) { System.out.println("combining FOLLOW(" + k + ") for " + rule + ": from "+end.cache[k].toString(",", charFormatter, grammar) + " with FOLLOW for "+((RuleBlock)re.block).getRuleName()+": "+re.cache[k].toString(",", charFormatter, grammar)); } // combine results from other rule's FOLLOW if ( re.cache[k].cycle==null ) { // current rule depends on another rule's FOLLOW and // it is complete with no cycle; just kill our cycle and // combine full result from other rule's FOLLOW end.cache[k].combineWith(re.cache[k]); end.cache[k].cycle = null; // kill cycle as we're complete } else { // the FOLLOW cache for other rule has a cycle also. // Here is where we bubble up a cycle. 
We better recursively // wipe out cycles (partial computations). I'm a little nervous // that we might leave a cycle here, however. Lookahead refFOLLOW = FOLLOW(k, re); end.cache[k].combineWith( refFOLLOW ); // all cycles should be gone, but if not, record ref to cycle end.cache[k].cycle = refFOLLOW.cycle; } if (DEBUG_ANALYZER) { System.out.println("saving FOLLOW(" + k + ") for " + rule + ": from "+end.cache[k].toString(",", charFormatter, grammar)); } // Return the updated cache entry associated // with the cycle reference. return (Lookahead)end.cache[k].clone(); } } end.lock[k] = true; // prevent FOLLOW computation cycles Lookahead p = new Lookahead(); RuleSymbol rs = (RuleSymbol)grammar.getSymbol(rule); // Walk list of references to this rule to compute FOLLOW for (int i = 0; i < rs.numReferences(); i++) { RuleRefElement rr = rs.getReference(i); if (DEBUG_ANALYZER) System.out.println("next[" + rule + "] is " + rr.next.toString()); Lookahead q = rr.next.look(k); if (DEBUG_ANALYZER) System.out.println("FIRST of next[" + rule + "] ptr is " + q.toString()); /* If there is a cycle then if the cycle is to the rule for * this end block, you have a cycle to yourself. Remove the * cycle indication--the lookahead is complete. */ if (q.cycle != null && q.cycle.equals(rule)) { q.cycle = null; // don't want cycle to yourself! } // add the lookahead into the current FOLLOW computation set p.combineWith(q); if (DEBUG_ANALYZER) System.out.println("combined FOLLOW[" + rule + "] is " + p.toString()); } end.lock[k] = false; // we're not doing FOLLOW anymore // if no rules follow this, it can be a start symbol or called by a start sym. // set the follow to be end of file. if (p.fset.nil() && p.cycle == null) { if (grammar instanceof TreeWalkerGrammar) { // Tree grammars don't see EOF, they see end of sibling list or // "NULL TREE LOOKAHEAD". 
p.fset.add(Token.NULL_TREE_LOOKAHEAD); } else if (grammar instanceof LexerGrammar) { // Lexical grammars use Epsilon to indicate that the end of rule has been hit // EOF would be misleading; any character can follow a token rule not just EOF // as in a grammar (where a start symbol is followed by EOF). There is no // sequence info in a lexer between tokens to indicate what is the last token // to be seen. // p.fset.add(EPSILON_TYPE); p.setEpsilon(); } else { p.fset.add(Token.EOF_TYPE); } } // Cache the result of the FOLLOW computation if (DEBUG_ANALYZER) { System.out.println("saving FOLLOW(" + k + ") for " + rule + ": " + p.toString(",", charFormatter, grammar)); } end.cache[k] = (Lookahead)p.clone(); return p; } private Lookahead getAltLookahead(AlternativeBlock blk, int alt, int k) { Lookahead p; Alternative a = blk.getAlternativeAt(alt); AlternativeElement e = a.head; //System.out.println("getAltLookahead("+k+","+e+"), cache size is "+a.cache.length); if (a.cache[k] == null) { p = e.look(k); a.cache[k] = p; } else { p = a.cache[k]; } return p; } /**Actions are ignored */ public Lookahead look(int k, ActionElement action) { if (DEBUG_ANALYZER) System.out.println("lookAction(" + k + "," + action + ")"); return action.next.look(k); } /**Combine the lookahead computed for each alternative */ public Lookahead look(int k, AlternativeBlock blk) { if (DEBUG_ANALYZER) System.out.println("lookAltBlk(" + k + "," + blk + ")"); AlternativeBlock saveCurrentBlock = currentBlock; currentBlock = blk; Lookahead p = new Lookahead(); for (int i = 0; i < blk.alternatives.size(); i++) { if (DEBUG_ANALYZER) System.out.println("alt " + i + " of " + blk); // must set analysis alt currentBlock.analysisAlt = i; Alternative alt = blk.getAlternativeAt(i); AlternativeElement elem = alt.head; if (DEBUG_ANALYZER) { if (alt.head == alt.tail) { System.out.println("alt " + i + " is empty"); } } Lookahead q = elem.look(k); p.combineWith(q); } if (k == 1 && blk.not && subruleCanBeInverted(blk, 
lexicalAnalysis)) { // Invert the lookahead set if (lexicalAnalysis) { BitSet b = (BitSet)((LexerGrammar)grammar).charVocabulary.clone(); int[] elems = p.fset.toArray(); for (int j = 0; j < elems.length; j++) { b.remove(elems[j]); } p.fset = b; } else { p.fset.notInPlace(Token.MIN_USER_TYPE, grammar.tokenManager.maxTokenType()); } } currentBlock = saveCurrentBlock; return p; } /**Compute what follows this place-holder node and possibly * what begins the associated loop unless the * node is locked. *

* if we hit the end of a loop, we have to include * what tokens can begin the loop as well. If the start * node is locked, then we simply found an empty path * through this subrule while analyzing it. If the * start node is not locked, then this node was hit * during a FOLLOW operation and the FIRST of this * block must be included in that lookahead computation. */ public Lookahead look(int k, BlockEndElement end) { if (DEBUG_ANALYZER) System.out.println("lookBlockEnd(" + k + ", " + end.block + "); lock is " + end.lock[k]); if (end.lock[k]) { // computation in progress => the tokens we would have // computed (had we not been locked) will be included // in the set by that computation with the lock on this // node. return new Lookahead(); } Lookahead p; /* Hitting the end of a loop means you can see what begins the loop */ if (end.block instanceof ZeroOrMoreBlock || end.block instanceof OneOrMoreBlock) { // compute what can start the block, // but lock end node so we don't do it twice in same // computation. end.lock[k] = true; p = look(k, end.block); end.lock[k] = false; } else { p = new Lookahead(); } /* Tree blocks do not have any follow because they are children * of what surrounds them. For example, A #(B C) D results in * a look() for the TreeElement end of NULL_TREE_LOOKAHEAD, which * indicates that nothing can follow the last node of tree #(B C) */ if (end.block instanceof TreeElement) { p.combineWith(Lookahead.of(Token.NULL_TREE_LOOKAHEAD)); } /* Syntactic predicates such as ( (A)? )=> have no follow per se. * We cannot accurately say what would be matched following a * syntactic predicate (you MIGHT be ok if you said it was whatever * followed the alternative predicted by the predicate). Hence, * (like end-of-token) we return Epsilon to indicate "unknown * lookahead." 
*/ else if (end.block instanceof SynPredBlock) { p.setEpsilon(); } // compute what can follow the block else { Lookahead q = end.block.next.look(k); p.combineWith(q); } return p; } /**Return this char as the lookahead if k=1. *

### Doesn't work for ( 'a' 'b' | 'a' ~'b' ) yet!!! *

* If the atom has the not flag on, then * create the set complement of the tokenType * which is the set of all characters referenced * in the grammar with this char turned off. * Also remove characters from the set that * are currently allocated for predicting * previous alternatives. This avoids ambiguity * messages and is more properly what is meant. * ( 'a' | ~'a' ) implies that the ~'a' is the * "else" clause. *

* NOTE: we do NOT include exit path in * the exclusion set. E.g., * ( 'a' | ~'a' )* 'b' * should exit upon seeing a 'b' during the loop. */ public Lookahead look(int k, CharLiteralElement atom) { if (DEBUG_ANALYZER) System.out.println("lookCharLiteral(" + k + "," + atom + ")"); // Skip until analysis hits k==1 if (k > 1) { return atom.next.look(k - 1); } if (lexicalAnalysis) { if (atom.not) { BitSet b = (BitSet)((LexerGrammar)grammar).charVocabulary.clone(); if (DEBUG_ANALYZER) System.out.println("charVocab is " + b.toString()); // remove stuff predicted by preceding alts and follow of block removeCompetingPredictionSets(b, atom); if (DEBUG_ANALYZER) System.out.println("charVocab after removal of prior alt lookahead " + b.toString()); // now remove element that is stated not to be in the set b.clear(atom.getType()); return new Lookahead(b); } else { return Lookahead.of(atom.getType()); } } else { // Should have been avoided by MakeGrammar tool.panic("Character literal reference found in parser"); // ... 
so we make the compiler happy return Lookahead.of(atom.getType()); } } public Lookahead look(int k, CharRangeElement r) { if (DEBUG_ANALYZER) System.out.println("lookCharRange(" + k + "," + r + ")"); // Skip until analysis hits k==1 if (k > 1) { return r.next.look(k - 1); } BitSet p = BitSet.of(r.begin); for (int i = r.begin + 1; i <= r.end; i++) { p.add(i); } return new Lookahead(p); } public Lookahead look(int k, GrammarAtom atom) { if (DEBUG_ANALYZER) System.out.println("look(" + k + "," + atom + "[" + atom.getType() + "])"); if (lexicalAnalysis) { // MakeGrammar should have created a rule reference instead tool.panic("token reference found in lexer"); } // Skip until analysis hits k==1 if (k > 1) { return atom.next.look(k - 1); } Lookahead l = Lookahead.of(atom.getType()); if (atom.not) { // Invert the lookahead set against the token vocabulary int maxToken = grammar.tokenManager.maxTokenType(); l.fset.notInPlace(Token.MIN_USER_TYPE, maxToken); // remove stuff predicted by preceding alts and follow of block removeCompetingPredictionSets(l.fset, atom); } return l; } /**The lookahead of a (...)+ block is the combined lookahead of * all alternatives and, if an empty path is found, the lookahead * of what follows the block. */ public Lookahead look(int k, OneOrMoreBlock blk) { if (DEBUG_ANALYZER) System.out.println("look+" + k + "," + blk + ")"); Lookahead p = look(k, (AlternativeBlock)blk); return p; } /**Combine the lookahead computed for each alternative. * Lock the node so that no other computation may come back * on itself--infinite loop. This also implies infinite left-recursion * in the grammar (or an error in this algorithm ;)). */ public Lookahead look(int k, RuleBlock blk) { if (DEBUG_ANALYZER) System.out.println("lookRuleBlk(" + k + "," + blk + ")"); Lookahead p = look(k, (AlternativeBlock)blk); return p; } /**If not locked or noFOLLOW set, compute FOLLOW of a rule. *

* TJP says 8/12/99: not true anymore: * Lexical rules never compute follow. They set epsilon and * the code generator gens code to check for any character. * The code generator must remove the tokens used to predict * any previous alts in the same block. *

* When the last node of a rule is reached and noFOLLOW, * it implies that a "local" FOLLOW will be computed * after this call. I.e., *

     *		a : b A;
     *		b : B | ;
     *		c : b C;
     * 
* Here, when computing the look of rule b from rule a, * we want only {B,EPSILON_TYPE} so that look(b A) will * be {B,A} not {B,A,C}. *

* if the end block is not locked and the FOLLOW is * wanted, the algorithm must compute the lookahead * of what follows references to this rule. If * end block is locked, FOLLOW will return an empty set * with a cycle to the rule associated with this end block. */ public Lookahead look(int k, RuleEndElement end) { if (DEBUG_ANALYZER) System.out.println("lookRuleBlockEnd(" + k + "); noFOLLOW=" + end.noFOLLOW + "; lock is " + end.lock[k]); if (/*lexicalAnalysis ||*/ end.noFOLLOW) { Lookahead p = new Lookahead(); p.setEpsilon(); p.epsilonDepth = BitSet.of(k); return p; } Lookahead p = FOLLOW(k, end); return p; } /**Compute the lookahead contributed by a rule reference. * *

* When computing ruleref lookahead, we don't want the FOLLOW * computation done if an empty path exists for the rule. * The FOLLOW is too loose of a set...we want only to * include the "local" FOLLOW or what can follow this * particular ref to the node. In other words, we use * context information to reduce the complexity of the * analysis and strengthen the parser. * * The noFOLLOW flag is used as a means of restricting * the FOLLOW to a "local" FOLLOW. This variable is * orthogonal to the lock variable that prevents * infinite recursion. noFOLLOW does not care about what k is. */ public Lookahead look(int k, RuleRefElement rr) { if (DEBUG_ANALYZER) System.out.println("lookRuleRef(" + k + "," + rr + ")"); RuleSymbol rs = (RuleSymbol)grammar.getSymbol(rr.targetRule); if (rs == null || !rs.defined) { tool.error("no definition of rule " + rr.targetRule, grammar.getFilename(), rr.getLine(), rr.getColumn()); return new Lookahead(); } RuleBlock rb = rs.getBlock(); RuleEndElement end = rb.endNode; boolean saveEnd = end.noFOLLOW; end.noFOLLOW = true; // go off to the rule and get the lookahead (w/o FOLLOW) Lookahead p = look(k, rr.targetRule); if (DEBUG_ANALYZER) System.out.println("back from rule ref to " + rr.targetRule); // restore state of end block end.noFOLLOW = saveEnd; // check for infinite recursion. If a cycle is returned: trouble! if (p.cycle != null) { tool.error("infinite recursion to rule " + p.cycle + " from rule " + rr.enclosingRuleName, grammar.getFilename(), rr.getLine(), rr.getColumn()); } // is the local FOLLOW required? 
if (p.containsEpsilon()) { if (DEBUG_ANALYZER) System.out.println("rule ref to " + rr.targetRule + " has eps, depth: " + p.epsilonDepth); // remove epsilon p.resetEpsilon(); // fset.clear(EPSILON_TYPE); // for each lookahead depth that saw epsilon int[] depths = p.epsilonDepth.toArray(); p.epsilonDepth = null; // clear all epsilon stuff for (int i = 0; i < depths.length; i++) { int rk = k - (k - depths[i]); Lookahead q = rr.next.look(rk); // see comments in Lookahead p.combineWith(q); } // note: any of these look() computations for local follow can // set EPSILON in the set again if the end of this rule is found. } return p; } public Lookahead look(int k, StringLiteralElement atom) { if (DEBUG_ANALYZER) System.out.println("lookStringLiteral(" + k + "," + atom + ")"); if (lexicalAnalysis) { // need more lookahead than string can provide? if (k > atom.processedAtomText.length()) { return atom.next.look(k - atom.processedAtomText.length()); } else { // get char at lookahead depth k, from the processed literal text return Lookahead.of(atom.processedAtomText.charAt(k - 1)); } } else { // Skip until analysis hits k==1 if (k > 1) { return atom.next.look(k - 1); } Lookahead l = Lookahead.of(atom.getType()); if (atom.not) { // Invert the lookahead set against the token vocabulary int maxToken = grammar.tokenManager.maxTokenType(); l.fset.notInPlace(Token.MIN_USER_TYPE, maxToken); } return l; } } /**The lookahead of a (...)=> block is the lookahead of * what follows the block. By definition, the syntactic * predicate block defies static analysis (you want to try it * out at run-time). The LOOK of (a)=>A B is A for LL(1) * ### is this even called? 
*/ public Lookahead look(int k, SynPredBlock blk) { if (DEBUG_ANALYZER) System.out.println("look=>(" + k + "," + blk + ")"); return blk.next.look(k); } public Lookahead look(int k, TokenRangeElement r) { if (DEBUG_ANALYZER) System.out.println("lookTokenRange(" + k + "," + r + ")"); // Skip until analysis hits k==1 if (k > 1) { return r.next.look(k - 1); } BitSet p = BitSet.of(r.begin); for (int i = r.begin + 1; i <= r.end; i++) { p.add(i); } return new Lookahead(p); } public Lookahead look(int k, TreeElement t) { if (DEBUG_ANALYZER) System.out.println("look(" + k + "," + t.root + "[" + t.root.getType() + "])"); if (k > 1) { return t.next.look(k - 1); } Lookahead l = null; if (t.root instanceof WildcardElement) { l = t.root.look(1); // compute FIRST set minus previous rows } else { l = Lookahead.of(t.root.getType()); if (t.root.not) { // Invert the lookahead set against the token vocabulary int maxToken = grammar.tokenManager.maxTokenType(); l.fset.notInPlace(Token.MIN_USER_TYPE, maxToken); } } return l; } public Lookahead look(int k, WildcardElement wc) { if (DEBUG_ANALYZER) System.out.println("look(" + k + "," + wc + ")"); // Skip until analysis hits k==1 if (k > 1) { return wc.next.look(k - 1); } BitSet b; if (lexicalAnalysis) { // Copy the character vocabulary b = (BitSet)((LexerGrammar)grammar).charVocabulary.clone(); } else { b = new BitSet(1); // Invert the lookahead set against the token vocabulary int maxToken = grammar.tokenManager.maxTokenType(); b.notInPlace(Token.MIN_USER_TYPE, maxToken); if (DEBUG_ANALYZER) System.out.println("look(" + k + "," + wc + ") after not: " + b); } // Remove prediction sets from competing alternatives // removeCompetingPredictionSets(b, wc); return new Lookahead(b); } /** The (...)* element is the combined lookahead of the alternatives and what can * follow the loop. 
*/ public Lookahead look(int k, ZeroOrMoreBlock blk) { if (DEBUG_ANALYZER) System.out.println("look*(" + k + "," + blk + ")"); Lookahead p = look(k, (AlternativeBlock)blk); Lookahead q = blk.next.look(k); p.combineWith(q); return p; } /**Compute the combined lookahead for all productions of a rule. * If the lookahead returns with epsilon, at least one epsilon * path exists (one that consumes no tokens). The noFOLLOW * flag being set for this endruleblk, indicates that the * a rule ref invoked this rule. * * Currently only look(RuleRef) calls this. There is no need * for the code generator to call this. */ public Lookahead look(int k, String rule) { if (DEBUG_ANALYZER) System.out.println("lookRuleName(" + k + "," + rule + ")"); RuleSymbol rs = (RuleSymbol)grammar.getSymbol(rule); RuleBlock rb = rs.getBlock(); if (rb.lock[k]) { if (DEBUG_ANALYZER) System.out.println("infinite recursion to rule " + rb.getRuleName()); return new Lookahead(rule); } // have we computed it before? if (rb.cache[k] != null) { if (DEBUG_ANALYZER) { System.out.println("found depth " + k + " result in FIRST " + rule + " cache: " + rb.cache[k].toString(",", charFormatter, grammar)); } return (Lookahead)rb.cache[k].clone(); } rb.lock[k] = true; Lookahead p = look(k, (RuleBlock)rb); rb.lock[k] = false; // cache results rb.cache[k] = (Lookahead)p.clone(); if (DEBUG_ANALYZER) { System.out.println("saving depth " + k + " result in FIRST " + rule + " cache: " + rb.cache[k].toString(",", charFormatter, grammar)); } return p; } /** If the first k-1 sets are singleton sets, the appoximate * lookahead analysis is equivalent to full lookahead analysis. */ public static boolean lookaheadEquivForApproxAndFullAnalysis(Lookahead[] bset, int k) { // first k-1 sets degree 1? 
for (int i = 1; i <= k - 1; i++) { BitSet look = bset[i].fset; if (look.degree() > 1) { return false; } } return true; } /** Remove the prediction sets from preceding alternatives * and follow set, but *only* if this element is the first element * of the alternative. The class members currenBlock and * currentBlock.analysisAlt must be set correctly. * @param b The prediction bitset to be modified * @el The element of interest */ private void removeCompetingPredictionSets(BitSet b, AlternativeElement el) { // Only do this if the element is the first element of the alt, // because we are making an implicit assumption that k==1. GrammarElement head = currentBlock.getAlternativeAt(currentBlock.analysisAlt).head; // if element is #(. blah) then check to see if el is root if (head instanceof TreeElement) { if (((TreeElement)head).root != el) { return; } } else if (el != head) { return; } for (int i = 0; i < currentBlock.analysisAlt; i++) { AlternativeElement e = currentBlock.getAlternativeAt(i).head; b.subtractInPlace(e.look(1).fset); } } /** Remove the prediction sets from preceding alternatives * The class members currenBlock must be set correctly. * Remove prediction sets from 1..k. * @param look The prediction lookahead to be modified * @el The element of interest * @k How deep into lookahead to modify */ private void removeCompetingPredictionSetsFromWildcard(Lookahead[] look, AlternativeElement el, int k) { for (int d = 1; d <= k; d++) { for (int i = 0; i < currentBlock.analysisAlt; i++) { AlternativeElement e = currentBlock.getAlternativeAt(i).head; look[d].fset.subtractInPlace(e.look(d).fset); } } } /** reset the analyzer so it looks like a new one */ private void reset() { grammar = null; DEBUG_ANALYZER = false; currentBlock = null; lexicalAnalysis = false; } /** Set the grammar for the analyzer */ public void setGrammar(Grammar g) { if (grammar != null) { reset(); } grammar = g; // Is this lexical? 
lexicalAnalysis = (grammar instanceof LexerGrammar); DEBUG_ANALYZER = grammar.analyzerDebug; } public boolean subruleCanBeInverted(AlternativeBlock blk, boolean forLexer) { if ( blk instanceof ZeroOrMoreBlock || blk instanceof OneOrMoreBlock || blk instanceof SynPredBlock ) { return false; } // Cannot invert an empty subrule if (blk.alternatives.size() == 0) { return false; } // The block must only contain alternatives with a single element, // where each element is a char, token, char range, or token range. for (int i = 0; i < blk.alternatives.size(); i++) { Alternative alt = blk.getAlternativeAt(i); // Cannot have anything interesting in the alternative ... if (alt.synPred != null || alt.semPred != null || alt.exceptionSpec != null) { return false; } // ... and there must be one simple element AlternativeElement elt = alt.head; if ( !( elt instanceof CharLiteralElement || elt instanceof TokenRefElement || elt instanceof CharRangeElement || elt instanceof TokenRangeElement || (elt instanceof StringLiteralElement && !forLexer) ) || !(elt.next instanceof BlockEndElement) || elt.getAutoGenType() != GrammarElement.AUTO_GEN_NONE ) { return false; } } return true; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/LLkGrammarAnalyzer.java000066400000000000000000000030761161462365500254660ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/LLkGrammarAnalyzer.java#1 $ */ public interface LLkGrammarAnalyzer extends GrammarAnalyzer { public boolean deterministic(AlternativeBlock blk); public boolean deterministic(OneOrMoreBlock blk); public boolean deterministic(ZeroOrMoreBlock blk); public Lookahead FOLLOW(int k, RuleEndElement end); public Lookahead look(int k, ActionElement action); public Lookahead look(int k, AlternativeBlock blk); public Lookahead look(int k, BlockEndElement 
end); public Lookahead look(int k, CharLiteralElement atom); public Lookahead look(int k, CharRangeElement end); public Lookahead look(int k, GrammarAtom atom); public Lookahead look(int k, OneOrMoreBlock blk); public Lookahead look(int k, RuleBlock blk); public Lookahead look(int k, RuleEndElement end); public Lookahead look(int k, RuleRefElement rr); public Lookahead look(int k, StringLiteralElement atom); public Lookahead look(int k, SynPredBlock blk); public Lookahead look(int k, TokenRangeElement end); public Lookahead look(int k, TreeElement end); public Lookahead look(int k, WildcardElement wc); public Lookahead look(int k, ZeroOrMoreBlock blk); public Lookahead look(int k, String rule); public void setGrammar(Grammar g); public boolean subruleCanBeInverted(AlternativeBlock blk, boolean forLexer); } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/LLkParser.java000066400000000000000000000044221161462365500236220ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/LLkParser.java#1 $ */ import java.io.IOException; /**An LL(k) parser. * * @see antlr.Token * @see antlr.TokenBuffer */ public class LLkParser extends Parser { int k; public LLkParser(int k_) { k = k_; } public LLkParser(ParserSharedInputState state, int k_) { super(state); k = k_; } public LLkParser(TokenBuffer tokenBuf, int k_) { k = k_; setTokenBuffer(tokenBuf); } public LLkParser(TokenStream lexer, int k_) { k = k_; TokenBuffer tokenBuf = new TokenBuffer(lexer); setTokenBuffer(tokenBuf); } /**Consume another token from the input stream. Can only write sequentially! * If you need 3 tokens ahead, you must consume() 3 times. *

* Note that it is possible to overwrite tokens that have not been matched. * For example, calling consume() 3 times when k=2, means that the first token * consumed will be overwritten with the 3rd. */ public void consume() throws TokenStreamException { inputState.input.consume(); } public int LA(int i) throws TokenStreamException { return inputState.input.LA(i); } public Token LT(int i) throws TokenStreamException { return inputState.input.LT(i); } private void trace(String ee, String rname) throws TokenStreamException { traceIndent(); System.out.print(ee + rname + ((inputState.guessing > 0)?"; [guessing]":"; ")); for (int i = 1; i <= k; i++) { if (i != 1) { System.out.print(", "); } if ( LT(i)!=null ) { System.out.print("LA(" + i + ")==" + LT(i).getText()); } else { System.out.print("LA(" + i + ")==null"); } } System.out.println(""); } public void traceIn(String rname) throws TokenStreamException { traceDepth += 1; trace("> ", rname); } public void traceOut(String rname) throws TokenStreamException { trace("< ", rname); traceDepth -= 1; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/LexerGrammar.java000066400000000000000000000137611161462365500243570ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/LexerGrammar.java#1 $ */ import java.util.Hashtable; import java.util.Enumeration; import java.io.IOException; import antlr.collections.impl.BitSet; import antlr.collections.impl.Vector; /** Lexer-specific grammar subclass */ class LexerGrammar extends Grammar { // character set used by lexer protected BitSet charVocabulary; // true if the lexer generates literal testing code for nextToken protected boolean testLiterals = true; // true if the lexer generates case-sensitive LA(k) testing protected boolean caseSensitiveLiterals = true; /** true if the lexer generates 
case-sensitive literals testing */ protected boolean caseSensitive = true; /** true if lexer is to ignore all unrecognized tokens */ protected boolean filterMode = false; /** if filterMode is true, then filterRule can indicate an optional * rule to use as the scarf language. If null, programmer used * plain "filter=true" not "filter=rule". */ protected String filterRule = null; LexerGrammar(String className_, Tool tool_, String superClass) { super(className_, tool_, superClass); // by default, use 0..127 for ASCII char vocabulary BitSet cv = new BitSet(); for (int i = 0; i <= 127; i++) { cv.add(i); } setCharVocabulary(cv); // Lexer usually has no default error handling defaultErrorHandler = false; } /** Top-level call to generate the code */ public void generate() throws IOException { generator.gen(this); } public String getSuperClass() { // If debugging, use debugger version of scanner if (debuggingOutput) return "debug.DebuggingCharScanner"; return "CharScanner"; } // Get the testLiterals option value public boolean getTestLiterals() { return testLiterals; } /**Process command line arguments. 
* -trace have all rules call traceIn/traceOut * -traceLexer have lexical rules call traceIn/traceOut * -debug generate debugging output for parser debugger */ public void processArguments(String[] args) { for (int i = 0; i < args.length; i++) { if (args[i].equals("-trace")) { traceRules = true; antlrTool.setArgOK(i); } else if (args[i].equals("-traceLexer")) { traceRules = true; antlrTool.setArgOK(i); } else if (args[i].equals("-debug")) { debuggingOutput = true; antlrTool.setArgOK(i); } } } /** Set the character vocabulary used by the lexer */ public void setCharVocabulary(BitSet b) { charVocabulary = b; } /** Set lexer options */ public boolean setOption(String key, Token value) { String s = value.getText(); if (key.equals("buildAST")) { antlrTool.warning("buildAST option is not valid for lexer", getFilename(), value.getLine(), value.getColumn()); return true; } if (key.equals("testLiterals")) { if (s.equals("true")) { testLiterals = true; } else if (s.equals("false")) { testLiterals = false; } else { antlrTool.warning("testLiterals option must be true or false", getFilename(), value.getLine(), value.getColumn()); } return true; } if (key.equals("interactive")) { if (s.equals("true")) { interactive = true; } else if (s.equals("false")) { interactive = false; } else { antlrTool.error("interactive option must be true or false", getFilename(), value.getLine(), value.getColumn()); } return true; } if (key.equals("caseSensitive")) { if (s.equals("true")) { caseSensitive = true; } else if (s.equals("false")) { caseSensitive = false; } else { antlrTool.warning("caseSensitive option must be true or false", getFilename(), value.getLine(), value.getColumn()); } return true; } if (key.equals("caseSensitiveLiterals")) { if (s.equals("true")) { caseSensitiveLiterals = true; } else if (s.equals("false")) { caseSensitiveLiterals = false; } else { antlrTool.warning("caseSensitiveLiterals option must be true or false", getFilename(), value.getLine(), value.getColumn()); } return 
true; } if (key.equals("filter")) { if (s.equals("true")) { filterMode = true; } else if (s.equals("false")) { filterMode = false; } else if (value.getType() == ANTLRTokenTypes.TOKEN_REF) { filterMode = true; filterRule = s; } else { antlrTool.warning("filter option must be true, false, or a lexer rule name", getFilename(), value.getLine(), value.getColumn()); } return true; } if (key.equals("longestPossible")) { antlrTool.warning("longestPossible option has been deprecated; ignoring it...", getFilename(), value.getLine(), value.getColumn()); return true; } if (key.equals("className")) { super.setOption(key, value); return true; } if (super.setOption(key, value)) { return true; } antlrTool.error("Invalid option: " + key, getFilename(), value.getLine(), value.getColumn()); return false; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/LexerSharedInputState.java000066400000000000000000000023641161462365500262150ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/LexerSharedInputState.java#1 $ */ import java.io.Reader; import java.io.InputStream; /** This object contains the data associated with an * input stream of characters. Multiple lexers * share a single LexerSharedInputState to lex * the same input stream. */ public class LexerSharedInputState { protected int column = 1; protected int line = 1; protected int tokenStartColumn = 1; protected int tokenStartLine = 1; protected InputBuffer input; /** What file (if known) caused the problem? 
*/ protected String filename; public int guessing = 0; public LexerSharedInputState(InputBuffer inbuf) { input = inbuf; } public LexerSharedInputState(InputStream in) { this(new ByteBuffer(in)); } public LexerSharedInputState(Reader in) { this(new CharBuffer(in)); } public void reset() { column = 1; line = 1; tokenStartColumn = 1; tokenStartLine = 1; guessing = 0; filename = null; input.reset(); } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/Lookahead.java000066400000000000000000000145371161462365500236620ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/Lookahead.java#1 $ */ import antlr.collections.impl.BitSet; import antlr.collections.impl.Vector; /**This object holds all information needed to represent * the lookahead for any particular lookahead computation * for a single lookahead depth. Final lookahead * information is a simple bit set, but intermediate * stages need computation cycle and FOLLOW information. * *

* Concerning the cycle variable. * If lookahead is computed for a RuleEnd node, then * computation is part of a FOLLOW cycle for this rule. * If lookahead is computed for a RuleBlock node, the * computation is part of a FIRST cycle to this rule. * *

* Concerning the epsilonDepth variable. * This is not the depth relative to the rule reference * that epsilon was encountered. That value is *

 * 		initial_k - epsilonDepth + 1
 * 
* Also, lookahead depths past rule ref for local follow are: *
 * 		initial_k - (initial_k - epsilonDepth)
 * 
* Used for rule references. If we try * to compute look(k, ruleref) and there are fewer * than k lookahead terminals before the end of the * the rule, epsilon will be returned (don't want to * pass the end of the rule). We must track when the * the lookahead got stuck. For example, *
 * 		a : b A B E F G;
 * 		b : C ;
 * 
* LOOK(5, ref-to(b)) is {} with depth = 4, which * indicates that at 2 (5-4+1) tokens ahead, end of rule was reached. * Therefore, the token at 4=5-(5-4) past rule ref b must be * included in the set == F. * The situation is complicated by the fact that a computation * may hit the end of a rule at many different depths. For example, *
 * 		a : b A B C ;
 * 		b : E F		// epsilon depth of 1 relative to initial k=3
 * 		  | G		// epsilon depth of 2
 * 		  ;
 * 
* Here, LOOK(3,ref-to(b)) returns epsilon, but the depths are * {1, 2}; i.e., 3-(3-1) and 3-(3-2). Those are the lookahead depths * past the rule ref needed for the local follow. * *

* This is null unless an epsilon is created. * * @see antlr.Lookahead#combineWith(Lookahead) */ public class Lookahead implements Cloneable { /** actual bitset of the lookahead */ BitSet fset; /** is this computation part of a computation cycle? */ String cycle; /** What k values were being computed when end of rule hit? */ BitSet epsilonDepth; /** Does this lookahead depth include Epsilon token type? This * is used to avoid having a bit in the set for Epsilon as it * conflicts with parsing binary files. */ boolean hasEpsilon = false; public Lookahead() { fset = new BitSet(); } /** create a new lookahead set with the LL(1) set to the parameter */ public Lookahead(BitSet p) { fset = p; } /** create an empty lookahead set, but with cycle */ public Lookahead(String c) { this(); cycle = c; } /** Make a deep copy of everything in this object */ public Object clone() { Lookahead p = null; try { p = (Lookahead)super.clone(); p.fset = (BitSet)fset.clone(); p.cycle = cycle; // strings are immutable if (epsilonDepth != null) { p.epsilonDepth = (BitSet)epsilonDepth.clone(); } } catch (CloneNotSupportedException e) { throw new InternalError(); } return p; } public void combineWith(Lookahead q) { if (cycle == null) { // track at least one cycle cycle = q.cycle; } if (q.containsEpsilon()) { hasEpsilon = true; } // combine epsilon depths if (epsilonDepth != null) { if (q.epsilonDepth != null) { epsilonDepth.orInPlace(q.epsilonDepth); } } else if (q.epsilonDepth != null) { epsilonDepth = (BitSet)q.epsilonDepth.clone(); } fset.orInPlace(q.fset); } public boolean containsEpsilon() { return hasEpsilon; } /** What is the intersection of two lookahead depths? * Only the Epsilon "bit" and bitset are considered. 
*/ public Lookahead intersection(Lookahead q) { Lookahead p = new Lookahead(fset.and(q.fset)); if (this.hasEpsilon && q.hasEpsilon) { p.setEpsilon(); } return p; } public boolean nil() { return fset.nil() && !hasEpsilon; } public static Lookahead of(int el) { Lookahead look = new Lookahead(); look.fset.add(el); return look; } public void resetEpsilon() { hasEpsilon = false; } public void setEpsilon() { hasEpsilon = true; } public String toString() { String e = "",b,f = "",d = ""; b = fset.toString(","); if (containsEpsilon()) { e = "+"; } if (cycle != null) { f = "; FOLLOW(" + cycle + ")"; } if (epsilonDepth != null) { d = "; depths=" + epsilonDepth.toString(","); } return b + e + f + d; } public String toString(String separator, CharFormatter formatter) { String e = "",b,f = "",d = ""; b = fset.toString(separator, formatter); if (containsEpsilon()) { e = "+"; } if (cycle != null) { f = "; FOLLOW(" + cycle + ")"; } if (epsilonDepth != null) { d = "; depths=" + epsilonDepth.toString(","); } return b + e + f + d; } public String toString(String separator, CharFormatter formatter, Grammar g) { if (g instanceof LexerGrammar) { return toString(separator, formatter); } else { return toString(separator, g.tokenManager.getVocabulary()); } } public String toString(String separator, Vector vocab) { String b,f = "",d = ""; b = fset.toString(separator, vocab); if (cycle != null) { f = "; FOLLOW(" + cycle + ")"; } if (epsilonDepth != null) { d = "; depths=" + epsilonDepth.toString(","); } return b + f + d; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/MakeGrammar.java000066400000000000000000000774751161462365500241710ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/MakeGrammar.java#1 $ */ import antlr.collections.Stack; import antlr.collections.impl.LList; import 
antlr.collections.impl.Vector; public class MakeGrammar extends DefineGrammarSymbols { protected Stack blocks = new LList(); // track subrules--Stack protected RuleRefElement lastRuleRef; protected RuleEndElement ruleEnd; // used if not nested protected RuleBlock ruleBlock; // points to block of current rule. protected int nested = 0; // nesting inside a subrule protected boolean grammarError = false; ExceptionSpec currentExceptionSpec = null; public MakeGrammar(Tool tool_, String[] args_, LLkAnalyzer analyzer_) { super(tool_, args_, analyzer_); } /** Abort the processing of a grammar (due to syntax errors) */ public void abortGrammar() { String s = "unknown grammar"; if (grammar != null) { s = grammar.getClassName(); } tool.error("aborting grammar '" + s + "' due to errors"); super.abortGrammar(); } protected void addElementToCurrentAlt(AlternativeElement e) { e.enclosingRuleName = ruleBlock.ruleName; context().addAlternativeElement(e); } public void beginAlt(boolean doAutoGen_) { super.beginAlt(doAutoGen_); Alternative alt = new Alternative(); alt.setAutoGen(doAutoGen_); context().block.addAlternative(alt); } public void beginChildList() { super.beginChildList(); context().block.addAlternative(new Alternative()); } /** Add an exception group to a rule (currently a no-op) */ public void beginExceptionGroup() { super.beginExceptionGroup(); if (!(context().block instanceof RuleBlock)) { tool.panic("beginExceptionGroup called outside of rule block"); } } /** Add an exception spec to an exception group or rule block */ public void beginExceptionSpec(Token label) { // Hack the label string a bit to remove leading/trailing space. if (label != null) { label.setText(StringUtils.stripFront(StringUtils.stripBack(label.getText(), " \n\r\t"), " \n\r\t")); } super.beginExceptionSpec(label); // Don't check for currentExceptionSpec!=null because syntax errors // may leave it set to something. 
currentExceptionSpec = new ExceptionSpec(label); } public void beginSubRule(Token label, Token start, boolean not) { super.beginSubRule(label, start, not); // we don't know what kind of subrule it is yet. // push a dummy one that will allow us to collect the // alternatives. Later, we'll switch to real object. blocks.push(new BlockContext()); context().block = new AlternativeBlock(grammar, start, not); context().altNum = 0; // reset alternative number nested++; // create a final node to which the last elememt of each // alternative will point. context().blockEnd = new BlockEndElement(grammar); // make sure end node points to start of block context().blockEnd.block = context().block; labelElement(context().block, label); } public void beginTree(Token tok) throws SemanticException { if (!(grammar instanceof TreeWalkerGrammar)) { tool.error("Trees only allowed in TreeParser", grammar.getFilename(), tok.getLine(), tok.getColumn()); throw new SemanticException("Trees only allowed in TreeParser"); } super.beginTree(tok); blocks.push(new TreeBlockContext()); context().block = new TreeElement(grammar, tok); context().altNum = 0; // reset alternative number } public BlockContext context() { if (blocks.height() == 0) { return null; } else { return (BlockContext)blocks.top(); } } /**Used to build nextToken() for the lexer. * This builds a rule which has every "public" rule in the given Vector of * rules as it's alternate. Each rule ref generates a Token object. * @param g The Grammar that is being processed * @param lexRules A vector of lexer rules that will be used to create an alternate block. * @param rname The name of the resulting rule. 
*/ public static RuleBlock createNextTokenRule(Grammar g, Vector lexRules, String rname) { // create actual rule data structure RuleBlock rb = new RuleBlock(g, rname); rb.setDefaultErrorHandler(g.getDefaultErrorHandler()); RuleEndElement ruleEnd = new RuleEndElement(g); rb.setEndElement(ruleEnd); ruleEnd.block = rb; // Add an alternative for each element of the rules vector. for (int i = 0; i < lexRules.size(); i++) { RuleSymbol r = (RuleSymbol)lexRules.elementAt(i); if (!r.isDefined()) { g.antlrTool.error("Lexer rule " + r.id.substring(1) + " is not defined"); } else { if (r.access.equals("public")) { Alternative alt = new Alternative(); // create alt we'll add to ref rule RuleBlock targetRuleBlock = r.getBlock(); Vector targetRuleAlts = targetRuleBlock.getAlternatives(); // collect a sem pred if only one alt and it's at the start; // simple, but faster to implement until real hoisting if ( targetRuleAlts!=null && targetRuleAlts.size()==1 ) { Alternative onlyAlt = (Alternative)targetRuleAlts.elementAt(0); if ( onlyAlt.semPred!=null ) { // ok, has sem pred, make this rule ref alt have a pred alt.semPred = onlyAlt.semPred; // REMOVE predicate from target rule??? NOPE, another // rule other than nextToken() might invoke it. } } // create a rule ref to lexer rule // the Token is a RULE_REF not a TOKEN_REF since the // conversion to mRulename has already taken place RuleRefElement rr = new RuleRefElement(g, new CommonToken(ANTLRTokenTypes.RULE_REF, r.getId()), GrammarElement.AUTO_GEN_NONE); rr.setLabel("theRetToken"); rr.enclosingRuleName = "nextToken"; rr.next = ruleEnd; alt.addElement(rr); // add rule ref to alt alt.setAutoGen(true); // keep text of elements rb.addAlternative(alt); // add alt to rule block r.addReference(rr); // track ref to this rule in rule blk } } } rb.setAutoGen(true); // keep text of elements rb.prepareForAnalysis(); //System.out.println(rb); return rb; } /** Return block as if they had typed: "( rule )?" 
*/ private AlternativeBlock createOptionalRuleRef(String rule, Token start) { // Make the subrule AlternativeBlock blk = new AlternativeBlock(grammar, start, false); // Make sure rule is defined String mrule = CodeGenerator.encodeLexerRuleName(rule); // can only be a lexer rule! if (!grammar.isDefined(mrule)) { grammar.define(new RuleSymbol(mrule)); } // Make the rule ref element // RK: fixme probably easier to abuse start token.. Token t = new CommonToken(ANTLRTokenTypes.TOKEN_REF, rule); t.setLine(start.getLine()); t.setLine(start.getColumn()); RuleRefElement rref = new RuleRefElement(grammar, t, GrammarElement.AUTO_GEN_NONE); rref.enclosingRuleName = ruleBlock.ruleName; // Make the end of block element BlockEndElement end = new BlockEndElement(grammar); end.block = blk; // end block points back to start of blk // Make an alternative, putting the rule ref into it Alternative alt = new Alternative(rref); alt.addElement(end); // last element in alt points to end of block // Add the alternative to this block blk.addAlternative(alt); // create an empty (optional) alt and add to blk Alternative optAlt = new Alternative(); optAlt.addElement(end); // points immediately to end of block blk.addAlternative(optAlt); blk.prepareForAnalysis(); return blk; } public void defineRuleName(Token r, String access, boolean ruleAutoGen, String docComment) throws SemanticException { // if ( Character.isUpperCase(r.getText().charAt(0)) ) { if (r.type == ANTLRTokenTypes.TOKEN_REF) { if (!(grammar instanceof LexerGrammar)) { tool.error("Lexical rule " + r.getText() + " defined outside of lexer", grammar.getFilename(), r.getLine(), r.getColumn()); r.setText(r.getText().toLowerCase()); } } else { if (grammar instanceof LexerGrammar) { tool.error("Lexical rule names must be upper case, '" + r.getText() + "' is not", grammar.getFilename(), r.getLine(), r.getColumn()); r.setText(r.getText().toUpperCase()); } } super.defineRuleName(r, access, ruleAutoGen, docComment); String id = r.getText(); 
// if ( Character.isUpperCase(id.charAt(0)) ) { // lexer rule? if (r.type == ANTLRTokenTypes.TOKEN_REF) { // lexer rule? id = CodeGenerator.encodeLexerRuleName(id); } RuleSymbol rs = (RuleSymbol)grammar.getSymbol(id); RuleBlock rb = new RuleBlock(grammar, r.getText(), r.getLine(), ruleAutoGen); // Lexer rules do not generate default error handling rb.setDefaultErrorHandler(grammar.getDefaultErrorHandler()); ruleBlock = rb; blocks.push(new BlockContext()); // enter new context context().block = rb; rs.setBlock(rb); ruleEnd = new RuleEndElement(grammar); rb.setEndElement(ruleEnd); nested = 0; } public void endAlt() { super.endAlt(); if (nested == 0) { // all rule-level alts link to ruleEnd node addElementToCurrentAlt(ruleEnd); } else { addElementToCurrentAlt(context().blockEnd); } context().altNum++; } public void endChildList() { super.endChildList(); // create a final node to which the last elememt of the single // alternative will point. Done for compatibility with analyzer. // Does NOT point to any block like alternative blocks because the // TreeElement is not a block. This is used only as a placeholder. 
BlockEndElement be = new BlockEndElement(grammar); be.block = context().block; addElementToCurrentAlt(be); } public void endExceptionGroup() { super.endExceptionGroup(); } public void endExceptionSpec() { super.endExceptionSpec(); if (currentExceptionSpec == null) { tool.panic("exception processing internal error -- no active exception spec"); } if (context().block instanceof RuleBlock) { // Named rule ((RuleBlock)context().block).addExceptionSpec(currentExceptionSpec); } else { // It must be a plain-old alternative block if (context().currentAlt().exceptionSpec != null) { tool.error("Alternative already has an exception specification", grammar.getFilename(), context().block.getLine(), context().block.getColumn()); } else { context().currentAlt().exceptionSpec = currentExceptionSpec; } } currentExceptionSpec = null; } /** Called at the end of processing a grammar */ public void endGrammar() { if (grammarError) { abortGrammar(); } else { super.endGrammar(); } } public void endRule(String rule) { super.endRule(rule); BlockContext ctx = (BlockContext)blocks.pop(); // remove scope // record the start of this block in the ending node ruleEnd.block = ctx.block; ruleEnd.block.prepareForAnalysis(); //System.out.println(ctx.block); } public void endSubRule() { super.endSubRule(); nested--; // remove subrule context from scope stack BlockContext ctx = (BlockContext)blocks.pop(); AlternativeBlock block = ctx.block; // If the subrule is marked with ~, check that it is // a valid candidate for analysis if ( block.not && !(block instanceof SynPredBlock) && !(block instanceof ZeroOrMoreBlock) && !(block instanceof OneOrMoreBlock) ) { if (!analyzer.subruleCanBeInverted(block, grammar instanceof LexerGrammar)) { String newline = System.getProperty("line.separator"); tool.error( "This subrule cannot be inverted. Only subrules of the form:" + newline + " (T1|T2|T3...) 
or" + newline + " ('c1'|'c2'|'c3'...)" + newline + "may be inverted (ranges are also allowed).", grammar.getFilename(), block.getLine(), block.getColumn() ); } } // add the subrule as element if not a syn pred if (block instanceof SynPredBlock) { // record a reference to the recently-recognized syn pred in the // enclosing block. SynPredBlock synpred = (SynPredBlock)block; context().block.hasASynPred = true; context().currentAlt().synPred = synpred; grammar.hasSyntacticPredicate = true; synpred.removeTrackingOfRuleRefs(grammar); } else { addElementToCurrentAlt(block); } ctx.blockEnd.block.prepareForAnalysis(); } public void endTree() { super.endTree(); BlockContext ctx = (BlockContext)blocks.pop(); addElementToCurrentAlt(ctx.block); // add new TreeElement to enclosing alt. } /** Remember that a major error occured in the grammar */ public void hasError() { grammarError = true; } private void labelElement(AlternativeElement el, Token label) { if (label != null) { // Does this label already exist? for (int i = 0; i < ruleBlock.labeledElements.size(); i++) { AlternativeElement altEl = (AlternativeElement)ruleBlock.labeledElements.elementAt(i); String l = altEl.getLabel(); if (l != null && l.equals(label.getText())) { tool.error("Label '" + label.getText() + "' has already been defined", grammar.getFilename(), label.getLine(), label.getColumn()); return; } } // add this node to the list of labeled elements el.setLabel(label.getText()); ruleBlock.labeledElements.appendElement(el); } } public void noAutoGenSubRule() { context().block.setAutoGen(false); } public void oneOrMoreSubRule() { if (context().block.not) { tool.error("'~' cannot be applied to (...)* subrule", grammar.getFilename(), context().block.getLine(), context().block.getColumn()); } // create the right kind of object now that we know what that is // and switch the list of alternatives. Adjust the stack of blocks. // copy any init action also. 
OneOrMoreBlock b = new OneOrMoreBlock(grammar); setBlock(b, context().block); BlockContext old = (BlockContext)blocks.pop(); // remove old scope; we want new type of subrule blocks.push(new BlockContext()); context().block = b; context().blockEnd = old.blockEnd; context().blockEnd.block = b; } public void optionalSubRule() { if (context().block.not) { tool.error("'~' cannot be applied to (...)? subrule", grammar.getFilename(), context().block.getLine(), context().block.getColumn()); } // convert (X)? -> (X|) so that we can ignore optional blocks altogether! // It already thinks that we have a simple subrule, just add option block. beginAlt(false); endAlt(); } public void refAction(Token action) { super.refAction(action); context().block.hasAnAction = true; addElementToCurrentAlt(new ActionElement(grammar, action)); } public void setUserExceptions(String thr) { ((RuleBlock)context().block).throwsSpec = thr; } // Only called for rule blocks public void refArgAction(Token action) { ((RuleBlock)context().block).argAction = action.getText(); } public void refCharLiteral(Token lit, Token label, boolean inverted, int autoGenType, boolean lastInRule) { if (!(grammar instanceof LexerGrammar)) { tool.error("Character literal only valid in lexer", grammar.getFilename(), lit.getLine(), lit.getColumn()); return; } super.refCharLiteral(lit, label, inverted, autoGenType, lastInRule); CharLiteralElement cl = new CharLiteralElement((LexerGrammar)grammar, lit, inverted, autoGenType); // Generate a warning for non-lowercase ASCII when case-insensitive if ( !((LexerGrammar)grammar).caseSensitive && cl.getType() < 128 && Character.toLowerCase((char)cl.getType()) != (char)cl.getType() ) { tool.warning("Character literal must be lowercase when caseSensitive=false", grammar.getFilename(), lit.getLine(), lit.getColumn()); } addElementToCurrentAlt(cl); labelElement(cl, label); // if ignore option is set, must add an optional call to the specified rule. 
String ignore = ruleBlock.getIgnoreRule(); if (!lastInRule && ignore != null) { addElementToCurrentAlt(createOptionalRuleRef(ignore, lit)); } } public void refCharRange(Token t1, Token t2, Token label, int autoGenType, boolean lastInRule) { if (!(grammar instanceof LexerGrammar)) { tool.error("Character range only valid in lexer", grammar.getFilename(), t1.getLine(), t1.getColumn()); return; } int rangeMin = ANTLRLexer.tokenTypeForCharLiteral(t1.getText()); int rangeMax = ANTLRLexer.tokenTypeForCharLiteral(t2.getText()); if (rangeMax < rangeMin) { tool.error("Malformed range.", grammar.getFilename(), t1.getLine(), t1.getColumn()); return; } // Generate a warning for non-lowercase ASCII when case-insensitive if (!((LexerGrammar)grammar).caseSensitive) { if (rangeMin < 128 && Character.toLowerCase((char)rangeMin) != (char)rangeMin) { tool.warning("Character literal must be lowercase when caseSensitive=false", grammar.getFilename(), t1.getLine(), t1.getColumn()); } if (rangeMax < 128 && Character.toLowerCase((char)rangeMax) != (char)rangeMax) { tool.warning("Character literal must be lowercase when caseSensitive=false", grammar.getFilename(), t2.getLine(), t2.getColumn()); } } super.refCharRange(t1, t2, label, autoGenType, lastInRule); CharRangeElement cr = new CharRangeElement((LexerGrammar)grammar, t1, t2, autoGenType); addElementToCurrentAlt(cr); labelElement(cr, label); // if ignore option is set, must add an optional call to the specified rule. 
String ignore = ruleBlock.getIgnoreRule(); if (!lastInRule && ignore != null) { addElementToCurrentAlt(createOptionalRuleRef(ignore, t1)); } } public void refTokensSpecElementOption(Token tok, Token option, Token value) { /* System.out.println("setting tokens spec option for "+tok.getText()); System.out.println(option.getText()+","+value.getText()); */ TokenSymbol ts = (TokenSymbol) grammar.tokenManager.getTokenSymbol(tok.getText()); if (ts == null) { tool.panic("cannot find " + tok.getText() + "in tokens {...}"); } if (option.getText().equals("AST")) { ts.setASTNodeType(value.getText()); } else { grammar.antlrTool.error("invalid tokens {...} element option:" + option.getText(), grammar.getFilename(), option.getLine(), option.getColumn()); } } public void refElementOption(Token option, Token value) { /* System.out.println("setting option for "+context().currentElement()); System.out.println(option.getText()+","+value.getText()); */ AlternativeElement e = context().currentElement(); if (e instanceof StringLiteralElement || e instanceof TokenRefElement || e instanceof WildcardElement) { ((GrammarAtom)e).setOption(option, value); } else { tool.error("cannot use element option (" + option.getText() + ") for this kind of element", grammar.getFilename(), option.getLine(), option.getColumn()); } } /** Add an exception handler to an exception spec */ public void refExceptionHandler(Token exTypeAndName, Token action) { super.refExceptionHandler(exTypeAndName, action); if (currentExceptionSpec == null) { tool.panic("exception handler processing internal error"); } currentExceptionSpec.addHandler(new ExceptionHandler(exTypeAndName, action)); } public void refInitAction(Token action) { super.refAction(action); context().block.setInitAction(action.getText()); } public void refMemberAction(Token act) { grammar.classMemberAction = act; } public void refPreambleAction(Token act) { super.refPreambleAction(act); } // Only called for rule blocks public void refReturnAction(Token 
returnAction) { if (grammar instanceof LexerGrammar) { String name = CodeGenerator.encodeLexerRuleName(((RuleBlock)context().block).getRuleName()); RuleSymbol rs = (RuleSymbol)grammar.getSymbol(name); if (rs.access.equals("public")) { tool.warning("public Lexical rules cannot specify return type", grammar.getFilename(), returnAction.getLine(), returnAction.getColumn()); return; } } ((RuleBlock)context().block).returnAction = returnAction.getText(); } public void refRule(Token idAssign, Token r, Token label, Token args, int autoGenType) { // Disallow parser rule references in the lexer if (grammar instanceof LexerGrammar) { // if (!Character.isUpperCase(r.getText().charAt(0))) { if (r.type != ANTLRTokenTypes.TOKEN_REF) { tool.error("Parser rule " + r.getText() + " referenced in lexer"); return; } if (autoGenType == GrammarElement.AUTO_GEN_CARET) { tool.error("AST specification ^ not allowed in lexer", grammar.getFilename(), r.getLine(), r.getColumn()); } } super.refRule(idAssign, r, label, args, autoGenType); lastRuleRef = new RuleRefElement(grammar, r, autoGenType); if (args != null) { lastRuleRef.setArgs(args.getText()); } if (idAssign != null) { lastRuleRef.setIdAssign(idAssign.getText()); } addElementToCurrentAlt(lastRuleRef); String id = r.getText(); // if ( Character.isUpperCase(id.charAt(0)) ) { // lexer rule? if (r.type == ANTLRTokenTypes.TOKEN_REF) { // lexer rule? id = CodeGenerator.encodeLexerRuleName(id); } // update symbol table so it knows what nodes reference the rule. 
RuleSymbol rs = (RuleSymbol)grammar.getSymbol(id); rs.addReference(lastRuleRef); labelElement(lastRuleRef, label); } public void refSemPred(Token pred) { //System.out.println("refSemPred "+pred.getText()); super.refSemPred(pred); //System.out.println("context().block: "+context().block); if (context().currentAlt().atStart()) { context().currentAlt().semPred = pred.getText(); } else { ActionElement a = new ActionElement(grammar, pred); a.isSemPred = true; addElementToCurrentAlt(a); } //System.out.println("DONE refSemPred "+pred.getText()); } public void refStringLiteral(Token lit, Token label, int autoGenType, boolean lastInRule) { super.refStringLiteral(lit, label, autoGenType, lastInRule); if (grammar instanceof TreeWalkerGrammar && autoGenType == GrammarElement.AUTO_GEN_CARET) { tool.error("^ not allowed in here for tree-walker", grammar.getFilename(), lit.getLine(), lit.getColumn()); } StringLiteralElement sl = new StringLiteralElement(grammar, lit, autoGenType); // If case-insensitive, then check each char of the stirng literal if (grammar instanceof LexerGrammar && !((LexerGrammar)grammar).caseSensitive) { for (int i = 1; i < lit.getText().length() - 1; i++) { char c = lit.getText().charAt(i); if (c < 128 && Character.toLowerCase(c) != c) { tool.warning("Characters of string literal must be lowercase when caseSensitive=false", grammar.getFilename(), lit.getLine(), lit.getColumn()); break; } } } addElementToCurrentAlt(sl); labelElement(sl, label); // if ignore option is set, must add an optional call to the specified rule. 
String ignore = ruleBlock.getIgnoreRule(); if (!lastInRule && ignore != null) { addElementToCurrentAlt(createOptionalRuleRef(ignore, lit)); } } public void refToken(Token idAssign, Token t, Token label, Token args, boolean inverted, int autoGenType, boolean lastInRule) { if (grammar instanceof LexerGrammar) { // In lexer, token references are really rule references if (autoGenType == GrammarElement.AUTO_GEN_CARET) { tool.error("AST specification ^ not allowed in lexer", grammar.getFilename(), t.getLine(), t.getColumn()); } if (inverted) { tool.error("~TOKEN is not allowed in lexer", grammar.getFilename(), t.getLine(), t.getColumn()); } refRule(idAssign, t, label, args, autoGenType); // if ignore option is set, must add an optional call to the specified token rule. String ignore = ruleBlock.getIgnoreRule(); if (!lastInRule && ignore != null) { addElementToCurrentAlt(createOptionalRuleRef(ignore, t)); } } else { // Cannot have token ref args or assignment outside of lexer if (idAssign != null) { tool.error("Assignment from token reference only allowed in lexer", grammar.getFilename(), idAssign.getLine(), idAssign.getColumn()); } if (args != null) { tool.error("Token reference arguments only allowed in lexer", grammar.getFilename(), args.getLine(), args.getColumn()); } super.refToken(idAssign, t, label, args, inverted, autoGenType, lastInRule); TokenRefElement te = new TokenRefElement(grammar, t, inverted, autoGenType); addElementToCurrentAlt(te); labelElement(te, label); } } public void refTokenRange(Token t1, Token t2, Token label, int autoGenType, boolean lastInRule) { if (grammar instanceof LexerGrammar) { tool.error("Token range not allowed in lexer", grammar.getFilename(), t1.getLine(), t1.getColumn()); return; } super.refTokenRange(t1, t2, label, autoGenType, lastInRule); TokenRangeElement tr = new TokenRangeElement(grammar, t1, t2, autoGenType); if (tr.end < tr.begin) { tool.error("Malformed range.", grammar.getFilename(), t1.getLine(), t1.getColumn()); 
return; } addElementToCurrentAlt(tr); labelElement(tr, label); } public void refTreeSpecifier(Token treeSpec) { context().currentAlt().treeSpecifier = treeSpec; } public void refWildcard(Token t, Token label, int autoGenType) { super.refWildcard(t, label, autoGenType); WildcardElement wc = new WildcardElement(grammar, t, autoGenType); addElementToCurrentAlt(wc); labelElement(wc, label); } /** Get ready to process a new grammar */ public void reset() { super.reset(); blocks = new LList(); lastRuleRef = null; ruleEnd = null; ruleBlock = null; nested = 0; currentExceptionSpec = null; grammarError = false; } public void setArgOfRuleRef(Token argAction) { super.setArgOfRuleRef(argAction); lastRuleRef.setArgs(argAction.getText()); } public static void setBlock(AlternativeBlock b, AlternativeBlock src) { b.setAlternatives(src.getAlternatives()); b.initAction = src.initAction; //b.lookaheadDepth = src.lookaheadDepth; b.label = src.label; b.hasASynPred = src.hasASynPred; b.hasAnAction = src.hasAnAction; b.warnWhenFollowAmbig = src.warnWhenFollowAmbig; b.generateAmbigWarnings = src.generateAmbigWarnings; b.line = src.line; b.greedy = src.greedy; b.greedySet = src.greedySet; } public void setRuleOption(Token key, Token value) { //((RuleBlock)context().block).setOption(key, value); ruleBlock.setOption(key, value); } public void setSubruleOption(Token key, Token value) { ((AlternativeBlock)context().block).setOption(key, value); } public void synPred() { if (context().block.not) { tool.error("'~' cannot be applied to syntactic predicate", grammar.getFilename(), context().block.getLine(), context().block.getColumn()); } // create the right kind of object now that we know what that is // and switch the list of alternatives. Adjust the stack of blocks. // copy any init action also. 
SynPredBlock b = new SynPredBlock(grammar); setBlock(b, context().block); BlockContext old = (BlockContext)blocks.pop(); // remove old scope; we want new type of subrule blocks.push(new BlockContext()); context().block = b; context().blockEnd = old.blockEnd; context().blockEnd.block = b; } public void zeroOrMoreSubRule() { if (context().block.not) { tool.error("'~' cannot be applied to (...)+ subrule", grammar.getFilename(), context().block.getLine(), context().block.getColumn()); } // create the right kind of object now that we know what that is // and switch the list of alternatives. Adjust the stack of blocks. // copy any init action also. ZeroOrMoreBlock b = new ZeroOrMoreBlock(grammar); setBlock(b, context().block); BlockContext old = (BlockContext)blocks.pop(); // remove old scope; we want new type of subrule blocks.push(new BlockContext()); context().block = b; context().blockEnd = old.blockEnd; context().blockEnd.block = b; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/Makefile.in000077500000000000000000000314521161462365500231730ustar00rootroot00000000000000##xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx @stdvars@ ##xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx ## do not change this value subdir=antlr ## compile java files all: @ANTLR_JAR@ ## antlr_java_action_FILES = \ actions/java/ActionLexer.java \ actions/java/ActionLexerTokenTypes.java \ $(eol) ## antlr_cpp_action_FILES = \ actions/cpp/ActionLexer.java \ actions/cpp/ActionLexerTokenTypes.java \ $(eol) ## antlr_csharp_action_FILES = \ actions/csharp/ActionLexer.java \ actions/csharp/ActionLexerTokenTypes.java \ $(eol) ## antlr_python_action_FILES = \ actions/python/ActionLexer.java \ actions/python/ActionLexerTokenTypes.java \ $(eol) ## antlr_python_code_FILES = \ actions/python/CodeLexer.java \ actions/python/CodeLexerTokenTypes.java \ $(eol) ## antlr_antlr_FILES = \ ANTLRParser.java \ ANTLRTokenTypes.java \ ANTLRLexer.java \ $(eol) ## antlr_tokdef_FILES = \ 
ANTLRTokdefParser.java \ ANTLRTokdefLexer.java \ ANTLRTokdefParserTokenTypes.java \ $(eol) ## all actions files antlr_action_FILES = \ $(antlr_java_action_FILES) \ $(antlr_cpp_action_FILES) \ $(antlr_csharp_action_FILES) \ $(antlr_python_action_FILES) \ $(antlr_python_code_FILES) \ $(eol) ## all generated files antlr_gen_FILES = \ $(antlr_antlr_FILES) \ $(antlr_tokdef_FILES) \ $(antlr_action_FILES) \ $(eol) ## all java files antlr_java1_FILES = \ Version.java \ $(antlr_gen_FILES) \ @abs_top_srcdir@/antlr/ANTLRError.java \ @abs_top_srcdir@/antlr/ANTLRException.java \ @abs_top_srcdir@/antlr/ANTLRGrammarParseBehavior.java \ @abs_top_srcdir@/antlr/ANTLRHashString.java \ @abs_top_srcdir@/antlr/ANTLRStringBuffer.java \ @abs_top_srcdir@/antlr/ASTFactory.java \ @abs_top_srcdir@/antlr/ASTIterator.java \ @abs_top_srcdir@/antlr/ASTNULLType.java \ @abs_top_srcdir@/antlr/ASTPair.java \ @abs_top_srcdir@/antlr/ASTVisitor.java \ @abs_top_srcdir@/antlr/ActionElement.java \ @abs_top_srcdir@/antlr/ActionTransInfo.java \ @abs_top_srcdir@/antlr/Alternative.java \ @abs_top_srcdir@/antlr/AlternativeBlock.java \ @abs_top_srcdir@/antlr/AlternativeElement.java \ @abs_top_srcdir@/antlr/BaseAST.java \ @abs_top_srcdir@/antlr/BlockContext.java \ @abs_top_srcdir@/antlr/BlockEndElement.java \ @abs_top_srcdir@/antlr/BlockWithImpliedExitPath.java \ @abs_top_srcdir@/antlr/ByteBuffer.java \ @abs_top_srcdir@/antlr/BooBlockFinishingInfo.java \ @abs_top_srcdir@/antlr/BooCharFormatter.java \ @abs_top_srcdir@/antlr/BooCodeGenerator.java \ @abs_top_srcdir@/antlr/BooNameSpace.java \ @abs_top_srcdir@/antlr/CSharpBlockFinishingInfo.java \ @abs_top_srcdir@/antlr/CSharpCharFormatter.java \ @abs_top_srcdir@/antlr/CSharpCodeGenerator.java \ @abs_top_srcdir@/antlr/CSharpNameSpace.java \ @abs_top_srcdir@/antlr/CharBuffer.java \ @abs_top_srcdir@/antlr/CharFormatter.java \ @abs_top_srcdir@/antlr/CharLiteralElement.java \ @abs_top_srcdir@/antlr/CharQueue.java \ @abs_top_srcdir@/antlr/CharRangeElement.java \ 
@abs_top_srcdir@/antlr/CharScanner.java \ @abs_top_srcdir@/antlr/CharStreamException.java \ @abs_top_srcdir@/antlr/CharStreamIOException.java \ @abs_top_srcdir@/antlr/CodeGenerator.java \ @abs_top_srcdir@/antlr/CommonAST.java \ @abs_top_srcdir@/antlr/CommonASTWithHiddenTokens.java \ @abs_top_srcdir@/antlr/CommonHiddenStreamToken.java \ @abs_top_srcdir@/antlr/CommonToken.java \ @abs_top_srcdir@/antlr/CppBlockFinishingInfo.java \ @abs_top_srcdir@/antlr/CppCharFormatter.java \ @abs_top_srcdir@/antlr/CppCodeGenerator.java \ @abs_top_srcdir@/antlr/DefaultFileLineFormatter.java \ @abs_top_srcdir@/antlr/DefaultToolErrorHandler.java \ @abs_top_srcdir@/antlr/DefineGrammarSymbols.java \ @abs_top_srcdir@/antlr/DiagnosticCodeGenerator.java \ @abs_top_srcdir@/antlr/DocBookCodeGenerator.java \ @abs_top_srcdir@/antlr/DumpASTVisitor.java \ @abs_top_srcdir@/antlr/ExceptionHandler.java \ @abs_top_srcdir@/antlr/ExceptionSpec.java \ @abs_top_srcdir@/antlr/FileCopyException.java \ @abs_top_srcdir@/antlr/FileLineFormatter.java \ @abs_top_srcdir@/antlr/Grammar.java \ @abs_top_srcdir@/antlr/GrammarAnalyzer.java \ @abs_top_srcdir@/antlr/GrammarAtom.java \ @abs_top_srcdir@/antlr/GrammarElement.java \ @abs_top_srcdir@/antlr/GrammarSymbol.java \ @abs_top_srcdir@/antlr/HTMLCodeGenerator.java \ @abs_top_srcdir@/antlr/ImportVocabTokenManager.java \ @abs_top_srcdir@/antlr/InputBuffer.java \ @abs_top_srcdir@/antlr/JavaBlockFinishingInfo.java \ @abs_top_srcdir@/antlr/JavaCharFormatter.java \ @abs_top_srcdir@/antlr/JavaCodeGenerator.java \ @abs_top_srcdir@/antlr/LLkAnalyzer.java \ @abs_top_srcdir@/antlr/LLkGrammarAnalyzer.java \ @abs_top_srcdir@/antlr/LLkParser.java \ @abs_top_srcdir@/antlr/LexerGrammar.java \ @abs_top_srcdir@/antlr/LexerSharedInputState.java \ @abs_top_srcdir@/antlr/Lookahead.java \ @abs_top_srcdir@/antlr/MakeGrammar.java \ @abs_top_srcdir@/antlr/MismatchedCharException.java \ @abs_top_srcdir@/antlr/MismatchedTokenException.java \ @abs_top_srcdir@/antlr/NameSpace.java \ 
@abs_top_srcdir@/antlr/NoViableAltException.java \ @abs_top_srcdir@/antlr/NoViableAltForCharException.java \ @abs_top_srcdir@/antlr/OneOrMoreBlock.java \ @abs_top_srcdir@/antlr/ParseTree.java \ @abs_top_srcdir@/antlr/ParseTreeRule.java \ @abs_top_srcdir@/antlr/ParseTreeToken.java \ @abs_top_srcdir@/antlr/Parser.java \ @abs_top_srcdir@/antlr/ParserGrammar.java \ @abs_top_srcdir@/antlr/ParserSharedInputState.java \ @abs_top_srcdir@/antlr/PreservingFileWriter.java \ @abs_top_srcdir@/antlr/PythonBlockFinishingInfo.java \ @abs_top_srcdir@/antlr/PythonCharFormatter.java \ @abs_top_srcdir@/antlr/PythonCodeGenerator.java \ @abs_top_srcdir@/antlr/RecognitionException.java \ @abs_top_srcdir@/antlr/RuleBlock.java \ @abs_top_srcdir@/antlr/RuleEndElement.java \ @abs_top_srcdir@/antlr/RuleRefElement.java \ @abs_top_srcdir@/antlr/RuleSymbol.java \ @abs_top_srcdir@/antlr/SemanticException.java \ @abs_top_srcdir@/antlr/SimpleTokenManager.java \ @abs_top_srcdir@/antlr/StringLiteralElement.java \ @abs_top_srcdir@/antlr/StringLiteralSymbol.java \ @abs_top_srcdir@/antlr/StringUtils.java \ @abs_top_srcdir@/antlr/SynPredBlock.java \ @abs_top_srcdir@/antlr/Token.java \ @abs_top_srcdir@/antlr/TokenBuffer.java \ @abs_top_srcdir@/antlr/TokenManager.java \ @abs_top_srcdir@/antlr/TokenQueue.java \ @abs_top_srcdir@/antlr/TokenRangeElement.java \ @abs_top_srcdir@/antlr/TokenRefElement.java \ @abs_top_srcdir@/antlr/TokenStream.java \ @abs_top_srcdir@/antlr/TokenStreamBasicFilter.java \ @abs_top_srcdir@/antlr/TokenStreamException.java \ @abs_top_srcdir@/antlr/TokenStreamHiddenTokenFilter.java \ @abs_top_srcdir@/antlr/TokenStreamIOException.java \ @abs_top_srcdir@/antlr/TokenStreamRecognitionException.java \ @abs_top_srcdir@/antlr/TokenStreamRetryException.java \ @abs_top_srcdir@/antlr/TokenStreamRewriteEngine.java \ @abs_top_srcdir@/antlr/TokenStreamSelector.java \ @abs_top_srcdir@/antlr/TokenSymbol.java \ @abs_top_srcdir@/antlr/TokenWithIndex.java \ @abs_top_srcdir@/antlr/Tool.java \ 
@abs_top_srcdir@/antlr/ToolErrorHandler.java \ @abs_top_srcdir@/antlr/TreeBlockContext.java \ @abs_top_srcdir@/antlr/TreeElement.java \ @abs_top_srcdir@/antlr/TreeParser.java \ @abs_top_srcdir@/antlr/TreeParserSharedInputState.java \ @abs_top_srcdir@/antlr/TreeSpecifierNode.java \ @abs_top_srcdir@/antlr/TreeWalkerGrammar.java \ @abs_top_srcdir@/antlr/WildcardElement.java \ @abs_top_srcdir@/antlr/ZeroOrMoreBlock.java \ @abs_top_srcdir@/antlr/build/ANTLR.java \ @abs_top_srcdir@/antlr/build/StreamScarfer.java \ @abs_top_srcdir@/antlr/build/Tool.java \ @abs_top_srcdir@/antlr/collections/AST.java \ @abs_top_srcdir@/antlr/collections/ASTEnumeration.java \ @abs_top_srcdir@/antlr/collections/Enumerator.java \ @abs_top_srcdir@/antlr/collections/List.java \ @abs_top_srcdir@/antlr/collections/Stack.java \ @abs_top_srcdir@/antlr/collections/impl/ASTArray.java \ @abs_top_srcdir@/antlr/collections/impl/ASTEnumerator.java \ @abs_top_srcdir@/antlr/collections/impl/BitSet.java \ @abs_top_srcdir@/antlr/collections/impl/IndexedVector.java \ @abs_top_srcdir@/antlr/collections/impl/IntRange.java \ @abs_top_srcdir@/antlr/collections/impl/LLCell.java \ @abs_top_srcdir@/antlr/collections/impl/LLEnumeration.java \ @abs_top_srcdir@/antlr/collections/impl/LList.java \ @abs_top_srcdir@/antlr/collections/impl/Vector.java \ @abs_top_srcdir@/antlr/collections/impl/VectorEnumeration.java \ @abs_top_srcdir@/antlr/collections/impl/VectorEnumerator.java \ @abs_top_srcdir@/antlr/debug/DebuggingCharScanner.java \ @abs_top_srcdir@/antlr/debug/DebuggingInputBuffer.java \ @abs_top_srcdir@/antlr/debug/DebuggingParser.java \ @abs_top_srcdir@/antlr/debug/Event.java \ @abs_top_srcdir@/antlr/debug/GuessingEvent.java \ @abs_top_srcdir@/antlr/debug/InputBufferAdapter.java \ @abs_top_srcdir@/antlr/debug/InputBufferEvent.java \ @abs_top_srcdir@/antlr/debug/InputBufferEventSupport.java \ @abs_top_srcdir@/antlr/debug/InputBufferListener.java \ @abs_top_srcdir@/antlr/debug/InputBufferReporter.java \ 
@abs_top_srcdir@/antlr/debug/LLkDebuggingParser.java \ @abs_top_srcdir@/antlr/debug/ListenerBase.java \ @abs_top_srcdir@/antlr/debug/MessageAdapter.java \ @abs_top_srcdir@/antlr/debug/MessageEvent.java \ @abs_top_srcdir@/antlr/debug/MessageListener.java \ @abs_top_srcdir@/antlr/debug/NewLineEvent.java \ @abs_top_srcdir@/antlr/debug/NewLineListener.java \ @abs_top_srcdir@/antlr/debug/ParseTreeDebugParser.java \ @abs_top_srcdir@/antlr/debug/ParserAdapter.java \ @abs_top_srcdir@/antlr/debug/ParserController.java \ @abs_top_srcdir@/antlr/debug/ParserEventSupport.java \ @abs_top_srcdir@/antlr/debug/ParserListener.java \ @abs_top_srcdir@/antlr/debug/ParserMatchAdapter.java \ @abs_top_srcdir@/antlr/debug/ParserMatchEvent.java \ @abs_top_srcdir@/antlr/debug/ParserMatchListener.java \ @abs_top_srcdir@/antlr/debug/ParserReporter.java \ @abs_top_srcdir@/antlr/debug/ParserTokenAdapter.java \ @abs_top_srcdir@/antlr/debug/ParserTokenEvent.java \ @abs_top_srcdir@/antlr/debug/ParserTokenListener.java \ @abs_top_srcdir@/antlr/debug/SemanticPredicateAdapter.java \ @abs_top_srcdir@/antlr/debug/SemanticPredicateEvent.java \ @abs_top_srcdir@/antlr/debug/SemanticPredicateListener.java \ @abs_top_srcdir@/antlr/debug/SyntacticPredicateAdapter.java \ @abs_top_srcdir@/antlr/debug/SyntacticPredicateEvent.java \ @abs_top_srcdir@/antlr/debug/SyntacticPredicateListener.java \ @abs_top_srcdir@/antlr/debug/TraceAdapter.java \ @abs_top_srcdir@/antlr/debug/TraceEvent.java \ @abs_top_srcdir@/antlr/debug/TraceListener.java \ @abs_top_srcdir@/antlr/debug/Tracer.java \ @abs_top_srcdir@/antlr/debug/misc/ASTFrame.java \ @abs_top_srcdir@/antlr/preprocessor/Grammar.java \ @abs_top_srcdir@/antlr/preprocessor/GrammarFile.java \ @abs_top_srcdir@/antlr/preprocessor/Hierarchy.java \ @abs_top_srcdir@/antlr/preprocessor/Option.java \ @abs_top_srcdir@/antlr/preprocessor/Preprocessor.java \ @abs_top_srcdir@/antlr/preprocessor/PreprocessorLexer.java \ @abs_top_srcdir@/antlr/preprocessor/PreprocessorTokenTypes.java \ 
@abs_top_srcdir@/antlr/preprocessor/Rule.java \ @abs_top_srcdir@/antlr/preprocessor/Tool.java \ $(eol) antlr_swing_FILES = \ @abs_top_srcdir@/antlr/debug/misc/JTreeASTModel.java \ @abs_top_srcdir@/antlr/debug/misc/JTreeASTPanel.java \ $(eol) ## damned ugly hack .. ifeq (gcj,@javac@) antlr_java_FILES = $(antlr_java1_FILES) else antlr_java_FILES = $(antlr_java1_FILES) $(antlr_swing_FILES) endif ## rule how to compile java files @ANTLR_JAR@ :: $(antlr_java_FILES) @ -rm -f $@ @ @JAVA_COMPILE_CMD@ $? @ @JAR_CMD@ $@ antlr @ test -f $@ || { exit 1; } ## how ANTLR grammar files .. $(antlr_java_action_FILES) : @abs_top_srcdir@/antlr/actions/java/action.g @cd actions/java && @ANTLR_COMPILE_CMD@ $< $(antlr_cpp_action_FILES) : @abs_top_srcdir@/antlr/actions/cpp/action.g @cd actions/cpp && @ANTLR_COMPILE_CMD@ $< $(antlr_csharp_action_FILES) : @abs_top_srcdir@/antlr/actions/csharp/action.g @cd actions/csharp && @ANTLR_COMPILE_CMD@ $< $(antlr_python_action_FILES) : @abs_top_srcdir@/antlr/actions/python/action.g @cd actions/python && @ANTLR_COMPILE_CMD@ $< $(antlr_python_code_FILES) : @abs_top_srcdir@/antlr/actions/python/code.g @cd actions/python && @ANTLR_COMPILE_CMD@ $< $(antlr_antlr_FILES) : @abs_top_srcdir@/antlr/antlr.g @ANTLR_COMPILE_CMD@ $< $(antlr_tokdef_FILES) : @abs_top_srcdir@/antlr/tokdef.g @ANTLR_COMPILE_CMD@ $< clean :: rm -rf *.class antlr @ANTLR_JAR@ distclean :: clean rm -rf $(antlr_gen_FILES) ## install antlr.jar in libdirectory if exists install :: @ANTLR_JAR@ -$(MKDIR) -p "$(libdir)" -$(MKDIR) -p "$(datadir)/$(versioneddir)" -test -f @ANTLR_JAR@ && {\ $(INSTALL) -m 444 @ANTLR_JAR@ "$(datadir)/$(versioneddir)" ; \ $(INSTALL) -m 444 @ANTLR_JAR@ "$(libdir)" ; \ } ## get configured dependencies @stddeps@ nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/MismatchedCharException.java000066400000000000000000000105761161462365500265250ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * 
Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/MismatchedCharException.java#1 $ */ import antlr.collections.impl.BitSet; public class MismatchedCharException extends RecognitionException { // Types of chars public static final int CHAR = 1; public static final int NOT_CHAR = 2; public static final int RANGE = 3; public static final int NOT_RANGE = 4; public static final int SET = 5; public static final int NOT_SET = 6; // One of the above public int mismatchType; // what was found on the input stream public int foundChar; // For CHAR/NOT_CHAR and RANGE/NOT_RANGE public int expecting; // For RANGE/NOT_RANGE (expecting is lower bound of range) public int upper; // For SET/NOT_SET public BitSet set; // who knows...they may want to ask scanner questions public CharScanner scanner; /** * MismatchedCharException constructor comment. */ public MismatchedCharException() { super("Mismatched char"); } // Expected range / not range public MismatchedCharException(char c, char lower, char upper_, boolean matchNot, CharScanner scanner_) { super("Mismatched char", scanner_.getFilename(), scanner_.getLine(), scanner_.getColumn()); mismatchType = matchNot ? NOT_RANGE : RANGE; foundChar = c; expecting = lower; upper = upper_; scanner = scanner_; } // Expected token / not token public MismatchedCharException(char c, char expecting_, boolean matchNot, CharScanner scanner_) { super("Mismatched char", scanner_.getFilename(), scanner_.getLine(), scanner_.getColumn()); mismatchType = matchNot ? NOT_CHAR : CHAR; foundChar = c; expecting = expecting_; scanner = scanner_; } // Expected BitSet / not BitSet public MismatchedCharException(char c, BitSet set_, boolean matchNot, CharScanner scanner_) { super("Mismatched char", scanner_.getFilename(), scanner_.getLine(), scanner_.getColumn()); mismatchType = matchNot ? 
NOT_SET : SET; foundChar = c; set = set_; scanner = scanner_; } /** * Returns a clean error message (no line number/column information) */ public String getMessage() { StringBuffer sb = new StringBuffer(); switch (mismatchType) { case CHAR: sb.append("expecting "); appendCharName(sb, expecting); sb.append(", found "); appendCharName(sb, foundChar); break; case NOT_CHAR: sb.append("expecting anything but '"); appendCharName(sb, expecting); sb.append("'; got it anyway"); break; case RANGE: case NOT_RANGE: sb.append("expecting token "); if (mismatchType == NOT_RANGE) sb.append("NOT "); sb.append("in range: "); appendCharName(sb, expecting); sb.append(".."); appendCharName(sb, upper); sb.append(", found "); appendCharName(sb, foundChar); break; case SET: case NOT_SET: sb.append("expecting " + (mismatchType == NOT_SET ? "NOT " : "") + "one of ("); int[] elems = set.toArray(); for (int i = 0; i < elems.length; i++) { appendCharName(sb, elems[i]); } sb.append("), found "); appendCharName(sb, foundChar); break; default : sb.append(super.getMessage()); break; } return sb.toString(); } /** Append a char to the msg buffer. 
If special, * then show escaped version */ private void appendCharName(StringBuffer sb, int c) { switch (c) { case 65535 : // 65535 = (char) -1 = EOF sb.append("''"); break; case '\n' : sb.append("'\\n'"); break; case '\r' : sb.append("'\\r'"); break; case '\t' : sb.append("'\\t'"); break; default : sb.append('\''); sb.append((char) c); sb.append('\''); break; } } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/MismatchedTokenException.java000066400000000000000000000137061161462365500267260ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/MismatchedTokenException.java#1 $ */ import antlr.collections.impl.BitSet; import antlr.collections.AST; public class MismatchedTokenException extends RecognitionException { // Token names array for formatting String[] tokenNames; // The token that was encountered public Token token; // The offending AST node if tree walking public AST node; String tokenText = null; // taken from node or token object // Types of tokens public static final int TOKEN = 1; public static final int NOT_TOKEN = 2; public static final int RANGE = 3; public static final int NOT_RANGE = 4; public static final int SET = 5; public static final int NOT_SET = 6; // One of the above public int mismatchType; // For TOKEN/NOT_TOKEN and RANGE/NOT_RANGE public int expecting; // For RANGE/NOT_RANGE (expecting is lower bound of range) public int upper; // For SET/NOT_SET public BitSet set; /** Looking for AST wildcard, didn't find it */ public MismatchedTokenException() { super("Mismatched Token: expecting any AST node", "", -1, -1); } // Expected range / not range public MismatchedTokenException(String[] tokenNames_, AST node_, int lower, int upper_, boolean matchNot) { super("Mismatched Token", "", node_==null? -1:node_.getLine(), node_==null? 
-1:node_.getColumn()); tokenNames = tokenNames_; node = node_; if (node_ == null) { tokenText = ""; } else { tokenText = node_.toString(); } mismatchType = matchNot ? NOT_RANGE : RANGE; expecting = lower; upper = upper_; } // Expected token / not token public MismatchedTokenException(String[] tokenNames_, AST node_, int expecting_, boolean matchNot) { super("Mismatched Token", "", node_==null? -1:node_.getLine(), node_==null? -1:node_.getColumn()); tokenNames = tokenNames_; node = node_; if (node_ == null) { tokenText = ""; } else { tokenText = node_.toString(); } mismatchType = matchNot ? NOT_TOKEN : TOKEN; expecting = expecting_; } // Expected BitSet / not BitSet public MismatchedTokenException(String[] tokenNames_, AST node_, BitSet set_, boolean matchNot) { super("Mismatched Token", "", node_==null? -1:node_.getLine(), node_==null? -1:node_.getColumn()); tokenNames = tokenNames_; node = node_; if (node_ == null) { tokenText = ""; } else { tokenText = node_.toString(); } mismatchType = matchNot ? NOT_SET : SET; set = set_; } // Expected range / not range public MismatchedTokenException(String[] tokenNames_, Token token_, int lower, int upper_, boolean matchNot, String fileName_) { super("Mismatched Token", fileName_, token_.getLine(), token_.getColumn()); tokenNames = tokenNames_; token = token_; tokenText = token_.getText(); mismatchType = matchNot ? NOT_RANGE : RANGE; expecting = lower; upper = upper_; } // Expected token / not token public MismatchedTokenException(String[] tokenNames_, Token token_, int expecting_, boolean matchNot, String fileName_) { super("Mismatched Token", fileName_, token_.getLine(), token_.getColumn()); tokenNames = tokenNames_; token = token_; tokenText = token_.getText(); mismatchType = matchNot ? 
NOT_TOKEN : TOKEN; expecting = expecting_; } // Expected BitSet / not BitSet public MismatchedTokenException(String[] tokenNames_, Token token_, BitSet set_, boolean matchNot, String fileName_) { super("Mismatched Token", fileName_, token_.getLine(), token_.getColumn()); tokenNames = tokenNames_; token = token_; tokenText = token_.getText(); mismatchType = matchNot ? NOT_SET : SET; set = set_; } /** * Returns a clean error message (no line number/column information) */ public String getMessage() { StringBuffer sb = new StringBuffer(); switch (mismatchType) { case TOKEN: sb.append("expecting " + tokenName(expecting) + ", found '" + tokenText + "'"); break; case NOT_TOKEN: sb.append("expecting anything but " + tokenName(expecting) + "; got it anyway"); break; case RANGE: sb.append("expecting token in range: " + tokenName(expecting) + ".." + tokenName(upper) + ", found '" + tokenText + "'"); break; case NOT_RANGE: sb.append("expecting token NOT in range: " + tokenName(expecting) + ".." + tokenName(upper) + ", found '" + tokenText + "'"); break; case SET: case NOT_SET: sb.append("expecting " + (mismatchType == NOT_SET ? 
"NOT " : "") + "one of ("); int[] elems = set.toArray(); for (int i = 0; i < elems.length; i++) { sb.append(" "); sb.append(tokenName(elems[i])); } sb.append("), found '" + tokenText + "'"); break; default : sb.append(super.getMessage()); break; } return sb.toString(); } private String tokenName(int tokenType) { if (tokenType == Token.INVALID_TYPE) { return ""; } else if (tokenType < 0 || tokenType >= tokenNames.length) { return "<" + String.valueOf(tokenType) + ">"; } else { return tokenNames[tokenType]; } } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/NameSpace.java000066400000000000000000000034051161462365500236170ustar00rootroot00000000000000package antlr; /** * ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * Container for a C++ namespace specification. Namespaces can be * nested, so this contains a vector of all the nested names. * * @author David Wagner (JPL/Caltech) 8-12-00 * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/NameSpace.java#1 $ */ import java.util.Vector; import java.util.Enumeration; import java.io.PrintWriter; import java.util.StringTokenizer; public class NameSpace { private Vector names = new Vector(); private String _name; public NameSpace(String name) { _name = new String(name); parse(name); } public String getName() { return _name; } /** * Parse a C++ namespace declaration into seperate names * splitting on :: We could easily parameterize this to make * the delimiter a language-specific parameter, or use subclasses * to support C++ namespaces versus java packages. 
-DAW */ protected void parse(String name) { StringTokenizer tok = new StringTokenizer(name, "::"); while (tok.hasMoreTokens()) names.addElement(tok.nextToken()); } /** * Method to generate the required C++ namespace declarations */ void emitDeclarations(PrintWriter out) { for (Enumeration n = names.elements(); n.hasMoreElements();) { String s = (String)n.nextElement(); out.println("ANTLR_BEGIN_NAMESPACE(" + s + ")"); } } /** * Method to generate the required C++ namespace closures */ void emitClosures(PrintWriter out) { for (int i = 0; i < names.size(); ++i) out.println("ANTLR_END_NAMESPACE"); } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/NoViableAltException.java000066400000000000000000000022021161462365500257740ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/NoViableAltException.java#1 $ */ import antlr.collections.AST; public class NoViableAltException extends RecognitionException { public Token token; public AST node; // handles parsing and treeparsing public NoViableAltException(AST t) { super("NoViableAlt", "", t.getLine(), t.getColumn()); node = t; } public NoViableAltException(Token t, String fileName_) { super("NoViableAlt", fileName_, t.getLine(), t.getColumn()); token = t; } /** * Returns a clean error message (no line number/column information) */ public String getMessage() { if (token != null) { return "unexpected token: " + token.getText(); } // must a tree parser error if token==null if (node == TreeParser.ASTNULL) { return "unexpected end of subtree"; } return "unexpected AST node: " + node.toString(); } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/NoViableAltForCharException.java000066400000000000000000000032741161462365500272530ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at 
http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/NoViableAltForCharException.java#1 $ */ public class NoViableAltForCharException extends RecognitionException { public char foundChar; public NoViableAltForCharException(char c, CharScanner scanner) { super("NoViableAlt", scanner.getFilename(), scanner.getLine(), scanner.getColumn()); foundChar = c; } /** @deprecated As of ANTLR 2.7.2 use {@see #NoViableAltForCharException(char, String, int, int) } */ public NoViableAltForCharException(char c, String fileName, int line) { this(c, fileName, line, -1); } public NoViableAltForCharException(char c, String fileName, int line, int column) { super("NoViableAlt", fileName, line, column); foundChar = c; } /** * Returns a clean error message (no line number/column information) */ public String getMessage() { String mesg = "unexpected char: "; // I'm trying to mirror a change in the C++ stuff. // But java seems to lack something convenient isprint-ish.. // actually we're kludging around unicode and non unicode savy // output stuff like most terms.. Basically one would want to // be able to tweak the generation of this message. 
if ((foundChar >= ' ') && (foundChar <= '~')) { mesg += '\''; mesg += foundChar; mesg += '\''; } else { mesg += "0x"+Integer.toHexString((int)foundChar).toUpperCase(); } return mesg; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/OneOrMoreBlock.java000066400000000000000000000012671161462365500246070ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/OneOrMoreBlock.java#1 $ */ class OneOrMoreBlock extends BlockWithImpliedExitPath { public OneOrMoreBlock(Grammar g) { super(g); } public OneOrMoreBlock(Grammar g, Token start) { super(g, start); } public void generate() { grammar.generator.gen(this); } public Lookahead look(int k) { return grammar.theLLkAnalyzer.look(k, this); } public String toString() { return super.toString() + "+"; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/ParseTree.java000066400000000000000000000024641161462365500236610ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html */ import antlr.*; import antlr.collections.AST; public abstract class ParseTree extends BaseAST { /** Walk parse tree and return requested number of derivation steps. * If steps <= 0, return node text. If steps == 1, return derivation * string at step. 
*/ public String getLeftmostDerivationStep(int step) { if ( step<=0 ) { return toString(); } StringBuffer buf = new StringBuffer(2000); getLeftmostDerivation(buf, step); return buf.toString(); } public String getLeftmostDerivation(int maxSteps) { StringBuffer buf = new StringBuffer(2000); buf.append(" "+this.toString()); buf.append("\n"); for (int d=1; d"); buf.append(getLeftmostDerivationStep(d)); buf.append("\n"); } return buf.toString(); } /** Get derivation and return how many you did (less than requested for * subtree roots. */ protected abstract int getLeftmostDerivation(StringBuffer buf, int step); // just satisfy BaseAST interface; unused as we manually create nodes public void initialize(int i, String s) { } public void initialize(AST ast) { } public void initialize(Token token) { } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/ParseTreeRule.java000066400000000000000000000033421161462365500245050ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html */ import antlr.Token; import antlr.collections.AST; public class ParseTreeRule extends ParseTree { public static final int INVALID_ALT = -1; protected String ruleName; protected int altNumber; // unused until I modify antlr to record this public ParseTreeRule(String ruleName) { this(ruleName,INVALID_ALT); } public ParseTreeRule(String ruleName, int altNumber) { this.ruleName = ruleName; this.altNumber = altNumber; } public String getRuleName() { return ruleName; } /** Do a step-first walk, building up a buffer of tokens until * you've reached a particular step and print out any rule subroots * insteads of descending. 
*/ protected int getLeftmostDerivation(StringBuffer buf, int step) { int numReplacements = 0; if ( step<=0 ) { buf.append(' '); buf.append(toString()); return numReplacements; } AST child = getFirstChild(); numReplacements = 1; // walk child printing them out, descending into at most one while ( child!=null ) { if ( numReplacements>=step || child instanceof ParseTreeToken ) { buf.append(' '); buf.append(child.toString()); } else { // descend for at least one more derivation; update count int remainingReplacements = step-numReplacements; int n = ((ParseTree)child).getLeftmostDerivation(buf, remainingReplacements); numReplacements += n; } child = child.getNextSibling(); } return numReplacements; } public String toString() { if ( altNumber==INVALID_ALT ) { return '<'+ruleName+'>'; } else { return '<'+ruleName+"["+altNumber+"]>"; } } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/ParseTreeToken.java000066400000000000000000000011511161462365500246520ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html */ import antlr.Token; import antlr.collections.AST; public class ParseTreeToken extends ParseTree { protected Token token; public ParseTreeToken(Token token) { this.token = token; } protected int getLeftmostDerivation(StringBuffer buf, int step) { buf.append(' '); buf.append(toString()); return step; // did on replacements } public String toString() { if ( token!=null ) { return token.getText(); } return ""; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/Parser.java000066400000000000000000000311311161462365500232140ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/Parser.java#1 $ */ import antlr.collections.impl.BitSet; import 
antlr.collections.AST; import antlr.collections.impl.ASTArray; /**A generic ANTLR parser (LL(k) for k>=1) containing a bunch of * utility routines useful at any lookahead depth. We distinguish between * the LL(1) and LL(k) parsers because of efficiency. This may not be * necessary in the near future. * * Each parser object contains the state of the parse including a lookahead * cache (the form of which is determined by the subclass), whether or * not the parser is in guess mode, where tokens come from, etc... * *

* During guess mode, the current lookahead token(s) and token type(s) * cache must be saved because the token stream may not have been informed * to save the token (via mark) before the try block. * Guessing is started by: *

    *
  1. saving the lookahead cache. *
  2. marking the current position in the TokenBuffer. *
  3. increasing the guessing level. *
* * After guessing, the parser state is restored by: *
    *
  1. restoring the lookahead cache. *
  2. rewinding the TokenBuffer. *
  3. decreasing the guessing level. *
* * @see antlr.Token * @see antlr.TokenBuffer * @see antlr.LLkParser */ import java.io.IOException; import java.util.Hashtable; import antlr.debug.MessageListener; import antlr.debug.ParserListener; import antlr.debug.ParserMatchListener; import antlr.debug.ParserTokenListener; import antlr.debug.SemanticPredicateListener; import antlr.debug.SyntacticPredicateListener; import antlr.debug.TraceListener; public abstract class Parser { protected ParserSharedInputState inputState; /** Nesting level of registered handlers */ // protected int exceptionLevel = 0; /** Table of token type to token names */ protected String[] tokenNames; /** AST return value for a rule is squirreled away here */ protected AST returnAST; /** AST support code; parser delegates to this object. * This is set during parser construction by default * to either "new ASTFactory()" or a ctor that * has a token type to class map for hetero nodes. */ protected ASTFactory astFactory = null; /** Constructed if any AST types specified in tokens{..}. * Maps an Integer->Class object. */ protected Hashtable tokenTypeToASTClassMap = null; private boolean ignoreInvalidDebugCalls = false; /** Used to keep track of indentdepth for traceIn/Out */ protected int traceDepth = 0; public Parser() { this(new ParserSharedInputState()); } public Parser(ParserSharedInputState state) { inputState = state; } /** If the user specifies a tokens{} section with heterogeneous * AST node types, then ANTLR generates code to fill * this mapping. 
*/ public Hashtable getTokenTypeToASTClassMap() { return tokenTypeToASTClassMap; } public void addMessageListener(MessageListener l) { if (!ignoreInvalidDebugCalls) throw new IllegalArgumentException("addMessageListener() is only valid if parser built for debugging"); } public void addParserListener(ParserListener l) { if (!ignoreInvalidDebugCalls) throw new IllegalArgumentException("addParserListener() is only valid if parser built for debugging"); } public void addParserMatchListener(ParserMatchListener l) { if (!ignoreInvalidDebugCalls) throw new IllegalArgumentException("addParserMatchListener() is only valid if parser built for debugging"); } public void addParserTokenListener(ParserTokenListener l) { if (!ignoreInvalidDebugCalls) throw new IllegalArgumentException("addParserTokenListener() is only valid if parser built for debugging"); } public void addSemanticPredicateListener(SemanticPredicateListener l) { if (!ignoreInvalidDebugCalls) throw new IllegalArgumentException("addSemanticPredicateListener() is only valid if parser built for debugging"); } public void addSyntacticPredicateListener(SyntacticPredicateListener l) { if (!ignoreInvalidDebugCalls) throw new IllegalArgumentException("addSyntacticPredicateListener() is only valid if parser built for debugging"); } public void addTraceListener(TraceListener l) { if (!ignoreInvalidDebugCalls) throw new IllegalArgumentException("addTraceListener() is only valid if parser built for debugging"); } /**Get another token object from the token stream */ public abstract void consume() throws TokenStreamException; /** Consume tokens until one matches the given token */ public void consumeUntil(int tokenType) throws TokenStreamException { while (LA(1) != Token.EOF_TYPE && LA(1) != tokenType) { consume(); } } /** Consume tokens until one matches the given token set */ public void consumeUntil(BitSet set) throws TokenStreamException { while (LA(1) != Token.EOF_TYPE && !set.member(LA(1))) { consume(); } } protected void 
defaultDebuggingSetup(TokenStream lexer, TokenBuffer tokBuf) { // by default, do nothing -- we're not debugging } /** Get the AST return value squirreled away in the parser */ public AST getAST() { return returnAST; } public ASTFactory getASTFactory() { return astFactory; } public String getFilename() { return inputState.filename; } public ParserSharedInputState getInputState() { return inputState; } public void setInputState(ParserSharedInputState state) { inputState = state; } public String getTokenName(int num) { return tokenNames[num]; } public String[] getTokenNames() { return tokenNames; } public boolean isDebugMode() { return false; } /** Return the token type of the ith token of lookahead where i=1 * is the current token being examined by the parser (i.e., it * has not been matched yet). */ public abstract int LA(int i) throws TokenStreamException; /**Return the ith token of lookahead */ public abstract Token LT(int i) throws TokenStreamException; // Forwarded to TokenBuffer public int mark() { return inputState.input.mark(); } /**Make sure current lookahead symbol matches token type t. * Throw an exception upon mismatch, which is catch by either the * error handler or by the syntactic predicate. */ public void match(int t) throws MismatchedTokenException, TokenStreamException { if (LA(1) != t) throw new MismatchedTokenException(tokenNames, LT(1), t, false, getFilename()); else // mark token as consumed -- fetch next token deferred until LA/LT consume(); } /**Make sure current lookahead symbol matches the given set * Throw an exception upon mismatch, which is catch by either the * error handler or by the syntactic predicate. 
*/ public void match(BitSet b) throws MismatchedTokenException, TokenStreamException { if (!b.member(LA(1))) throw new MismatchedTokenException(tokenNames, LT(1), b, false, getFilename()); else // mark token as consumed -- fetch next token deferred until LA/LT consume(); } public void matchNot(int t) throws MismatchedTokenException, TokenStreamException { if (LA(1) == t) // Throws inverted-sense exception throw new MismatchedTokenException(tokenNames, LT(1), t, true, getFilename()); else // mark token as consumed -- fetch next token deferred until LA/LT consume(); } /** @deprecated as of 2.7.2. This method calls System.exit() and writes * directly to stderr, which is usually not appropriate when * a parser is embedded into a larger application. Since the method is * static, it cannot be overridden to avoid these problems. * ANTLR no longer uses this method internally or in generated code. */ public static void panic() { System.err.println("Parser: panic"); System.exit(1); } public void removeMessageListener(MessageListener l) { if (!ignoreInvalidDebugCalls) throw new RuntimeException("removeMessageListener() is only valid if parser built for debugging"); } public void removeParserListener(ParserListener l) { if (!ignoreInvalidDebugCalls) throw new RuntimeException("removeParserListener() is only valid if parser built for debugging"); } public void removeParserMatchListener(ParserMatchListener l) { if (!ignoreInvalidDebugCalls) throw new RuntimeException("removeParserMatchListener() is only valid if parser built for debugging"); } public void removeParserTokenListener(ParserTokenListener l) { if (!ignoreInvalidDebugCalls) throw new RuntimeException("removeParserTokenListener() is only valid if parser built for debugging"); } public void removeSemanticPredicateListener(SemanticPredicateListener l) { if (!ignoreInvalidDebugCalls) throw new IllegalArgumentException("removeSemanticPredicateListener() is only valid if parser built for debugging"); } public void 
removeSyntacticPredicateListener(SyntacticPredicateListener l) { if (!ignoreInvalidDebugCalls) throw new IllegalArgumentException("removeSyntacticPredicateListener() is only valid if parser built for debugging"); } public void removeTraceListener(TraceListener l) { if (!ignoreInvalidDebugCalls) throw new RuntimeException("removeTraceListener() is only valid if parser built for debugging"); } /** Parser error-reporting function can be overridden in subclass */ public void reportError(RecognitionException ex) { System.err.println(ex); } /** Parser error-reporting function can be overridden in subclass */ public void reportError(String s) { if (getFilename() == null) { System.err.println("error: " + s); } else { System.err.println(getFilename() + ": error: " + s); } } /** Parser warning-reporting function can be overridden in subclass */ public void reportWarning(String s) { if (getFilename() == null) { System.err.println("warning: " + s); } else { System.err.println(getFilename() + ": warning: " + s); } } public void recover(RecognitionException ex, BitSet tokenSet) throws TokenStreamException { consume(); consumeUntil(tokenSet); } public void rewind(int pos) { inputState.input.rewind(pos); } /** Specify an object with support code (shared by * Parser and TreeParser. Normally, the programmer * does not play with this, using setASTNodeType instead. */ public void setASTFactory(ASTFactory f) { astFactory = f; } public void setASTNodeClass(String cl) { astFactory.setASTNodeType(cl); } /** Specify the type of node to create during tree building; use setASTNodeClass now * to be consistent with Token Object Type accessor. 
* @deprecated since 2.7.1 */ public void setASTNodeType(String nodeType) { setASTNodeClass(nodeType); } public void setDebugMode(boolean debugMode) { if (!ignoreInvalidDebugCalls) throw new RuntimeException("setDebugMode() only valid if parser built for debugging"); } public void setFilename(String f) { inputState.filename = f; } public void setIgnoreInvalidDebugCalls(boolean value) { ignoreInvalidDebugCalls = value; } /** Set or change the input token buffer */ public void setTokenBuffer(TokenBuffer t) { inputState.input = t; } public void traceIndent() { for (int i = 0; i < traceDepth; i++) System.out.print(" "); } public void traceIn(String rname) throws TokenStreamException { traceDepth += 1; traceIndent(); System.out.println("> " + rname + "; LA(1)==" + LT(1).getText() + ((inputState.guessing > 0)?" [guessing]":"")); } public void traceOut(String rname) throws TokenStreamException { traceIndent(); System.out.println("< " + rname + "; LA(1)==" + LT(1).getText() + ((inputState.guessing > 0)?" 
[guessing]":"")); traceDepth -= 1; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/ParserGrammar.java000066400000000000000000000062431161462365500245310ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/ParserGrammar.java#1 $ */ import java.util.Hashtable; import java.util.Enumeration; import java.io.IOException; import antlr.collections.impl.BitSet; import antlr.collections.impl.Vector; /** Parser-specific grammar subclass */ class ParserGrammar extends Grammar { ParserGrammar(String className_, Tool tool_, String superClass) { super(className_, tool_, superClass); } /** Top-level call to generate the code for this grammar */ public void generate() throws IOException { generator.gen(this); } // Get name of class from which generated parser/lexer inherits protected String getSuperClass() { // if debugging, choose the debugging version of the parser if (debuggingOutput) return "debug.LLkDebuggingParser"; return "LLkParser"; } /**Process command line arguments. 
* -trace have all rules call traceIn/traceOut * -traceParser have parser rules call traceIn/traceOut * -debug generate debugging output for parser debugger */ public void processArguments(String[] args) { for (int i = 0; i < args.length; i++) { if (args[i].equals("-trace")) { traceRules = true; antlrTool.setArgOK(i); } else if (args[i].equals("-traceParser")) { traceRules = true; antlrTool.setArgOK(i); } else if (args[i].equals("-debug")) { debuggingOutput = true; antlrTool.setArgOK(i); } } } /** Set parser options -- performs action on the following options: */ public boolean setOption(String key, Token value) { String s = value.getText(); if (key.equals("buildAST")) { if (s.equals("true")) { buildAST = true; } else if (s.equals("false")) { buildAST = false; } else { antlrTool.error("buildAST option must be true or false", getFilename(), value.getLine(), value.getColumn()); } return true; } if (key.equals("interactive")) { if (s.equals("true")) { interactive = true; } else if (s.equals("false")) { interactive = false; } else { antlrTool.error("interactive option must be true or false", getFilename(), value.getLine(), value.getColumn()); } return true; } if (key.equals("ASTLabelType")) { super.setOption(key, value); return true; } if (key.equals("className")) { super.setOption(key, value); return true; } if (super.setOption(key, value)) { return true; } antlrTool.error("Invalid option: " + key, getFilename(), value.getLine(), value.getColumn()); return false; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/ParserSharedInputState.java000066400000000000000000000014521161462365500263670ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/ParserSharedInputState.java#1 $ */ /** This object contains the data associated with an * input stream of tokens. 
Multiple parsers * share a single ParserSharedInputState to parse * the same stream of tokens. */ public class ParserSharedInputState { /** Where to get token objects */ protected TokenBuffer input; /** Are we guessing (guessing>0)? */ public int guessing = 0; /** What file (if known) caused the problem? */ protected String filename; public void reset() { guessing = 0; filename = null; input.reset(); } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/PreservingFileWriter.java000066400000000000000000000064671161462365500261170ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id:$ * @author Ric Klaren */ import java.io.*; /** PreservingFileWriter only overwrites target if the new file is different. Mainly added in order to prevent big and unnecessary recompiles in C++ projects. I/O is buffered. */ public class PreservingFileWriter extends FileWriter { protected File target_file; /// the file we intend to write to protected File tmp_file; /// the tmp file we create at first public PreservingFileWriter(String file) throws IOException { super(file+".antlr.tmp"); // set up File thingy for target.. target_file = new File(file); String parentdirname = target_file.getParent(); if( parentdirname != null ) { File parentdir = new File(parentdirname); if (!parentdir.exists()) throw new IOException("destination directory of '"+file+"' doesn't exist"); if (!parentdir.canWrite()) throw new IOException("destination directory of '"+file+"' isn't writeable"); } if( target_file.exists() && ! target_file.canWrite() ) throw new IOException("cannot write to '"+file+"'"); // and for the temp file tmp_file = new File(file+".antlr.tmp"); // have it nuked at exit // RK: this is broken on java 1.4 and // is not compatible with java 1.1 (which is a big problem I'm told :) ) // sigh. 
Any real language would do this in a destructor ;) ;) // tmp_file.deleteOnExit(); } /** Close the file and see if the actual target is different * if so the target file is overwritten by the copy. If not we do nothing */ public void close() throws IOException { Reader source = null; Writer target = null; try { // close the tmp file so we can access it safely... super.close(); char[] buffer = new char[1024]; int cnt; // target_file != tmp_file so we have to compare and move it.. if( target_file.length() == tmp_file.length() ) { // Do expensive read'n'compare Reader tmp; char[] buf2 = new char[1024]; source = new BufferedReader(new FileReader(tmp_file)); tmp = new BufferedReader(new FileReader(target_file)); int cnt1, cnt2; boolean equal = true; while( equal ) { cnt1 = source.read(buffer,0,1024); cnt2 = tmp.read(buf2,0,1024); if( cnt1 != cnt2 ) { equal = false; break; } if( cnt1 == -1 ) // EOF break; for( int i = 0; i < cnt1; i++ ) { if( buffer[i] != buf2[i] ) { equal = false; break; } } } // clean up... source.close(); tmp.close(); source = tmp = null; if( equal ) return; } source = new BufferedReader(new FileReader(tmp_file)); target = new BufferedWriter(new FileWriter(target_file)); while(true) { cnt = source.read(buffer,0,1024); if( cnt == -1 ) break; target.write(buffer, 0, cnt ); } } finally { if( source != null ) { try { source.close(); } catch( IOException e ) { ; } } if( target != null ) { try { target.close(); } catch( IOException e ) { ; } } // RK: Now if I'm correct this should be called anytime. 
if( tmp_file != null && tmp_file.exists() ) { tmp_file.delete(); tmp_file = null; } } } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/PythonBlockFinishingInfo.java000066400000000000000000000017201161462365500266700ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/RIGHTS.html * * $Id$ */ class PythonBlockFinishingInfo { String postscript; // what to generate to terminate block boolean generatedSwitch;// did block finish with "default:" of switch? boolean generatedAnIf; /** When generating an if or switch, end-of-token lookahead sets * will become the else or default clause, don't generate an * error clause in this case. */ boolean needAnErrorClause; public PythonBlockFinishingInfo() { postscript = null; generatedSwitch = generatedSwitch = false; needAnErrorClause = true; } public PythonBlockFinishingInfo(String ps, boolean genS, boolean generatedAnIf, boolean n) { postscript = ps; generatedSwitch = genS; this.generatedAnIf = generatedAnIf; needAnErrorClause = n; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/PythonCharFormatter.java000066400000000000000000000034621161462365500257310ustar00rootroot00000000000000// This file is part of PyANTLR. See LICENSE.txt for license // details..........Copyright (C) Wolfgang Haefelinger, 2004. // // $Id$ package antlr; public class PythonCharFormatter implements antlr.CharFormatter { public String escapeChar(int c, boolean forCharLiteral) { //System.out.println("escapeChar("+c+","+forCharLiteral+") called"); String s = _escapeChar(c,forCharLiteral); //System.out.println("=>[" + s + "]"); return s; } public String _escapeChar(int c, boolean forCharLiteral) { switch (c) { // case GrammarAnalyzer.EPSILON_TYPE : return ""; case '\n': return "\\n"; case '\t': return "\\t"; case '\r': return "\\r"; case '\\': return "\\\\"; case '\'': return forCharLiteral ? 
"\\'" : "'"; case '"': return forCharLiteral ? "\"" : "\\\""; default : if (c < ' ' || c > 126) { if ((0x0000 <= c) && (c <= 0x000F)) { return "\\u000" + Integer.toString(c, 16); } else if ((0x0010 <= c) && (c <= 0x00FF)) { return "\\u00" + Integer.toString(c, 16); } else if ((0x0100 <= c) && (c <= 0x0FFF)) { return "\\u0" + Integer.toString(c, 16); } else { return "\\u" + Integer.toString(c, 16); } } else { return String.valueOf((char)c); } } } public String escapeString(String s) { String retval = new String(); for (int i = 0; i < s.length(); i++) { retval += escapeChar(s.charAt(i), false); } return retval; } public String literalChar(int c) { return "" + escapeChar(c, true) + ""; } public String literalString(String s) { return "\"" + escapeString(s) + "\""; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/PythonCodeGenerator.java000066400000000000000000003414671161462365500257230ustar00rootroot00000000000000// This file is part of PyANTLR. See LICENSE.txt for license // details..........Copyright (C) Wolfgang Haefelinger, 2004. // // $Id:$ package antlr; import java.util.Enumeration; import java.util.Hashtable; import antlr.collections.impl.BitSet; import antlr.collections.impl.Vector; import java.io.PrintWriter; //SAS: changed for proper text file io import java.io.IOException; import java.io.FileWriter; /**Generate MyParser.java, MyLexer.java and MyParserTokenTypes.java */ public class PythonCodeGenerator extends CodeGenerator { // non-zero if inside syntactic predicate generation protected int syntacticPredLevel = 0; // Are we generating ASTs (for parsers and tree parsers) right now? protected boolean genAST = false; // Are we saving the text consumed (for lexers) right now? protected boolean saveText = false; // Grammar parameters set up to handle different grammar classes. 
// These are used to get instanceof tests out of code generation String labeledElementType; String labeledElementASTType; String labeledElementInit; String commonExtraArgs; String commonExtraParams; String commonLocalVars; String lt1Value; String exceptionThrown; String throwNoViable; public static final String initHeaderAction = "__init__"; public static final String mainHeaderAction = "__main__"; String lexerClassName; String parserClassName; String treeWalkerClassName; /** Tracks the rule being generated. Used for mapTreeId */ RuleBlock currentRule; /** Tracks the rule or labeled subrule being generated. Used for AST generation. */ String currentASTResult; /** Mapping between the ids used in the current alt, and the * names of variables used to represent their AST values. */ Hashtable treeVariableMap = new Hashtable(); /** Used to keep track of which AST variables have been defined in a rule * (except for the #rule_name and #rule_name_in var's */ Hashtable declaredASTVariables = new Hashtable(); /* Count of unnamed generated variables */ int astVarNumber = 1; /** Special value used to mark duplicate in treeVariableMap */ protected static final String NONUNIQUE = new String(); public static final int caseSizeThreshold = 127; // ascii is max private Vector semPreds; /** Create a Java code-generator using the given Grammar. * The caller must still call setTool, setBehavior, and setAnalyzer * before generating code. */ protected void printTabs() { for (int i = 0; i < tabs; i++) { // don't print tabs ever - replace a tab by ' ' currentOutput.print(" "); } } public PythonCodeGenerator() { super(); charFormatter = new antlr.PythonCharFormatter(); DEBUG_CODE_GENERATOR = true; } /** Adds a semantic predicate string to the sem pred vector These strings will be used to build an array of sem pred names when building a debugging parser. 
This method should only be called when the debug option is specified */ protected int addSemPred(String predicate) { semPreds.appendElement(predicate); return semPreds.size() - 1; } public void exitIfError() { if (antlrTool.hasError()) { antlrTool.fatalError("Exiting due to errors."); } } protected void checkCurrentOutputStream() { try { if(currentOutput == null) throw new NullPointerException(); } catch(Exception e) { System.err.println("error: current output is not set"); e.printStackTrace(System.err); System.exit(1); } } /** Get the identifier portion of an argument-action. * For Python the ID of an action is assumed to be everything before * the assignment, as Python does not support a type. * @param s The action text * @param line Line used for error reporting. * @param column Line used for error reporting. * @return A string containing the text of the identifier */ protected String extractIdOfAction(String s, int line, int column) { s = removeAssignmentFromDeclaration(s); //wh: removeAssignmentFromDeclaration returns an indentifier that //wh: may start with whitespace. s = s.trim(); // println("###ZZZZZ \""+s+"\""); return s; } /** Get the type portion of an argument-action. * Python does not have a type declaration before an identifier, so we * just return the empty string. * @param s The action text * @param line Line used for error reporting. 
* @return A string containing the text of the type */ protected String extractTypeOfAction(String s, int line, int column) { return ""; } protected void flushTokens() { try { boolean generated = false; checkCurrentOutputStream(); println(""); println("### import antlr.Token "); println("from antlr import Token"); println("### >>>The Known Token Types <<<"); /* save current stream */ PrintWriter cout = currentOutput; // Loop over all token managers (some of which are lexers) Enumeration tmIter = behavior.tokenManagers.elements(); while (tmIter.hasMoreElements()) { TokenManager tm = (TokenManager)tmIter.nextElement(); if (!tm.isReadOnly()) { // Write the token manager tokens as Java // this must appear before genTokenInterchange so that // labels are set on string literals if(! generated) { genTokenTypes(tm); generated = true; } /* restore stream */ currentOutput = cout; // Write the token manager tokens as plain text genTokenInterchange(tm); currentOutput = cout; } exitIfError(); } } catch(Exception e) { exitIfError(); } checkCurrentOutputStream(); println(""); } /**Generate the parser, lexer, treeparser, and token types in Java */ public void gen() { // Do the code generation try { // Loop over all grammars Enumeration grammarIter = behavior.grammars.elements(); while (grammarIter.hasMoreElements()) { Grammar g = (Grammar)grammarIter.nextElement(); // Connect all the components to each other g.setGrammarAnalyzer(analyzer); g.setCodeGenerator(this); analyzer.setGrammar(g); // To get right overloading behavior across hetrogeneous grammars setupGrammarParameters(g); g.generate(); // print out the grammar with lookahead sets (and FOLLOWs) // System.out.print(g.toString()); exitIfError(); } } catch (IOException e) { antlrTool.reportException(e, null); } } /** Generate code for the given grammar element. 
* @param blk The {...} action to generate */ public void gen(ActionElement action) { if (action.isSemPred) { genSemPred(action.actionText, action.line); } else { if (grammar.hasSyntacticPredicate) { println("if not self.inputState.guessing:"); tabs++; } // get the name of the followSet for the current rule so that we // can replace $FOLLOW in the .g file. ActionTransInfo tInfo = new ActionTransInfo(); String actionStr = processActionForSpecialSymbols(action.actionText, action.getLine(), currentRule, tInfo); if (tInfo.refRuleRoot != null) { // Somebody referenced "#rule", make sure translated var is valid // assignment to #rule is left as a ref also, meaning that assignments // with no other refs like "#rule = foo();" still forces this code to be // generated (unnecessarily). println(tInfo.refRuleRoot + " = currentAST.root"); } // dump the translated action printAction(actionStr); if (tInfo.assignToRoot) { // Somebody did a "#rule=", reset internal currentAST.root println("currentAST.root = " + tInfo.refRuleRoot + ""); println("if (" + tInfo.refRuleRoot + " != None) and (" + tInfo.refRuleRoot + ".getFirstChild() != None):"); tabs++; println("currentAST.child = " + tInfo.refRuleRoot + ".getFirstChild()"); tabs--; println("else:"); tabs++; println("currentAST.child = " + tInfo.refRuleRoot); tabs--; println("currentAST.advanceChildToEnd()"); } if (grammar.hasSyntacticPredicate) { tabs--; } } } /** Generate code for the given grammar element. * @param blk The "x|y|z|..." 
block to generate */ public void gen(AlternativeBlock blk) { if (DEBUG_CODE_GENERATOR) System.out.println("gen(" + blk + ")"); genBlockPreamble(blk); genBlockInitAction(blk); // Tell AST generation to build subrule result String saveCurrentASTResult = currentASTResult; if (blk.getLabel() != null) { currentASTResult = blk.getLabel(); } boolean ok = grammar.theLLkAnalyzer.deterministic(blk); { int _tabs_ = tabs; PythonBlockFinishingInfo howToFinish = genCommonBlock(blk, true); genBlockFinish(howToFinish, throwNoViable); tabs = _tabs_; } // Restore previous AST generation currentASTResult = saveCurrentASTResult; } /** Generate code for the given grammar element. * @param blk The block-end element to generate. Block-end * elements are synthesized by the grammar parser to represent * the end of a block. */ public void gen(BlockEndElement end) { if (DEBUG_CODE_GENERATOR) System.out.println("genRuleEnd(" + end + ")"); } /** Generate code for the given grammar element. * @param blk The character literal reference to generate */ public void gen(CharLiteralElement atom) { if (DEBUG_CODE_GENERATOR) System.out.println("genChar(" + atom + ")"); if (atom.getLabel() != null) { println(atom.getLabel() + " = " + lt1Value ); } boolean oldsaveText = saveText; saveText = saveText && atom.getAutoGenType() == GrammarElement.AUTO_GEN_NONE; genMatch(atom); saveText = oldsaveText; } String toString(boolean v) { String s; if(v) s = "True"; else s = "False"; return s; } /** Generate code for the given grammar element. 
* @param blk The character-range reference to generate */ public void gen(CharRangeElement r) { if (r.getLabel() != null && syntacticPredLevel == 0) { println(r.getLabel() + " = " + lt1Value); } boolean flag = ( grammar instanceof LexerGrammar && ( !saveText || r.getAutoGenType() == GrammarElement.AUTO_GEN_BANG ) ); if (flag) { println("_saveIndex = self.text.length()"); } println("self.matchRange(u" + r.beginText + ", u" + r.endText + ")"); if (flag) { println("self.text.setLength(_saveIndex)"); } } /** Generate the lexer Java file */ public void gen(LexerGrammar g) throws IOException { // If debugging, create a new sempred vector for this grammar if (g.debuggingOutput) semPreds = new Vector(); setGrammar(g); if (!(grammar instanceof LexerGrammar)) { antlrTool.panic("Internal error generating lexer"); } // SAS: moved output creation to method so a subclass can change // how the output is generated (for VAJ interface) setupOutput(grammar.getClassName()); genAST = false; // no way to gen trees. saveText = true; // save consumed characters. tabs = 0; // Generate header common to all Python output files genHeader(); // Generate header specific to lexer Python file println("### import antlr and other modules .."); println("import sys"); println("import antlr"); println(""); println("version = sys.version.split()[0]"); println("if version < '2.2.1':"); tabs++; println("False = 0"); tabs--; println("if version < '2.3':"); tabs++; println("True = not False"); tabs--; println("### header action >>> "); printActionCode(behavior.getHeaderAction(""),0); println("### header action <<< "); // Generate user-defined lexer file preamble println("### preamble action >>> "); printActionCode(grammar.preambleAction.getText(),0); println("### preamble action <<< "); // Generate lexer class definition String sup = null; if (grammar.superClass != null) { sup = grammar.superClass; } else { sup = "antlr." 
+ grammar.getSuperClass(); } // get prefix (replaces "public" and lets user specify) String prefix = ""; Token tprefix = (Token)grammar.options.get("classHeaderPrefix"); if (tprefix != null) { String p = StringUtils.stripFrontBack(tprefix.getText(), "\"", "\""); if (p != null) { prefix = p; } } // print my literals println("### >>>The Literals<<<"); println("literals = {}"); Enumeration keys = grammar.tokenManager.getTokenSymbolKeys(); while (keys.hasMoreElements()) { String key = (String)keys.nextElement(); if (key.charAt(0) != '"') { continue; } TokenSymbol sym = grammar.tokenManager.getTokenSymbol(key); if (sym instanceof StringLiteralSymbol) { StringLiteralSymbol s = (StringLiteralSymbol)sym; println("literals[u" + s.getId() + "] = " + s.getTokenType()); } } println(""); flushTokens(); // print javadoc comment if any genJavadocComment(grammar); // class name remains the same, it's the module that changes in python. println("class " + lexerClassName + "(" + sup + ") :"); tabs++; printGrammarAction(grammar); // Generate the constructor from InputStream, which in turn // calls the ByteBuffer constructor // println("def __init__(self, *argv, **kwargs) :"); tabs++; println(sup + ".__init__(self, *argv, **kwargs)"); // Generate the setting of various generated options. // These need to be before the literals since ANTLRHashString depends on // the casesensitive stuff. 
println("self.caseSensitiveLiterals = " + toString(g.caseSensitiveLiterals)); println("self.setCaseSensitive(" + toString(g.caseSensitive) + ")" ); println("self.literals = literals"); Enumeration ids; // generate the rule name array for debugging if (grammar.debuggingOutput) { println("ruleNames[] = ["); ids = grammar.rules.elements(); int ruleNum = 0; tabs++; while (ids.hasMoreElements()) { GrammarSymbol sym = (GrammarSymbol)ids.nextElement(); if (sym instanceof RuleSymbol) println("\"" + ((RuleSymbol)sym).getId() + "\","); } tabs--; println("]"); } genHeaderInit(grammar); tabs--; // wh: iterator moved to base class as proposed by mk. // println(""); // Generate the __iter__ method for Python CharScanner (sub)classes. // genIterator(); // Generate nextToken() rule. // nextToken() is a synthetic lexer rule that is the implicit OR of all // user-defined lexer rules. genNextToken(); println(""); // Generate code for each rule in the lexer ids = grammar.rules.elements(); int ruleNum = 0; while (ids.hasMoreElements()) { RuleSymbol sym = (RuleSymbol)ids.nextElement(); // Don't generate the synthetic rules if (!sym.getId().equals("mnextToken")) { genRule(sym, false, ruleNum++); } exitIfError(); } // Generate the semantic predicate map for debugging if (grammar.debuggingOutput) genSemPredMap(); // Generate the bitsets used throughout the lexer genBitsets(bitsetsUsed, ((LexerGrammar)grammar).charVocabulary.size()); println(""); genHeaderMain(grammar); // Close the lexer output stream currentOutput.close(); currentOutput = null; } protected void genHeaderMain(Grammar grammar) { String h = grammar.getClassName() + "." 
+ mainHeaderAction; String s = behavior.getHeaderAction(h); if (isEmpty(s)) { s = behavior.getHeaderAction(mainHeaderAction); } if(isEmpty(s)) { if(grammar instanceof LexerGrammar) { int _tabs = tabs; tabs = 0; println("### __main__ header action >>> "); genLexerTest(); tabs = 0; println("### __main__ header action <<< "); tabs = _tabs; } } else { int _tabs = tabs; tabs = 0; println(""); println("### __main__ header action >>> "); printMainFunc(s); tabs = 0; println("### __main__ header action <<< "); tabs = _tabs; } } protected void genHeaderInit(Grammar grammar) { String h = grammar.getClassName() + "." + initHeaderAction; String s = behavior.getHeaderAction(h); if (isEmpty(s)) { s = behavior.getHeaderAction(initHeaderAction); } if(isEmpty(s)) { /* nothing gets generated by default */ } else { int _tabs = tabs; println("### __init__ header action >>> "); printActionCode(s,0); tabs = _tabs; println("### __init__ header action <<< "); } } protected void printMainFunc(String s) { int _tabs = tabs; tabs = 0; println("if __name__ == '__main__':"); tabs++; printActionCode(s,0); tabs--; tabs = _tabs; } /** Generate code for the given grammar element. * @param blk The (...)+ block to generate */ public void gen(OneOrMoreBlock blk) { String label; String cnt; /* save current tabs */ int _tabs_ = tabs; genBlockPreamble(blk); if (blk.getLabel() != null) { cnt = "_cnt_" + blk.getLabel(); } else { cnt = "_cnt" + blk.ID; } println("" + cnt + "= 0"); println("while True:"); tabs++; _tabs_ = tabs; // generate the init action for ()+ ()* inside the loop // this allows us to do usefull EOF checking... 
genBlockInitAction(blk); // Tell AST generation to build subrule result String saveCurrentASTResult = currentASTResult; if (blk.getLabel() != null) { currentASTResult = blk.getLabel(); } boolean ok = grammar.theLLkAnalyzer.deterministic(blk); // generate exit test if greedy set to false // and an alt is ambiguous with exit branch // or when lookahead derived purely from end-of-file // Lookahead analysis stops when end-of-file is hit, // returning set {epsilon}. Since {epsilon} is not // ambig with any real tokens, no error is reported // by deterministic() routines and we have to check // for the case where the lookahead depth didn't get // set to NONDETERMINISTIC (this only happens when the // FOLLOW contains real atoms + epsilon). boolean generateNonGreedyExitPath = false; int nonGreedyExitDepth = grammar.maxk; if (!blk.greedy && blk.exitLookaheadDepth <= grammar.maxk && blk.exitCache[blk.exitLookaheadDepth].containsEpsilon()) { generateNonGreedyExitPath = true; nonGreedyExitDepth = blk.exitLookaheadDepth; } else { if (!blk.greedy && blk.exitLookaheadDepth == LLkGrammarAnalyzer.NONDETERMINISTIC) { generateNonGreedyExitPath = true; } } // generate exit test if greedy set to false // and an alt is ambiguous with exit branch if (generateNonGreedyExitPath) { println("### nongreedy (...)+ loop; exit depth is " + blk.exitLookaheadDepth); String predictExit = getLookaheadTestExpression( blk.exitCache, nonGreedyExitDepth); println("### nongreedy exit test"); println("if " + cnt + " >= 1 and " + predictExit + ":"); tabs++; println("break"); tabs--; } { int _tabs = tabs; PythonBlockFinishingInfo howToFinish = genCommonBlock(blk, false); genBlockFinish(howToFinish, "break"); tabs = _tabs; } /* no matter what previous block did, here we have to continue ** one the 'while block' level. Reseting tabs .. 
*/ tabs = _tabs_; println(cnt + " += 1"); tabs = _tabs_; tabs--; println("if " + cnt + " < 1:"); tabs++; println(throwNoViable); tabs--; // Restore previous AST generation currentASTResult = saveCurrentASTResult; } /** Generate the parser Java file */ public void gen(ParserGrammar g) throws IOException { // if debugging, set up a new vector to keep track of sempred // strings for this grammar if (g.debuggingOutput) semPreds = new Vector(); setGrammar(g); if (!(grammar instanceof ParserGrammar)) { antlrTool.panic("Internal error generating parser"); } // Open the output stream for the parser and set the currentOutput // SAS: moved file setup so subclass could do it (for VAJ interface) setupOutput(grammar.getClassName()); genAST = grammar.buildAST; tabs = 0; // Generate the header common to all output files. genHeader(); // Generate header specific to lexer Java file println("### import antlr and other modules .."); println("import sys"); println("import antlr"); println(""); println("version = sys.version.split()[0]"); println("if version < '2.2.1':"); tabs++; println("False = 0"); tabs--; println("if version < '2.3':"); tabs++; println("True = not False"); tabs--; println("### header action >>> "); printActionCode(behavior.getHeaderAction(""),0); println("### header action <<< "); println("### preamble action>>>"); // Output the user-defined parser preamble printActionCode(grammar.preambleAction.getText(),0); println("### preamble action <<<"); flushTokens(); // Generate parser class definition String sup = null; if (grammar.superClass != null) sup = grammar.superClass; else sup = "antlr." 
+ grammar.getSuperClass(); // print javadoc comment if any genJavadocComment(grammar); // get prefix (replaces "public" and lets user specify) String prefix = ""; Token tprefix = (Token)grammar.options.get("classHeaderPrefix"); if (tprefix != null) { String p = StringUtils.stripFrontBack(tprefix.getText(), "\"", "\""); if (p != null) { prefix = p; } } print("class " + parserClassName + "(" + sup); println("):"); tabs++; // set up an array of all the rule names so the debugger can // keep track of them only by number -- less to store in tree... if (grammar.debuggingOutput) { println("_ruleNames = ["); Enumeration ids = grammar.rules.elements(); int ruleNum = 0; tabs++; while (ids.hasMoreElements()) { GrammarSymbol sym = (GrammarSymbol)ids.nextElement(); if (sym instanceof RuleSymbol) println("\"" + ((RuleSymbol)sym).getId() + "\","); } tabs--; println("]"); } // Generate user-defined parser class members printGrammarAction(grammar); // Generate parser class constructor from TokenBuffer println(""); println("def __init__(self, *args, **kwargs):"); tabs++; println(sup + ".__init__(self, *args, **kwargs)"); println("self.tokenNames = _tokenNames"); // if debugging, set up arrays and call the user-overridable // debugging setup method if (grammar.debuggingOutput) { println("self.ruleNames = _ruleNames"); println("self.semPredNames = _semPredNames"); println("self.setupDebugging(self.tokenBuf)"); } if (grammar.buildAST) { println("self.buildTokenTypeASTClassMap()"); println("self.astFactory = antlr.ASTFactory(self.getTokenTypeToASTClassMap())"); if(labeledElementASTType != null) { println("self.astFactory.setASTNodeClass("+ labeledElementASTType+")"); } } genHeaderInit(grammar); println(""); // Generate code for each rule in the grammar Enumeration ids = grammar.rules.elements(); int ruleNum = 0; while (ids.hasMoreElements()) { GrammarSymbol sym = (GrammarSymbol)ids.nextElement(); if (sym instanceof RuleSymbol) { RuleSymbol rs = (RuleSymbol)sym; genRule(rs, 
rs.references.size() == 0, ruleNum++); } exitIfError(); } if ( grammar.buildAST ) { genTokenASTNodeMap(); } // Generate the token names genTokenStrings(); // Generate the bitsets used throughout the grammar genBitsets(bitsetsUsed, grammar.tokenManager.maxTokenType()); // Generate the semantic predicate map for debugging if (grammar.debuggingOutput) genSemPredMap(); // Close class definition println(""); tabs = 0; genHeaderMain(grammar); // Close the parser output stream currentOutput.close(); currentOutput = null; } /** Generate code for the given grammar element. * @param blk The rule-reference to generate */ public void gen(RuleRefElement rr) { if (DEBUG_CODE_GENERATOR) System.out.println("genRR(" + rr + ")"); RuleSymbol rs = (RuleSymbol)grammar.getSymbol(rr.targetRule); if (rs == null || !rs.isDefined()) { // Is this redundant??? antlrTool.error("Rule '" + rr.targetRule + "' is not defined", grammar.getFilename(), rr.getLine(), rr.getColumn()); return; } if (!(rs instanceof RuleSymbol)) { // Is this redundant??? antlrTool.error("'" + rr.targetRule + "' does not name a grammar rule", grammar.getFilename(), rr.getLine(), rr.getColumn()); return; } genErrorTryForElement(rr); // AST value for labeled rule refs in tree walker. // This is not AST construction; it is just the input tree node value. if (grammar instanceof TreeWalkerGrammar && rr.getLabel() != null && syntacticPredLevel == 0) { println(rr.getLabel() + " = antlr.ifelse(_t == antlr.ASTNULL, None, " + lt1Value + ")"); } // if in lexer and ! 
on rule ref or alt or rule, save buffer index to kill later if (grammar instanceof LexerGrammar && (!saveText || rr.getAutoGenType() == GrammarElement.AUTO_GEN_BANG)) { println("_saveIndex = self.text.length()"); } // Process return value assignment if any printTabs(); if (rr.idAssign != null) { // Warn if the rule has no return type if (rs.block.returnAction == null) { antlrTool.warning("Rule '" + rr.targetRule + "' has no return type", grammar.getFilename(), rr.getLine(), rr.getColumn()); } _print(rr.idAssign + "="); } else { // Warn about return value if any, but not inside syntactic predicate if (!(grammar instanceof LexerGrammar) && syntacticPredLevel == 0 && rs.block.returnAction != null) { antlrTool.warning("Rule '" + rr.targetRule + "' returns a value", grammar.getFilename(), rr.getLine(), rr.getColumn()); } } // Call the rule GenRuleInvocation(rr); // if in lexer and ! on element or alt or rule, save buffer index to kill later if (grammar instanceof LexerGrammar && (!saveText || rr.getAutoGenType() == GrammarElement.AUTO_GEN_BANG)) { println("self.text.setLength(_saveIndex)"); } // if not in a syntactic predicate if (syntacticPredLevel == 0) { boolean doNoGuessTest = ( grammar.hasSyntacticPredicate && ( grammar.buildAST && rr.getLabel() != null || (genAST && rr.getAutoGenType() == GrammarElement.AUTO_GEN_NONE) ) ); if (doNoGuessTest) { // println("if (inputState.guessing==0) {"); // tabs++; } if (grammar.buildAST && rr.getLabel() != null) { // always gen variable for rule return on labeled rules println(rr.getLabel() + "_AST = self.returnAST"); } if (genAST) { switch (rr.getAutoGenType()) { case GrammarElement.AUTO_GEN_NONE: println("self.addASTChild(currentAST, self.returnAST)"); break; case GrammarElement.AUTO_GEN_CARET: antlrTool.error("Internal: encountered ^ after rule reference"); break; default: break; } } // if a lexer and labeled, Token label defined at rule level, just set it here if (grammar instanceof LexerGrammar && rr.getLabel() != null) { 
println(rr.getLabel() + " = self._returnToken"); } if (doNoGuessTest) { } } genErrorCatchForElement(rr); } /** Generate code for the given grammar element. * @param blk The string-literal reference to generate */ public void gen(StringLiteralElement atom) { if (DEBUG_CODE_GENERATOR) System.out.println("genString(" + atom + ")"); // Variable declarations for labeled elements if (atom.getLabel() != null && syntacticPredLevel == 0) { println(atom.getLabel() + " = " + lt1Value + ""); } // AST genElementAST(atom); // is there a bang on the literal? boolean oldsaveText = saveText; saveText = saveText && atom.getAutoGenType() == GrammarElement.AUTO_GEN_NONE; // matching genMatch(atom); saveText = oldsaveText; // tack on tree cursor motion if doing a tree walker if (grammar instanceof TreeWalkerGrammar) { println("_t = _t.getNextSibling()"); } } /** Generate code for the given grammar element. * @param blk The token-range reference to generate */ public void gen(TokenRangeElement r) { genErrorTryForElement(r); if (r.getLabel() != null && syntacticPredLevel == 0) { println(r.getLabel() + " = " + lt1Value); } // AST genElementAST(r); // match println("self.matchRange(u" + r.beginText + ", u" + r.endText + ")"); genErrorCatchForElement(r); } /** Generate code for the given grammar element. 
* @param blk The token-reference to generate */ public void gen(TokenRefElement atom) { if (DEBUG_CODE_GENERATOR) System.out.println("genTokenRef(" + atom + ")"); if (grammar instanceof LexerGrammar) { antlrTool.panic("Token reference found in lexer"); } genErrorTryForElement(atom); // Assign Token value to token label variable if (atom.getLabel() != null && syntacticPredLevel == 0) { println(atom.getLabel() + " = " + lt1Value + ""); } // AST genElementAST(atom); // matching genMatch(atom); genErrorCatchForElement(atom); // tack on tree cursor motion if doing a tree walker if (grammar instanceof TreeWalkerGrammar) { println("_t = _t.getNextSibling()"); } } public void gen(TreeElement t) { // save AST cursor println("_t" + t.ID + " = _t"); // If there is a label on the root, then assign that to the variable if (t.root.getLabel() != null) { println(t.root.getLabel() + " = antlr.ifelse(_t == antlr.ASTNULL, None, _t)"); } // check for invalid modifiers ! and ^ on tree element roots if ( t.root.getAutoGenType() == GrammarElement.AUTO_GEN_BANG ) { antlrTool.error("Suffixing a root node with '!' 
is not implemented", grammar.getFilename(), t.getLine(), t.getColumn()); t.root.setAutoGenType(GrammarElement.AUTO_GEN_NONE); } if ( t.root.getAutoGenType() == GrammarElement.AUTO_GEN_CARET ) { antlrTool.warning("Suffixing a root node with '^' is redundant; already a root", grammar.getFilename(), t.getLine(), t.getColumn()); t.root.setAutoGenType(GrammarElement.AUTO_GEN_NONE); } // Generate AST variables genElementAST(t.root); if (grammar.buildAST) { // Save the AST construction state println("_currentAST" + t.ID + " = currentAST.copy()"); // Make the next item added a child of the TreeElement root println("currentAST.root = currentAST.child"); println("currentAST.child = None"); } // match root if ( t.root instanceof WildcardElement ) { println("if not _t: raise antlr.MismatchedTokenException()"); } else { genMatch(t.root); } // move to list of children println("_t = _t.getFirstChild()"); // walk list of children, generating code for each for (int i = 0; i < t.getAlternatives().size(); i++) { Alternative a = t.getAlternativeAt(i); AlternativeElement e = a.head; while (e != null) { e.generate(); e = e.next; } } if (grammar.buildAST) { // restore the AST construction state to that just after the // tree root was added println("currentAST = _currentAST" + t.ID + ""); } // restore AST cursor println("_t = _t" + t.ID + ""); // move cursor to sibling of tree just parsed println("_t = _t.getNextSibling()"); } /** Generate the tree-parser Java file */ public void gen(TreeWalkerGrammar g) throws IOException { // SAS: debugging stuff removed for now... setGrammar(g); if (!(grammar instanceof TreeWalkerGrammar)) { antlrTool.panic("Internal error generating tree-walker"); } // Open the output stream for the parser and set the currentOutput // SAS: move file open to method so subclass can override it // (mainly for VAJ interface) setupOutput(grammar.getClassName()); genAST = grammar.buildAST; tabs = 0; // Generate the header common to all output files. 
genHeader(); // Generate header specific to lexer Java file println("### import antlr and other modules .."); println("import sys"); println("import antlr"); println(""); println("version = sys.version.split()[0]"); println("if version < '2.2.1':"); tabs++; println("False = 0"); tabs--; println("if version < '2.3':"); tabs++; println("True = not False"); tabs--; println("### header action >>> "); printActionCode(behavior.getHeaderAction(""),0); println("### header action <<< "); flushTokens(); println("### user code>>>"); // Output the user-defined parser preamble printActionCode(grammar.preambleAction.getText(),0); println("### user code<<<"); // Generate parser class definition String sup = null; if (grammar.superClass != null) { sup = grammar.superClass; } else { sup = "antlr." + grammar.getSuperClass(); } println(""); // get prefix (replaces "public" and lets user specify) String prefix = ""; Token tprefix = (Token)grammar.options.get("classHeaderPrefix"); if (tprefix != null) { String p = StringUtils.stripFrontBack(tprefix.getText(), "\"", "\""); if (p != null) { prefix = p; } } // print javadoc comment if any genJavadocComment(grammar); println("class " + treeWalkerClassName + "(" + sup + "):"); tabs++; // Generate default parser class constructor println(""); println("# ctor .."); println("def __init__(self, *args, **kwargs):"); tabs++; println(sup + ".__init__(self, *args, **kwargs)"); println("self.tokenNames = _tokenNames"); genHeaderInit(grammar); tabs--; println(""); // print grammar specific action printGrammarAction(grammar); // Generate code for each rule in the grammar Enumeration ids = grammar.rules.elements(); int ruleNum = 0; String ruleNameInits = ""; while (ids.hasMoreElements()) { GrammarSymbol sym = (GrammarSymbol)ids.nextElement(); if (sym instanceof RuleSymbol) { RuleSymbol rs = (RuleSymbol)sym; genRule(rs, rs.references.size() == 0, ruleNum++); } exitIfError(); } // Generate the token names genTokenStrings(); // Generate the bitsets used 
throughout the grammar genBitsets(bitsetsUsed, grammar.tokenManager.maxTokenType()); tabs = 0; genHeaderMain(grammar); // Close the parser output stream currentOutput.close(); currentOutput = null; } /** Generate code for the given grammar element. * @param wc The wildcard element to generate */ public void gen(WildcardElement wc) { // Variable assignment for labeled elements if (wc.getLabel() != null && syntacticPredLevel == 0) { println(wc.getLabel() + " = " + lt1Value + ""); } // AST genElementAST(wc); // Match anything but EOF if (grammar instanceof TreeWalkerGrammar) { println("if not _t:"); tabs++; println("raise MismatchedTokenException()"); tabs--; } else if (grammar instanceof LexerGrammar) { if (grammar instanceof LexerGrammar && (!saveText || wc.getAutoGenType() == GrammarElement.AUTO_GEN_BANG)) { println("_saveIndex = self.text.length()"); } println("self.matchNot(antlr.EOF_CHAR)"); if (grammar instanceof LexerGrammar && (!saveText || wc.getAutoGenType() == GrammarElement.AUTO_GEN_BANG)) { println("self.text.setLength(_saveIndex)"); // kill text atom put in buffer } } else { println("self.matchNot(" + getValueString(Token.EOF_TYPE,false) + ")"); } // tack on tree cursor motion if doing a tree walker if (grammar instanceof TreeWalkerGrammar) { println("_t = _t.getNextSibling()"); } } /** Generate code for the given grammar element. * @param blk The (...)* block to generate */ public void gen(ZeroOrMoreBlock blk) { int _tabs_ = tabs; genBlockPreamble(blk); String label; println("while True:"); tabs++; _tabs_ = tabs; // generate the init action for ()* inside the loop // this allows us to do usefull EOF checking... 
genBlockInitAction(blk); // Tell AST generation to build subrule result String saveCurrentASTResult = currentASTResult; if (blk.getLabel() != null) { currentASTResult = blk.getLabel(); } boolean ok = grammar.theLLkAnalyzer.deterministic(blk); // generate exit test if greedy set to false // and an alt is ambiguous with exit branch // or when lookahead derived purely from end-of-file // Lookahead analysis stops when end-of-file is hit, // returning set {epsilon}. Since {epsilon} is not // ambig with any real tokens, no error is reported // by deterministic() routines and we have to check // for the case where the lookahead depth didn't get // set to NONDETERMINISTIC (this only happens when the // FOLLOW contains real atoms + epsilon). boolean generateNonGreedyExitPath = false; int nonGreedyExitDepth = grammar.maxk; if (!blk.greedy && blk.exitLookaheadDepth <= grammar.maxk && blk.exitCache[blk.exitLookaheadDepth].containsEpsilon()) { generateNonGreedyExitPath = true; nonGreedyExitDepth = blk.exitLookaheadDepth; } else if (!blk.greedy && blk.exitLookaheadDepth == LLkGrammarAnalyzer.NONDETERMINISTIC) { generateNonGreedyExitPath = true; } if (generateNonGreedyExitPath) { if (DEBUG_CODE_GENERATOR) { System.out.println("nongreedy (...)* loop; exit depth is " + blk.exitLookaheadDepth); } String predictExit = getLookaheadTestExpression(blk.exitCache, nonGreedyExitDepth); println("### nongreedy exit test"); println("if (" + predictExit + "):"); tabs++; println("break"); tabs--; } { int _tabs = tabs; PythonBlockFinishingInfo howToFinish = genCommonBlock(blk, false); genBlockFinish(howToFinish, "break"); tabs = _tabs; } tabs = _tabs_; /* no matter where we are */ tabs--; // Restore previous AST generation currentASTResult = saveCurrentASTResult; } /** Generate an alternative. 
* @param alt The alternative to generate * @param blk The block to which the alternative belongs */ protected void genAlt(Alternative alt, AlternativeBlock blk) { // Save the AST generation state, and set it to that of the alt boolean savegenAST = genAST; genAST = genAST && alt.getAutoGen(); boolean oldsaveTest = saveText; saveText = saveText && alt.getAutoGen(); // Reset the variable name map for the alternative Hashtable saveMap = treeVariableMap; treeVariableMap = new Hashtable(); // Generate try block around the alt for error handling if (alt.exceptionSpec != null) { println("try:"); tabs++; } println("pass"); // make sure that always something gets generated .. AlternativeElement elem = alt.head; while (!(elem instanceof BlockEndElement)) { elem.generate(); // alt can begin with anything. Ask target to gen. elem = elem.next; } if (genAST) { if (blk instanceof RuleBlock) { // Set the AST return value for the rule RuleBlock rblk = (RuleBlock)blk; if (grammar.hasSyntacticPredicate) { } println(rblk.getRuleName() + "_AST = currentAST.root"); if (grammar.hasSyntacticPredicate) { } } else if (blk.getLabel() != null) { antlrTool.warning( "Labeled subrules not yet supported", grammar.getFilename(), blk.getLine(), blk.getColumn()); } } if (alt.exceptionSpec != null) { tabs--; genErrorHandler(alt.exceptionSpec); } genAST = savegenAST; saveText = oldsaveTest; treeVariableMap = saveMap; } /** Generate all the bitsets to be used in the parser or lexer * Generate the raw bitset data like "long _tokenSet1_data[] = {...}" * and the BitSet object declarations like "BitSet _tokenSet1 = new BitSet(_tokenSet1_data)" * Note that most languages do not support object initialization inside a * class definition, so other code-generators may have to separate the * bitset declarations from the initializations (e.g., put the initializations * in the generated constructor instead). * @param bitsetList The list of bitsets to generate. 
* @param maxVocabulary Ensure that each generated bitset can contain at least this value. */ protected void genBitsets(Vector bitsetList, int maxVocabulary ) { println(""); for (int i = 0; i < bitsetList.size(); i++) { BitSet p = (BitSet)bitsetList.elementAt(i); // Ensure that generated BitSet is large enough for vocabulary p.growToInclude(maxVocabulary); genBitSet(p, i); } } /** Do something simple like: * private static final long[] mk_tokenSet_0() { * long[] data = { -2305839160922996736L, 63L, 16777216L, 0L, 0L, 0L }; * return data; * } * public static final BitSet _tokenSet_0 = new BitSet(mk_tokenSet_0()); * * Or, for large bitsets, optimize init so ranges are collapsed into loops. * This is most useful for lexers using unicode. */ private void genBitSet(BitSet p, int id) { int _tabs_ = tabs; // wanna have bitsets on module scope, so they are available // when module gets loaded. tabs = 0; println(""); println("### generate bit set"); println( "def mk" + getBitsetName(id) + "(): " ); tabs++; int n = p.lengthInLongWords(); if ( n= makeSwitchThreshold) { // Determine the name of the item to be compared String testExpr = lookaheadString(1); createdLL1Switch = true; // when parsing trees, convert null to valid tree node with NULL lookahead if (grammar instanceof TreeWalkerGrammar) { println("if not _t:"); tabs++; println("_t = antlr.ASTNULL"); tabs--; } println("la1 = " + testExpr); // print dummy if to get a regular genCases .. 
println("if False:"); tabs++; println("pass"); //println("assert 0 # lunatic case"); tabs--; for (int i = 0; i < blk.alternatives.size(); i++) { Alternative alt = blk.getAlternativeAt(i); // ignore any non-LL(1) alts, predicated alts, // or end-of-token alts for case expressions if (!suitableForCaseExpression(alt)) { continue; } Lookahead p = alt.cache[1]; if (p.fset.degree() == 0 && !p.containsEpsilon()) { antlrTool.warning( "Alternate omitted due to empty prediction set", grammar.getFilename(), alt.head.getLine(), alt.head.getColumn()); } else { /* make the case statment, ie. if la1 in .. : */ genCases(p.fset); tabs++; genAlt(alt,blk); tabs--; } } /* does this else belong here? */ println("else:"); tabs++; } // do non-LL(1) and nondeterministic cases This is tricky in // the lexer, because of cases like: STAR : '*' ; ASSIGN_STAR // : "*="; Since nextToken is generated without a loop, then // the STAR will have end-of-token as it's lookahead set for // LA(2). So, we must generate the alternatives containing // trailing end-of-token in their lookahead sets *after* the // alternatives without end-of-token. This implements the // usual lexer convention that longer matches come before // shorter ones, e.g. "*=" matches ASSIGN_STAR not STAR // // For non-lexer grammars, this does not sort the alternates // by depth Note that alts whose lookahead is purely // end-of-token at k=1 end up as default or else clauses. int startDepth = (grammar instanceof LexerGrammar) ? grammar.maxk : 0; for (int altDepth = startDepth; altDepth >= 0; altDepth--) { for (int i = 0; i < blk.alternatives.size(); i++) { Alternative alt = blk.getAlternativeAt(i); if (DEBUG_CODE_GENERATOR) System.out.println("genAlt: " + i); // if we made a switch above, ignore what we already took care // of. Specifically, LL(1) alts with no preds // that do not have end-of-token in their prediction set // and that are not giant unicode sets. 
if (createdLL1Switch && suitableForCaseExpression(alt)) { if (DEBUG_CODE_GENERATOR) System.out.println("ignoring alt because it was in the switch"); continue; } String e; boolean unpredicted = false; if (grammar instanceof LexerGrammar) { // Calculate the "effective depth" of the alt, // which is the max depth at which // cache[depth]!=end-of-token int effectiveDepth = alt.lookaheadDepth; if (effectiveDepth == GrammarAnalyzer.NONDETERMINISTIC) { // use maximum lookahead effectiveDepth = grammar.maxk; } while (effectiveDepth >= 1 && alt.cache[effectiveDepth].containsEpsilon()) { effectiveDepth--; } // Ignore alts whose effective depth is other than // the ones we are generating for this iteration. if (effectiveDepth != altDepth) { if (DEBUG_CODE_GENERATOR) System.out.println( "ignoring alt because effectiveDepth!=altDepth" + effectiveDepth + "!=" + altDepth); continue; } unpredicted = lookaheadIsEmpty(alt, effectiveDepth); e = getLookaheadTestExpression(alt, effectiveDepth); } else { unpredicted = lookaheadIsEmpty(alt, grammar.maxk); e = getLookaheadTestExpression(alt, grammar.maxk); } // Was it a big unicode range that forced unsuitability // for a case expression? if (alt.cache[1].fset.degree() > caseSizeThreshold && suitableForCaseExpression(alt)) { if (nIF == 0) { println(" if " + e + ":"); } else { println(" elif " + e + ":"); } } else { if (unpredicted && alt.semPred == null && alt.synPred == null) { // The alt has empty prediction set and no // predicate to help out. 
if we have not // generated a previous if, just put {...} around // the end-of-token clause if (nIF == 0) { println("## "); tabs++; // to prevent an empty boyd // println("pass"); } finishingInfo.needAnErrorClause = false; } else { // check for sem and syn preds // Add any semantic predicate expression to the // lookahead test if (alt.semPred != null) { // if debugging, wrap the evaluation of the // predicate in a method translate $ and # // references ActionTransInfo tInfo = new ActionTransInfo(); String actionStr = processActionForSpecialSymbols( alt.semPred, blk.line, currentRule, tInfo); // ignore translation info...we don't need to // do anything with it. call that will inform // SemanticPredicateListeners of the result if (((grammar instanceof ParserGrammar) || (grammar instanceof LexerGrammar)) && grammar.debuggingOutput) { e = "(" + e + " and fireSemanticPredicateEvaluated(antlr.debug.SemanticPredicateEvent.PREDICTING, " + addSemPred(charFormatter.escapeString(actionStr)) + ", " + actionStr + "))"; } else { e = "(" + e + " and (" + actionStr + "))"; } } // Generate any syntactic predicates if (nIF > 0) { if (alt.synPred != null) { println("else:"); tabs++; /* who's closing this one? */ genSynPred(alt.synPred, e); closingBracesOfIFSequence++; } else { println("elif " + e + ":"); } } else { if (alt.synPred != null) { genSynPred(alt.synPred, e); } else { // when parsing trees, convert null to // valid tree node with NULL lookahead. if (grammar instanceof TreeWalkerGrammar) { println("if not _t:"); tabs++; println("_t = antlr.ASTNULL"); tabs--; } println("if " + e + ":"); } } } } nIF++; tabs++; genAlt(alt, blk); // this should have generated something. If not // we could end up in an empty else: tabs--; } } String ps = ""; //for (int i = 1; i <= closingBracesOfIFSequence; i++) { // ps += ""; //} // Restore the AST generation state genAST = savegenAST; // restore save text state saveText = oldsaveTest; // Return the finishing info. 
if (createdLL1Switch) { finishingInfo.postscript = ps; finishingInfo.generatedSwitch = true; finishingInfo.generatedAnIf = nIF > 0; } else { finishingInfo.postscript = ps; finishingInfo.generatedSwitch = false; finishingInfo.generatedAnIf = nIF > 0; } return finishingInfo; } private static boolean suitableForCaseExpression(Alternative a) { return a.lookaheadDepth == 1 && a.semPred == null && !a.cache[1].containsEpsilon() && a.cache[1].fset.degree() <= caseSizeThreshold; } /** Generate code to link an element reference into the AST */ private void genElementAST(AlternativeElement el) { // handle case where you're not building trees, but are in tree walker. // Just need to get labels set up. if (grammar instanceof TreeWalkerGrammar && !grammar.buildAST) { String elementRef; String astName; // Generate names and declarations of the AST variable(s) if (el.getLabel() == null) { elementRef = lt1Value; // Generate AST variables for unlabeled stuff astName = "tmp" + astVarNumber + "_AST"; astVarNumber++; // Map the generated AST variable in the alternate mapTreeVariable(el, astName); // Generate an "input" AST variable also println(astName + "_in = " + elementRef); } return; } if (grammar.buildAST && syntacticPredLevel == 0) { boolean needASTDecl = (genAST && (el.getLabel() != null || el.getAutoGenType() != GrammarElement.AUTO_GEN_BANG ) ); // RK: if we have a grammar element always generate the decl // since some guy can access it from an action and we can't // peek ahead (well not without making a mess). // I'd prefer taking this out. 
if (el.getAutoGenType() != GrammarElement.AUTO_GEN_BANG && (el instanceof TokenRefElement)) { needASTDecl = true; } boolean doNoGuessTest = (grammar.hasSyntacticPredicate && needASTDecl); String elementRef; String astNameBase; // Generate names and declarations of the AST variable(s) if (el.getLabel() != null) { elementRef = el.getLabel(); astNameBase = el.getLabel(); } else { elementRef = lt1Value; // Generate AST variables for unlabeled stuff astNameBase = "tmp" + astVarNumber; ; astVarNumber++; } // Generate the declaration if required. if (needASTDecl) { // Generate the declaration if (el instanceof GrammarAtom) { GrammarAtom ga = (GrammarAtom)el; if (ga.getASTNodeType() != null) { genASTDeclaration(el, astNameBase, ga.getASTNodeType()); } else { genASTDeclaration(el, astNameBase, labeledElementASTType); } } else { genASTDeclaration(el, astNameBase, labeledElementASTType); } } // for convenience.. String astName = astNameBase + "_AST"; // Map the generated AST variable in the alternate mapTreeVariable(el, astName); if (grammar instanceof TreeWalkerGrammar) { // Generate an "input" AST variable also println(astName + "_in = None"); } // Enclose actions with !guessing if (doNoGuessTest) { // println("if (inputState.guessing==0) {"); // tabs++; } // if something has a label assume it will be used // so we must initialize the RefAST if (el.getLabel() != null) { if (el instanceof GrammarAtom) { println(astName + " = " + getASTCreateString((GrammarAtom)el, elementRef) + ""); } else { println(astName + " = " + getASTCreateString(elementRef) + ""); } } // if it has no label but a declaration exists initialize it. 
if (el.getLabel() == null && needASTDecl) { elementRef = lt1Value; if (el instanceof GrammarAtom) { println(astName + " = " + getASTCreateString((GrammarAtom)el, elementRef) + ""); } else { println(astName + " = " + getASTCreateString(elementRef) + ""); } // Map the generated AST variable in the alternate if (grammar instanceof TreeWalkerGrammar) { // set "input" AST variable also println(astName + "_in = " + elementRef + ""); } } if (genAST) { switch (el.getAutoGenType()) { case GrammarElement.AUTO_GEN_NONE: println("self.addASTChild(currentAST, " + astName + ")"); break; case GrammarElement.AUTO_GEN_CARET: println("self.makeASTRoot(currentAST, " + astName + ")"); break; default: break; } } if (doNoGuessTest) { // tabs--; } } } /** Close the try block and generate catch phrases * if the element has a labeled handler in the rule */ private void genErrorCatchForElement(AlternativeElement el) { if (el.getLabel() == null) return; String r = el.enclosingRuleName; if (grammar instanceof LexerGrammar) { r = CodeGenerator.encodeLexerRuleName(el.enclosingRuleName); } RuleSymbol rs = (RuleSymbol)grammar.getSymbol(r); if (rs == null) { antlrTool.panic("Enclosing rule not found!"); } ExceptionSpec ex = rs.block.findExceptionSpec(el.getLabel()); if (ex != null) { tabs--; genErrorHandler(ex); } } /** Generate the catch phrases for a user-specified error handler */ private void genErrorHandler(ExceptionSpec ex) { // Each ExceptionHandler in the ExceptionSpec is a separate catch for (int i = 0; i < ex.handlers.size(); i++) { ExceptionHandler handler = (ExceptionHandler)ex.handlers.elementAt(i); // Generate catch phrase println("except " + handler.exceptionTypeAndName.getText() + ":"); tabs++; if (grammar.hasSyntacticPredicate) { println("if not inputState.guessing:"); tabs++; } // When not guessing, execute user handler action ActionTransInfo tInfo = new ActionTransInfo(); printAction( processActionForSpecialSymbols(handler.action.getText(), handler.action.getLine(), currentRule, 
tInfo) ); if (grammar.hasSyntacticPredicate) { tabs--; println("else:"); tabs++; // When guessing, rethrow exception println( "raise " + extractIdOfAction(handler.exceptionTypeAndName)); tabs--; } // Close catch phrase tabs--; } } /** Generate a try { opening if the element has a labeled handler in the rule */ private void genErrorTryForElement(AlternativeElement el) { if (el.getLabel() == null) return; String r = el.enclosingRuleName; if (grammar instanceof LexerGrammar) { r = CodeGenerator.encodeLexerRuleName(el.enclosingRuleName); } RuleSymbol rs = (RuleSymbol)grammar.getSymbol(r); if (rs == null) { antlrTool.panic("Enclosing rule not found!"); } ExceptionSpec ex = rs.block.findExceptionSpec(el.getLabel()); if (ex != null) { println("try: # for error handling"); tabs++; } } protected void genASTDeclaration(AlternativeElement el) { genASTDeclaration(el, labeledElementASTType); } protected void genASTDeclaration(AlternativeElement el, String node_type) { genASTDeclaration(el, el.getLabel(), node_type); } protected void genASTDeclaration(AlternativeElement el, String var_name, String node_type) { // already declared? if (declaredASTVariables.contains(el)) return; // emit code println(var_name + "_AST = None"); // mark as declared declaredASTVariables.put(el,el); } /** Generate a header that is common to all Python files */ protected void genHeader() { println("### $ANTLR " + Tool.version + ": " + "\"" + antlrTool.fileMinusPath(antlrTool.grammarFile) + "\"" + " -> " + "\"" + grammar.getClassName() + ".py\"$"); } /** Generate an iterator method for the Python CharScanner (sub)classes. */ // protected void genIterator() { // println("def __iter__(self):"); // tabs++; // println("return antlr.CharScannerIterator(self)"); // tabs--; // } /** Generate an automated test for Python CharScanner (sub)classes. 
*/ protected void genLexerTest() { String className = grammar.getClassName(); println("if __name__ == '__main__' :"); tabs++; println("import sys"); println("import antlr"); println("import " + className); println(""); println("### create lexer - shall read from stdin"); println("try:"); tabs++; println("for token in " + className + ".Lexer():"); tabs++; println("print token"); println(""); tabs--; tabs--; println("except antlr.TokenStreamException, e:"); tabs++; println("print \"error: exception caught while lexing: \", e"); tabs--; tabs--; } private void genLiteralsTest() { println("### option { testLiterals=true } "); println("_ttype = self.testLiteralsTable(_ttype)"); } private void genLiteralsTestForPartialToken() { println("_ttype = self.testLiteralsTable(self.text.getBuffer(), _begin, self.text.length()-_begin), _ttype)"); } protected void genMatch(BitSet b) { } protected void genMatch(GrammarAtom atom) { if (atom instanceof StringLiteralElement) { if (grammar instanceof LexerGrammar) { genMatchUsingAtomText(atom); } else { genMatchUsingAtomTokenType(atom); } } else if (atom instanceof CharLiteralElement) { if (grammar instanceof LexerGrammar) { genMatchUsingAtomText(atom); } else { antlrTool.error("cannot ref character literals in grammar: " + atom); } } else if (atom instanceof TokenRefElement) { genMatchUsingAtomText(atom); } else if (atom instanceof WildcardElement) { gen((WildcardElement)atom); } } protected void genMatchUsingAtomText(GrammarAtom atom) { // match() for trees needs the _t cursor String astArgs = ""; if (grammar instanceof TreeWalkerGrammar) { astArgs = "_t,"; } // if in lexer and ! on element, save buffer index to kill later if (grammar instanceof LexerGrammar && (!saveText || atom.getAutoGenType() == GrammarElement.AUTO_GEN_BANG)) { println("_saveIndex = self.text.length()"); } print(atom.not ? 
"self.matchNot(" : "self.match("); _print(astArgs); // print out what to match if (atom.atomText.equals("EOF")) { // horrible hack to handle EOF case _print("EOF_TYPE"); } else { _print(atom.atomText); } _println(")"); if (grammar instanceof LexerGrammar && (!saveText || atom.getAutoGenType() == GrammarElement.AUTO_GEN_BANG)) { println("self.text.setLength(_saveIndex)"); // kill text atom put in buffer } } protected void genMatchUsingAtomTokenType(GrammarAtom atom) { // match() for trees needs the _t cursor String astArgs = ""; if (grammar instanceof TreeWalkerGrammar) { astArgs = "_t,"; } // If the literal can be mangled, generate the symbolic constant instead String mangledName = null; String s = astArgs + getValueString(atom.getType(),true); // matching println((atom.not ? "self.matchNot(" : "self.match(") + s + ")"); } /** Generate the nextToken() rule. nextToken() is a synthetic * lexer rule that is the implicit OR of all user-defined * lexer rules. */ public void genNextToken() { // Are there any public rules? If not, then just generate a // fake nextToken(). 
boolean hasPublicRules = false; for (int i = 0; i < grammar.rules.size(); i++) { RuleSymbol rs = (RuleSymbol)grammar.rules.elementAt(i); if (rs.isDefined() && rs.access.equals("public")) { hasPublicRules = true; break; } } if (!hasPublicRules) { println(""); println("def nextToken(self): "); tabs++; println("try:"); tabs++; println("self.uponEOF()"); tabs--; println("except antlr.CharStreamIOException, csioe:"); tabs++; println("raise antlr.TokenStreamIOException(csioe.io)"); tabs--; println("except antlr.CharStreamException, cse:"); tabs++; println("raise antlr.TokenStreamException(str(cse))"); tabs--; println("return antlr.CommonToken(type=EOF_TYPE, text=\"\")"); tabs--; return; } // Create the synthesized nextToken() rule RuleBlock nextTokenBlk = MakeGrammar.createNextTokenRule(grammar, grammar.rules, "nextToken"); // Define the nextToken rule symbol RuleSymbol nextTokenRs = new RuleSymbol("mnextToken"); nextTokenRs.setDefined(); nextTokenRs.setBlock(nextTokenBlk); nextTokenRs.access = "private"; grammar.define(nextTokenRs); // Analyze the nextToken rule boolean ok = grammar.theLLkAnalyzer.deterministic(nextTokenBlk); // Generate the next token rule String filterRule = null; if (((LexerGrammar)grammar).filterMode) { filterRule = ((LexerGrammar)grammar).filterRule; } println(""); println("def nextToken(self):"); tabs++; println("while True:"); tabs++; println("try: ### try again .."); tabs++; println("while True:"); tabs++; int _tabs_ = tabs; // while block println("_token = None"); println("_ttype = INVALID_TYPE"); if (((LexerGrammar)grammar).filterMode) { println("self.setCommitToPath(False)"); if (filterRule != null) { // Here's a good place to ensure that the filter rule actually exists if (!grammar.isDefined(CodeGenerator.encodeLexerRuleName(filterRule))) { grammar.antlrTool.error( "Filter rule " + filterRule + " does not exist in this lexer"); } else { RuleSymbol rs = (RuleSymbol)grammar.getSymbol( CodeGenerator.encodeLexerRuleName(filterRule)); if 
(!rs.isDefined()) { grammar.antlrTool.error( "Filter rule " + filterRule + " does not exist in this lexer"); } else if (rs.access.equals("public")) { grammar.antlrTool.error( "Filter rule " + filterRule + " must be protected"); } } println("_m = self.mark()"); } } println("self.resetText()"); println("try: ## for char stream error handling"); tabs++; _tabs_ = tabs; // inner try // Generate try around whole thing to trap scanner errors println("try: ##for lexical error handling"); tabs++; _tabs_ = tabs; // inner try // Test for public lexical rules with empty paths for (int i = 0; i < nextTokenBlk.getAlternatives().size(); i++) { Alternative a = nextTokenBlk.getAlternativeAt(i); if (a.cache[1].containsEpsilon()) { //String r = a.head.toString(); RuleRefElement rr = (RuleRefElement)a.head; String r = CodeGenerator.decodeLexerRuleName(rr.targetRule); antlrTool.warning("public lexical rule "+r+" is optional (can match \"nothing\")"); } } // Generate the block String newline = System.getProperty("line.separator"); /* generate the common block */ PythonBlockFinishingInfo howToFinish = genCommonBlock(nextTokenBlk, false); /* how to finish the block */ String errFinish = ""; // Is this a filter? if so we need to change the default handling. // In non filter mode we generate EOF token on EOF and stop, other- // wise an error gets generated. In filter mode we just continue // by consuming the unknown character till EOF is seen. if (((LexerGrammar)grammar).filterMode) { /* filter */ if (filterRule == null) { /* no specical filter rule has been given. */ errFinish += "self.filterdefault(self.LA(1))"; } else { errFinish += "self.filterdefault(self.LA(1), self.m" + filterRule + ", False)"; } } else { /* non filter */ /* if an IF has been generated (in the default clause), indendation ** is not correct. In that case we need to close on level++. 
**/ errFinish = "self.default(self.LA(1))" ; } /* finish the block */ genBlockFinish1(howToFinish, errFinish); // alt block has finished .., reset tabs! tabs = _tabs_; // at this point a valid token has been matched, undo "mark" that was done if (((LexerGrammar)grammar).filterMode && filterRule != null) { println("self.commit()"); } // Generate literals test if desired // make sure _ttype is set first; note _returnToken must be // non-null as the rule was required to create it. println("if not self._returnToken:"); tabs++; println("raise antlr.TryAgain ### found SKIP token"); tabs--; // There's one literal test (in Lexer) after the large switch // in 'nextToken'. if (((LexerGrammar)grammar).getTestLiterals()) { println("### option { testLiterals=true } "); //genLiteralsTest(); println("self.testForLiteral(self._returnToken)"); } // return token created by rule reference in switch println("### return token to caller"); println("return self._returnToken"); // Close try block tabs--; println("### handle lexical errors ...."); println("except antlr.RecognitionException, e:"); tabs++; if (((LexerGrammar)grammar).filterMode) { if (filterRule == null) { println("if not self.getCommitToPath():"); tabs++; println("self.consume()"); println("raise antlr.TryAgain()"); tabs--; } else { println("if not self.getCommitToPath(): "); tabs++; println("self.rewind(_m)"); println("self.resetText()"); println("try:"); tabs++; println("self.m" + filterRule + "(False)"); tabs--; println("except antlr.RecognitionException, ee:"); tabs++; println("### horrendous failure: error in filter rule"); println("self.reportError(ee)"); println("self.consume()"); tabs--; println("raise antlr.TryAgain()"); tabs--; } } if (nextTokenBlk.getDefaultErrorHandler()) { println("self.reportError(e)"); println("self.consume()"); } else { // pass on to invoking routine println("raise antlr.TokenStreamRecognitionException(e)"); } tabs--; //println(""); //println("### shall never be reached "); //println("assert 
0"); // close CharStreamException try tabs--; println("### handle char stream errors ..."); println("except antlr.CharStreamException,cse:"); tabs++; println("if isinstance(cse, antlr.CharStreamIOException):"); tabs++; println("raise antlr.TokenStreamIOException(cse.io)"); tabs--; println("else:"); tabs++; println("raise antlr.TokenStreamException(str(cse))"); tabs--; tabs--; //println("### shall never be reached "); //println("assert 0"); // close for-loop tabs--; //println("### "); //println("### shall never be reached "); //println("assert 0"); tabs--; //println("### "); println("except antlr.TryAgain:"); tabs++; println("pass"); tabs--; // close method nextToken tabs--; //println("### "); //println("### shall never be reached"); //println("assert 0"); //println("### "); } /** Gen a named rule block. * ASTs are generated for each element of an alternative unless * the rule or the alternative have a '!' modifier. * * If an alternative defeats the default tree construction, it * must set _AST to the root of the returned AST. * * Each alternative that does automatic tree construction, builds * up root and child list pointers in an ASTPair structure. * * A rule finishes by setting the returnAST variable from the * ASTPair. * * @param rule The name of the rule to generate * @param startSymbol true if the rule is a start symbol (i.e., not referenced elsewhere) */ public void genRule(RuleSymbol s, boolean startSymbol, int ruleNum) { tabs=1; if (!s.isDefined()) { antlrTool.error("undefined rule: " + s.getId()); return; } // Generate rule return type, name, arguments RuleBlock rblk = s.getBlock(); currentRule = rblk; currentASTResult = s.getId(); // clear list of declared ast variables.. 
declaredASTVariables.clear(); // Save the AST generation state, and set it to that of the rule boolean savegenAST = genAST; genAST = genAST && rblk.getAutoGen(); // boolean oldsaveTest = saveText; saveText = rblk.getAutoGen(); // print javadoc comment if any genJavadocComment(s); // Gen method name print("def " + s.getId() + "("); // Additional rule parameters common to all rules for this grammar _print(commonExtraParams); if (commonExtraParams.length() != 0 && rblk.argAction != null) { _print(","); } // Gen arguments if (rblk.argAction != null) { // Has specified arguments _println(""); tabs++; println(rblk.argAction); tabs--; print("):"); } else { // No specified arguments _print("):"); } println(""); tabs++; // Convert return action to variable declaration if (rblk.returnAction != null) { if (rblk.returnAction.indexOf('=') >= 0) println(rblk.returnAction); else { // mx println(extractIdOfAction(rblk.returnAction, rblk.getLine(), rblk.getColumn()) + " = None"); } } // print out definitions needed by rules for various grammar types println(commonLocalVars); if (grammar.traceRules) { if (grammar instanceof TreeWalkerGrammar) { println("self.traceIn(\"" + s.getId() + "\",_t)"); } else { println("self.traceIn(\"" + s.getId() + "\")"); } } if (grammar instanceof LexerGrammar) { // lexer rule default return value is the rule's token name // This is a horrible hack to support the built-in EOF lexer rule. if (s.getId().equals("mEOF")) println("_ttype = EOF_TYPE"); else println("_ttype = " + s.getId().substring(1)); println("_saveIndex = 0"); // used for element! 
(so we can kill text matched for element) } // if debugging, write code to mark entry to the rule if (grammar.debuggingOutput) if (grammar instanceof ParserGrammar) println("self.fireEnterRule(" + ruleNum + ", 0)"); else if (grammar instanceof LexerGrammar) println("self.fireEnterRule(" + ruleNum + ", _ttype)"); // Generate trace code if desired if (grammar.debuggingOutput || grammar.traceRules) { println("try: ### debugging"); tabs++; } // Initialize AST variables if (grammar instanceof TreeWalkerGrammar) { // "Input" value for rule println(s.getId() + "_AST_in = None"); println("if _t != antlr.ASTNULL:"); tabs++; println(s.getId() + "_AST_in = _t"); tabs--; } if (grammar.buildAST) { // Parser member used to pass AST returns from rule invocations println("self.returnAST = None"); println("currentAST = antlr.ASTPair()"); // User-settable return value for rule. println(s.getId() + "_AST = None"); } genBlockPreamble(rblk); genBlockInitAction(rblk); // Search for an unlabeled exception specification attached to the rule ExceptionSpec unlabeledUserSpec = rblk.findExceptionSpec(""); // Generate try block around the entire rule for error handling if (unlabeledUserSpec != null || rblk.getDefaultErrorHandler()) { println("try: ## for error handling"); tabs++; } int _tabs_ = tabs; // Generate the alternatives if (rblk.alternatives.size() == 1) { // One alternative -- use simple form Alternative alt = rblk.getAlternativeAt(0); String pred = alt.semPred; if (pred != null) genSemPred(pred, currentRule.line); if (alt.synPred != null) { antlrTool.warning( "Syntactic predicate ignored for single alternative", grammar.getFilename(), alt.synPred.getLine(), alt.synPred.getColumn() ); } genAlt(alt, rblk); } else { // Multiple alternatives -- generate complex form boolean ok = grammar.theLLkAnalyzer.deterministic(rblk); PythonBlockFinishingInfo howToFinish = genCommonBlock(rblk, false); genBlockFinish(howToFinish, throwNoViable); } tabs = _tabs_; // Generate catch phrase for error 
handling if (unlabeledUserSpec != null || rblk.getDefaultErrorHandler()) { // Close the try block tabs--; println(""); } // Generate user-defined or default catch phrases if (unlabeledUserSpec != null) { genErrorHandler(unlabeledUserSpec); } else if (rblk.getDefaultErrorHandler()) { // Generate default catch phrase println("except " + exceptionThrown + ", ex:"); tabs++; // Generate code to handle error if not guessing if (grammar.hasSyntacticPredicate) { println("if not self.inputState.guessing:"); tabs++; } println("self.reportError(ex)"); if (!(grammar instanceof TreeWalkerGrammar)) { // Generate code to consume until token in k==1 follow set Lookahead follow = grammar.theLLkAnalyzer.FOLLOW(1, rblk.endNode); String followSetName = getBitsetName(markBitsetForGen(follow.fset)); println("self.consume()"); println("self.consumeUntil(" + followSetName + ")"); } else { // Just consume one token println("if _t:"); tabs++; println("_t = _t.getNextSibling()"); tabs--; } if (grammar.hasSyntacticPredicate) { tabs--; // When guessing, rethrow exception println("else:"); tabs++; println("raise ex"); tabs--; } // Close catch phrase tabs--; println(""); } // Squirrel away the AST "return" value if (grammar.buildAST) { println("self.returnAST = " + s.getId() + "_AST"); } // Set return tree value for tree walkers if (grammar instanceof TreeWalkerGrammar) { println("self._retTree = _t"); } // Generate literals test for lexer rules so marked if (rblk.getTestLiterals()) { if (s.access.equals("protected")) { genLiteralsTestForPartialToken(); } else { genLiteralsTest(); } } // if doing a lexer rule, dump code to create token if necessary if (grammar instanceof LexerGrammar) { println("self.set_return_token(_createToken, _token, _ttype, _begin)"); } if(rblk.returnAction != null) { // if(grammar instanceof LexerGrammar) // { println("return " + extractIdOfAction(rblk.returnAction, rblk.getLine(), rblk.getColumn()) + ""); // } // else // { // println("return r"); // } } if 
(grammar.debuggingOutput || grammar.traceRules) { tabs--; println("finally: ### debugging"); tabs++; // If debugging, generate calls to mark exit of rule if (grammar.debuggingOutput) if (grammar instanceof ParserGrammar) println("self.fireExitRule(" + ruleNum + ", 0)"); else if (grammar instanceof LexerGrammar) println("self.fireExitRule(" + ruleNum + ", _ttype)"); if (grammar.traceRules) { if (grammar instanceof TreeWalkerGrammar) { println("self.traceOut(\"" + s.getId() + "\", _t)"); } else { println("self.traceOut(\"" + s.getId() + "\")"); } } tabs--; } tabs--; println(""); // Restore the AST generation state genAST = savegenAST; // restore char save state // saveText = oldsaveTest; } private void GenRuleInvocation(RuleRefElement rr) { // dump rule name _print("self." + rr.targetRule + "("); // lexers must tell rule if it should set _returnToken if (grammar instanceof LexerGrammar) { // if labeled, could access Token, so tell rule to create if (rr.getLabel() != null) { _print("True"); } else { _print("False"); } if (commonExtraArgs.length() != 0 || rr.args != null) { _print(", "); } } // Extra arguments common to all rules for this grammar _print(commonExtraArgs); if (commonExtraArgs.length() != 0 && rr.args != null) { _print(", "); } // Process arguments to method, if any RuleSymbol rs = (RuleSymbol)grammar.getSymbol(rr.targetRule); if (rr.args != null) { // When not guessing, execute user arg action ActionTransInfo tInfo = new ActionTransInfo(); String args = processActionForSpecialSymbols(rr.args, 0, currentRule, tInfo); if (tInfo.assignToRoot || tInfo.refRuleRoot != null) { antlrTool.error("Arguments of rule reference '" + rr.targetRule + "' cannot set or ref #" + currentRule.getRuleName(), grammar.getFilename(), rr.getLine(), rr.getColumn()); } _print(args); // Warn if the rule accepts no arguments if (rs.block.argAction == null) { antlrTool.warning("Rule '" + rr.targetRule + "' accepts no arguments", grammar.getFilename(), rr.getLine(), rr.getColumn()); } 
} else { // For C++, no warning if rule has parameters, because there may be default // values for all of the parameters if (rs.block.argAction != null) { antlrTool.warning("Missing parameters on reference to rule " + rr.targetRule, grammar.getFilename(), rr.getLine(), rr.getColumn()); } } _println(")"); // move down to the first child while parsing if (grammar instanceof TreeWalkerGrammar) { println("_t = self._retTree"); } } protected void genSemPred(String pred, int line) { // translate $ and # references ActionTransInfo tInfo = new ActionTransInfo(); pred = processActionForSpecialSymbols(pred, line, currentRule, tInfo); // ignore translation info...we don't need to do anything with it. String escapedPred = charFormatter.escapeString(pred); // if debugging, wrap the semantic predicate evaluation in a method // that can tell SemanticPredicateListeners the result if (grammar.debuggingOutput && ((grammar instanceof ParserGrammar) || (grammar instanceof LexerGrammar))) pred = "fireSemanticPredicateEvaluated(antlr.debug.SemanticPredicateEvent.VALIDATING," + addSemPred(escapedPred) + ", " + pred + ")"; /* always .. */ println("if not " + pred + ":"); tabs++; println("raise SemanticException(\"" + escapedPred + "\")"); tabs--; } /** Write an array of Strings which are the semantic predicate * expressions. 
The debugger will reference them by number only */ protected void genSemPredMap() { Enumeration e = semPreds.elements(); println("_semPredNames = ["); tabs++; while (e.hasMoreElements()) { println("\"" + e.nextElement() + "\","); } tabs--; println("]"); } protected void genSynPred(SynPredBlock blk, String lookaheadExpr) { if (DEBUG_CODE_GENERATOR) System.out.println("gen=>(" + blk + ")"); // Dump synpred result variable println("synPredMatched" + blk.ID + " = False"); // Gen normal lookahead test println("if " + lookaheadExpr + ":"); tabs++; // Save input state if (grammar instanceof TreeWalkerGrammar) { println("_t" + blk.ID + " = _t"); } else { println("_m" + blk.ID + " = self.mark()"); } // Once inside the try, assume synpred works unless exception caught println("synPredMatched" + blk.ID + " = True"); println("self.inputState.guessing += 1"); // if debugging, tell listeners that a synpred has started if (grammar.debuggingOutput && ((grammar instanceof ParserGrammar) || (grammar instanceof LexerGrammar))) { println("self.fireSyntacticPredicateStarted()"); } syntacticPredLevel++; println("try:"); tabs++; gen((AlternativeBlock)blk); // gen code to test predicate tabs--; println("except " + exceptionThrown + ", pe:"); tabs++; println("synPredMatched" + blk.ID + " = False"); tabs--; // Restore input state if (grammar instanceof TreeWalkerGrammar) { println("_t = _t" + blk.ID + ""); } else { println("self.rewind(_m" + blk.ID + ")"); } println("self.inputState.guessing -= 1"); // if debugging, tell listeners how the synpred turned out if (grammar.debuggingOutput && ((grammar instanceof ParserGrammar) || (grammar instanceof LexerGrammar))) { println("if synPredMatched" + blk.ID + ":"); tabs++; println("self.fireSyntacticPredicateSucceeded()"); tabs--; println("else:"); tabs++; println("self.fireSyntacticPredicateFailed()"); tabs--; } syntacticPredLevel--; tabs--; // Close lookahead test // Test synred result println("if synPredMatched" + blk.ID + ":"); } /** Generate a 
static array containing the names of the tokens, * indexed by the token type values. This static array is used * to format error messages so that the token identifers or literal * strings are displayed instead of the token numbers. * * If a lexical rule has a paraphrase, use it rather than the * token label. */ public void genTokenStrings() { // Generate a string for each token. This creates a static // array of Strings indexed by token type. int save_tabs = tabs; tabs = 0; println(""); println("_tokenNames = ["); tabs++; // Walk the token vocabulary and generate a Vector of strings // from the tokens. Vector v = grammar.tokenManager.getVocabulary(); for (int i = 0; i < v.size(); i++) { String s = (String)v.elementAt(i); if (s == null) { s = "<" + String.valueOf(i) + ">"; } if (!s.startsWith("\"") && !s.startsWith("<")) { TokenSymbol ts = (TokenSymbol)grammar.tokenManager.getTokenSymbol(s); if (ts != null && ts.getParaphrase() != null) { s = StringUtils.stripFrontBack(ts.getParaphrase(), "\"", "\""); } } print(charFormatter.literalString(s)); if (i != v.size() - 1) { _print(", "); } _println(""); } // Close the string array initailizer tabs--; println("]"); tabs = save_tabs; } /** Create and set Integer token type objects that map * to Java Class objects (which AST node to create). */ protected void genTokenASTNodeMap() { println(""); println("def buildTokenTypeASTClassMap(self):"); // Generate a map.put("T","TNode") for each token // if heterogeneous node known for that token T. tabs++; boolean generatedNewHashtable = false; int n = 0; // Walk the token vocabulary and generate puts. 
Vector v = grammar.tokenManager.getVocabulary(); for (int i = 0; i < v.size(); i++) { String s = (String)v.elementAt(i); if (s != null) { TokenSymbol ts = grammar.tokenManager.getTokenSymbol(s); if (ts != null && ts.getASTNodeType() != null) { n++; if ( !generatedNewHashtable ) { // only generate if we are going to add a mapping println("self.tokenTypeToASTClassMap = {}"); generatedNewHashtable = true; } println( "self.tokenTypeToASTClassMap[" + ts.getTokenType() + "] = " + ts.getASTNodeType() ); } } } if ( n==0 ) { println("self.tokenTypeToASTClassMap = None"); } tabs--; } /** Generate the token types Java file */ protected void genTokenTypes(TokenManager tm) throws IOException { // Open the token output Python file and set the currentOutput // stream // SAS: file open was moved to a method so a subclass can override // This was mainly for the VAJ interface // setupOutput(tm.getName() + TokenTypesFileSuffix); tabs = 0; // Generate the header common to all Python files // genHeader(); // Do not use printAction because we assume tabs==0 // println(behavior.getHeaderAction("")); // Generate a definition for each token type Vector v = tm.getVocabulary(); // Do special tokens manually println("SKIP = antlr.SKIP"); println("INVALID_TYPE = antlr.INVALID_TYPE"); println("EOF_TYPE = antlr.EOF_TYPE"); println("EOF = antlr.EOF"); println("NULL_TREE_LOOKAHEAD = antlr.NULL_TREE_LOOKAHEAD"); println("MIN_USER_TYPE = antlr.MIN_USER_TYPE"); for (int i = Token.MIN_USER_TYPE; i < v.size(); i++) { String s = (String)v.elementAt(i); if (s != null) { if (s.startsWith("\"")) { // a string literal StringLiteralSymbol sl = (StringLiteralSymbol)tm.getTokenSymbol(s); if (sl == null) antlrTool.panic("String literal " + s + " not in symbol table"); if (sl.label != null) { println(sl.label + " = " + i); } else { String mangledName = mangleLiteral(s); if (mangledName != null) { // We were able to create a meaningful mangled token name println(mangledName + " = " + i); // if no label specified, 
make the label equal to the mangled name sl.label = mangledName; } else { println("### " + s + " = " + i); } } } else if (!s.startsWith("<")) { println(s + " = " + i); } } } // Close the interface tabs--; exitIfError(); } /** Get a string for an expression to generate creation of an AST subtree. * @param v A Vector of String, where each element is an expression in the target language yielding an AST node. */ public String getASTCreateString(Vector v) { if (v.size() == 0) { return ""; } StringBuffer buf = new StringBuffer(); buf.append("antlr.make("); for (int i = 0; i < v.size(); i++) { buf.append(v.elementAt(i)); if(i+10 ) { tokenName = astCtorArgs.substring(0,firstComma); } TokenSymbol ts = grammar.tokenManager.getTokenSymbol(tokenName); if ( ts!=null ) { String astNodeType = ts.getASTNodeType(); String emptyText = ""; if ( nCommas==0 ) { // need to add 2nd arg of blank text for token text emptyText = ", \"\""; } if ( astNodeType!=null ) { return "self.astFactory.create(" + astCtorArgs + emptyText + ", " + astNodeType + ")"; } // fall through and just do a regular create with cast on front // if necessary (it differs from default "AST"). } if ( labeledElementASTType.equals("AST") ) { return "self.astFactory.create("+astCtorArgs+")"; } return "self.astFactory.create("+astCtorArgs+")"; } // create default type or (since 2.7.2) 3rd arg is classname return "self.astFactory.create(" + astCtorArgs + ")"; } protected String getLookaheadTestExpression(Lookahead[] look, int k) { StringBuffer e = new StringBuffer(100); boolean first = true; e.append("("); for (int i = 1; i <= k; i++) { BitSet p = look[i].fset; if (!first) { e.append(") and ("); } first = false; // Syn preds can yield (epsilon) lookahead. // There is no way to predict what that token would be. Just // allow anything instead. 
if (look[i].containsEpsilon()) { e.append("True"); } else { e.append(getLookaheadTestTerm(i, p)); } } e.append(")"); String s = e.toString(); return s; } /**Generate a lookahead test expression for an alternate. This * will be a series of tests joined by '&&' and enclosed by '()', * the number of such tests being determined by the depth of the lookahead. */ protected String getLookaheadTestExpression(Alternative alt, int maxDepth) { int depth = alt.lookaheadDepth; if (depth == GrammarAnalyzer.NONDETERMINISTIC) { // if the decision is nondeterministic, do the best we can: LL(k) // any predicates that are around will be generated later. depth = grammar.maxk; } if (maxDepth == 0) { // empty lookahead can result from alt with sem pred // that can see end of token. E.g., A : {pred}? ('a')? ; return "True"; } return getLookaheadTestExpression(alt.cache, depth); } /**Generate a depth==1 lookahead test expression given the BitSet. * This may be one of: * 1) a series of 'x==X||' tests * 2) a range test using >= && <= where possible, * 3) a bitset membership test for complex comparisons * @param k The lookahead level * @param p The lookahead set for level k */ protected String getLookaheadTestTerm(int k, BitSet p) { // Determine the name of the item to be compared String ts = lookaheadString(k); // Generate a range expression if possible int[] elems = p.toArray(); if (elementsAreRange(elems)) { String s = getRangeExpression(k, elems); return s; } // Generate a bitset membership test if possible StringBuffer e; int degree = p.degree(); if (degree == 0) { return "True"; } if (degree >= bitsetTestThreshold) { int bitsetIdx = markBitsetForGen(p); return getBitsetName(bitsetIdx) + ".member(" + ts + ")"; } // Otherwise, generate the long-winded series of "x==X||" tests e = new StringBuffer(); for (int i = 0; i < elems.length; i++) { // Get the compared-to item (token or character value) String cs = getValueString(elems[i],true); // Generate the element comparison if (i > 0) 
e.append(" or "); e.append(ts); e.append("=="); e.append(cs); } String x = e.toString(); return e.toString(); } /** Return an expression for testing a contiguous renage of elements * @param k The lookahead level * @param elems The elements representing the set, usually from BitSet.toArray(). * @return String containing test expression. */ public String getRangeExpression(int k, int[] elems) { if (!elementsAreRange(elems)) { antlrTool.panic("getRangeExpression called with non-range"); } int begin = elems[0]; int end = elems[elems.length - 1]; return "(" + lookaheadString(k) + " >= " + getValueString(begin,true) + " and " + lookaheadString(k) + " <= " + getValueString(end,true) + ")"; } /** getValueString: get a string representation of a token or char value * @param value The token or char value */ private String getValueString(int value,boolean wrap) { String cs; if (grammar instanceof LexerGrammar) { cs = charFormatter.literalChar(value); if(wrap) cs = "u'" + cs + "'"; return cs; } // Parser or TreeParser => tokens .. TokenSymbol ts = grammar.tokenManager.getTokenSymbolAt( value); if (ts == null) { cs = "" + value; // return token type as string return cs; } String tId = ts.getId(); if (!(ts instanceof StringLiteralSymbol)) { cs = tId; return cs; } // if string literal, use predefined label if any // if no predefined, try to mangle into LITERAL_xxx. // if can't mangle, use int value as last resort StringLiteralSymbol sl = (StringLiteralSymbol)ts; String label = sl.getLabel(); if (label != null) { cs = label; } else { cs = mangleLiteral(tId); if (cs == null) { cs = String.valueOf(value); } } return cs; } /**Is the lookahead for this alt empty? 
*/ protected boolean lookaheadIsEmpty(Alternative alt, int maxDepth) { int depth = alt.lookaheadDepth; if (depth == GrammarAnalyzer.NONDETERMINISTIC) { depth = grammar.maxk; } for (int i = 1; i <= depth && i <= maxDepth; i++) { BitSet p = alt.cache[i].fset; if (p.degree() != 0) { return false; } } return true; } private String lookaheadString(int k) { if (grammar instanceof TreeWalkerGrammar) { return "_t.getType()"; } return "self.LA(" + k + ")"; } /** Mangle a string literal into a meaningful token name. This is * only possible for literals that are all characters. The resulting * mangled literal name is literalsPrefix with the text of the literal * appended. * @return A string representing the mangled literal, or null if not possible. */ private String mangleLiteral(String s) { String mangled = antlrTool.literalsPrefix; for (int i = 1; i < s.length() - 1; i++) { if (!Character.isLetter(s.charAt(i)) && s.charAt(i) != '_') { return null; } mangled += s.charAt(i); } if (antlrTool.upperCaseMangledLiterals) { mangled = mangled.toUpperCase(); } return mangled; } /** Map an identifier to it's corresponding tree-node variable. * This is context-sensitive, depending on the rule and alternative * being generated * @param idParam The identifier name to map * @return The mapped id (which may be the same as the input), or null if * the mapping is invalid due to duplicates */ public String mapTreeId(String idParam, ActionTransInfo transInfo) { // if not in an action of a rule, nothing to map. if (currentRule == null) return idParam; boolean in_var = false; String id = idParam; if (grammar instanceof TreeWalkerGrammar) { if (!grammar.buildAST) { in_var = true; } // If the id ends with "_in", then map it to the input variable else if (id.length() > 3 && id.lastIndexOf("_in") == id.length() - 3) { // Strip off the "_in" id = id.substring(0, id.length() - 3); in_var = true; } } // Check the rule labels. 
If id is a label, then the output // variable is label_AST, and the input variable is plain label. for (int i = 0; i < currentRule.labeledElements.size(); i++) { AlternativeElement elt = (AlternativeElement)currentRule.labeledElements.elementAt(i); if (elt.getLabel().equals(id)) { return in_var ? id : id + "_AST"; } } // Failing that, check the id-to-variable map for the alternative. // If the id is in the map, then output variable is the name in the // map, and input variable is name_in String s = (String)treeVariableMap.get(id); if (s != null) { if (s == NONUNIQUE) { // There is more than one element with this id antlrTool.error("Ambiguous reference to AST element "+id+ " in rule "+currentRule.getRuleName()); return null; } else if (s.equals(currentRule.getRuleName())) { // a recursive call to the enclosing rule is // ambiguous with the rule itself. antlrTool.error("Ambiguous reference to AST element "+id+ " in rule "+currentRule.getRuleName()); return null; } else { return in_var ? s + "_in" : s; } } // Failing that, check the rule name itself. Output variable // is rule_AST; input variable is rule_AST_in (treeparsers). if (id.equals(currentRule.getRuleName())) { String r = in_var ? id + "_AST_in" : id + "_AST"; if (transInfo != null) { if (!in_var) { transInfo.refRuleRoot = r; } } return r; } else { // id does not map to anything -- return itself. return id; } } /** Given an element and the name of an associated AST variable, * create a mapping between the element "name" and the variable name. 
*/ private void mapTreeVariable(AlternativeElement e, String name) { // For tree elements, defer to the root if (e instanceof TreeElement) { mapTreeVariable(((TreeElement)e).root, name); return; } // Determine the name of the element, if any, for mapping purposes String elName = null; // Don't map labeled items if (e.getLabel() == null) { if (e instanceof TokenRefElement) { // use the token id elName = ((TokenRefElement)e).atomText; } else if (e instanceof RuleRefElement) { // use the rule name elName = ((RuleRefElement)e).targetRule; } } // Add the element to the tree variable map if it has a name if (elName != null) { if (treeVariableMap.get(elName) != null) { // Name is already in the map -- mark it as duplicate treeVariableMap.remove(elName); treeVariableMap.put(elName, NONUNIQUE); } else { treeVariableMap.put(elName, name); } } } /** Lexically process $var and tree-specifiers in the action. * This will replace #id and #(...) with the appropriate * function calls and/or variables etc... */ protected String processActionForSpecialSymbols(String actionStr, int line, RuleBlock currentRule, ActionTransInfo tInfo) { if (actionStr == null || actionStr.length() == 0) return null; if(isEmpty(actionStr)) return ""; // The action trans info tells us (at the moment) whether an // assignment was done to the rule's tree root. if (grammar == null) { // to be processd by PyCodeFmt?? 
return actionStr; } // Create a lexer to read an action and return the translated version antlr.actions.python.ActionLexer lexer = new antlr.actions.python.ActionLexer( actionStr, currentRule, this, tInfo); lexer.setLineOffset(line); lexer.setFilename(grammar.getFilename()); lexer.setTool(antlrTool); try { lexer.mACTION(true); actionStr = lexer.getTokenObject().getText(); } catch (RecognitionException ex) { lexer.reportError(ex); } catch (TokenStreamException tex) { antlrTool.panic("Error reading action:" + actionStr); } catch (CharStreamException io) { antlrTool.panic("Error reading action:" + actionStr); } return actionStr; } static boolean isEmpty(String s) { char c; boolean ws = true; /* figure out whether there's something to be done */ for(int i=0;ws && iAST")); } labeledElementInit = "None"; commonExtraArgs = "_t"; commonExtraParams = "self, _t"; commonLocalVars = ""; lt1Value = "_t"; exceptionThrown = "antlr.RecognitionException"; throwNoViable = "raise antlr.NoViableAltException(_t)"; treeWalkerClassName = "Walker"; if (g.hasOption("className")) { Token tcname = g.getOption("className"); if (tcname != null) { String cname = StringUtils.stripFrontBack(tcname.getText(), "\"", "\""); if (cname != null) { treeWalkerClassName = cname; } } } return; } /* serious error */ antlrTool.panic("Unknown grammar type"); } /** This method exists so a subclass, namely VAJCodeGenerator, * can open the file in its own evil way. JavaCodeGenerator * simply opens a text file... */ public void setupOutput(String className) throws IOException { currentOutput = antlrTool.openOutputFile(className + ".py"); } protected boolean isspace(char c) { boolean r = true; switch (c) { case '\n' : case '\r' : case ' ' : case '\t' : break; default: r = false; break; } return r; } protected void _printAction(String s) { if (s == null) { return; } char c; int offset; // shall be first no ws character in 's'. We are // going to remove at most this number of ws chars after // each newline. 
This will keep the block formatted as it is. // When going to figure out the offset, we need to rese the // counter after each newline has been seen. // Skip leading newlines, tabs and spaces int start = 0; int end = s.length(); boolean ws; offset = 0; ws = true; while (start < end && ws) { c = s.charAt(start++); switch (c) { case '\n' : offset = start; break; case '\r': if( (start)<=end && s.charAt(start) == '\n') start++; offset = start; break; case ' ' : break; case '\t': default: ws = false; break; } } if(ws == false) { start--; } offset = start - offset; // Skip leading newlines, tabs and spaces end = end - 1; while ((end > start) && isspace(s.charAt(end))) { end--; } boolean newline = false; int absorbed; for (int i = start; i <= end; ++i) { c = s.charAt(i); switch (c) { case '\n': newline = true; break; case '\r': newline = true; if ((i+1) <= end && s.charAt(i+1) == '\n') { i++; } break; case '\t': System.err.println("warning: tab characters used in Python action"); currentOutput.print(" "); break; case ' ': currentOutput.print(" "); break; default: currentOutput.print(c); break; } if (newline) { currentOutput.print("\n"); printTabs(); absorbed = 0; newline = false; // Absorb leading whitespace for(i=i+1;i<=end;++i) { c = s.charAt(i); if (!isspace(c)) { i--; break; } switch(c) { case '\n' : newline = true; break; case '\r': if ((i+1) <= end && s.charAt(i+1) == '\n') { i++; } newline = true; break; } if(newline) { currentOutput.print("\n"); printTabs(); absorbed = 0; newline = false; continue; } if(absorbed>>"); printAction( processActionForSpecialSymbols( grammar.classMemberAction.getText(), grammar.classMemberAction.getLine(), currentRule, null) ); println("### user action <<<"); } protected void _printJavadoc(String s) { char c; int end = s.length(); int start = 0; boolean newline = false; currentOutput.print("\n"); printTabs(); currentOutput.print("###"); for (int i = start; i < end; ++i) { c = s.charAt(i); switch (c) { case '\n': newline = true; break; 
case '\r': newline = true; if ((i+1) <= end && s.charAt(i+1) == '\n') { i++; } break; case '\t': currentOutput.print("\t"); break; case ' ': currentOutput.print(" "); break; default: currentOutput.print(c); break; } if (newline) { currentOutput.print("\n"); printTabs(); currentOutput.print("###"); newline = false; } } currentOutput.println(); } protected void genJavadocComment(Grammar g) { // print javadoc comment if any if (g.comment != null) { _printJavadoc(g.comment); } } protected void genJavadocComment(RuleSymbol g) { // print javadoc comment if any if (g.comment != null) { _printJavadoc(g.comment); } } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/RecognitionException.java000066400000000000000000000033161161462365500261230ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/RecognitionException.java#1 $ */ public class RecognitionException extends ANTLRException { public String fileName; // not used by treeparsers public int line; public int column; public RecognitionException() { super("parsing error"); fileName = null; line = -1; column = -1; } /** * RecognitionException constructor comment. * @param s java.lang.String */ public RecognitionException(String s) { super(s); fileName = null; line = -1; column = -1; } /** @deprecated As of ANTLR 2.7.2 use {@see #RecognitionException(char, String, int, int) } */ public RecognitionException(String s, String fileName_, int line_) { this(s, fileName_, line_, -1); } /** * RecognitionException constructor comment. 
* @param s java.lang.String */ public RecognitionException(String s, String fileName_, int line_, int column_) { super(s); fileName = fileName_; line = line_; column = column_; } public String getFilename() { return fileName; } public int getLine() { return line; } public int getColumn() { return column; } /** @deprecated As of ANTLR 2.7.0 */ public String getErrorMessage() { return getMessage(); } public String toString() { return FileLineFormatter.getFormatter(). getFormatString(fileName, line, column) + getMessage(); } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/RuleBlock.java000066400000000000000000000162561161462365500236550ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/RuleBlock.java#1 $ */ import antlr.collections.impl.Vector; import java.util.Hashtable; /**A list of alternatives and info contained in * the rule definition. */ public class RuleBlock extends AlternativeBlock { protected String ruleName; protected String argAction = null; // string for rule arguments [...] protected String throwsSpec = null; protected String returnAction = null;// string for rule return type(s) <...> protected RuleEndElement endNode; // which node ends this rule? // Generate literal-testing code for lexer rule? protected boolean testLiterals = false; Vector labeledElements; // List of labeled elements found in this rule // This is a list of AlternativeElement (or subclass) protected boolean[] lock; // for analysis; used to avoid infinite loops // 1..k protected Lookahead cache[];// Each rule can cache it's lookahead computation. // This cache contains an epsilon // imaginary token if the FOLLOW is required. No // FOLLOW information is cached here. // The FIRST(rule) is stored in this cache; 1..k // This set includes FIRST of all alts. 
Hashtable exceptionSpecs; // table of String-to-ExceptionSpec. // grammar-settable options protected boolean defaultErrorHandler = true; protected String ignoreRule = null; /** Construct a named rule. */ public RuleBlock(Grammar g, String r) { super(g); ruleName = r; labeledElements = new Vector(); cache = new Lookahead[g.maxk + 1]; exceptionSpecs = new Hashtable(); setAutoGen(g instanceof ParserGrammar); } /** Construct a named rule with line number information */ public RuleBlock(Grammar g, String r, int line, boolean doAutoGen_) { this(g, r); this.line = line; setAutoGen(doAutoGen_); } public void addExceptionSpec(ExceptionSpec ex) { if (findExceptionSpec(ex.label) != null) { if (ex.label != null) { grammar.antlrTool.error("Rule '" + ruleName + "' already has an exception handler for label: " + ex.label); } else { grammar.antlrTool.error("Rule '" + ruleName + "' already has an exception handler"); } } else { exceptionSpecs.put((ex.label == null ? "" : ex.label.getText()), ex); } } public ExceptionSpec findExceptionSpec(Token label) { return (ExceptionSpec)exceptionSpecs.get(label == null ? "" : label.getText()); } public ExceptionSpec findExceptionSpec(String label) { return (ExceptionSpec)exceptionSpecs.get(label == null ? 
"" : label); } public void generate() { grammar.generator.gen(this); } public boolean getDefaultErrorHandler() { return defaultErrorHandler; } public RuleEndElement getEndElement() { return endNode; } public String getIgnoreRule() { return ignoreRule; } public String getRuleName() { return ruleName; } public boolean getTestLiterals() { return testLiterals; } public boolean isLexerAutoGenRule() { return ruleName.equals("nextToken"); } public Lookahead look(int k) { return grammar.theLLkAnalyzer.look(k, this); } public void prepareForAnalysis() { super.prepareForAnalysis(); lock = new boolean[grammar.maxk + 1]; } // rule option values public void setDefaultErrorHandler(boolean value) { defaultErrorHandler = value; } public void setEndElement(RuleEndElement re) { endNode = re; } public void setOption(Token key, Token value) { if (key.getText().equals("defaultErrorHandler")) { if (value.getText().equals("true")) { defaultErrorHandler = true; } else if (value.getText().equals("false")) { defaultErrorHandler = false; } else { grammar.antlrTool.error("Value for defaultErrorHandler must be true or false", grammar.getFilename(), key.getLine(), key.getColumn()); } } else if (key.getText().equals("testLiterals")) { if (!(grammar instanceof LexerGrammar)) { grammar.antlrTool.error("testLiterals option only valid for lexer rules", grammar.getFilename(), key.getLine(), key.getColumn()); } else { if (value.getText().equals("true")) { testLiterals = true; } else if (value.getText().equals("false")) { testLiterals = false; } else { grammar.antlrTool.error("Value for testLiterals must be true or false", grammar.getFilename(), key.getLine(), key.getColumn()); } } } else if (key.getText().equals("ignore")) { if (!(grammar instanceof LexerGrammar)) { grammar.antlrTool.error("ignore option only valid for lexer rules", grammar.getFilename(), key.getLine(), key.getColumn()); } else { ignoreRule = value.getText(); } } else if (key.getText().equals("paraphrase")) { if (!(grammar instanceof 
LexerGrammar)) { grammar.antlrTool.error("paraphrase option only valid for lexer rules", grammar.getFilename(), key.getLine(), key.getColumn()); } else { // find token def associated with this rule TokenSymbol ts = grammar.tokenManager.getTokenSymbol(ruleName); if (ts == null) { grammar.antlrTool.panic("cannot find token associated with rule " + ruleName); } ts.setParaphrase(value.getText()); } } else if (key.getText().equals("generateAmbigWarnings")) { if (value.getText().equals("true")) { generateAmbigWarnings = true; } else if (value.getText().equals("false")) { generateAmbigWarnings = false; } else { grammar.antlrTool.error("Value for generateAmbigWarnings must be true or false", grammar.getFilename(), key.getLine(), key.getColumn()); } } else { grammar.antlrTool.error("Invalid rule option: " + key.getText(), grammar.getFilename(), key.getLine(), key.getColumn()); } } public String toString() { String s = " FOLLOW={"; Lookahead cache[] = endNode.cache; int k = grammar.maxk; boolean allNull = true; for (int j = 1; j <= k; j++) { if (cache[j] == null) continue; s += cache[j].toString(",", grammar.tokenManager.getVocabulary()); allNull = false; if (j < k && cache[j + 1] != null) s += ";"; } s += "}"; if (allNull) s = ""; return ruleName + ": " + super.toString() + " ;" + s; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/RuleEndElement.java000066400000000000000000000015461161462365500246370ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/RuleEndElement.java#1 $ */ /**Contains a list of all places that reference * this enclosing rule. Useful for FOLLOW computations. */ class RuleEndElement extends BlockEndElement { protected Lookahead[] cache; // Each rule can cache it's lookahead computation. // The FOLLOW(rule) is stored in this cache. 
// 1..k protected boolean noFOLLOW; public RuleEndElement(Grammar g) { super(g); cache = new Lookahead[g.maxk + 1]; } public Lookahead look(int k) { return grammar.theLLkAnalyzer.look(k, this); } public String toString() { //return " [RuleEnd]"; return ""; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/RuleRefElement.java000066400000000000000000000035751161462365500246510ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/RuleRefElement.java#1 $ */ class RuleRefElement extends AlternativeElement { protected String targetRule; // which rule is being called? protected String args = null; // were any args passed to rule? protected String idAssign = null; // is the return type assigned to a variable? protected String label; public RuleRefElement(Grammar g, Token t, int autoGenType_) { super(g, t, autoGenType_); targetRule = t.getText(); // if ( Character.isUpperCase(targetRule.charAt(0)) ) { // lexer rule? if (t.type == ANTLRTokenTypes.TOKEN_REF) { // lexer rule? targetRule = CodeGenerator.encodeLexerRuleName(targetRule); } } // public RuleRefElement(Grammar g, String t, int line, int autoGenType_) { // super(g, autoGenType_); // targetRule = t; // if ( Character.isUpperCase(targetRule.charAt(0)) ) { // lexer rule? 
// targetRule = CodeGenerator.lexerRuleName(targetRule); // } // this.line = line; // } public void generate() { grammar.generator.gen(this); } public String getArgs() { return args; } public String getIdAssign() { return idAssign; } public String getLabel() { return label; } public Lookahead look(int k) { return grammar.theLLkAnalyzer.look(k, this); } public void setArgs(String a) { args = a; } public void setIdAssign(String id) { idAssign = id; } public void setLabel(String label_) { label = label_; } public String toString() { if (args != null) return " " + targetRule + args; else return " " + targetRule; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/RuleSymbol.java000066400000000000000000000024501161462365500240570ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/RuleSymbol.java#1 $ */ import antlr.collections.impl.Vector; class RuleSymbol extends GrammarSymbol { RuleBlock block; // list of alternatives boolean defined; // has the rule been defined yet? Vector references; // list of all nodes referencing this rule // not strictly needed by generic symbol table // but we will almost always analyze/gen code String access; // access specifier for this rule String comment; // A javadoc comment if any. 
public RuleSymbol(String r) { super(r); references = new Vector(); } public void addReference(RuleRefElement e) { references.appendElement(e); } public RuleBlock getBlock() { return block; } public RuleRefElement getReference(int i) { return (RuleRefElement)references.elementAt(i); } public boolean isDefined() { return defined; } public int numReferences() { return references.size(); } public void setBlock(RuleBlock rb) { block = rb; } public void setDefined() { defined = true; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/SemanticException.java000066400000000000000000000013031161462365500254000ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/SemanticException.java#1 $ */ public class SemanticException extends RecognitionException { public SemanticException(String s) { super(s); } /** @deprecated As of ANTLR 2.7.2 use {@see #SemanticException(char, String, int, int) } */ public SemanticException(String s, String fileName, int line) { this(s, fileName, line, -1); } public SemanticException(String s, String fileName, int line, int column) { super(s, fileName, line, column); } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/SimpleTokenManager.java000066400000000000000000000100531161462365500255050ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/SimpleTokenManager.java#1 $ */ import java.io.*; import java.util.Hashtable; import java.util.Enumeration; import antlr.collections.impl.Vector; class SimpleTokenManager implements TokenManager, Cloneable { protected int maxToken = Token.MIN_USER_TYPE; // Token vocabulary is Vector of String's protected Vector vocabulary; // Hash 
table is a mapping from Strings to TokenSymbol private Hashtable table; // the ANTLR tool protected Tool antlrTool; // Name of the token manager protected String name; protected boolean readOnly = false; SimpleTokenManager(String name_, Tool tool_) { antlrTool = tool_; name = name_; // Don't make a bigger vector than we need, because it will show up in output sets. vocabulary = new Vector(1); table = new Hashtable(); // define EOF symbol TokenSymbol ts = new TokenSymbol("EOF"); ts.setTokenType(Token.EOF_TYPE); define(ts); // define but only in the vocabulary vector vocabulary.ensureCapacity(Token.NULL_TREE_LOOKAHEAD); vocabulary.setElementAt("NULL_TREE_LOOKAHEAD", Token.NULL_TREE_LOOKAHEAD); } public Object clone() { SimpleTokenManager tm; try { tm = (SimpleTokenManager)super.clone(); tm.vocabulary = (Vector)this.vocabulary.clone(); tm.table = (Hashtable)this.table.clone(); tm.maxToken = this.maxToken; tm.antlrTool = this.antlrTool; tm.name = this.name; } catch (CloneNotSupportedException e) { antlrTool.panic("cannot clone token manager"); return null; } return tm; } /** define a token */ public void define(TokenSymbol ts) { // Add the symbol to the vocabulary vector vocabulary.ensureCapacity(ts.getTokenType()); vocabulary.setElementAt(ts.getId(), ts.getTokenType()); // add the symbol to the hash table mapToTokenSymbol(ts.getId(), ts); } /** Simple token manager doesn't have a name -- must be set externally */ public String getName() { return name; } /** Get a token symbol by index */ public String getTokenStringAt(int idx) { return (String)vocabulary.elementAt(idx); } /** Get the TokenSymbol for a string */ public TokenSymbol getTokenSymbol(String sym) { return (TokenSymbol)table.get(sym); } /** Get a token symbol by index */ public TokenSymbol getTokenSymbolAt(int idx) { return getTokenSymbol(getTokenStringAt(idx)); } /** Get an enumerator over the symbol table */ public Enumeration getTokenSymbolElements() { return table.elements(); } public Enumeration 
getTokenSymbolKeys() { return table.keys(); } /** Get the token vocabulary (read-only). * @return A Vector of TokenSymbol */ public Vector getVocabulary() { return vocabulary; } /** Simple token manager is not read-only */ public boolean isReadOnly() { return false; } /** Map a label or string to an existing token symbol */ public void mapToTokenSymbol(String name, TokenSymbol sym) { // System.out.println("mapToTokenSymbol("+name+","+sym+")"); table.put(name, sym); } /** Get the highest token type in use */ public int maxTokenType() { return maxToken - 1; } /** Get the next unused token type */ public int nextTokenType() { return maxToken++; } /** Set the name of the token manager */ public void setName(String name_) { name = name_; } public void setReadOnly(boolean ro) { readOnly = ro; } /** Is a token symbol defined? */ public boolean tokenDefined(String symbol) { return table.containsKey(symbol); } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/StringLiteralElement.java000066400000000000000000000041761161462365500260660ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/StringLiteralElement.java#1 $ */ class StringLiteralElement extends GrammarAtom { // atomText with quotes stripped and escape codes processed protected String processedAtomText; public StringLiteralElement(Grammar g, Token t, int autoGenType) { super(g, t, autoGenType); if (!(g instanceof LexerGrammar)) { // lexer does not have token types for string literals TokenSymbol ts = grammar.tokenManager.getTokenSymbol(atomText); if (ts == null) { g.antlrTool.error("Undefined literal: " + atomText, grammar.getFilename(), t.getLine(), t.getColumn()); } else { tokenType = ts.getTokenType(); } } line = t.getLine(); // process the string literal text by removing quotes and escaping chars // If a lexical grammar, 
add the characters to the char vocabulary processedAtomText = new String(); for (int i = 1; i < atomText.length() - 1; i++) { char c = atomText.charAt(i); if (c == '\\') { if (i + 1 < atomText.length() - 1) { i++; c = atomText.charAt(i); switch (c) { case 'n': c = '\n'; break; case 'r': c = '\r'; break; case 't': c = '\t'; break; } } } if (g instanceof LexerGrammar) { ((LexerGrammar)g).charVocabulary.add(c); } processedAtomText += c; } } public void generate() { grammar.generator.gen(this); } public Lookahead look(int k) { return grammar.theLLkAnalyzer.look(k, this); } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/StringLiteralSymbol.java000066400000000000000000000010701161462365500257300ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/StringLiteralSymbol.java#1 $ */ class StringLiteralSymbol extends TokenSymbol { protected String label; // was this string literal labeled? 
public StringLiteralSymbol(String r) { super(r); } public String getLabel() { return label; } public void setLabel(String label) { this.label = label; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/StringUtils.java000077500000000000000000000053731161462365500242630ustar00rootroot00000000000000package antlr; public class StringUtils { /** General-purpose utility function for removing * characters from back of string * @param s The string to process * @param c The character to remove * @return The resulting string */ static public String stripBack(String s, char c) { while (s.length() > 0 && s.charAt(s.length() - 1) == c) { s = s.substring(0, s.length() - 1); } return s; } /** General-purpose utility function for removing * characters from back of string * @param s The string to process * @param remove A string containing the set of characters to remove * @return The resulting string */ static public String stripBack(String s, String remove) { boolean changed; do { changed = false; for (int i = 0; i < remove.length(); i++) { char c = remove.charAt(i); while (s.length() > 0 && s.charAt(s.length() - 1) == c) { changed = true; s = s.substring(0, s.length() - 1); } } } while (changed); return s; } /** General-purpose utility function for removing * characters from front of string * @param s The string to process * @param c The character to remove * @return The resulting string */ static public String stripFront(String s, char c) { while (s.length() > 0 && s.charAt(0) == c) { s = s.substring(1); } return s; } /** General-purpose utility function for removing * characters from front of string * @param s The string to process * @param remove A string containing the set of characters to remove * @return The resulting string */ static public String stripFront(String s, String remove) { boolean changed; do { changed = false; for (int i = 0; i < remove.length(); i++) { char c = remove.charAt(i); while (s.length() > 0 && s.charAt(0) == c) { changed = true; s = 
s.substring(1); } } } while (changed); return s; } /** General-purpose utility function for removing * characters from the front and back of string * @param s The string to process * @param head exact string to strip from head * @param tail exact string to strip from tail * @return The resulting string */ public static String stripFrontBack(String src, String head, String tail) { int h = src.indexOf(head); int t = src.lastIndexOf(tail); if (h == -1 || t == -1) return src; return src.substring(h + 1, t); } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/SynPredBlock.java000066400000000000000000000012571161462365500243250ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/SynPredBlock.java#1 $ */ class SynPredBlock extends AlternativeBlock { public SynPredBlock(Grammar g) { super(g); } public SynPredBlock(Grammar g, Token start) { super(g, start, false); } public void generate() { grammar.generator.gen(this); } public Lookahead look(int k) { return grammar.theLLkAnalyzer.look(k, this); } public String toString() { return super.toString() + "=>"; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/Token.java000066400000000000000000000030631161462365500230430ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/Token.java#1 $ */ /** A token is minimally a token type. Subclasses can add the text matched * for the token and line info. 
*/ public class Token implements Cloneable { // constants public static final int MIN_USER_TYPE = 4; public static final int NULL_TREE_LOOKAHEAD = 3; public static final int INVALID_TYPE = 0; public static final int EOF_TYPE = 1; public static final int SKIP = -1; // each Token has at least a token type protected int type = INVALID_TYPE; // the illegal token object public static Token badToken = new Token(INVALID_TYPE, ""); public Token() { } public Token(int t) { type = t; } public Token(int t, String txt) { type = t; setText(txt); } public int getColumn() { return 0; } public int getLine() { return 0; } public String getFilename() { return null; } public void setFilename(String name) { } public String getText() { return ""; } public void setText(String t) { } public void setColumn(int c) { } public void setLine(int l) { } public int getType() { return type; } public void setType(int t) { type = t; } public String toString() { return "[\"" + getText() + "\",<" + getType() + ">]"; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/TokenBuffer.java000066400000000000000000000072201161462365500241740ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/TokenBuffer.java#1 $ */ /**A Stream of Token objects fed to the parser from a Tokenizer that can * be rewound via mark()/rewind() methods. *

* A dynamic array is used to buffer up all the input tokens. Normally, * "k" tokens are stored in the buffer. More tokens may be stored during * guess mode (testing syntactic predicate), or when LT(i>k) is referenced. * Consumption of tokens is deferred. In other words, reading the next * token is not done by conume(), but deferred until needed by LA or LT. *

* * @see antlr.Token * @see antlr.Tokenizer * @see antlr.TokenQueue */ import java.io.IOException; public class TokenBuffer { // Token source protected TokenStream input; // Number of active markers int nMarkers = 0; // Additional offset used when markers are active int markerOffset = 0; // Number of calls to consume() since last LA() or LT() call int numToConsume = 0; // Circular queue TokenQueue queue; /** Create a token buffer */ public TokenBuffer(TokenStream input_) { input = input_; queue = new TokenQueue(1); } /** Reset the input buffer to empty state */ public final void reset() { nMarkers = 0; markerOffset = 0; numToConsume = 0; queue.reset(); } /** Mark another token for deferred consumption */ public final void consume() { numToConsume++; } /** Ensure that the token buffer is sufficiently full */ private final void fill(int amount) throws TokenStreamException { syncConsume(); // Fill the buffer sufficiently to hold needed tokens while (queue.nbrEntries < amount + markerOffset) { // Append the next token queue.append(input.nextToken()); } } /** return the Tokenizer (needed by ParseView) */ public TokenStream getInput() { return input; } /** Get a lookahead token value */ public final int LA(int i) throws TokenStreamException { fill(i); return queue.elementAt(markerOffset + i - 1).getType(); } /** Get a lookahead token */ public final Token LT(int i) throws TokenStreamException { fill(i); return queue.elementAt(markerOffset + i - 1); } /**Return an integer marker that can be used to rewind the buffer to * its current state. */ public final int mark() { syncConsume(); //System.out.println("Marking at " + markerOffset); //try { for (int i = 1; i <= 2; i++) { System.out.println("LA("+i+")=="+LT(i).getText()); } } catch (ScannerException e) {} nMarkers++; return markerOffset; } /**Rewind the token buffer to a marker. 
* @param mark Marker returned previously from mark() */ public final void rewind(int mark) { syncConsume(); markerOffset = mark; nMarkers--; //System.out.println("Rewinding to " + mark); //try { for (int i = 1; i <= 2; i++) { System.out.println("LA("+i+")=="+LT(i).getText()); } } catch (ScannerException e) {} } /** Sync up deferred consumption */ private final void syncConsume() { while (numToConsume > 0) { if (nMarkers > 0) { // guess mode -- leave leading tokens and bump offset. markerOffset++; } else { // normal mode -- remove first token queue.removeFirst(); } numToConsume--; } } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/TokenManager.java000066400000000000000000000030561161462365500243400ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/TokenManager.java#1 $ */ import java.util.Hashtable; import java.util.Enumeration; import antlr.collections.impl.Vector; /** Interface that describes the set of defined tokens */ interface TokenManager { public Object clone(); /** define a token symbol */ public void define(TokenSymbol ts); /** Get the name of the token manager */ public String getName(); /** Get a token string by index */ public String getTokenStringAt(int idx); /** Get the TokenSymbol for a string */ public TokenSymbol getTokenSymbol(String sym); public TokenSymbol getTokenSymbolAt(int idx); /** Get an enumerator over the symbol table */ public Enumeration getTokenSymbolElements(); public Enumeration getTokenSymbolKeys(); /** Get the token vocabulary (read-only). * @return A Vector of Strings indexed by token type */ public Vector getVocabulary(); /** Is this token manager read-only? 
*/ public boolean isReadOnly(); public void mapToTokenSymbol(String name, TokenSymbol sym); /** Get the highest token type in use */ public int maxTokenType(); /** Get the next unused token type */ public int nextTokenType(); public void setName(String n); public void setReadOnly(boolean ro); /** Is a token symbol defined? */ public boolean tokenDefined(String symbol); } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/TokenQueue.java000066400000000000000000000054001161462365500240450ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/TokenQueue.java#1 $ */ /** A private circular buffer object used by the token buffer */ class TokenQueue { /** Physical circular buffer of tokens */ private Token[] buffer; /** buffer.length-1 for quick modulos */ private int sizeLessOne; /** physical index of front token */ private int offset; /** number of tokens in the queue */ protected int nbrEntries; public TokenQueue(int minSize) { // Find first power of 2 >= to requested size int size; if ( minSize<0 ) { init(16); // pick some value for them return; } // check for overflow if ( minSize>=(Integer.MAX_VALUE/2) ) { init(Integer.MAX_VALUE); // wow that's big. 
return; } for (size = 2; size < minSize; size *= 2) { ; } init(size); } /** Add token to end of the queue * @param tok The token to add */ public final void append(Token tok) { if (nbrEntries == buffer.length) { expand(); } buffer[(offset + nbrEntries) & sizeLessOne] = tok; nbrEntries++; } /** Fetch a token from the queue by index * @param idx The index of the token to fetch, where zero is the token at the front of the queue */ public final Token elementAt(int idx) { return buffer[(offset + idx) & sizeLessOne]; } /** Expand the token buffer by doubling its capacity */ private final void expand() { Token[] newBuffer = new Token[buffer.length * 2]; // Copy the contents to the new buffer // Note that this will store the first logical item in the // first physical array element. for (int i = 0; i < buffer.length; i++) { newBuffer[i] = elementAt(i); } // Re-initialize with new contents, keep old nbrEntries buffer = newBuffer; sizeLessOne = buffer.length - 1; offset = 0; } /** Initialize the queue. * @param size The initial size of the queue */ private final void init(int size) { // Allocate buffer buffer = new Token[size]; // Other initialization sizeLessOne = size - 1; offset = 0; nbrEntries = 0; } /** Clear the queue. Leaving the previous buffer alone. 
*/ public final void reset() { offset = 0; nbrEntries = 0; } /** Remove token from front of queue */ public final void removeFirst() { offset = (offset + 1) & sizeLessOne; nbrEntries--; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/TokenRangeElement.java000066400000000000000000000025351161462365500253350ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/TokenRangeElement.java#1 $ */ class TokenRangeElement extends AlternativeElement { String label; protected int begin = Token.INVALID_TYPE; protected int end = Token.INVALID_TYPE; protected String beginText; protected String endText; public TokenRangeElement(Grammar g, Token t1, Token t2, int autoGenType) { super(g, t1, autoGenType); begin = grammar.tokenManager.getTokenSymbol(t1.getText()).getTokenType(); beginText = t1.getText(); end = grammar.tokenManager.getTokenSymbol(t2.getText()).getTokenType(); endText = t2.getText(); line = t1.getLine(); } public void generate() { grammar.generator.gen(this); } public String getLabel() { return label; } public Lookahead look(int k) { return grammar.theLLkAnalyzer.look(k, this); } public void setLabel(String label_) { label = label_; } public String toString() { if (label != null) { return " " + label + ":" + beginText + ".." + endText; } else { return " " + beginText + ".." 
+ endText; } } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/TokenRefElement.java000066400000000000000000000023761161462365500250200ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/TokenRefElement.java#1 $ */ class TokenRefElement extends GrammarAtom { public TokenRefElement(Grammar g, Token t, boolean inverted, int autoGenType) { super(g, t, autoGenType); not = inverted; TokenSymbol ts = grammar.tokenManager.getTokenSymbol(atomText); if (ts == null) { g.antlrTool.error("Undefined token symbol: " + atomText, grammar.getFilename(), t.getLine(), t.getColumn()); } else { tokenType = ts.getTokenType(); // set the AST node type to whatever was set in tokens {...} // section (if anything); // Lafter, after this is created, the element option can set this. setASTNodeType(ts.getASTNodeType()); } line = t.getLine(); } public void generate() { grammar.generator.gen(this); } public Lookahead look(int k) { return grammar.theLLkAnalyzer.look(k, this); } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/TokenStream.java000066400000000000000000000005141161462365500242150ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/TokenStream.java#1 $ */ public interface TokenStream { public Token nextToken() throws TokenStreamException; } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/TokenStreamBasicFilter.java000066400000000000000000000022211161462365500263220ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: 
//depot/code/org.antlr/release/antlr-2.7.5/antlr/TokenStreamBasicFilter.java#1 $ */ import antlr.collections.impl.BitSet; /** This object is a TokenStream that passes through all * tokens except for those that you tell it to discard. * There is no buffering of the tokens. */ public class TokenStreamBasicFilter implements TokenStream { /** The set of token types to discard */ protected BitSet discardMask; /** The input stream */ protected TokenStream input; public TokenStreamBasicFilter(TokenStream input) { this.input = input; discardMask = new BitSet(); } public void discard(int ttype) { discardMask.add(ttype); } public void discard(BitSet mask) { discardMask = mask; } public Token nextToken() throws TokenStreamException { Token tok = input.nextToken(); while (tok != null && discardMask.member(tok.getType())) { tok = input.nextToken(); } return tok; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/TokenStreamException.java000066400000000000000000000007571161462365500261050ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/TokenStreamException.java#1 $ */ /** * Anything that goes wrong while generating a stream of tokens. 
*/ public class TokenStreamException extends ANTLRException { public TokenStreamException() { } public TokenStreamException(String s) { super(s); } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/TokenStreamHiddenTokenFilter.java000066400000000000000000000116551161462365500275100ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/TokenStreamHiddenTokenFilter.java#1 $ */ import antlr.collections.impl.BitSet; /**This object filters a token stream coming from a lexer * or another TokenStream so that only certain token channels * get transmitted to the parser. * * Any of the channels can be filtered off as "hidden" channels whose * tokens can be accessed from the parser. */ public class TokenStreamHiddenTokenFilter extends TokenStreamBasicFilter implements TokenStream { // protected BitSet discardMask; protected BitSet hideMask; protected CommonHiddenStreamToken nextMonitoredToken; /** track tail of hidden list emanating from previous * monitored token */ protected CommonHiddenStreamToken lastHiddenToken; protected CommonHiddenStreamToken firstHidden = null; public TokenStreamHiddenTokenFilter(TokenStream input) { super(input); hideMask = new BitSet(); } protected void consume() throws TokenStreamException { nextMonitoredToken = (CommonHiddenStreamToken)input.nextToken(); } private void consumeFirst() throws TokenStreamException { consume(); // get first token of input stream // Handle situation where hidden or discarded tokens // appear first in input stream CommonHiddenStreamToken p = null; // while hidden or discarded scarf tokens while (hideMask.member(LA(1).getType()) || discardMask.member(LA(1).getType())) { if (hideMask.member(LA(1).getType())) { if (p == null) { p = LA(1); } else { p.setHiddenAfter(LA(1)); LA(1).setHiddenBefore(p); // double-link p = LA(1); } 
lastHiddenToken = p; if (firstHidden == null) { firstHidden = p; // record hidden token if first } } consume(); } } public BitSet getDiscardMask() { return discardMask; } /** Return a ptr to the hidden token appearing immediately after * token t in the input stream. */ public CommonHiddenStreamToken getHiddenAfter(CommonHiddenStreamToken t) { return t.getHiddenAfter(); } /** Return a ptr to the hidden token appearing immediately before * token t in the input stream. */ public CommonHiddenStreamToken getHiddenBefore(CommonHiddenStreamToken t) { return t.getHiddenBefore(); } public BitSet getHideMask() { return hideMask; } /** Return the first hidden token if one appears * before any monitored token. */ public CommonHiddenStreamToken getInitialHiddenToken() { return firstHidden; } public void hide(int m) { hideMask.add(m); } public void hide(BitSet mask) { hideMask = mask; } protected CommonHiddenStreamToken LA(int i) { return nextMonitoredToken; } /** Return the next monitored token. * Test the token following the monitored token. * If following is another monitored token, save it * for the next invocation of nextToken (like a single * lookahead token) and return it then. * If following is unmonitored, nondiscarded (hidden) * channel token, add it to the monitored token. * * Note: EOF must be a monitored Token. */ public Token nextToken() throws TokenStreamException { // handle an initial condition; don't want to get lookahead // token of this splitter until first call to nextToken if (LA(1) == null) { consumeFirst(); } // we always consume hidden tokens after monitored, thus, // upon entry LA(1) is a monitored token. CommonHiddenStreamToken monitored = LA(1); // point to hidden tokens found during last invocation monitored.setHiddenBefore(lastHiddenToken); lastHiddenToken = null; // Look for hidden tokens, hook them into list emanating // from the monitored tokens. 
consume(); CommonHiddenStreamToken p = monitored; // while hidden or discarded scarf tokens while (hideMask.member(LA(1).getType()) || discardMask.member(LA(1).getType())) { if (hideMask.member(LA(1).getType())) { // attach the hidden token to the monitored in a chain // link forwards p.setHiddenAfter(LA(1)); // link backwards if (p != monitored) { //hidden cannot point to monitored tokens LA(1).setHiddenBefore(p); } p = lastHiddenToken = LA(1); } consume(); } return monitored; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/TokenStreamIOException.java000066400000000000000000000012101161462365500263160ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/TokenStreamIOException.java#1 $ */ import java.io.IOException; /** * Wraps an IOException in a TokenStreamException */ public class TokenStreamIOException extends TokenStreamException { public IOException io; /** * TokenStreamIOException constructor comment. * @param s java.lang.String */ public TokenStreamIOException(IOException io) { super(io.getMessage()); this.io = io; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/TokenStreamRecognitionException.java000066400000000000000000000012461161462365500303000ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/TokenStreamRecognitionException.java#1 $ */ /** * Wraps a RecognitionException in a TokenStreamException so you * can pass it along. 
*/ public class TokenStreamRecognitionException extends TokenStreamException { public RecognitionException recog; public TokenStreamRecognitionException(RecognitionException re) { super(re.getMessage()); this.recog = re; } public String toString() { return recog.toString(); } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/TokenStreamRetryException.java000066400000000000000000000010351161462365500271210ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/TokenStreamRetryException.java#1 $ */ /** * Aborted recognition of current token. Try to get one again. * Used by TokenStreamSelector.retry() to force nextToken() * of stream to re-enter and retry. */ public class TokenStreamRetryException extends TokenStreamException { public TokenStreamRetryException() { } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/TokenStreamRewriteEngine.java000066400000000000000000000307001161462365500267050ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html */ import antlr.collections.impl.BitSet; import java.util.*; /** This token stream tracks the *entire* token stream coming from * a lexer, but does not pass on the whitespace (or whatever else * you want to discard) to the parser. * * This class can then be asked for the ith token in the input stream. * Useful for dumping out the input stream exactly after doing some * augmentation or other manipulations. Tokens are index from 0..n-1 * * You can insert stuff, replace, and delete chunks. Note that the * operations are done lazily--only if you convert the buffer to a * String. This is very efficient because you are not moving data around * all the time. 
As the buffer of tokens is converted to strings, the * toString() method(s) check to see if there is an operation at the * current index. If so, the operation is done and then normal String * rendering continues on the buffer. This is like having multiple Turing * machine instruction streams (programs) operating on a single input tape. :) * * Since the operations are done lazily at toString-time, operations do not * screw up the token index values. That is, an insert operation at token * index i does not change the index values for tokens i+1..n-1. * * Because operations never actually alter the buffer, you may always get * the original token stream back without undoing anything. Since * the instructions are queued up, you can easily simulate transactions and * roll back any changes if there is an error just by removing instructions. * For example, * * TokenStreamRewriteEngine rewriteEngine = * new TokenStreamRewriteEngine(lexer); * JavaRecognizer parser = new JavaRecognizer(rewriteEngine); * ... * rewriteEngine.insertAfter("pass1", t, "foobar");} * rewriteEngine.insertAfter("pass2", u, "start");} * System.out.println(rewriteEngine.toString("pass1")); * System.out.println(rewriteEngine.toString("pass2")); * * You can also have multiple "instruction streams" and get multiple * rewrites from a single pass over the input. Just name the instruction * streams and use that name again when printing the buffer. This could be * useful for generating a C file and also its header file--all from the * same buffer. * * If you don't use named rewrite streams, a "default" stream is used. 
* * Terence Parr, parrt@cs.usfca.edu * University of San Francisco * February 2004 */ public class TokenStreamRewriteEngine implements TokenStream { public static final int MIN_TOKEN_INDEX = 0; static class RewriteOperation { protected int index; protected String text; protected RewriteOperation(int index, String text) { this.index = index; this.text = text; } /** Execute the rewrite operation by possibly adding to the buffer. * Return the index of the next token to operate on. */ public int execute(StringBuffer buf) { return index; } } static class InsertBeforeOp extends RewriteOperation { public InsertBeforeOp(int index, String text) { super(index,text); } public int execute(StringBuffer buf) { buf.append(text); return index; } } static class ReplaceOp extends RewriteOperation { protected int lastIndex; public ReplaceOp(int from, int to, String text) { super(from,text); lastIndex = to; } public int execute(StringBuffer buf) { if ( text!=null ) { buf.append(text); } return lastIndex+1; } } static class DeleteOp extends ReplaceOp { public DeleteOp(int from, int to) { super(from, to, null); } } public static final String DEFAULT_PROGRAM_NAME = "default"; public static final int PROGRAM_INIT_SIZE = 100; /** Track the incoming list of tokens */ protected List tokens; /** You may have multiple, named streams of rewrite operations. * I'm calling these things "programs." * Maps String (name) -> rewrite (List) */ protected Map programs = null; /** Map String (program name) -> Integer index */ protected Map lastRewriteTokenIndexes = null; /** track index of tokens */ protected int index = MIN_TOKEN_INDEX; /** Who do we suck tokens from? 
*/ protected TokenStream stream; /** Which (whitespace) token(s) to throw out */ protected BitSet discardMask = new BitSet(); public TokenStreamRewriteEngine(TokenStream upstream) { this(upstream,1000); } public TokenStreamRewriteEngine(TokenStream upstream, int initialSize) { stream = upstream; tokens = new ArrayList(initialSize); programs = new HashMap(); programs.put(DEFAULT_PROGRAM_NAME, new ArrayList(PROGRAM_INIT_SIZE)); lastRewriteTokenIndexes = new HashMap(); } public Token nextToken() throws TokenStreamException { TokenWithIndex t; // suck tokens until end of stream or we find a non-discarded token do { t = (TokenWithIndex)stream.nextToken(); if ( t!=null ) { t.setIndex(index); // what is t's index in list? if ( t.getType()!=Token.EOF_TYPE ) { tokens.add(t); // track all tokens except EOF } index++; // move to next position } } while ( t!=null && discardMask.member(t.getType()) ); return t; } public void rollback(int instructionIndex) { rollback(DEFAULT_PROGRAM_NAME, instructionIndex); } /** Rollback the instruction stream for a program so that * the indicated instruction (via instructionIndex) is no * longer in the stream. UNTESTED! */ public void rollback(String programName, int instructionIndex) { List is = (List)programs.get(programName); if ( is!=null ) { programs.put(programName, is.subList(MIN_TOKEN_INDEX,instructionIndex)); } } public void deleteProgram() { deleteProgram(DEFAULT_PROGRAM_NAME); } /** Reset the program so that no instructions exist */ public void deleteProgram(String programName) { rollback(programName, MIN_TOKEN_INDEX); } /** If op.index > lastRewriteTokenIndexes, just add to the end. 
* Otherwise, do linear */ protected void addToSortedRewriteList(RewriteOperation op) { addToSortedRewriteList(DEFAULT_PROGRAM_NAME, op); } protected void addToSortedRewriteList(String programName, RewriteOperation op) { List rewrites = getProgram(programName); // if at or beyond last op's index, just append if ( op.index>=getLastRewriteTokenIndex(programName) ) { rewrites.add(op); // append to list of operations // record the index of this operation for next time through setLastRewriteTokenIndex(programName, op.index); return; } // not after the last one, so must insert to ordered list Comparator comparator = new Comparator() { public int compare(Object o, Object o1) { RewriteOperation a = (RewriteOperation)o; RewriteOperation b = (RewriteOperation)o1; if ( a.indexb.index ) return 1; return 0; } }; int pos = Collections.binarySearch(rewrites, op, comparator); if ( pos<0 ) { rewrites.add(-pos-1, op); } } public void insertAfter(Token t, String text) { insertAfter(DEFAULT_PROGRAM_NAME, t, text); } public void insertAfter(int index, String text) { insertAfter(DEFAULT_PROGRAM_NAME, index, text); } public void insertAfter(String programName, Token t, String text) { insertAfter(programName,((TokenWithIndex)t).getIndex(), text); } public void insertAfter(String programName, int index, String text) { // to insert after, just insert before next index (even if past end) insertBefore(programName,index+1, text); } public void insertBefore(Token t, String text) { insertBefore(DEFAULT_PROGRAM_NAME, t, text); } public void insertBefore(int index, String text) { insertBefore(DEFAULT_PROGRAM_NAME, index, text); } public void insertBefore(String programName, Token t, String text) { insertBefore(programName, ((TokenWithIndex)t).getIndex(), text); } public void insertBefore(String programName, int index, String text) { addToSortedRewriteList(programName, new InsertBeforeOp(index,text)); } public void replace(int index, String text) { replace(DEFAULT_PROGRAM_NAME, index, index, text); 
} public void replace(int from, int to, String text) { replace(DEFAULT_PROGRAM_NAME, from, to, text); } public void replace(Token indexT, String text) { replace(DEFAULT_PROGRAM_NAME, indexT, indexT, text); } public void replace(Token from, Token to, String text) { replace(DEFAULT_PROGRAM_NAME, from, to, text); } public void replace(String programName, int from, int to, String text) { addToSortedRewriteList(new ReplaceOp(from, to, text)); } public void replace(String programName, Token from, Token to, String text) { replace(programName, ((TokenWithIndex)from).getIndex(), ((TokenWithIndex)to).getIndex(), text); } public void delete(int index) { delete(DEFAULT_PROGRAM_NAME, index, index); } public void delete(int from, int to) { delete(DEFAULT_PROGRAM_NAME, from, to); } public void delete(Token indexT) { delete(DEFAULT_PROGRAM_NAME, indexT, indexT); } public void delete(Token from, Token to) { delete(DEFAULT_PROGRAM_NAME, from, to); } public void delete(String programName, int from, int to) { replace(programName,from,to,null); } public void delete(String programName, Token from, Token to) { replace(programName,from,to,null); } public void discard(int ttype) { discardMask.add(ttype); } public TokenWithIndex getToken(int i) { return (TokenWithIndex)tokens.get(i); } public int getTokenStreamSize() { return tokens.size(); } public String toOriginalString() { return toOriginalString(MIN_TOKEN_INDEX, getTokenStreamSize()-1); } public String toOriginalString(int start, int end) { StringBuffer buf = new StringBuffer(); for (int i=start; i>=MIN_TOKEN_INDEX && i<=end && i=MIN_TOKEN_INDEX && tokenCursor<=end && tokenCursor=MIN_TOKEN_INDEX && i<=end && i,line=" + line + ",col=" + col + "]\n"; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/Tool.java000066400000000000000000000546731161462365500227150ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: 
http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/Tool.java#1 $ */ import java.io.*; import antlr.collections.impl.BitSet; import antlr.collections.impl.Vector; import antlr.PreservingFileWriter; import antlr.Version; public class Tool { public static String version = ""; /** Object that handles analysis errors */ ToolErrorHandler errorHandler; /** Was there an error during parsing or analysis? */ protected boolean hasError = false; /** Generate diagnostics? (vs code) */ boolean genDiagnostics = false; /** Generate DocBook vs code? */ boolean genDocBook = false; /** Generate HTML vs code? */ boolean genHTML = false; /** Current output directory for generated files */ protected static String outputDir = "."; // Grammar input protected String grammarFile; transient Reader f = new InputStreamReader(System.in); // SAS: changed for proper text io // transient DataInputStream in = null; protected static String literalsPrefix = "LITERAL_"; protected static boolean upperCaseMangledLiterals = false; /** C++ file level options */ protected NameSpace nameSpace = null; protected String namespaceAntlr = null; protected String namespaceStd = null; protected boolean genHashLines = true; protected boolean noConstructors = false; private BitSet cmdLineArgValid = new BitSet(); /** Construct a new Tool. 
*/ public Tool() { errorHandler = new DefaultToolErrorHandler(this); } public String getGrammarFile() { return grammarFile; } public boolean hasError() { return hasError; } public NameSpace getNameSpace() { return nameSpace; } public String getNamespaceStd() { return namespaceStd; } public String getNamespaceAntlr() { return namespaceAntlr; } public boolean getGenHashLines() { return genHashLines; } public String getLiteralsPrefix() { return literalsPrefix; } public boolean getUpperCaseMangledLiterals() { return upperCaseMangledLiterals; } public void setFileLineFormatter(FileLineFormatter formatter) { FileLineFormatter.setFormatter(formatter); } protected void checkForInvalidArguments(String[] args, BitSet cmdLineArgValid) { // check for invalid command line args for (int a = 0; a < args.length; a++) { if (!cmdLineArgValid.member(a)) { warning("invalid command-line argument: " + args[a] + "; ignored"); } } } /** This example is from the book _Java in a Nutshell_ by David * Flanagan. Written by David Flanagan. Copyright (c) 1996 * O'Reilly & Associates. You may study, use, modify, and * distribute this example for any purpose. This example is * provided WITHOUT WARRANTY either expressed or implied. */ public void copyFile(String source_name, String dest_name) throws IOException { File source_file = new File(source_name); File destination_file = new File(dest_name); Reader source = null; Writer destination = null; char[] buffer; int bytes_read; try { // First make sure the specified source file // exists, is a file, and is readable. if (!source_file.exists() || !source_file.isFile()) throw new FileCopyException("FileCopy: no such source file: " + source_name); if (!source_file.canRead()) throw new FileCopyException("FileCopy: source file " + "is unreadable: " + source_name); // If the destination exists, make sure it is a writeable file // and ask before overwriting it. If the destination doesn't // exist, make sure the directory exists and is writeable. 
if (destination_file.exists()) { if (destination_file.isFile()) { DataInputStream in = new DataInputStream(System.in); String response; if (!destination_file.canWrite()) throw new FileCopyException("FileCopy: destination " + "file is unwriteable: " + dest_name); /* System.out.print("File " + dest_name + " already exists. Overwrite? (Y/N): "); System.out.flush(); response = in.readLine(); if (!response.equals("Y") && !response.equals("y")) throw new FileCopyException("FileCopy: copy cancelled."); */ } else { throw new FileCopyException("FileCopy: destination " + "is not a file: " + dest_name); } } else { File parentdir = parent(destination_file); if (!parentdir.exists()) throw new FileCopyException("FileCopy: destination " + "directory doesn't exist: " + dest_name); if (!parentdir.canWrite()) throw new FileCopyException("FileCopy: destination " + "directory is unwriteable: " + dest_name); } // If we've gotten this far, then everything is okay; we can // copy the file. source = new BufferedReader(new FileReader(source_file)); destination = new BufferedWriter(new FileWriter(destination_file)); buffer = new char[1024]; while (true) { bytes_read = source.read(buffer, 0, 1024); if (bytes_read == -1) break; destination.write(buffer, 0, bytes_read); } } // No matter what happens, always close any streams we've opened. finally { if (source != null) { try { source.close(); } catch (IOException e) { ; } } if (destination != null) { try { destination.close(); } catch (IOException e) { ; } } } } /** Perform processing on the grammar file. Can only be called * from main() @param args The command-line arguments passed to * main(). This wrapper does the System.exit for use with command-line. */ public void doEverythingWrapper(String[] args) { int exitCode = doEverything(args); System.exit(exitCode); } /** Process args and have ANTLR do it's stuff without calling System.exit. * Just return the result code. Makes it easy for ANT build tool. 
*/ public int doEverything(String[] args) { // run the preprocessor to handle inheritance first. // Start preprocessor. This strips generates an argument list // without -glib options (inside preTool) antlr.preprocessor.Tool preTool = new antlr.preprocessor.Tool(this, args); boolean preprocess_ok = preTool.preprocess(); String[] modifiedArgs = preTool.preprocessedArgList(); // process arguments for the Tool processArguments(modifiedArgs); if (!preprocess_ok) { return 1; } f = getGrammarReader(); ANTLRLexer lexer = new ANTLRLexer(f); TokenBuffer tokenBuf = new TokenBuffer(lexer); LLkAnalyzer analyzer = new LLkAnalyzer(this); MakeGrammar behavior = new MakeGrammar(this, args, analyzer); try { ANTLRParser p = new ANTLRParser(tokenBuf, behavior, this); p.setFilename(grammarFile); p.grammar(); if (hasError()) { fatalError("Exiting due to errors."); } checkForInvalidArguments(modifiedArgs, cmdLineArgValid); // Create the right code generator according to the "language" option CodeGenerator codeGen; // SAS: created getLanguage() method so subclass can override // (necessary for VAJ interface) String codeGenClassName = "antlr." 
+ getLanguage(behavior) + "CodeGenerator"; try { Class codeGenClass = Class.forName(codeGenClassName); codeGen = (CodeGenerator)codeGenClass.newInstance(); codeGen.setBehavior(behavior); codeGen.setAnalyzer(analyzer); codeGen.setTool(this); codeGen.gen(); } catch (ClassNotFoundException cnfe) { panic("Cannot instantiate code-generator: " + codeGenClassName); } catch (InstantiationException ie) { panic("Cannot instantiate code-generator: " + codeGenClassName); } catch (IllegalArgumentException ie) { panic("Cannot instantiate code-generator: " + codeGenClassName); } catch (IllegalAccessException iae) { panic("code-generator class '" + codeGenClassName + "' is not accessible"); } } catch (RecognitionException pe) { fatalError("Unhandled parser error: " + pe.getMessage()); } catch (TokenStreamException io) { fatalError("TokenStreamException: " + io.getMessage()); } return 0; } /** Issue an error * @param s The message */ public void error(String s) { hasError = true; System.err.println("error: " + s); } /** Issue an error with line number information * @param s The message * @param file The file that has the error (or null) * @param line The grammar file line number on which the error occured (or -1) * @param column The grammar file column number on which the error occured (or -1) */ public void error(String s, String file, int line, int column) { hasError = true; System.err.println(FileLineFormatter.getFormatter(). getFormatString(file, line, column) + s); } /** When we are 1.1 compatible... 
public static Object factory2 (String p, Object[] initargs) { Class c; Object o = null; try { int argslen = initargs.length; Class cl[] = new Class[argslen]; for (int i=0;i<argslen;i++) { cl[i] = Class.forName(initargs[i].getClass().getName()); } c = Class.forName (p); Constructor con = c.getConstructor (cl); o = con.newInstance (initargs); } catch (Exception e) { System.err.println ("Can't make a " + p); } return o; } */ public Object factory(String p) { Class c; Object o = null; try { c = Class.forName(p);// get class def o = c.newInstance(); // make a new one } catch (Exception e) { // either class not found, // class is interface/abstract, or // class or initializer is not accessible. warning("Can't create an object of type " + p); return null; } return o; } public String fileMinusPath(String f) { String separator = System.getProperty("file.separator"); int endOfPath = f.lastIndexOf(separator); if (endOfPath == -1) { return f; // no path found } return f.substring(endOfPath + 1); } /** Determine the language used for this run of ANTLR * This was made a method so the subclass can override it */ public String getLanguage(MakeGrammar behavior) { if (genDiagnostics) { return "Diagnostic"; } if (genHTML) { return "HTML"; } if (genDocBook) { return "DocBook"; } return behavior.language; } public String getOutputDirectory() { return outputDir; } private static void help() { System.err.println("usage: java antlr.Tool [args] file.g"); System.err.println(" -o outputDir specify output directory where all output generated."); System.err.println(" -glib superGrammar specify location of supergrammar file."); System.err.println(" -debug launch the ParseView debugger upon parser invocation."); System.err.println(" -html generate a html file from your grammar."); System.err.println(" -docbook generate a docbook sgml file from your grammar."); System.err.println(" -diagnostic generate a textfile with diagnostics."); System.err.println(" -trace have all rules call 
traceIn/traceOut."); System.err.println(" -traceLexer have lexer rules call traceIn/traceOut."); System.err.println(" -traceParser have parser rules call traceIn/traceOut."); System.err.println(" -traceTreeParser have tree parser rules call traceIn/traceOut."); System.err.println(" -h|-help|--help this message"); } public static void main(String[] args) { System.err.println("ANTLR Parser Generator Version " + Version.project_version + " 1989-2005 jGuru.com"); version = Version.project_version; try { if (args.length == 0) { help(); System.exit(1); } for (int i = 0; i < args.length; ++i) { if (args[i].equals("-h") || args[i].equals("-help") || args[i].equals("--help") ) { help(); System.exit(1); } } Tool theTool = new Tool(); theTool.doEverything(args); theTool = null; } catch (Exception e) { System.err.println(System.getProperty("line.separator") + System.getProperty("line.separator")); System.err.println("#$%%*&@# internal error: " + e.toString()); System.err.println("[complain to nearest government official"); System.err.println(" or send hate-mail to parrt@jguru.com;"); System.err.println(" please send stack trace with report.]" + System.getProperty("line.separator")); e.printStackTrace(); } System.exit(0); } /** This method is used by all code generators to create new output * files. If the outputDir set by -o is not present it will be created here. */ public PrintWriter openOutputFile(String f) throws IOException { if( outputDir != "." ) { File out_dir = new File(outputDir); if( ! 
out_dir.exists() ) out_dir.mkdirs(); } return new PrintWriter(new PreservingFileWriter(outputDir + System.getProperty("file.separator") + f)); } public Reader getGrammarReader() { Reader f = null; try { if (grammarFile != null) { f = new BufferedReader(new FileReader(grammarFile)); } } catch (IOException e) { fatalError("cannot open grammar file " + grammarFile); } return f; } /** @since 2.7.2 */ public void reportException(Exception e, String message) { System.err.println(message == null ? e.getMessage() : message + ": " + e.getMessage()); } /** @since 2.7.2 */ public void reportProgress(String message) { System.out.println(message); } /** An error occured that should stop the Tool from doing any work. * The default implementation currently exits (via * {@link java.lang.System.exit(int)} after printing an error message to * stderr. However, the tools should expect that a subclass * will override this to throw an unchecked exception such as * {@link java.lang.IllegalStateException} or another subclass of * {@link java.lang.RuntimeException}. If this method is overriden, * it must never return normally; i.e. it must always * throw an exception or call System.exit. * @since 2.7.2 * @param s The message */ public void fatalError(String message) { System.err.println(message); System.exit(1); } /** Issue an unknown fatal error. If this method is overriden, * it must never return normally; i.e. it must always * throw an exception or call System.exit. * @deprecated as of 2.7.2 use {@link #fatalError(String)}. By default * this method executes fatalError("panic");. */ public void panic() { fatalError("panic"); } /** Issue a fatal error message. If this method is overriden, * it must never return normally; i.e. it must always * throw an exception or call System.exit. * @deprecated as of 2.7.2 use {@link #fatalError(String)}. By defaykt * this method executes fatalError("panic: " + s);. 
* @param s The message */ public void panic(String s) { fatalError("panic: " + s); } // File.getParent() can return null when the file is specified without // a directory or is in the root directory. // This method handles those cases. public File parent(File f) { String dirname = f.getParent(); if (dirname == null) { if (f.isAbsolute()) return new File(File.separator); else return new File(System.getProperty("user.dir")); } return new File(dirname); } /** Parse a list such as "f1.g;f2.g;..." and return a Vector * of the elements. */ public static Vector parseSeparatedList(String list, char separator) { java.util.StringTokenizer st = new java.util.StringTokenizer(list, String.valueOf(separator)); Vector v = new Vector(10); while ( st.hasMoreTokens() ) { v.appendElement(st.nextToken()); } if (v.size() == 0) return null; return v; } /** given a filename, strip off the directory prefix (if any) * and return it. Return "./" if f has no dir prefix. */ public String pathToFile(String f) { String separator = System.getProperty("file.separator"); int endOfPath = f.lastIndexOf(separator); if (endOfPath == -1) { // no path, use current directory return "." + System.getProperty("file.separator"); } return f.substring(0, endOfPath + 1); } /**

Process the command-line arguments. Can only be called by Tool. * A bitset is collected of all correct arguments via setArgOk.

* @param args The command-line arguments passed to main() * */ protected void processArguments(String[] args) { for (int i = 0; i < args.length; i++) { if (args[i].equals("-diagnostic")) { genDiagnostics = true; genHTML = false; setArgOK(i); } else if (args[i].equals("-o")) { setArgOK(i); if (i + 1 >= args.length) { error("missing output directory with -o option; ignoring"); } else { i++; setOutputDirectory(args[i]); setArgOK(i); } } else if (args[i].equals("-html")) { genHTML = true; genDiagnostics = false; setArgOK(i); } else if (args[i].equals("-docbook")) { genDocBook = true; genDiagnostics = false; setArgOK(i); } else { if (args[i].charAt(0) != '-') { // Must be the grammar file grammarFile = args[i]; setArgOK(i); } } } } public void setArgOK(int i) { cmdLineArgValid.add(i); } public void setOutputDirectory(String o) { outputDir = o; } /** Issue an error; used for general tool errors not for grammar stuff * @param s The message */ public void toolError(String s) { System.err.println("error: " + s); } /** Issue a warning * @param s the message */ public void warning(String s) { System.err.println("warning: " + s); } /** Issue a warning with line number information * @param s The message * @param file The file that has the warning (or null) * @param line The grammar file line number on which the warning occured (or -1) * @param column The grammar file line number on which the warning occured (or -1) */ public void warning(String s, String file, int line, int column) { System.err.println(FileLineFormatter.getFormatter(). 
getFormatString(file, line, column) + "warning:" + s); } /** Issue a warning with line number information * @param s The lines of the message * @param file The file that has the warning * @param line The grammar file line number on which the warning occured */ public void warning(String[] s, String file, int line, int column) { if (s == null || s.length == 0) { panic("bad multi-line message to Tool.warning"); } System.err.println(FileLineFormatter.getFormatter(). getFormatString(file, line, column) + "warning:" + s[0]); for (int i = 1; i < s.length; i++) { System.err.println(FileLineFormatter.getFormatter(). getFormatString(file, line, column) + " " + s[i]); } } /** * Support C++ & C# namespaces (for now). * C++: Add a nested namespace name to the current namespace. * C# : Specify an enclosing namespace for the generated code. * DAW: David Wagner -- C# support by kunle odutola */ public void setNameSpace(String name) { if (null == nameSpace) nameSpace = new NameSpace(StringUtils.stripFrontBack(name, "\"", "\"")); } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/ToolErrorHandler.java000066400000000000000000000030231161462365500252040ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/ToolErrorHandler.java#1 $ */ import antlr.collections.impl.BitSet; interface ToolErrorHandler { /** Issue a warning about ambiguity between a alternates * @param blk The block being analyzed * @param lexicalAnalysis true for lexical rule * @param depth The depth of the ambiguity * @param sets An array of bitsets containing the ambiguities * @param altIdx1 The zero-based index of the first ambiguous alternative * @param altIdx2 The zero-based index of the second ambiguous alternative */ public void warnAltAmbiguity( Grammar grammar, AlternativeBlock blk, boolean lexicalAnalysis, int depth, 
Lookahead[] sets, int altIdx1, int altIdx2 ); /** Issue a warning about ambiguity between an alternate and exit path. * @param blk The block being analyzed * @param lexicalAnalysis true for lexical rule * @param depth The depth of the ambiguity * @param sets An array of bitsets containing the ambiguities * @param altIdx The zero-based index of the ambiguous alternative */ public void warnAltExitAmbiguity( Grammar grammar, BlockWithImpliedExitPath blk, boolean lexicalAnalysis, int depth, Lookahead[] sets, int altIdx ); } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/TreeBlockContext.java000066400000000000000000000023541161462365500252040ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/TreeBlockContext.java#1 $ */ /**The context needed to add root,child elements to a Tree. There * is only one alternative (i.e., a list of children). We subclass to * specialize. MakeGrammar.addElementToCurrentAlt will work correctly * now for either a block of alts or a Tree child list. * * The first time addAlternativeElement is called, it sets the root element * rather than adding it to one of the alternative lists. Rather than have * the grammar duplicate the rules for grammar atoms etc... we use the same * grammar and same refToken behavior etc... We have to special case somewhere * and here is where we do it. 
*/ class TreeBlockContext extends BlockContext { protected boolean nextElementIsRoot = true; public void addAlternativeElement(AlternativeElement e) { TreeElement tree = (TreeElement)block; if (nextElementIsRoot) { tree.root = (GrammarAtom)e; nextElementIsRoot = false; } else { super.addAlternativeElement(e); } } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/TreeElement.java000066400000000000000000000016241161462365500241750ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/TreeElement.java#1 $ */ /** A TreeElement is a block with one alternative and a root node */ class TreeElement extends AlternativeBlock { GrammarAtom root; public TreeElement(Grammar g, Token start) { super(g, start, false); } public void generate() { grammar.generator.gen(this); } public Lookahead look(int k) { return grammar.theLLkAnalyzer.look(k, this); } public String toString() { String s = " #(" + root; Alternative a = (Alternative)alternatives.elementAt(0); AlternativeElement p = a.head; while (p != null) { s += p; p = p.next; } return s + " )"; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/TreeParser.java000066400000000000000000000121511161462365500240350ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/TreeParser.java#1 $ */ import java.util.NoSuchElementException; import antlr.collections.AST; import antlr.collections.impl.BitSet; public class TreeParser { /** The AST Null object; the parsing cursor is set to this when * it is found to be null. This way, we can test the * token type of a node without having to have tests for null * everywhere. 
*/ public static ASTNULLType ASTNULL = new ASTNULLType(); /** Where did this rule leave off parsing; avoids a return parameter */ protected AST _retTree; /** guessing nesting level; guessing==0 implies not guessing */ // protected int guessing = 0; /** Nesting level of registered handlers */ // protected int exceptionLevel = 0; protected TreeParserSharedInputState inputState; /** Table of token type to token names */ protected String[] tokenNames; /** AST return value for a rule is squirreled away here */ protected AST returnAST; /** AST support code; parser and treeparser delegate to this object */ protected ASTFactory astFactory = new ASTFactory(); /** Used to keep track of indentdepth for traceIn/Out */ protected int traceDepth = 0; public TreeParser() { inputState = new TreeParserSharedInputState(); } /** Get the AST return value squirreled away in the parser */ public AST getAST() { return returnAST; } public ASTFactory getASTFactory() { return astFactory; } public String getTokenName(int num) { return tokenNames[num]; } public String[] getTokenNames() { return tokenNames; } protected void match(AST t, int ttype) throws MismatchedTokenException { //System.out.println("match("+ttype+"); cursor is "+t); if (t == null || t == ASTNULL || t.getType() != ttype) { throw new MismatchedTokenException(getTokenNames(), t, ttype, false); } } /**Make sure current lookahead symbol matches the given set * Throw an exception upon mismatch, which is catch by either the * error handler or by the syntactic predicate. 
*/ public void match(AST t, BitSet b) throws MismatchedTokenException { if (t == null || t == ASTNULL || !b.member(t.getType())) { throw new MismatchedTokenException(getTokenNames(), t, b, false); } } protected void matchNot(AST t, int ttype) throws MismatchedTokenException { //System.out.println("match("+ttype+"); cursor is "+t); if (t == null || t == ASTNULL || t.getType() == ttype) { throw new MismatchedTokenException(getTokenNames(), t, ttype, true); } } /** @deprecated as of 2.7.2. This method calls System.exit() and writes * directly to stderr, which is usually not appropriate when * a parser is embedded into a larger application. Since the method is * static, it cannot be overridden to avoid these problems. * ANTLR no longer uses this method internally or in generated code. */ public static void panic() { System.err.println("TreeWalker: panic"); System.exit(1); } /** Parser error-reporting function can be overridden in subclass */ public void reportError(RecognitionException ex) { System.err.println(ex.toString()); } /** Parser error-reporting function can be overridden in subclass */ public void reportError(String s) { System.err.println("error: " + s); } /** Parser warning-reporting function can be overridden in subclass */ public void reportWarning(String s) { System.err.println("warning: " + s); } /** Specify an object with support code (shared by * Parser and TreeParser. Normally, the programmer * does not play with this, using setASTNodeType instead. */ public void setASTFactory(ASTFactory f) { astFactory = f; } /** Specify the type of node to create during tree building. 
* @deprecated since 2.7.2 */ public void setASTNodeType(String nodeType) { setASTNodeClass(nodeType); } /** Specify the type of node to create during tree building */ public void setASTNodeClass(String nodeType) { astFactory.setASTNodeType(nodeType); } public void traceIndent() { for (int i = 0; i < traceDepth; i++) System.out.print(" "); } public void traceIn(String rname, AST t) { traceDepth += 1; traceIndent(); System.out.println("> " + rname + "(" + (t != null?t.toString():"null") + ")" + ((inputState.guessing > 0)?" [guessing]":"")); } public void traceOut(String rname, AST t) { traceIndent(); System.out.println("< " + rname + "(" + (t != null?t.toString():"null") + ")" + ((inputState.guessing > 0)?" [guessing]":"")); traceDepth--; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/TreeParserSharedInputState.java000066400000000000000000000011041161462365500272010ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/TreeParserSharedInputState.java#1 $ */ /** This object contains the data associated with an * input AST. Multiple parsers * share a single TreeParserSharedInputState to parse * the same tree or to have the parser walk multiple * trees. */ public class TreeParserSharedInputState { /** Are we guessing (guessing>0)? 
*/ public int guessing = 0; } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/TreeSpecifierNode.java000066400000000000000000000021071161462365500253200ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/TreeSpecifierNode.java#1 $ */ class TreeSpecifierNode { private TreeSpecifierNode parent = null; private TreeSpecifierNode firstChild = null; private TreeSpecifierNode nextSibling = null; private Token tok; TreeSpecifierNode(Token tok_) { tok = tok_; } public TreeSpecifierNode getFirstChild() { return firstChild; } public TreeSpecifierNode getNextSibling() { return nextSibling; } // Accessors public TreeSpecifierNode getParent() { return parent; } public Token getToken() { return tok; } public void setFirstChild(TreeSpecifierNode child) { firstChild = child; child.parent = this; } // Structure-building public void setNextSibling(TreeSpecifierNode sibling) { nextSibling = sibling; sibling.parent = parent; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/TreeWalkerGrammar.java000066400000000000000000000052561161462365500253450ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/TreeWalkerGrammar.java#1 $ */ import java.util.Hashtable; import java.util.Enumeration; import java.io.IOException; import antlr.collections.impl.BitSet; import antlr.collections.impl.Vector; /** Parser-specific grammar subclass */ class TreeWalkerGrammar extends Grammar { // true for transform mode protected boolean transform = false; TreeWalkerGrammar(String className_, Tool tool_, String superClass) { super(className_, tool_, superClass); } /** Top-level call to generate the code for this grammar */ public 
void generate() throws IOException { generator.gen(this); } // Get name of class from which generated parser/lexer inherits protected String getSuperClass() { return "TreeParser"; } /**Process command line arguments. * -trace have all rules call traceIn/traceOut * -traceParser have parser rules call traceIn/traceOut * -debug generate debugging output for parser debugger */ public void processArguments(String[] args) { for (int i = 0; i < args.length; i++) { if (args[i].equals("-trace")) { traceRules = true; antlrTool.setArgOK(i); } else if (args[i].equals("-traceTreeParser")) { traceRules = true; antlrTool.setArgOK(i); } // else if ( args[i].equals("-debug") ) { // debuggingOutput = true; // superClass = "parseview.DebuggingTreeWalker"; // Tool.setArgOK(i); // } } } /** Set tree parser options */ public boolean setOption(String key, Token value) { if (key.equals("buildAST")) { if (value.getText().equals("true")) { buildAST = true; } else if (value.getText().equals("false")) { buildAST = false; } else { antlrTool.error("buildAST option must be true or false", getFilename(), value.getLine(), value.getColumn()); } return true; } if (key.equals("ASTLabelType")) { super.setOption(key, value); return true; } if (key.equals("className")) { super.setOption(key, value); return true; } if (super.setOption(key, value)) { return true; } antlrTool.error("Invalid option: " + key, getFilename(), value.getLine(), value.getColumn()); return false; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/Version.java000066400000000000000000000004541161462365500234110ustar00rootroot00000000000000package antlr; public class Version { public static final String version = "2"; public static final String subversion = "7"; public static final String patchlevel = "5"; public static final String datestamp = "20050517"; public static final String project_version = "2.7.5 (20050517)"; } 
nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/Version.java.in000066400000000000000000000005341161462365500240150ustar00rootroot00000000000000package antlr; public class Version { public static final String version = "@VERSION@"; public static final String subversion = "@SUBVERSION@"; public static final String patchlevel = "@PATCHLEVEL@"; public static final String datestamp = "@TIMESTAMP@"; public static final String project_version = "@PACKAGE_VERSION@ (@TIMESTAMP@)"; } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/WildcardElement.java000066400000000000000000000015661161462365500250340ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/WildcardElement.java#1 $ */ class WildcardElement extends GrammarAtom { protected String label; public WildcardElement(Grammar g, Token t, int autoGenType) { super(g, t, autoGenType); line = t.getLine(); } public void generate() { grammar.generator.gen(this); } public String getLabel() { return label; } public Lookahead look(int k) { return grammar.theLLkAnalyzer.look(k, this); } public void setLabel(String label_) { label = label_; } public String toString() { String s = " "; if (label != null) s += label + ":"; return s + "."; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/ZeroOrMoreBlock.java000066400000000000000000000012731161462365500250020ustar00rootroot00000000000000package antlr; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/ZeroOrMoreBlock.java#1 $ */ class ZeroOrMoreBlock extends BlockWithImpliedExitPath { public ZeroOrMoreBlock(Grammar g) { super(g); } public ZeroOrMoreBlock(Grammar g, Token start) { super(g, start); } public void generate() { 
grammar.generator.gen(this); } public Lookahead look(int k) { return grammar.theLLkAnalyzer.look(k, this); } public String toString() { return super.toString() + "*"; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/actions/000077500000000000000000000000001161462365500225565ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/actions/cpp/000077500000000000000000000000001161462365500233405ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/actions/cpp/ActionLexer.java000066400000000000000000001774751161462365500264450ustar00rootroot00000000000000// $ANTLR : "action.g" -> "ActionLexer.java"$ package antlr.actions.cpp; import java.io.InputStream; import antlr.TokenStreamException; import antlr.TokenStreamIOException; import antlr.TokenStreamRecognitionException; import antlr.CharStreamException; import antlr.CharStreamIOException; import antlr.ANTLRException; import java.io.Reader; import java.util.Hashtable; import antlr.CharScanner; import antlr.InputBuffer; import antlr.ByteBuffer; import antlr.CharBuffer; import antlr.Token; import antlr.CommonToken; import antlr.RecognitionException; import antlr.NoViableAltForCharException; import antlr.MismatchedCharException; import antlr.TokenStream; import antlr.ANTLRHashString; import antlr.LexerSharedInputState; import antlr.collections.impl.BitSet; import antlr.SemanticException; import java.io.StringReader; import antlr.collections.impl.Vector; import antlr.*; /** Perform the following translations: AST related translations ## -> currentRule_AST #(x,y,z) -> codeGenerator.getASTCreateString(vector-of(x,y,z)) #[x] -> codeGenerator.getASTCreateString(x) #x -> codeGenerator.mapTreeId(x) Inside context of #(...), you can ref (x,y,z), [x], and x as shortcuts. 
Text related translations $append(x) -> text.append(x) $setText(x) -> text.setLength(_begin); text.append(x) $getText -> new String(text.getBuffer(),_begin,text.length()-_begin) $setToken(x) -> _token = x $setType(x) -> _ttype = x $FOLLOW(r) -> FOLLOW set name for rule r (optional arg) $FIRST(r) -> FIRST set name for rule r (optional arg) */ public class ActionLexer extends antlr.CharScanner implements ActionLexerTokenTypes, TokenStream { protected RuleBlock currentRule; protected CodeGenerator generator; protected int lineOffset = 0; private Tool antlrTool; // The ANTLR tool ActionTransInfo transInfo; public ActionLexer(String s, RuleBlock currentRule, CodeGenerator generator, ActionTransInfo transInfo ) { this(new StringReader(s)); this.currentRule = currentRule; this.generator = generator; this.transInfo = transInfo; } public void setLineOffset(int lineOffset) { setLine(lineOffset); } public void setTool(Tool tool) { this.antlrTool = tool; } public void reportError(RecognitionException e) { antlrTool.error("Syntax error in action: "+e,getFilename(),getLine(),getColumn()); } public void reportError(String s) { antlrTool.error(s,getFilename(),getLine(),getColumn()); } public void reportWarning(String s) { if ( getFilename()==null ) antlrTool.warning(s); else antlrTool.warning(s,getFilename(),getLine(),getColumn()); } public ActionLexer(InputStream in) { this(new ByteBuffer(in)); } public ActionLexer(Reader in) { this(new CharBuffer(in)); } public ActionLexer(InputBuffer ib) { this(new LexerSharedInputState(ib)); } public ActionLexer(LexerSharedInputState state) { super(state); caseSensitiveLiterals = true; setCaseSensitive(true); literals = new Hashtable(); } public Token nextToken() throws TokenStreamException { Token theRetToken=null; tryAgain: for (;;) { Token _token = null; int _ttype = Token.INVALID_TYPE; resetText(); try { // for char stream error handling try { // for lexical error handling if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff'))) { mACTION(true); 
theRetToken=_returnToken; } else { if (LA(1)==EOF_CHAR) {uponEOF(); _returnToken = makeToken(Token.EOF_TYPE);} else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());} } if ( _returnToken==null ) continue tryAgain; // found SKIP token _ttype = _returnToken.getType(); _returnToken.setType(_ttype); return _returnToken; } catch (RecognitionException e) { throw new TokenStreamRecognitionException(e); } } catch (CharStreamException cse) { if ( cse instanceof CharStreamIOException ) { throw new TokenStreamIOException(((CharStreamIOException)cse).io); } else { throw new TokenStreamException(cse.getMessage()); } } } } public final void mACTION(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = ACTION; int _saveIndex; { int _cnt502=0; _loop502: do { switch ( LA(1)) { case '#': { mAST_ITEM(false); break; } case '$': { mTEXT_ITEM(false); break; } default: if ((_tokenSet_0.member(LA(1)))) { mSTUFF(false); } else { if ( _cnt502>=1 ) { break _loop502; } else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());} } } _cnt502++; } while (true); } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } /** stuff in between #(...) and #id items * Allow the escaping of the # for C preprocessor stuff. 
*/ protected final void mSTUFF(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = STUFF; int _saveIndex; switch ( LA(1)) { case '"': { mSTRING(false); break; } case '\'': { mCHAR(false); break; } case '\n': { match('\n'); newline(); break; } default: if ((LA(1)=='/') && (LA(2)=='*'||LA(2)=='/')) { mCOMMENT(false); } else if ((LA(1)=='\r') && (LA(2)=='\n') && (true)) { match("\r\n"); newline(); } else if ((LA(1)=='\\') && (LA(2)=='#') && (true)) { match('\\'); match('#'); text.setLength(_begin); text.append("#"); } else if ((LA(1)=='/') && (_tokenSet_1.member(LA(2)))) { match('/'); { match(_tokenSet_1); } } else if ((LA(1)=='\r') && (true) && (true)) { match('\r'); newline(); } else if ((_tokenSet_2.member(LA(1))) && (true) && (true)) { { match(_tokenSet_2); } } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mAST_ITEM(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = AST_ITEM; int _saveIndex; Token t=null; Token id=null; Token ctor=null; if ((LA(1)=='#') && (LA(2)=='(')) { _saveIndex=text.length(); match('#'); text.setLength(_saveIndex); mTREE(true); t=_returnToken; } else if ((LA(1)=='#') && (_tokenSet_3.member(LA(2)))) { _saveIndex=text.length(); match('#'); text.setLength(_saveIndex); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { mWS(false); break; } case ':': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': 
case 'V': case 'W': case 'X': case 'Y': case 'Z': case '_': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } mID(true); id=_returnToken; String idt = id.getText(); String mapped = generator.mapTreeId(id.getText(), transInfo); // verify that it's not a preprocessor macro... if( mapped!=null && ! idt.equals( mapped ) ) { text.setLength(_begin); text.append(mapped); } else { if(idt.equals("if") || idt.equals("define") || idt.equals("ifdef") || idt.equals("ifndef") || idt.equals("else") || idt.equals("elif") || idt.equals("endif") || idt.equals("warning") || idt.equals("error") || idt.equals("ident") || idt.equals("pragma") || idt.equals("include")) { text.setLength(_begin); text.append("#"+idt); } } { if ((_tokenSet_4.member(LA(1))) && (true) && (true)) { mWS(false); } else { } } { if ((LA(1)=='=') && (true) && (true)) { mVAR_ASSIGN(false); } else { } } } else if ((LA(1)=='#') && (LA(2)=='[')) { _saveIndex=text.length(); match('#'); text.setLength(_saveIndex); mAST_CONSTRUCTOR(true); ctor=_returnToken; } else if ((LA(1)=='#') && (LA(2)=='#')) { match("##"); if( currentRule != null ) { String r = currentRule.getRuleName()+"_AST"; text.setLength(_begin); text.append(r); if ( transInfo!=null ) { transInfo.refRuleRoot=r; // we ref root of tree } } else { reportWarning("\"##\" not valid in this context"); text.setLength(_begin); text.append("##"); } { if ((_tokenSet_4.member(LA(1))) && (true) && (true)) { mWS(false); } else { } } { if ((LA(1)=='=') && (true) && (true)) { mVAR_ASSIGN(false); } else { } } } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } if ( _createToken && _token==null && 
_ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mTEXT_ITEM(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = TEXT_ITEM; int _saveIndex; Token a1=null; Token a2=null; Token a3=null; Token a4=null; Token a5=null; Token a6=null; if ((LA(1)=='$') && (LA(2)=='F') && (LA(3)=='O')) { match("$FOLLOW"); { if ((_tokenSet_5.member(LA(1))) && (_tokenSet_6.member(LA(2))) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) { { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { mWS(false); break; } case '(': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } match('('); mTEXT_ARG(true); a5=_returnToken; match(')'); } else { } } String rule = currentRule.getRuleName(); if ( a5!=null ) { rule = a5.getText(); } String setName = generator.getFOLLOWBitSet(rule, 1); // System.out.println("FOLLOW("+rule+")="+setName); if ( setName==null ) { reportError("$FOLLOW("+rule+")"+ ": unknown rule or bad lookahead computation"); } else { text.setLength(_begin); text.append(setName); } } else if ((LA(1)=='$') && (LA(2)=='F') && (LA(3)=='I')) { match("$FIRST"); { if ((_tokenSet_5.member(LA(1))) && (_tokenSet_6.member(LA(2))) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) { { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { mWS(false); break; } case '(': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } match('('); mTEXT_ARG(true); a6=_returnToken; match(')'); } else { } } String rule = currentRule.getRuleName(); if ( a6!=null ) { rule = a6.getText(); } String setName = generator.getFIRSTBitSet(rule, 1); // System.out.println("FIRST("+rule+")="+setName); if ( setName==null ) { reportError("$FIRST("+rule+")"+ 
": unknown rule or bad lookahead computation"); } else { text.setLength(_begin); text.append(setName); } } else if ((LA(1)=='$') && (LA(2)=='a')) { match("$append"); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { mWS(false); break; } case '(': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } match('('); mTEXT_ARG(true); a1=_returnToken; match(')'); String t = "text += "+a1.getText(); text.setLength(_begin); text.append(t); } else if ((LA(1)=='$') && (LA(2)=='s')) { match("$set"); { if ((LA(1)=='T') && (LA(2)=='e')) { match("Text"); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { mWS(false); break; } case '(': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } match('('); mTEXT_ARG(true); a2=_returnToken; match(')'); String t; t = "{ text.erase(_begin); text += "+a2.getText()+"; }"; text.setLength(_begin); text.append(t); } else if ((LA(1)=='T') && (LA(2)=='o')) { match("Token"); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { mWS(false); break; } case '(': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } match('('); mTEXT_ARG(true); a3=_returnToken; match(')'); String t="_token = "+a3.getText(); text.setLength(_begin); text.append(t); } else if ((LA(1)=='T') && (LA(2)=='y')) { match("Type"); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { mWS(false); break; } case '(': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } match('('); mTEXT_ARG(true); a4=_returnToken; match(')'); String t="_ttype = "+a4.getText(); text.setLength(_begin); text.append(t); } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } else if ((LA(1)=='$') && (LA(2)=='g')) { match("$getText"); 
text.setLength(_begin); text.append("text.substr(_begin,text.length()-_begin)"); } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mCOMMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = COMMENT; int _saveIndex; if ((LA(1)=='/') && (LA(2)=='/')) { mSL_COMMENT(false); } else if ((LA(1)=='/') && (LA(2)=='*')) { mML_COMMENT(false); } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mSTRING(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = STRING; int _saveIndex; match('"'); { _loop599: do { if ((LA(1)=='\\')) { mESC(false); } else if ((_tokenSet_7.member(LA(1)))) { matchNot('"'); } else { break _loop599; } } while (true); } match('"'); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mCHAR(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = CHAR; int _saveIndex; match('\''); { if ((LA(1)=='\\')) { mESC(false); } else if ((_tokenSet_8.member(LA(1)))) { matchNot('\''); } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), 
getColumn()); } } match('\''); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mTREE(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = TREE; int _saveIndex; Token t=null; Token t2=null; StringBuffer buf = new StringBuffer(); int n=0; Vector terms = new Vector(10); _saveIndex=text.length(); match('('); text.setLength(_saveIndex); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case '"': case '#': case '(': case ':': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': case '[': case '_': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } _saveIndex=text.length(); mTREE_ELEMENT(true); text.setLength(_saveIndex); t=_returnToken; terms.appendElement( generator.processStringForASTConstructor(t.getText()) ); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case ')': case ',': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } { _loop528: do { if ((LA(1)==',')) { _saveIndex=text.length(); match(','); text.setLength(_saveIndex); { 
switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case '"': case '#': case '(': case ':': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': case '[': case '_': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } _saveIndex=text.length(); mTREE_ELEMENT(true); text.setLength(_saveIndex); t2=_returnToken; terms.appendElement( generator.processStringForASTConstructor(t2.getText()) ); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case ')': case ',': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } } else { break _loop528; } } while (true); } text.setLength(_begin); text.append(generator.getASTCreateString(terms)); _saveIndex=text.length(); match(')'); text.setLength(_saveIndex); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mWS(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = WS; int _saveIndex; { int _cnt619=0; _loop619: do { if ((LA(1)=='\r') && (LA(2)=='\n') && (true)) { match('\r'); match('\n'); newline(); } else if 
((LA(1)==' ') && (true) && (true)) { match(' '); } else if ((LA(1)=='\t') && (true) && (true)) { match('\t'); } else if ((LA(1)=='\r') && (true) && (true)) { match('\r'); newline(); } else if ((LA(1)=='\n') && (true) && (true)) { match('\n'); newline(); } else { if ( _cnt619>=1 ) { break _loop619; } else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());} } _cnt619++; } while (true); } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mID(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = ID; int _saveIndex; { switch ( LA(1)) { case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { matchRange('a','z'); break; } case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': { matchRange('A','Z'); break; } case '_': { match('_'); break; } case ':': { match("::"); break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } { _loop585: do { if ((_tokenSet_9.member(LA(1))) && (true) && (true)) { { switch ( LA(1)) { case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { matchRange('a','z'); break; } case 'A': 
case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': { matchRange('A','Z'); break; } case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': { matchRange('0','9'); break; } case '_': { match('_'); break; } case ':': { match("::"); break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } } else { break _loop585; } } while (true); } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mVAR_ASSIGN(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = VAR_ASSIGN; int _saveIndex; match('='); // inform the code generator that an assignment was done to // AST root for the rule if invoker set refRuleRoot. 
if ( LA(1)!='=' && transInfo!=null && transInfo.refRuleRoot!=null ) { transInfo.assignToRoot=true; } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mAST_CONSTRUCTOR(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = AST_CONSTRUCTOR; int _saveIndex; Token x=null; Token y=null; _saveIndex=text.length(); match('['); text.setLength(_saveIndex); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case '"': case '#': case '(': case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case ':': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': case '[': case '_': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } _saveIndex=text.length(); mAST_CTOR_ELEMENT(true); text.setLength(_saveIndex); x=_returnToken; { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case ',': case ']': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } { switch ( LA(1)) { case ',': { _saveIndex=text.length(); 
match(','); text.setLength(_saveIndex); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case '"': case '#': case '(': case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case ':': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': case '[': case '_': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } _saveIndex=text.length(); mAST_CTOR_ELEMENT(true); text.setLength(_saveIndex); y=_returnToken; { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case ']': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } break; } case ']': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } _saveIndex=text.length(); match(']'); text.setLength(_saveIndex); // System.out.println("AST_CONSTRUCTOR: "+((x==null)?"null":x.getText())+ // ", "+((y==null)?"null":y.getText())); String ys = generator.processStringForASTConstructor(x.getText()); // the second does not need processing coz it's a string // (eg second param of astFactory.create(x,y) if ( y!=null ) ys += ","+y.getText(); text.setLength(_begin); text.append( generator.getASTCreateString(null,ys) ); if ( _createToken && _token==null && 
_ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mTEXT_ARG(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = TEXT_ARG; int _saveIndex; { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { mWS(false); break; } case '"': case '$': case '\'': case '+': case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case ':': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': case '_': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } { int _cnt559=0; _loop559: do { if ((_tokenSet_10.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && (true)) { mTEXT_ARG_ELEMENT(false); { if ((_tokenSet_4.member(LA(1))) && (_tokenSet_11.member(LA(2))) && (true)) { mWS(false); } else if ((_tokenSet_11.member(LA(1))) && (true) && (true)) { } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } else { if ( _cnt559>=1 ) { break _loop559; } else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());} } _cnt559++; } while (true); } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, 
text.length()-_begin)); } _returnToken = _token; } protected final void mTREE_ELEMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = TREE_ELEMENT; int _saveIndex; Token id=null; boolean was_mapped; switch ( LA(1)) { case '(': { mTREE(false); break; } case '[': { mAST_CONSTRUCTOR(false); break; } case ':': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': case '_': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { mID_ELEMENT(false); break; } case '"': { mSTRING(false); break; } default: if ((LA(1)=='#') && (LA(2)=='(')) { _saveIndex=text.length(); match('#'); text.setLength(_saveIndex); mTREE(false); } else if ((LA(1)=='#') && (LA(2)=='[')) { _saveIndex=text.length(); match('#'); text.setLength(_saveIndex); mAST_CONSTRUCTOR(false); } else if ((LA(1)=='#') && (_tokenSet_12.member(LA(2)))) { _saveIndex=text.length(); match('#'); text.setLength(_saveIndex); was_mapped=mID_ELEMENT(true); id=_returnToken; // RK: I have a queer feeling that this maptreeid is redundant.. if ( ! 
was_mapped ) { String t = generator.mapTreeId(id.getText(), null); // System.out.println("mapped: "+id.getText()+" -> "+t); if ( t!=null ) { text.setLength(_begin); text.append(t); } } } else if ((LA(1)=='#') && (LA(2)=='#')) { match("##"); if( currentRule != null ) { String t = currentRule.getRuleName()+"_AST"; text.setLength(_begin); text.append(t); } else { reportError("\"##\" not valid in this context"); text.setLength(_begin); text.append("##"); } } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } /** An ID_ELEMENT can be a func call, array ref, simple var, * or AST label ref. */ protected final boolean mID_ELEMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { boolean mapped=false; int _ttype; Token _token=null; int _begin=text.length(); _ttype = ID_ELEMENT; int _saveIndex; Token id=null; mID(true); id=_returnToken; { if ((_tokenSet_4.member(LA(1))) && (_tokenSet_13.member(LA(2))) && (true)) { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); } else if ((_tokenSet_13.member(LA(1))) && (true) && (true)) { } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } { switch ( LA(1)) { case '(': case '<': { { switch ( LA(1)) { case '<': { match('<'); { _loop542: do { if ((_tokenSet_14.member(LA(1)))) { matchNot('>'); } else { break _loop542; } } while (true); } match('>'); break; } case '(': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } match('('); { if ((_tokenSet_4.member(LA(1))) && (_tokenSet_15.member(LA(2))) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); } else if 
((_tokenSet_15.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && (true)) { } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } { switch ( LA(1)) { case '"': case '#': case '\'': case '(': case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case ':': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': case '[': case '_': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { mARG(false); { _loop547: do { if ((LA(1)==',')) { match(','); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case '"': case '#': case '\'': case '(': case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case ':': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': case '[': case '_': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } mARG(false); } else { break _loop547; } } while (true); } break; } case '\t': 
case '\n': case '\r': case ' ': case ')': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case ')': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } match(')'); break; } case '[': { { int _cnt552=0; _loop552: do { if ((LA(1)=='[')) { match('['); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case '"': case '#': case '\'': case '(': case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case ':': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': case '[': case '_': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } mARG(false); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case ']': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } match(']'); } else { if ( _cnt552>=1 ) { break _loop552; } else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());} } _cnt552++; } while (true); } break; } case '.': { match('.'); mID_ELEMENT(false); 
break; } case ':': { match("::"); mID_ELEMENT(false); break; } default: if ((LA(1)=='-') && (LA(2)=='>') && (_tokenSet_12.member(LA(3)))) { match("->"); mID_ELEMENT(false); } else if ((_tokenSet_16.member(LA(1))) && (true) && (true)) { mapped = true; String t = generator.mapTreeId(id.getText(), transInfo); // System.out.println("mapped: "+id.getText()+" -> "+t); if ( t!=null ) { text.setLength(_begin); text.append(t); } { if (((_tokenSet_17.member(LA(1))) && (_tokenSet_16.member(LA(2))) && (true))&&(transInfo!=null && transInfo.refRuleRoot!=null)) { { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { mWS(false); break; } case '=': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } mVAR_ASSIGN(false); } else if ((_tokenSet_18.member(LA(1))) && (true) && (true)) { } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; return mapped; } /** The arguments of a #[...] constructor are text, token type, * or a tree. 
*/ protected final void mAST_CTOR_ELEMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = AST_CTOR_ELEMENT; int _saveIndex; if ((LA(1)=='"') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) { mSTRING(false); } else if ((_tokenSet_19.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && (true)) { mTREE_ELEMENT(false); } else if (((LA(1) >= '0' && LA(1) <= '9'))) { mINT(false); } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mINT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = INT; int _saveIndex; { int _cnt610=0; _loop610: do { if (((LA(1) >= '0' && LA(1) <= '9'))) { mDIGIT(false); } else { if ( _cnt610>=1 ) { break _loop610; } else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());} } _cnt610++; } while (true); } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mARG(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = ARG; int _saveIndex; { switch ( LA(1)) { case '\'': { mCHAR(false); break; } case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': { mINT_OR_FLOAT(false); break; } default: if ((_tokenSet_19.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && ((LA(3) >= 
'\u0003' && LA(3) <= '\u00ff'))) { mTREE_ELEMENT(false); } else if ((LA(1)=='"') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) { mSTRING(false); } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } { _loop580: do { if ((_tokenSet_20.member(LA(1))) && (_tokenSet_21.member(LA(2))) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) { { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { mWS(false); break; } case '*': case '+': case '-': case '/': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } { switch ( LA(1)) { case '+': { match('+'); break; } case '-': { match('-'); break; } case '*': { match('*'); break; } case '/': { match('/'); break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { mWS(false); break; } case '"': case '#': case '\'': case '(': case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case ':': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': case '[': case '_': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } mARG(false); } else { break _loop580; } } while (true); } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new 
String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mTEXT_ARG_ELEMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = TEXT_ARG_ELEMENT; int _saveIndex; switch ( LA(1)) { case ':': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': case '_': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { mTEXT_ARG_ID_ELEMENT(false); break; } case '"': { mSTRING(false); break; } case '\'': { mCHAR(false); break; } case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': { mINT_OR_FLOAT(false); break; } case '$': { mTEXT_ITEM(false); break; } case '+': { match('+'); break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mTEXT_ARG_ID_ELEMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = TEXT_ARG_ID_ELEMENT; int _saveIndex; Token id=null; mID(true); id=_returnToken; { if ((_tokenSet_4.member(LA(1))) && (_tokenSet_22.member(LA(2))) && (true)) { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); } else if ((_tokenSet_22.member(LA(1))) && (true) && (true)) { } else { throw 
new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } { switch ( LA(1)) { case '(': { match('('); { if ((_tokenSet_4.member(LA(1))) && (_tokenSet_23.member(LA(2))) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); } else if ((_tokenSet_23.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && (true)) { } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } { _loop568: do { if ((_tokenSet_24.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) { mTEXT_ARG(false); { _loop567: do { if ((LA(1)==',')) { match(','); mTEXT_ARG(false); } else { break _loop567; } } while (true); } } else { break _loop568; } } while (true); } { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case ')': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } match(')'); break; } case '[': { { int _cnt573=0; _loop573: do { if ((LA(1)=='[')) { match('['); { if ((_tokenSet_4.member(LA(1))) && (_tokenSet_24.member(LA(2))) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); } else if ((_tokenSet_24.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) { } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } mTEXT_ARG(false); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case ']': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } match(']'); } else { if ( _cnt573>=1 ) { break _loop573; } else {throw new 
NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());} } _cnt573++; } while (true); } break; } case '.': { match('.'); mTEXT_ARG_ID_ELEMENT(false); break; } case '-': { match("->"); mTEXT_ARG_ID_ELEMENT(false); break; } default: if ((LA(1)==':') && (LA(2)==':') && (_tokenSet_12.member(LA(3)))) { match("::"); mTEXT_ARG_ID_ELEMENT(false); } else if ((_tokenSet_11.member(LA(1))) && (true) && (true)) { } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mINT_OR_FLOAT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = INT_OR_FLOAT; int _saveIndex; { int _cnt613=0; _loop613: do { if (((LA(1) >= '0' && LA(1) <= '9')) && (_tokenSet_25.member(LA(2))) && (true)) { mDIGIT(false); } else { if ( _cnt613>=1 ) { break _loop613; } else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());} } _cnt613++; } while (true); } { if ((LA(1)=='L') && (_tokenSet_26.member(LA(2))) && (true)) { match('L'); } else if ((LA(1)=='l') && (_tokenSet_26.member(LA(2))) && (true)) { match('l'); } else if ((LA(1)=='.')) { match('.'); { _loop616: do { if (((LA(1) >= '0' && LA(1) <= '9')) && (_tokenSet_26.member(LA(2))) && (true)) { mDIGIT(false); } else { break _loop616; } } while (true); } } else if ((_tokenSet_26.member(LA(1))) && (true) && (true)) { } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void 
mSL_COMMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = SL_COMMENT; int _saveIndex; match("//"); { _loop590: do { // nongreedy exit test if ((LA(1)=='\n'||LA(1)=='\r') && (true) && (true)) break _loop590; if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && (true)) { matchNot(EOF_CHAR); } else { break _loop590; } } while (true); } { if ((LA(1)=='\r') && (LA(2)=='\n') && (true)) { match("\r\n"); } else if ((LA(1)=='\n')) { match('\n'); } else if ((LA(1)=='\r') && (true) && (true)) { match('\r'); } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } newline(); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mML_COMMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = ML_COMMENT; int _saveIndex; match("/*"); { _loop594: do { // nongreedy exit test if ((LA(1)=='*') && (LA(2)=='/') && (true)) break _loop594; if ((LA(1)=='\r') && (LA(2)=='\n') && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) { match('\r'); match('\n'); newline(); } else if ((LA(1)=='\r') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) { match('\r'); newline(); } else if ((LA(1)=='\n') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) { match('\n'); newline(); } else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) { matchNot(EOF_CHAR); } else { break _loop594; } } while (true); } match("*/"); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { 
_token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mESC(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = ESC; int _saveIndex; match('\\'); { switch ( LA(1)) { case 'n': { match('n'); break; } case 'r': { match('r'); break; } case 't': { match('t'); break; } case 'v': { match('v'); break; } case 'b': { match('b'); break; } case 'f': { match('f'); break; } case '"': { match('"'); break; } case '\'': { match('\''); break; } case '\\': { match('\\'); break; } case '0': case '1': case '2': case '3': { { matchRange('0','3'); } { if (((LA(1) >= '0' && LA(1) <= '9')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && (true)) { mDIGIT(false); { if (((LA(1) >= '0' && LA(1) <= '9')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && (true)) { mDIGIT(false); } else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && (true) && (true)) { } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && (true) && (true)) { } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } break; } case '4': case '5': case '6': case '7': { { matchRange('4','7'); } { if (((LA(1) >= '0' && LA(1) <= '9')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && (true)) { mDIGIT(false); } else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && (true) && (true)) { } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = 
_token; } protected final void mDIGIT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = DIGIT; int _saveIndex; matchRange('0','9'); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } private static final long[] mk_tokenSet_0() { long[] data = new long[8]; data[0]=-103079215112L; for (int i = 1; i<=3; i++) { data[i]=-1L; } return data; } public static final BitSet _tokenSet_0 = new BitSet(mk_tokenSet_0()); private static final long[] mk_tokenSet_1() { long[] data = new long[8]; data[0]=-145135534866440L; for (int i = 1; i<=3; i++) { data[i]=-1L; } return data; } public static final BitSet _tokenSet_1 = new BitSet(mk_tokenSet_1()); private static final long[] mk_tokenSet_2() { long[] data = new long[8]; data[0]=-141407503262728L; for (int i = 1; i<=3; i++) { data[i]=-1L; } return data; } public static final BitSet _tokenSet_2 = new BitSet(mk_tokenSet_2()); private static final long[] mk_tokenSet_3() { long[] data = { 288230380446688768L, 576460745995190270L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_3 = new BitSet(mk_tokenSet_3()); private static final long[] mk_tokenSet_4() { long[] data = { 4294977024L, 0L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_4 = new BitSet(mk_tokenSet_4()); private static final long[] mk_tokenSet_5() { long[] data = { 1103806604800L, 0L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_5 = new BitSet(mk_tokenSet_5()); private static final long[] mk_tokenSet_6() { long[] data = { 576189812881499648L, 576460745995190270L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_6 = new BitSet(mk_tokenSet_6()); private static final long[] mk_tokenSet_7() { long[] data = new long[8]; data[0]=-17179869192L; data[1]=-268435457L; for 
(int i = 2; i<=3; i++) { data[i]=-1L; } return data; } public static final BitSet _tokenSet_7 = new BitSet(mk_tokenSet_7()); private static final long[] mk_tokenSet_8() { long[] data = new long[8]; data[0]=-549755813896L; data[1]=-268435457L; for (int i = 2; i<=3; i++) { data[i]=-1L; } return data; } public static final BitSet _tokenSet_8 = new BitSet(mk_tokenSet_8()); private static final long[] mk_tokenSet_9() { long[] data = { 576179277326712832L, 576460745995190270L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_9 = new BitSet(mk_tokenSet_9()); private static final long[] mk_tokenSet_10() { long[] data = { 576188709074894848L, 576460745995190270L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_10 = new BitSet(mk_tokenSet_10()); private static final long[] mk_tokenSet_11() { long[] data = { 576208504579171840L, 576460746532061182L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_11 = new BitSet(mk_tokenSet_11()); private static final long[] mk_tokenSet_12() { long[] data = { 288230376151711744L, 576460745995190270L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_12 = new BitSet(mk_tokenSet_12()); private static final long[] mk_tokenSet_13() { long[] data = { 3747275269732312576L, 671088640L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_13 = new BitSet(mk_tokenSet_13()); private static final long[] mk_tokenSet_14() { long[] data = new long[8]; data[0]=-4611686018427387912L; for (int i = 1; i<=3; i++) { data[i]=-1L; } return data; } public static final BitSet _tokenSet_14 = new BitSet(mk_tokenSet_14()); private static final long[] mk_tokenSet_15() { long[] data = { 576183181451994624L, 576460746129407998L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_15 = new BitSet(mk_tokenSet_15()); private static final long[] mk_tokenSet_16() { long[] data = { 2306051920717948416L, 536870912L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_16 = new 
BitSet(mk_tokenSet_16()); private static final long[] mk_tokenSet_17() { long[] data = { 2305843013508670976L, 0L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_17 = new BitSet(mk_tokenSet_17()); private static final long[] mk_tokenSet_18() { long[] data = { 208911504254464L, 536870912L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_18 = new BitSet(mk_tokenSet_18()); private static final long[] mk_tokenSet_19() { long[] data = { 288231527202947072L, 576460746129407998L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_19 = new BitSet(mk_tokenSet_19()); private static final long[] mk_tokenSet_20() { long[] data = { 189120294954496L, 0L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_20 = new BitSet(mk_tokenSet_20()); private static final long[] mk_tokenSet_21() { long[] data = { 576370098428716544L, 576460746129407998L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_21 = new BitSet(mk_tokenSet_21()); private static final long[] mk_tokenSet_22() { long[] data = { 576315157207066112L, 576460746666278910L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_22 = new BitSet(mk_tokenSet_22()); private static final long[] mk_tokenSet_23() { long[] data = { 576190912393127424L, 576460745995190270L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_23 = new BitSet(mk_tokenSet_23()); private static final long[] mk_tokenSet_24() { long[] data = { 576188713369871872L, 576460745995190270L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_24 = new BitSet(mk_tokenSet_24()); private static final long[] mk_tokenSet_25() { long[] data = { 576459193230304768L, 576460746532061182L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_25 = new BitSet(mk_tokenSet_25()); private static final long[] mk_tokenSet_26() { long[] data = { 576388824486127104L, 576460746532061182L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_26 = 
new BitSet(mk_tokenSet_26()); } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/actions/cpp/ActionLexerTokenTypes.java000066400000000000000000000012261161462365500304470ustar00rootroot00000000000000// $ANTLR : "action.g" -> "ActionLexer.java"$ package antlr.actions.cpp; public interface ActionLexerTokenTypes { int EOF = 1; int NULL_TREE_LOOKAHEAD = 3; int ACTION = 4; int STUFF = 5; int AST_ITEM = 6; int TEXT_ITEM = 7; int TREE = 8; int TREE_ELEMENT = 9; int AST_CONSTRUCTOR = 10; int AST_CTOR_ELEMENT = 11; int ID_ELEMENT = 12; int TEXT_ARG = 13; int TEXT_ARG_ELEMENT = 14; int TEXT_ARG_ID_ELEMENT = 15; int ARG = 16; int ID = 17; int VAR_ASSIGN = 18; int COMMENT = 19; int SL_COMMENT = 20; int ML_COMMENT = 21; int CHAR = 22; int STRING = 23; int ESC = 24; int DIGIT = 25; int INT = 26; int INT_OR_FLOAT = 27; int WS = 28; } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/actions/cpp/action.g000066400000000000000000000235701161462365500247740ustar00rootroot00000000000000header { package antlr.actions.cpp; } { import java.io.StringReader; import antlr.collections.impl.Vector; import antlr.*; } /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id:$ */ /** Perform the following translations: AST related translations ## -> currentRule_AST #(x,y,z) -> codeGenerator.getASTCreateString(vector-of(x,y,z)) #[x] -> codeGenerator.getASTCreateString(x) #x -> codeGenerator.mapTreeId(x) Inside context of #(...), you can ref (x,y,z), [x], and x as shortcuts. 
Text related translations $append(x) -> text.append(x) $setText(x) -> text.setLength(_begin); text.append(x) $getText -> new String(text.getBuffer(),_begin,text.length()-_begin) $setToken(x) -> _token = x $setType(x) -> _ttype = x $FOLLOW(r) -> FOLLOW set name for rule r (optional arg) $FIRST(r) -> FIRST set name for rule r (optional arg) */ class ActionLexer extends Lexer; options { k = 3; charVocabulary = '\3'..'\377'; testLiterals=false; interactive=true; } { protected RuleBlock currentRule; protected CodeGenerator generator; protected int lineOffset = 0; private Tool antlrTool; // The ANTLR tool ActionTransInfo transInfo; public ActionLexer(String s, RuleBlock currentRule, CodeGenerator generator, ActionTransInfo transInfo ) { this(new StringReader(s)); this.currentRule = currentRule; this.generator = generator; this.transInfo = transInfo; } public void setLineOffset(int lineOffset) { setLine(lineOffset); } public void setTool(Tool tool) { this.antlrTool = tool; } public void reportError(RecognitionException e) { antlrTool.error("Syntax error in action: "+e,getFilename(),getLine(),getColumn()); } public void reportError(String s) { antlrTool.error(s,getFilename(),getLine(),getColumn()); } public void reportWarning(String s) { if ( getFilename()==null ) antlrTool.warning(s); else antlrTool.warning(s,getFilename(),getLine(),getColumn()); } } // rules are protected because we don't care about nextToken(). public ACTION : ( STUFF | AST_ITEM | TEXT_ITEM )+ ; /** stuff in between #(...) and #id items * Allow the escaping of the # for C preprocessor stuff. */ protected STUFF : COMMENT | STRING | CHAR | "\r\n" { newline(); } | '\\' '#' { $setText("#"); } | '\r' { newline(); } | '\n' { newline(); } | '/' ~('/'|'*') // non-comment start '/' // | ( ~('/'|'\n'|'\r'|'$'|'#'|'"'|'\'') )+ | ~('/'|'\n'|'\r'|'$'|'#'|'"'|'\'') ; protected AST_ITEM : '#'! t:TREE // #( ) | '#'! (WS)? id:ID // #a_name (=)? 
{ String idt = id.getText(); String mapped = generator.mapTreeId(id.getText(), transInfo); // verify that it's not a preprocessor macro... if( mapped!=null && ! idt.equals( mapped ) ) { $setText(mapped); } else { if(idt.equals("if") || idt.equals("define") || idt.equals("ifdef") || idt.equals("ifndef") || idt.equals("else") || idt.equals("elif") || idt.equals("endif") || idt.equals("warning") || idt.equals("error") || idt.equals("ident") || idt.equals("pragma") || idt.equals("include")) { $setText("#"+idt); } } } (WS)? ( options {greedy=true;} : VAR_ASSIGN )? | '#'! ctor:AST_CONSTRUCTOR // #[ ] | "##" { if( currentRule != null ) { String r = currentRule.getRuleName()+"_AST"; $setText(r); if ( transInfo!=null ) { transInfo.refRuleRoot=r; // we ref root of tree } } else { reportWarning("\"##\" not valid in this context"); $setText("##"); } } (WS)? ( options {greedy=true;} : VAR_ASSIGN )? ; protected TEXT_ITEM : "$append" (WS)? '(' a1:TEXT_ARG ')' { String t = "text += "+a1.getText(); $setText(t); } | "$set" ( "Text" (WS)? '(' a2:TEXT_ARG ')' { String t; t = "{ text.erase(_begin); text += "+a2.getText()+"; }"; $setText(t); } | "Token" (WS)? '(' a3:TEXT_ARG ')' { String t="_token = "+a3.getText(); $setText(t); } | "Type" (WS)? '(' a4:TEXT_ARG ')' { String t="_ttype = "+a4.getText(); $setText(t); } ) | "$getText" { $setText("text.substr(_begin,text.length()-_begin)"); } | "$FOLLOW" ( (WS)? '(' a5:TEXT_ARG ')' )? { String rule = currentRule.getRuleName(); if ( a5!=null ) { rule = a5.getText(); } String setName = generator.getFOLLOWBitSet(rule, 1); // System.out.println("FOLLOW("+rule+")="+setName); if ( setName==null ) { reportError("$FOLLOW("+rule+")"+ ": unknown rule or bad lookahead computation"); } else { $setText(setName); } } | "$FIRST" ( (WS)? '(' a6:TEXT_ARG ')' )? 
{ String rule = currentRule.getRuleName(); if ( a6!=null ) { rule = a6.getText(); } String setName = generator.getFIRSTBitSet(rule, 1); // System.out.println("FIRST("+rule+")="+setName); if ( setName==null ) { reportError("$FIRST("+rule+")"+ ": unknown rule or bad lookahead computation"); } else { $setText(setName); } } ; protected TREE! { StringBuffer buf = new StringBuffer(); int n=0; Vector terms = new Vector(10); } : '(' (WS)? t:TREE_ELEMENT { terms.appendElement( generator.processStringForASTConstructor(t.getText()) ); } (WS)? ( ',' (WS)? t2:TREE_ELEMENT { terms.appendElement( generator.processStringForASTConstructor(t2.getText()) ); } (WS)? )* {$setText(generator.getASTCreateString(terms));} ')' ; protected TREE_ELEMENT { boolean was_mapped; } : '#'! TREE | '#'! AST_CONSTRUCTOR | '#'! was_mapped=id:ID_ELEMENT { // RK: I have a queer feeling that this maptreeid is redundant.. if ( ! was_mapped ) { String t = generator.mapTreeId(id.getText(), null); // System.out.println("mapped: "+id.getText()+" -> "+t); if ( t!=null ) { $setText(t); } } } | "##" { if( currentRule != null ) { String t = currentRule.getRuleName()+"_AST"; $setText(t); } else { reportError("\"##\" not valid in this context"); $setText("##"); } } | TREE | AST_CONSTRUCTOR | ID_ELEMENT | STRING ; // FIXME: RK - the getASTCreateString here is broken. // getASTCreateString can not cleanly see if a constructor like // tokens { FOR; } // forLoop:! "for" bla bla // { #forLoop = #([FOR,"for"], bla bla ) } // should use ForNode as AST. // protected AST_CONSTRUCTOR! : '[' (WS)? x:AST_CTOR_ELEMENT (WS)? (',' (WS)? y:AST_CTOR_ELEMENT (WS)? )? 
']' { // System.out.println("AST_CONSTRUCTOR: "+((x==null)?"null":x.getText())+ // ", "+((y==null)?"null":y.getText())); String ys = generator.processStringForASTConstructor(x.getText()); // the second does not need processing coz it's a string // (eg second param of astFactory.create(x,y) if ( y!=null ) ys += ","+y.getText(); $setText( generator.getASTCreateString(null,ys) ); } ; /** The arguments of a #[...] constructor are text, token type, * or a tree. */ protected AST_CTOR_ELEMENT : STRING | INT | TREE_ELEMENT ; /** An ID_ELEMENT can be a func call, array ref, simple var, * or AST label ref. */ protected ID_ELEMENT returns [boolean mapped=false] : id:ID (options {greedy=true;}:WS!)? ( ('<' (~'>')* '>')? // allow typecast '(' (options {greedy=true;}:WS!)? ( ARG (',' (WS!)? ARG)* )? (WS!)? ')' // method call | ( '[' (WS!)? ARG (WS!)? ']' )+ // array reference | '.' ID_ELEMENT | "->" ID_ELEMENT | "::" ID_ELEMENT | /* could be a token reference or just a user var */ { mapped = true; String t = generator.mapTreeId(id.getText(), transInfo); // System.out.println("mapped: "+id.getText()+" -> "+t); if ( t!=null ) { $setText(t); } } // if #rule referenced, check for assignment ( options {greedy=true;} : {transInfo!=null && transInfo.refRuleRoot!=null}? (WS)? VAR_ASSIGN )? ) ; protected TEXT_ARG : (WS)? ( TEXT_ARG_ELEMENT (options {greedy=true;}:WS)? )+ ; protected TEXT_ARG_ELEMENT : TEXT_ARG_ID_ELEMENT | STRING | CHAR | INT_OR_FLOAT | TEXT_ITEM | '+' ; protected TEXT_ARG_ID_ELEMENT : id:ID (options {greedy=true;}:WS!)? ( '(' (options {greedy=true;}:WS!)? ( TEXT_ARG (',' TEXT_ARG)* )* (WS!)? ')' // method call | ( '[' (WS!)? TEXT_ARG (WS!)? ']' )+ // array reference | '.' TEXT_ARG_ID_ELEMENT | "->" TEXT_ARG_ID_ELEMENT | "::" TEXT_ARG_ID_ELEMENT | ) ; protected ARG : ( TREE_ELEMENT | STRING | CHAR | INT_OR_FLOAT ) (options {greedy=true;} : (WS)? ( '+'| '-' | '*' | '/' ) (WS)? 
ARG )* ; protected ID : ('a'..'z'|'A'..'Z'|'_'|"::") (options {greedy=true;} : ('a'..'z'|'A'..'Z'|'0'..'9'|'_'|"::"))* ; protected VAR_ASSIGN : '=' { // inform the code generator that an assignment was done to // AST root for the rule if invoker set refRuleRoot. if ( LA(1)!='=' && transInfo!=null && transInfo.refRuleRoot!=null ) { transInfo.assignToRoot=true; } } ; protected COMMENT : SL_COMMENT | ML_COMMENT ; protected SL_COMMENT : "//" (options {greedy=false;}:.)* ('\n'|"\r\n"|'\r') {newline();} ; protected ML_COMMENT : "/*" ( options {greedy=false;} : '\r' '\n' {newline();} | '\r' {newline();} | '\n' {newline();} | . )* "*/" ; protected CHAR : '\'' ( ESC | ~'\'' ) '\'' ; protected STRING : '"' (ESC|~'"')* '"' ; protected ESC : '\\' ( 'n' | 'r' | 't' | 'v' | 'b' | 'f' | '"' | '\'' | '\\' | ('0'..'3') ( options {greedy=true;} : DIGIT ( options {greedy=true;} : DIGIT )? )? | ('4'..'7') (options {greedy=true;}:DIGIT)? ) ; protected DIGIT : '0'..'9' ; protected INT : (DIGIT)+ ; protected INT_OR_FLOAT : (options {greedy=true;}:DIGIT)+ ( options {greedy=true;} : '.' (options {greedy=true;}:DIGIT)* | 'L' | 'l' )? 
; protected WS : ( options {greedy=true;} : ' ' | '\t' | '\r' '\n' {newline();} | '\r' {newline();} | '\n' {newline();} )+ ; nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/actions/csharp/000077500000000000000000000000001161462365500240365ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/actions/csharp/ActionLexer.java000066400000000000000000002025321161462365500271220ustar00rootroot00000000000000// $ANTLR : "action.g" -> "ActionLexer.java"$ package antlr.actions.csharp; import java.io.InputStream; import antlr.TokenStreamException; import antlr.TokenStreamIOException; import antlr.TokenStreamRecognitionException; import antlr.CharStreamException; import antlr.CharStreamIOException; import antlr.ANTLRException; import java.io.Reader; import java.util.Hashtable; import antlr.CharScanner; import antlr.InputBuffer; import antlr.ByteBuffer; import antlr.CharBuffer; import antlr.Token; import antlr.CommonToken; import antlr.RecognitionException; import antlr.NoViableAltForCharException; import antlr.MismatchedCharException; import antlr.TokenStream; import antlr.ANTLRHashString; import antlr.LexerSharedInputState; import antlr.collections.impl.BitSet; import antlr.SemanticException; import java.io.StringReader; import antlr.collections.impl.Vector; import antlr.*; /** Perform the following translations: AST related translations ## -> currentRule_AST #(x,y,z) -> codeGenerator.getASTCreateString(vector-of(x,y,z)) #[x] -> codeGenerator.getASTCreateString(x) #x -> codeGenerator.mapTreeId(x) Inside context of #(...), you can ref (x,y,z), [x], and x as shortcuts. 
Text related translations $append(x) -> text.append(x) $setText(x) -> text.setLength(_begin); text.append(x) $getText -> new String(text.getBuffer(),_begin,text.length()-_begin) $setToken(x) -> _token = x $setType(x) -> _ttype = x $FOLLOW(r) -> FOLLOW set name for rule r (optional arg) $FIRST(r) -> FIRST set name for rule r (optional arg) */ public class ActionLexer extends antlr.CharScanner implements ActionLexerTokenTypes, TokenStream { protected RuleBlock currentRule; protected CodeGenerator generator; protected int lineOffset = 0; private Tool antlrTool; // The ANTLR tool ActionTransInfo transInfo; public ActionLexer( String s, RuleBlock currentRule, CodeGenerator generator, ActionTransInfo transInfo ) { this(new StringReader(s)); this.currentRule = currentRule; this.generator = generator; this.transInfo = transInfo; } public void setLineOffset(int lineOffset) { setLine(lineOffset); } public void setTool(Tool tool) { this.antlrTool = tool; } public void reportError(RecognitionException e) { antlrTool.error("Syntax error in action: "+e,getFilename(),getLine(),getColumn()); } public void reportError(String s) { antlrTool.error(s,getFilename(),getLine(),getColumn()); } public void reportWarning(String s) { if ( getFilename()==null ) antlrTool.warning(s); else antlrTool.warning(s,getFilename(),getLine(),getColumn()); } public ActionLexer(InputStream in) { this(new ByteBuffer(in)); } public ActionLexer(Reader in) { this(new CharBuffer(in)); } public ActionLexer(InputBuffer ib) { this(new LexerSharedInputState(ib)); } public ActionLexer(LexerSharedInputState state) { super(state); caseSensitiveLiterals = true; setCaseSensitive(true); literals = new Hashtable(); } public Token nextToken() throws TokenStreamException { Token theRetToken=null; tryAgain: for (;;) { Token _token = null; int _ttype = Token.INVALID_TYPE; resetText(); try { // for char stream error handling try { // for lexical error handling if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff'))) { mACTION(true); 
theRetToken=_returnToken; } else { if (LA(1)==EOF_CHAR) {uponEOF(); _returnToken = makeToken(Token.EOF_TYPE);} else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());} } if ( _returnToken==null ) continue tryAgain; // found SKIP token _ttype = _returnToken.getType(); _returnToken.setType(_ttype); return _returnToken; } catch (RecognitionException e) { throw new TokenStreamRecognitionException(e); } } catch (CharStreamException cse) { if ( cse instanceof CharStreamIOException ) { throw new TokenStreamIOException(((CharStreamIOException)cse).io); } else { throw new TokenStreamException(cse.getMessage()); } } } } public final void mACTION(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = ACTION; int _saveIndex; { int _cnt623=0; _loop623: do { switch ( LA(1)) { case '#': { mAST_ITEM(false); break; } case '$': { mTEXT_ITEM(false); break; } default: if ((_tokenSet_0.member(LA(1)))) { mSTUFF(false); } else { if ( _cnt623>=1 ) { break _loop623; } else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());} } } _cnt623++; } while (true); } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } /** stuff in between #(...) and #id items * Allow the escaping of the # for C# preprocessor stuff. 
*/ protected final void mSTUFF(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = STUFF; int _saveIndex; switch ( LA(1)) { case '"': { mSTRING(false); break; } case '\'': { mCHAR(false); break; } case '\n': { match('\n'); newline(); break; } default: if ((LA(1)=='/') && (LA(2)=='*'||LA(2)=='/')) { mCOMMENT(false); } else if ((LA(1)=='\r') && (LA(2)=='\n') && (true)) { match("\r\n"); newline(); } else if ((LA(1)=='\\') && (LA(2)=='#') && (true)) { match('\\'); match('#'); text.setLength(_begin); text.append("#"); } else if ((LA(1)=='/') && (_tokenSet_1.member(LA(2)))) { match('/'); { match(_tokenSet_1); } } else if ((LA(1)=='\r') && (true) && (true)) { match('\r'); newline(); } else if ((_tokenSet_2.member(LA(1))) && (true) && (true)) { { match(_tokenSet_2); } } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mAST_ITEM(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = AST_ITEM; int _saveIndex; Token t=null; Token id=null; Token ctor=null; if ((LA(1)=='#') && (LA(2)=='(')) { _saveIndex=text.length(); match('#'); text.setLength(_saveIndex); mTREE(true); t=_returnToken; } else if ((LA(1)=='#') && (_tokenSet_3.member(LA(2)))) { _saveIndex=text.length(); match('#'); text.setLength(_saveIndex); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { mWS(false); break; } case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': 
case 'W': case 'X': case 'Y': case 'Z': case '_': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } mID(true); id=_returnToken; String idt = id.getText(); String mapped = generator.mapTreeId(id.getText(), transInfo); // verify that it's not a preprocessor macro... if ( (mapped != null) && !idt.equals(mapped) ) { text.setLength(_begin); text.append(mapped); } else { if (idt.equals("define") || idt.equals("undef") || idt.equals("if") || idt.equals("elif") || idt.equals("else") || idt.equals("endif") || idt.equals("line") || idt.equals("error") || idt.equals("warning") || idt.equals("region") || idt.equals("endregion")) { text.setLength(_begin); text.append("#"+idt); } } { if ((_tokenSet_4.member(LA(1))) && (true) && (true)) { mWS(false); } else { } } { if ((LA(1)=='=') && (true) && (true)) { mVAR_ASSIGN(false); } else { } } } else if ((LA(1)=='#') && (LA(2)=='[')) { _saveIndex=text.length(); match('#'); text.setLength(_saveIndex); mAST_CONSTRUCTOR(true); ctor=_returnToken; } else if ((LA(1)=='#') && (LA(2)=='#')) { match("##"); if( currentRule != null ) { String r = currentRule.getRuleName()+"_AST"; text.setLength(_begin); text.append(r); if ( transInfo!=null ) { transInfo.refRuleRoot=r; // we ref root of tree } } else { reportWarning("\"##\" not valid in this context"); text.setLength(_begin); text.append("##"); } { if ((_tokenSet_4.member(LA(1))) && (true) && (true)) { mWS(false); } else { } } { if ((LA(1)=='=') && (true) && (true)) { mVAR_ASSIGN(false); } else { } } } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = 
makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mTEXT_ITEM(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = TEXT_ITEM; int _saveIndex; Token a1=null; Token a2=null; Token a3=null; Token a4=null; Token a5=null; Token a6=null; if ((LA(1)=='$') && (LA(2)=='F') && (LA(3)=='O')) { match("$FOLLOW"); { if ((_tokenSet_5.member(LA(1))) && (_tokenSet_6.member(LA(2))) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) { { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { mWS(false); break; } case '(': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } match('('); mTEXT_ARG(true); a5=_returnToken; match(')'); } else { } } String rule = currentRule.getRuleName(); if ( a5!=null ) { rule = a5.getText(); } String setName = generator.getFOLLOWBitSet(rule, 1); // System.out.println("FOLLOW("+rule+")="+setName); if ( setName==null ) { reportError("$FOLLOW("+rule+")"+ ": unknown rule or bad lookahead computation"); } else { text.setLength(_begin); text.append(setName); } } else if ((LA(1)=='$') && (LA(2)=='F') && (LA(3)=='I')) { match("$FIRST"); { if ((_tokenSet_5.member(LA(1))) && (_tokenSet_6.member(LA(2))) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) { { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { mWS(false); break; } case '(': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } match('('); mTEXT_ARG(true); a6=_returnToken; match(')'); } else { } } String rule = currentRule.getRuleName(); if ( a6!=null ) { rule = a6.getText(); } String setName = generator.getFIRSTBitSet(rule, 1); // System.out.println("FIRST("+rule+")="+setName); if ( setName==null ) { reportError("$FIRST("+rule+")"+ ": unknown rule or bad lookahead 
computation"); } else { text.setLength(_begin); text.append(setName); } } else if ((LA(1)=='$') && (LA(2)=='a')) { match("$append"); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { mWS(false); break; } case '(': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } match('('); mTEXT_ARG(true); a1=_returnToken; match(')'); String t = "text.Append("+a1.getText()+")"; text.setLength(_begin); text.append(t); } else if ((LA(1)=='$') && (LA(2)=='s')) { match("$set"); { if ((LA(1)=='T') && (LA(2)=='e')) { match("Text"); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { mWS(false); break; } case '(': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } match('('); mTEXT_ARG(true); a2=_returnToken; match(')'); String t; t = "text.Length = _begin; text.Append("+a2.getText()+")"; text.setLength(_begin); text.append(t); } else if ((LA(1)=='T') && (LA(2)=='o')) { match("Token"); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { mWS(false); break; } case '(': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } match('('); mTEXT_ARG(true); a3=_returnToken; match(')'); String t="_token = "+a3.getText(); text.setLength(_begin); text.append(t); } else if ((LA(1)=='T') && (LA(2)=='y')) { match("Type"); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { mWS(false); break; } case '(': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } match('('); mTEXT_ARG(true); a4=_returnToken; match(')'); String t="_ttype = "+a4.getText(); text.setLength(_begin); text.append(t); } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } else if ((LA(1)=='$') && (LA(2)=='g')) { match("$getText"); text.setLength(_begin); 
text.append("text.ToString(_begin, text.Length-_begin)"); } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mCOMMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = COMMENT; int _saveIndex; if ((LA(1)=='/') && (LA(2)=='/')) { mSL_COMMENT(false); } else if ((LA(1)=='/') && (LA(2)=='*')) { mML_COMMENT(false); } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mSTRING(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = STRING; int _saveIndex; match('"'); { _loop720: do { if ((LA(1)=='\\')) { mESC(false); } else if ((_tokenSet_7.member(LA(1)))) { matchNot('"'); } else { break _loop720; } } while (true); } match('"'); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mCHAR(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = CHAR; int _saveIndex; match('\''); { if ((LA(1)=='\\')) { mESC(false); } else if ((_tokenSet_8.member(LA(1)))) { matchNot('\''); } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } match('\''); if ( 
_createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mTREE(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = TREE; int _saveIndex; Token t=null; Token t2=null; StringBuffer buf = new StringBuffer(); int n=0; Vector terms = new Vector(10); _saveIndex=text.length(); match('('); text.setLength(_saveIndex); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case '"': case '#': case '(': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': case '[': case '_': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } _saveIndex=text.length(); mTREE_ELEMENT(true); text.setLength(_saveIndex); t=_returnToken; terms.appendElement( generator.processStringForASTConstructor(t.getText()) ); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case ')': case ',': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } { _loop649: do { if ((LA(1)==',')) { _saveIndex=text.length(); match(','); text.setLength(_saveIndex); { switch ( LA(1)) { case '\t': case '\n': case 
'\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case '"': case '#': case '(': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': case '[': case '_': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } _saveIndex=text.length(); mTREE_ELEMENT(true); text.setLength(_saveIndex); t2=_returnToken; terms.appendElement( generator.processStringForASTConstructor(t2.getText()) ); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case ')': case ',': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } } else { break _loop649; } } while (true); } text.setLength(_begin); text.append(generator.getASTCreateString(terms)); _saveIndex=text.length(); match(')'); text.setLength(_saveIndex); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mWS(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = WS; int _saveIndex; { int _cnt740=0; _loop740: do { if ((LA(1)=='\r') && (LA(2)=='\n') && (true)) { match('\r'); match('\n'); newline(); } else if ((LA(1)==' ') && (true) && (true)) { match(' '); } else if 
((LA(1)=='\t') && (true) && (true)) { match('\t'); } else if ((LA(1)=='\r') && (true) && (true)) { match('\r'); newline(); } else if ((LA(1)=='\n') && (true) && (true)) { match('\n'); newline(); } else { if ( _cnt740>=1 ) { break _loop740; } else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());} } _cnt740++; } while (true); } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mID(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = ID; int _saveIndex; { switch ( LA(1)) { case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { matchRange('a','z'); break; } case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': { matchRange('A','Z'); break; } case '_': { match('_'); break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } { _loop706: do { if ((_tokenSet_9.member(LA(1))) && (true) && (true)) { { switch ( LA(1)) { case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { matchRange('a','z'); break; } case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': 
case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': { matchRange('A','Z'); break; } case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': { matchRange('0','9'); break; } case '_': { match('_'); break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } } else { break _loop706; } } while (true); } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mVAR_ASSIGN(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = VAR_ASSIGN; int _saveIndex; match('='); // inform the code generator that an assignment was done to // AST root for the rule if invoker set refRuleRoot. 
if ( LA(1)!='=' && transInfo!=null && transInfo.refRuleRoot!=null ) { transInfo.assignToRoot=true; } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mAST_CONSTRUCTOR(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = AST_CONSTRUCTOR; int _saveIndex; Token x=null; Token y=null; Token z=null; _saveIndex=text.length(); match('['); text.setLength(_saveIndex); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case '"': case '#': case '(': case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': case '[': case '_': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } _saveIndex=text.length(); mAST_CTOR_ELEMENT(true); text.setLength(_saveIndex); x=_returnToken; { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case ',': case ']': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } { if ((LA(1)==',') && (_tokenSet_10.member(LA(2))) && ((LA(3) >= 
'\u0003' && LA(3) <= '\u00ff'))) { _saveIndex=text.length(); match(','); text.setLength(_saveIndex); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case '"': case '#': case '(': case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': case '[': case '_': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } _saveIndex=text.length(); mAST_CTOR_ELEMENT(true); text.setLength(_saveIndex); y=_returnToken; { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case ',': case ']': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } } else if ((LA(1)==','||LA(1)==']') && (true) && (true)) { } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } { switch ( LA(1)) { case ',': { _saveIndex=text.length(); match(','); text.setLength(_saveIndex); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case '"': case '#': case '(': case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 
'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': case '[': case '_': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } _saveIndex=text.length(); mAST_CTOR_ELEMENT(true); text.setLength(_saveIndex); z=_returnToken; { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case ']': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } break; } case ']': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } _saveIndex=text.length(); match(']'); text.setLength(_saveIndex); String args = generator.processStringForASTConstructor(x.getText()); // the second does not need processing coz it's a string // (eg second param of astFactory.create(x,y) if ( y!=null ) args += ","+y.getText(); if ( z!=null ) args += ","+z.getText(); text.setLength(_begin); text.append(generator.getASTCreateString(null,args)); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mTEXT_ARG(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = TEXT_ARG; int _saveIndex; { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { mWS(false); break; } case '"': 
case '$': case '\'': case '+': case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': case '_': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } { int _cnt680=0; _loop680: do { if ((_tokenSet_11.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && (true)) { mTEXT_ARG_ELEMENT(false); { if ((_tokenSet_4.member(LA(1))) && (_tokenSet_12.member(LA(2))) && (true)) { mWS(false); } else if ((_tokenSet_12.member(LA(1))) && (true) && (true)) { } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } else { if ( _cnt680>=1 ) { break _loop680; } else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());} } _cnt680++; } while (true); } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mTREE_ELEMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = TREE_ELEMENT; int _saveIndex; Token id=null; boolean was_mapped; switch ( LA(1)) { case '(': { mTREE(false); break; } case '[': { mAST_CONSTRUCTOR(false); break; } case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 
'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': case '_': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { mID_ELEMENT(false); break; } case '"': { mSTRING(false); break; } default: if ((LA(1)=='#') && (LA(2)=='(')) { _saveIndex=text.length(); match('#'); text.setLength(_saveIndex); mTREE(false); } else if ((LA(1)=='#') && (LA(2)=='[')) { _saveIndex=text.length(); match('#'); text.setLength(_saveIndex); mAST_CONSTRUCTOR(false); } else if ((LA(1)=='#') && (_tokenSet_13.member(LA(2)))) { _saveIndex=text.length(); match('#'); text.setLength(_saveIndex); was_mapped=mID_ELEMENT(true); id=_returnToken; // RK: I have a queer feeling that this maptreeid is redundant.. if ( ! was_mapped ) { String t = generator.mapTreeId(id.getText(), null); if ( t!=null ) { text.setLength(_begin); text.append(t); } } } else if ((LA(1)=='#') && (LA(2)=='#')) { match("##"); if( currentRule != null ) { String t = currentRule.getRuleName()+"_AST"; text.setLength(_begin); text.append(t); } else { reportError("\"##\" not valid in this context"); text.setLength(_begin); text.append("##"); } } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } /** An ID_ELEMENT can be a func call, array ref, simple var, * or AST label ref. 
*/ protected final boolean mID_ELEMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { boolean mapped=false; int _ttype; Token _token=null; int _begin=text.length(); _ttype = ID_ELEMENT; int _saveIndex; Token id=null; mID(true); id=_returnToken; { if ((_tokenSet_4.member(LA(1))) && (_tokenSet_14.member(LA(2))) && (true)) { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); } else if ((_tokenSet_14.member(LA(1))) && (true) && (true)) { } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } { switch ( LA(1)) { case '(': { match('('); { if ((_tokenSet_4.member(LA(1))) && (_tokenSet_15.member(LA(2))) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); } else if ((_tokenSet_15.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && (true)) { } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } { switch ( LA(1)) { case '"': case '#': case '\'': case '(': case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': case '[': case '_': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { mARG(false); { _loop668: do { if ((LA(1)==',')) { match(','); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case '"': case '#': case '\'': case '(': case '0': case '1': 
case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': case '[': case '_': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } mARG(false); } else { break _loop668; } } while (true); } break; } case '\t': case '\n': case '\r': case ' ': case ')': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case ')': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } match(')'); break; } case '[': { { int _cnt673=0; _loop673: do { if ((LA(1)=='[')) { match('['); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case '"': case '#': case '\'': case '(': case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': case '[': case '_': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 
'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } mARG(false); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case ']': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } match(']'); } else { if ( _cnt673>=1 ) { break _loop673; } else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());} } _cnt673++; } while (true); } break; } case '.': { match('.'); mID_ELEMENT(false); break; } default: if ((LA(1)=='-') && (LA(2)=='>') && (_tokenSet_13.member(LA(3)))) { match("->"); mID_ELEMENT(false); } else if ((_tokenSet_16.member(LA(1))) && (true) && (true)) { mapped = true; String t = generator.mapTreeId(id.getText(), transInfo); // System.out.println("mapped: "+id.getText()+" -> "+t); if ( t!=null ) { text.setLength(_begin); text.append(t); } { if (((_tokenSet_17.member(LA(1))) && (_tokenSet_16.member(LA(2))) && (true))&&(transInfo!=null && transInfo.refRuleRoot!=null)) { { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { mWS(false); break; } case '=': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } mVAR_ASSIGN(false); } else if ((_tokenSet_18.member(LA(1))) && (true) && (true)) { } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = 
_token; return mapped; } /** The arguments of a #[...] constructor are text, token type, * or a tree. */ protected final void mAST_CTOR_ELEMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = AST_CTOR_ELEMENT; int _saveIndex; if ((LA(1)=='"') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) { mSTRING(false); } else if ((_tokenSet_19.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && (true)) { mTREE_ELEMENT(false); } else if (((LA(1) >= '0' && LA(1) <= '9'))) { mINT(false); } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mINT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = INT; int _saveIndex; { int _cnt731=0; _loop731: do { if (((LA(1) >= '0' && LA(1) <= '9'))) { mDIGIT(false); } else { if ( _cnt731>=1 ) { break _loop731; } else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());} } _cnt731++; } while (true); } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mARG(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = ARG; int _saveIndex; { switch ( LA(1)) { case '\'': { mCHAR(false); break; } case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': { mINT_OR_FLOAT(false); break; } 
default: if ((_tokenSet_19.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) { mTREE_ELEMENT(false); } else if ((LA(1)=='"') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) { mSTRING(false); } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } { _loop701: do { if ((_tokenSet_20.member(LA(1))) && (_tokenSet_21.member(LA(2))) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) { { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { mWS(false); break; } case '*': case '+': case '-': case '/': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } { switch ( LA(1)) { case '+': { match('+'); break; } case '-': { match('-'); break; } case '*': { match('*'); break; } case '/': { match('/'); break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { mWS(false); break; } case '"': case '#': case '\'': case '(': case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': case '[': case '_': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } mARG(false); } else { break _loop701; } } while (true); } if ( _createToken 
&& _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mTEXT_ARG_ELEMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = TEXT_ARG_ELEMENT; int _saveIndex; switch ( LA(1)) { case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': case '_': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { mTEXT_ARG_ID_ELEMENT(false); break; } case '"': { mSTRING(false); break; } case '\'': { mCHAR(false); break; } case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': { mINT_OR_FLOAT(false); break; } case '$': { mTEXT_ITEM(false); break; } case '+': { match('+'); break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mTEXT_ARG_ID_ELEMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = TEXT_ARG_ID_ELEMENT; int _saveIndex; Token id=null; mID(true); id=_returnToken; { if ((_tokenSet_4.member(LA(1))) && (_tokenSet_22.member(LA(2))) && (true)) { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); 
} else if ((_tokenSet_22.member(LA(1))) && (true) && (true)) { } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } { switch ( LA(1)) { case '(': { match('('); { if ((_tokenSet_4.member(LA(1))) && (_tokenSet_23.member(LA(2))) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); } else if ((_tokenSet_23.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && (true)) { } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } { _loop689: do { if ((_tokenSet_24.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) { mTEXT_ARG(false); { _loop688: do { if ((LA(1)==',')) { match(','); mTEXT_ARG(false); } else { break _loop688; } } while (true); } } else { break _loop689; } } while (true); } { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case ')': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } match(')'); break; } case '[': { { int _cnt694=0; _loop694: do { if ((LA(1)=='[')) { match('['); { if ((_tokenSet_4.member(LA(1))) && (_tokenSet_24.member(LA(2))) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); } else if ((_tokenSet_24.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) { } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } mTEXT_ARG(false); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case ']': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } match(']'); 
} else { if ( _cnt694>=1 ) { break _loop694; } else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());} } _cnt694++; } while (true); } break; } case '.': { match('.'); mTEXT_ARG_ID_ELEMENT(false); break; } case '-': { match("->"); mTEXT_ARG_ID_ELEMENT(false); break; } case '\t': case '\n': case '\r': case ' ': case '"': case '$': case '\'': case ')': case '+': case ',': case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': case ']': case '_': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mINT_OR_FLOAT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = INT_OR_FLOAT; int _saveIndex; { int _cnt734=0; _loop734: do { if (((LA(1) >= '0' && LA(1) <= '9')) && (_tokenSet_25.member(LA(2))) && (true)) { mDIGIT(false); } else { if ( _cnt734>=1 ) { break _loop734; } else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());} } _cnt734++; } while (true); } { if ((LA(1)=='L') && (_tokenSet_26.member(LA(2))) && (true)) { match('L'); } else if ((LA(1)=='l') && 
(_tokenSet_26.member(LA(2))) && (true)) { match('l'); } else if ((LA(1)=='.')) { match('.'); { _loop737: do { if (((LA(1) >= '0' && LA(1) <= '9')) && (_tokenSet_26.member(LA(2))) && (true)) { mDIGIT(false); } else { break _loop737; } } while (true); } } else if ((_tokenSet_26.member(LA(1))) && (true) && (true)) { } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mSL_COMMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = SL_COMMENT; int _saveIndex; match("//"); { _loop711: do { // nongreedy exit test if ((LA(1)=='\n'||LA(1)=='\r') && (true) && (true)) break _loop711; if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && (true)) { matchNot(EOF_CHAR); } else { break _loop711; } } while (true); } { if ((LA(1)=='\r') && (LA(2)=='\n') && (true)) { match("\r\n"); } else if ((LA(1)=='\n')) { match('\n'); } else if ((LA(1)=='\r') && (true) && (true)) { match('\r'); } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } newline(); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mML_COMMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = ML_COMMENT; int _saveIndex; match("/*"); { _loop715: do { // nongreedy exit test if ((LA(1)=='*') && (LA(2)=='/') && (true)) break _loop715; if ((LA(1)=='\r') && (LA(2)=='\n') && ((LA(3) >= '\u0003' && LA(3) 
<= '\u00ff'))) { match('\r'); match('\n'); newline(); } else if ((LA(1)=='\r') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) { match('\r'); newline(); } else if ((LA(1)=='\n') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) { match('\n'); newline(); } else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) { matchNot(EOF_CHAR); } else { break _loop715; } } while (true); } match("*/"); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mESC(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = ESC; int _saveIndex; match('\\'); { switch ( LA(1)) { case 'n': { match('n'); break; } case 'r': { match('r'); break; } case 't': { match('t'); break; } case 'b': { match('b'); break; } case 'f': { match('f'); break; } case '"': { match('"'); break; } case '\'': { match('\''); break; } case '\\': { match('\\'); break; } case '0': case '1': case '2': case '3': { { matchRange('0','3'); } { if (((LA(1) >= '0' && LA(1) <= '9')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && (true)) { mDIGIT(false); { if (((LA(1) >= '0' && LA(1) <= '9')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && (true)) { mDIGIT(false); } else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && (true) && (true)) { } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && (true) && (true)) { } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } break; } case '4': case '5': case '6': case '7': { { matchRange('4','7'); } 
{ if (((LA(1) >= '0' && LA(1) <= '9')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && (true)) { mDIGIT(false); } else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && (true) && (true)) { } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mDIGIT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = DIGIT; int _saveIndex; matchRange('0','9'); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } private static final long[] mk_tokenSet_0() { long[] data = new long[8]; data[0]=-103079215112L; for (int i = 1; i<=3; i++) { data[i]=-1L; } return data; } public static final BitSet _tokenSet_0 = new BitSet(mk_tokenSet_0()); private static final long[] mk_tokenSet_1() { long[] data = new long[8]; data[0]=-145135534866440L; for (int i = 1; i<=3; i++) { data[i]=-1L; } return data; } public static final BitSet _tokenSet_1 = new BitSet(mk_tokenSet_1()); private static final long[] mk_tokenSet_2() { long[] data = new long[8]; data[0]=-141407503262728L; for (int i = 1; i<=3; i++) { data[i]=-1L; } return data; } public static final BitSet _tokenSet_2 = new BitSet(mk_tokenSet_2()); private static final long[] mk_tokenSet_3() { long[] data = { 4294977024L, 576460745995190270L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_3 = new BitSet(mk_tokenSet_3()); private static final long[] mk_tokenSet_4() { long[] data = { 4294977024L, 0L, 0L, 0L, 0L}; 
return data; } public static final BitSet _tokenSet_4 = new BitSet(mk_tokenSet_4()); private static final long[] mk_tokenSet_5() { long[] data = { 1103806604800L, 0L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_5 = new BitSet(mk_tokenSet_5()); private static final long[] mk_tokenSet_6() { long[] data = { 287959436729787904L, 576460745995190270L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_6 = new BitSet(mk_tokenSet_6()); private static final long[] mk_tokenSet_7() { long[] data = new long[8]; data[0]=-17179869192L; data[1]=-268435457L; for (int i = 2; i<=3; i++) { data[i]=-1L; } return data; } public static final BitSet _tokenSet_7 = new BitSet(mk_tokenSet_7()); private static final long[] mk_tokenSet_8() { long[] data = new long[8]; data[0]=-549755813896L; data[1]=-268435457L; for (int i = 2; i<=3; i++) { data[i]=-1L; } return data; } public static final BitSet _tokenSet_8 = new BitSet(mk_tokenSet_8()); private static final long[] mk_tokenSet_9() { long[] data = { 287948901175001088L, 576460745995190270L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_9 = new BitSet(mk_tokenSet_9()); private static final long[] mk_tokenSet_10() { long[] data = { 287950056521213440L, 576460746129407998L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_10 = new BitSet(mk_tokenSet_10()); private static final long[] mk_tokenSet_11() { long[] data = { 287958332923183104L, 576460745995190270L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_11 = new BitSet(mk_tokenSet_11()); private static final long[] mk_tokenSet_12() { long[] data = { 287978128427460096L, 576460746532061182L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_12 = new BitSet(mk_tokenSet_12()); private static final long[] mk_tokenSet_13() { long[] data = { 0L, 576460745995190270L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_13 = new BitSet(mk_tokenSet_13()); private static final long[] 
mk_tokenSet_14() { long[] data = { 2306123388973753856L, 671088640L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_14 = new BitSet(mk_tokenSet_14()); private static final long[] mk_tokenSet_15() { long[] data = { 287952805300282880L, 576460746129407998L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_15 = new BitSet(mk_tokenSet_15()); private static final long[] mk_tokenSet_16() { long[] data = { 2306051920717948416L, 536870912L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_16 = new BitSet(mk_tokenSet_16()); private static final long[] mk_tokenSet_17() { long[] data = { 2305843013508670976L, 0L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_17 = new BitSet(mk_tokenSet_17()); private static final long[] mk_tokenSet_18() { long[] data = { 208911504254464L, 536870912L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_18 = new BitSet(mk_tokenSet_18()); private static final long[] mk_tokenSet_19() { long[] data = { 1151051235328L, 576460746129407998L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_19 = new BitSet(mk_tokenSet_19()); private static final long[] mk_tokenSet_20() { long[] data = { 189120294954496L, 0L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_20 = new BitSet(mk_tokenSet_20()); private static final long[] mk_tokenSet_21() { long[] data = { 288139722277004800L, 576460746129407998L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_21 = new BitSet(mk_tokenSet_21()); private static final long[] mk_tokenSet_22() { long[] data = { 288084781055354368L, 576460746666278910L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_22 = new BitSet(mk_tokenSet_22()); private static final long[] mk_tokenSet_23() { long[] data = { 287960536241415680L, 576460745995190270L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_23 = new BitSet(mk_tokenSet_23()); private static final long[] mk_tokenSet_24() { 
long[] data = { 287958337218160128L, 576460745995190270L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_24 = new BitSet(mk_tokenSet_24()); private static final long[] mk_tokenSet_25() { long[] data = { 288228817078593024L, 576460746532061182L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_25 = new BitSet(mk_tokenSet_25()); private static final long[] mk_tokenSet_26() { long[] data = { 288158448334415360L, 576460746532061182L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_26 = new BitSet(mk_tokenSet_26()); } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/actions/csharp/ActionLexerTokenTypes.java000066400000000000000000000012311161462365500311410ustar00rootroot00000000000000// $ANTLR : "action.g" -> "ActionLexer.java"$ package antlr.actions.csharp; public interface ActionLexerTokenTypes { int EOF = 1; int NULL_TREE_LOOKAHEAD = 3; int ACTION = 4; int STUFF = 5; int AST_ITEM = 6; int TEXT_ITEM = 7; int TREE = 8; int TREE_ELEMENT = 9; int AST_CONSTRUCTOR = 10; int AST_CTOR_ELEMENT = 11; int ID_ELEMENT = 12; int TEXT_ARG = 13; int TEXT_ARG_ELEMENT = 14; int TEXT_ARG_ID_ELEMENT = 15; int ARG = 16; int ID = 17; int VAR_ASSIGN = 18; int COMMENT = 19; int SL_COMMENT = 20; int ML_COMMENT = 21; int CHAR = 22; int STRING = 23; int ESC = 24; int DIGIT = 25; int INT = 26; int INT_OR_FLOAT = 27; int WS = 28; } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/actions/csharp/action.g000077500000000000000000000237021161462365500254720ustar00rootroot00000000000000header { package antlr.actions.csharp; } { import java.io.StringReader; import antlr.collections.impl.Vector; import antlr.*; } /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * Port to C# by Kunle Odutola : kunle UNDERSCORE odutola AT hotmail DOT com * * $Id:$ */ // HISTORY: // // 01-Aug-2002 kunle Now handles C# preprocessor directives // [ Based on Ric's C/C++ 
preprocessor code ] // 07-Dec-2002 kunle Now supports the new #[ID,text,nodeType] syntax. // /** Perform the following translations: AST related translations ## -> currentRule_AST #(x,y,z) -> codeGenerator.getASTCreateString(vector-of(x,y,z)) #[x] -> codeGenerator.getASTCreateString(x) #x -> codeGenerator.mapTreeId(x) Inside context of #(...), you can ref (x,y,z), [x], and x as shortcuts. Text related translations $append(x) -> text.append(x) $setText(x) -> text.setLength(_begin); text.append(x) $getText -> new String(text.getBuffer(),_begin,text.length()-_begin) $setToken(x) -> _token = x $setType(x) -> _ttype = x $FOLLOW(r) -> FOLLOW set name for rule r (optional arg) $FIRST(r) -> FIRST set name for rule r (optional arg) */ class ActionLexer extends Lexer; options { k=3; charVocabulary='\3'..'\377'; testLiterals=false; interactive=true; } { protected RuleBlock currentRule; protected CodeGenerator generator; protected int lineOffset = 0; private Tool antlrTool; // The ANTLR tool ActionTransInfo transInfo; public ActionLexer( String s, RuleBlock currentRule, CodeGenerator generator, ActionTransInfo transInfo ) { this(new StringReader(s)); this.currentRule = currentRule; this.generator = generator; this.transInfo = transInfo; } public void setLineOffset(int lineOffset) { setLine(lineOffset); } public void setTool(Tool tool) { this.antlrTool = tool; } public void reportError(RecognitionException e) { antlrTool.error("Syntax error in action: "+e,getFilename(),getLine(),getColumn()); } public void reportError(String s) { antlrTool.error(s,getFilename(),getLine(),getColumn()); } public void reportWarning(String s) { if ( getFilename()==null ) antlrTool.warning(s); else antlrTool.warning(s,getFilename(),getLine(),getColumn()); } } // rules are protected because we don't care about nextToken(). public ACTION : ( STUFF | AST_ITEM | TEXT_ITEM )+ ; /** stuff in between #(...) and #id items * Allow the escaping of the # for C# preprocessor stuff. 
*/ protected STUFF : COMMENT | STRING | CHAR | "\r\n" { newline(); } | '\\' '#' { $setText("#"); } | '\r' { newline(); } | '\n' { newline(); } | '/' ~('/'|'*') // non-comment start '/' // | ( ~('/'|'\n'|'\r'|'$'|'#'|'"'|'\'') )+ | ~('/'|'\n'|'\r'|'$'|'#'|'"'|'\'') ; protected AST_ITEM : '#'! t:TREE // #( ) | '#'! (WS)? id:ID // #a_name (=)? { String idt = id.getText(); String mapped = generator.mapTreeId(id.getText(), transInfo); // verify that it's not a preprocessor macro... if ( (mapped != null) && !idt.equals(mapped) ) { $setText(mapped); } else { if (idt.equals("define") || idt.equals("undef") || idt.equals("if") || idt.equals("elif") || idt.equals("else") || idt.equals("endif") || idt.equals("line") || idt.equals("error") || idt.equals("warning") || idt.equals("region") || idt.equals("endregion")) { $setText("#"+idt); } } } (WS)? ( options {greedy=true;} : VAR_ASSIGN )? | '#'! ctor:AST_CONSTRUCTOR // #[ ] | "##" { if( currentRule != null ) { String r = currentRule.getRuleName()+"_AST"; $setText(r); if ( transInfo!=null ) { transInfo.refRuleRoot=r; // we ref root of tree } } else { reportWarning("\"##\" not valid in this context"); $setText("##"); } } (WS)? ( options {greedy=true;} : VAR_ASSIGN )? ; protected TEXT_ITEM : "$append" (WS)? '(' a1:TEXT_ARG ')' { String t = "text.Append("+a1.getText()+")"; $setText(t); } | "$set" ( "Text" (WS)? '(' a2:TEXT_ARG ')' { String t; t = "text.Length = _begin; text.Append("+a2.getText()+")"; $setText(t); } | "Token" (WS)? '(' a3:TEXT_ARG ')' { String t="_token = "+a3.getText(); $setText(t); } | "Type" (WS)? '(' a4:TEXT_ARG ')' { String t="_ttype = "+a4.getText(); $setText(t); } ) | "$getText" { $setText("text.ToString(_begin, text.Length-_begin)"); } | "$FOLLOW" ( (WS)? '(' a5:TEXT_ARG ')' )? 
{ String rule = currentRule.getRuleName(); if ( a5!=null ) { rule = a5.getText(); } String setName = generator.getFOLLOWBitSet(rule, 1); // System.out.println("FOLLOW("+rule+")="+setName); if ( setName==null ) { reportError("$FOLLOW("+rule+")"+ ": unknown rule or bad lookahead computation"); } else { $setText(setName); } } | "$FIRST" ( (WS)? '(' a6:TEXT_ARG ')' )? { String rule = currentRule.getRuleName(); if ( a6!=null ) { rule = a6.getText(); } String setName = generator.getFIRSTBitSet(rule, 1); // System.out.println("FIRST("+rule+")="+setName); if ( setName==null ) { reportError("$FIRST("+rule+")"+ ": unknown rule or bad lookahead computation"); } else { $setText(setName); } } ; protected TREE! { StringBuffer buf = new StringBuffer(); int n=0; Vector terms = new Vector(10); } : '(' (WS)? t:TREE_ELEMENT { terms.appendElement( generator.processStringForASTConstructor(t.getText()) ); } (WS)? ( ',' (WS)? t2:TREE_ELEMENT { terms.appendElement( generator.processStringForASTConstructor(t2.getText()) ); } (WS)? )* {$setText(generator.getASTCreateString(terms));} ')' ; protected TREE_ELEMENT { boolean was_mapped; } : '#'! TREE | '#'! AST_CONSTRUCTOR | '#'! was_mapped=id:ID_ELEMENT { // RK: I have a queer feeling that this maptreeid is redundant.. if ( ! was_mapped ) { String t = generator.mapTreeId(id.getText(), null); if ( t!=null ) { $setText(t); } } } | "##" { if( currentRule != null ) { String t = currentRule.getRuleName()+"_AST"; $setText(t); } else { reportError("\"##\" not valid in this context"); $setText("##"); } } | TREE | AST_CONSTRUCTOR | ID_ELEMENT | STRING ; // FIXME: RK - the getASTCreateString here is broken. // getASTCreateString can not cleanly see if a constructor like // tokens { FOR; } // forLoop:! "for" bla bla // { #forLoop = #([FOR,"for"], bla bla ) } // should use ForNode as AST. // protected AST_CONSTRUCTOR! : '[' (WS)? x:AST_CTOR_ELEMENT (WS)? (',' (WS)? y:AST_CTOR_ELEMENT (WS)? )? (',' (WS)? z:AST_CTOR_ELEMENT (WS)? )? 
']' { String args = generator.processStringForASTConstructor(x.getText()); // the second does not need processing coz it's a string // (eg second param of astFactory.create(x,y) if ( y!=null ) args += ","+y.getText(); if ( z!=null ) args += ","+z.getText(); $setText(generator.getASTCreateString(null,args)); } ; /** The arguments of a #[...] constructor are text, token type, * or a tree. */ protected AST_CTOR_ELEMENT : STRING | INT | TREE_ELEMENT ; /** An ID_ELEMENT can be a func call, array ref, simple var, * or AST label ref. */ protected ID_ELEMENT returns [boolean mapped=false] : id:ID (options {greedy=true;}:WS!)? ( '(' (options {greedy=true;}:WS!)? ( ARG (',' (WS!)? ARG)* )? (WS!)? ')' // method call | ( '[' (WS!)? ARG (WS!)? ']' )+ // array reference | '.' ID_ELEMENT | "->" ID_ELEMENT | /* could be a token reference or just a user var */ { mapped = true; String t = generator.mapTreeId(id.getText(), transInfo); // System.out.println("mapped: "+id.getText()+" -> "+t); if ( t!=null ) { $setText(t); } } // if #rule referenced, check for assignment ( options {greedy=true;} : {transInfo!=null && transInfo.refRuleRoot!=null}? (WS)? VAR_ASSIGN )? ) ; protected TEXT_ARG : (WS)? ( TEXT_ARG_ELEMENT (options {greedy=true;}:WS)? )+ ; protected TEXT_ARG_ELEMENT : TEXT_ARG_ID_ELEMENT | STRING | CHAR | INT_OR_FLOAT | TEXT_ITEM | '+' ; protected TEXT_ARG_ID_ELEMENT : id:ID (options {greedy=true;}:WS!)? ( '(' (options {greedy=true;}:WS!)? ( TEXT_ARG (',' TEXT_ARG)* )* (WS!)? ')' // method call | ( '[' (WS!)? TEXT_ARG (WS!)? ']' )+ // array reference | '.' TEXT_ARG_ID_ELEMENT | "->" TEXT_ARG_ID_ELEMENT | ) ; protected ARG : ( TREE_ELEMENT | STRING | CHAR | INT_OR_FLOAT ) (options {greedy=true;} : (WS)? ( '+'| '-' | '*' | '/' ) (WS)? 
ARG )* ; protected ID : ('a'..'z'|'A'..'Z'|'_') (options {greedy=true;} : ('a'..'z'|'A'..'Z'|'0'..'9'|'_'))* ; protected VAR_ASSIGN : '=' { // inform the code generator that an assignment was done to // AST root for the rule if invoker set refRuleRoot. if ( LA(1)!='=' && transInfo!=null && transInfo.refRuleRoot!=null ) { transInfo.assignToRoot=true; } } ; protected COMMENT : SL_COMMENT | ML_COMMENT ; protected SL_COMMENT : "//" (options {greedy=false;}:.)* ('\n'|"\r\n"|'\r') {newline();} ; protected ML_COMMENT : "/*" ( options {greedy=false;} : '\r' '\n' {newline();} | '\r' {newline();} | '\n' {newline();} | . )* "*/" ; protected CHAR : '\'' ( ESC | ~'\'' ) '\'' ; protected STRING : '"' (ESC|~'"')* '"' ; protected ESC : '\\' ( 'n' | 'r' | 't' | 'b' | 'f' | '"' | '\'' | '\\' | ('0'..'3') ( options {greedy=true;} : DIGIT ( options {greedy=true;} : DIGIT )? )? | ('4'..'7') (options {greedy=true;}:DIGIT)? ) ; protected DIGIT : '0'..'9' ; protected INT : (DIGIT)+ ; protected INT_OR_FLOAT : (options {greedy=true;}:DIGIT)+ ( options {greedy=true;} : '.' (options {greedy=true;}:DIGIT)* | 'L' | 'l' )? 
; protected WS : ( options {greedy=true;} : ' ' | '\t' | '\r' '\n' {newline();} | '\r' {newline();} | '\n' {newline();} )+ ; nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/actions/java/000077500000000000000000000000001161462365500234775ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/actions/java/ActionLexer.java000066400000000000000000001751241161462365500265710ustar00rootroot00000000000000// $ANTLR : "action.g" -> "ActionLexer.java"$ package antlr.actions.java; import java.io.InputStream; import antlr.TokenStreamException; import antlr.TokenStreamIOException; import antlr.TokenStreamRecognitionException; import antlr.CharStreamException; import antlr.CharStreamIOException; import antlr.ANTLRException; import java.io.Reader; import java.util.Hashtable; import antlr.CharScanner; import antlr.InputBuffer; import antlr.ByteBuffer; import antlr.CharBuffer; import antlr.Token; import antlr.CommonToken; import antlr.RecognitionException; import antlr.NoViableAltForCharException; import antlr.MismatchedCharException; import antlr.TokenStream; import antlr.ANTLRHashString; import antlr.LexerSharedInputState; import antlr.collections.impl.BitSet; import antlr.SemanticException; import java.io.StringReader; import antlr.collections.impl.Vector; import antlr.*; /** Perform the following translations: AST related translations ## -> currentRule_AST #(x,y,z) -> codeGenerator.getASTCreateString(vector-of(x,y,z)) #[x] -> codeGenerator.getASTCreateString(x) #x -> codeGenerator.mapTreeId(x) Inside context of #(...), you can ref (x,y,z), [x], and x as shortcuts. 
Text related translations $append(x) -> text.append(x) $setText(x) -> text.setLength(_begin); text.append(x) $getText -> new String(text.getBuffer(),_begin,text.length()-_begin) $setToken(x) -> _token = x $setType(x) -> _ttype = x $FOLLOW(r) -> FOLLOW set name for rule r (optional arg) $FIRST(r) -> FIRST set name for rule r (optional arg) */ public class ActionLexer extends antlr.CharScanner implements ActionLexerTokenTypes, TokenStream { protected RuleBlock currentRule; protected CodeGenerator generator; protected int lineOffset = 0; private Tool antlrTool; // The ANTLR tool ActionTransInfo transInfo; public ActionLexer( String s, RuleBlock currentRule, CodeGenerator generator, ActionTransInfo transInfo) { this(new StringReader(s)); this.currentRule = currentRule; this.generator = generator; this.transInfo = transInfo; } public void setLineOffset(int lineOffset) { // this.lineOffset = lineOffset; setLine(lineOffset); } public void setTool(Tool tool) { this.antlrTool = tool; } public void reportError(RecognitionException e) { antlrTool.error("Syntax error in action: "+e,getFilename(),getLine(),getColumn()); } public void reportError(String s) { antlrTool.error(s,getFilename(),getLine(),getColumn()); } public void reportWarning(String s) { if ( getFilename()==null ) { antlrTool.warning(s); } else { antlrTool.warning(s,getFilename(),getLine(), getColumn()); } } public ActionLexer(InputStream in) { this(new ByteBuffer(in)); } public ActionLexer(Reader in) { this(new CharBuffer(in)); } public ActionLexer(InputBuffer ib) { this(new LexerSharedInputState(ib)); } public ActionLexer(LexerSharedInputState state) { super(state); caseSensitiveLiterals = true; setCaseSensitive(true); literals = new Hashtable(); } public Token nextToken() throws TokenStreamException { Token theRetToken=null; tryAgain: for (;;) { Token _token = null; int _ttype = Token.INVALID_TYPE; resetText(); try { // for char stream error handling try { // for lexical error handling if (((LA(1) >= '\u0003' 
&& LA(1) <= '\u00ff'))) { mACTION(true); theRetToken=_returnToken; } else { if (LA(1)==EOF_CHAR) {uponEOF(); _returnToken = makeToken(Token.EOF_TYPE);} else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());} } if ( _returnToken==null ) continue tryAgain; // found SKIP token _ttype = _returnToken.getType(); _returnToken.setType(_ttype); return _returnToken; } catch (RecognitionException e) { throw new TokenStreamRecognitionException(e); } } catch (CharStreamException cse) { if ( cse instanceof CharStreamIOException ) { throw new TokenStreamIOException(((CharStreamIOException)cse).io); } else { throw new TokenStreamException(cse.getMessage()); } } } } public final void mACTION(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = ACTION; int _saveIndex; { int _cnt382=0; _loop382: do { switch ( LA(1)) { case '#': { mAST_ITEM(false); break; } case '$': { mTEXT_ITEM(false); break; } default: if ((_tokenSet_0.member(LA(1)))) { mSTUFF(false); } else { if ( _cnt382>=1 ) { break _loop382; } else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());} } } _cnt382++; } while (true); } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mSTUFF(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = STUFF; int _saveIndex; switch ( LA(1)) { case '"': { mSTRING(false); break; } case '\'': { mCHAR(false); break; } case '\n': { match('\n'); newline(); break; } default: if ((LA(1)=='/') && (LA(2)=='*'||LA(2)=='/')) { mCOMMENT(false); } else if ((LA(1)=='\r') && (LA(2)=='\n') && (true)) { match("\r\n"); newline(); } else if ((LA(1)=='/') && 
(_tokenSet_1.member(LA(2)))) { match('/'); { match(_tokenSet_1); } } else if ((LA(1)=='\r') && (true) && (true)) { match('\r'); newline(); } else if ((_tokenSet_2.member(LA(1)))) { { match(_tokenSet_2); } } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mAST_ITEM(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = AST_ITEM; int _saveIndex; Token t=null; Token id=null; Token ctor=null; if ((LA(1)=='#') && (LA(2)=='(')) { _saveIndex=text.length(); match('#'); text.setLength(_saveIndex); mTREE(true); t=_returnToken; } else if ((LA(1)=='#') && (_tokenSet_3.member(LA(2)))) { _saveIndex=text.length(); match('#'); text.setLength(_saveIndex); mID(true); id=_returnToken; String idt = id.getText(); String var = generator.mapTreeId(idt,transInfo); if ( var!=null ) { text.setLength(_begin); text.append(var); } { if ((_tokenSet_4.member(LA(1))) && (true) && (true)) { mWS(false); } else { } } { if ((LA(1)=='=') && (true) && (true)) { mVAR_ASSIGN(false); } else { } } } else if ((LA(1)=='#') && (LA(2)=='[')) { _saveIndex=text.length(); match('#'); text.setLength(_saveIndex); mAST_CONSTRUCTOR(true); ctor=_returnToken; } else if ((LA(1)=='#') && (LA(2)=='#')) { match("##"); String r=currentRule.getRuleName()+"_AST"; text.setLength(_begin); text.append(r); if ( transInfo!=null ) { transInfo.refRuleRoot=r; // we ref root of tree } { if ((_tokenSet_4.member(LA(1))) && (true) && (true)) { mWS(false); } else { } } { if ((LA(1)=='=') && (true) && (true)) { mVAR_ASSIGN(false); } else { } } } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } if ( _createToken && 
_token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mTEXT_ITEM(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = TEXT_ITEM; int _saveIndex; Token a1=null; Token a2=null; Token a3=null; Token a4=null; Token a5=null; Token a6=null; if ((LA(1)=='$') && (LA(2)=='F') && (LA(3)=='O')) { match("$FOLLOW"); { if ((_tokenSet_5.member(LA(1))) && (_tokenSet_6.member(LA(2))) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) { { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { mWS(false); break; } case '(': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } match('('); mTEXT_ARG(true); a5=_returnToken; match(')'); } else { } } String rule = currentRule.getRuleName(); if ( a5!=null ) { rule = a5.getText(); } String setName = generator.getFOLLOWBitSet(rule, 1); // System.out.println("FOLLOW("+rule+")="+setName); if ( setName==null ) { reportError("$FOLLOW("+rule+")"+ ": unknown rule or bad lookahead computation"); } else { text.setLength(_begin); text.append(setName); } } else if ((LA(1)=='$') && (LA(2)=='F') && (LA(3)=='I')) { match("$FIRST"); { if ((_tokenSet_5.member(LA(1))) && (_tokenSet_6.member(LA(2))) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) { { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { mWS(false); break; } case '(': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } match('('); mTEXT_ARG(true); a6=_returnToken; match(')'); } else { } } String rule = currentRule.getRuleName(); if ( a6!=null ) { rule = a6.getText(); } String setName = generator.getFIRSTBitSet(rule, 1); // System.out.println("FIRST("+rule+")="+setName); if ( setName==null ) { 
reportError("$FIRST("+rule+")"+ ": unknown rule or bad lookahead computation"); } else { text.setLength(_begin); text.append(setName); } } else if ((LA(1)=='$') && (LA(2)=='a')) { match("$append"); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { mWS(false); break; } case '(': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } match('('); mTEXT_ARG(true); a1=_returnToken; match(')'); String t = "text.append("+a1.getText()+")"; text.setLength(_begin); text.append(t); } else if ((LA(1)=='$') && (LA(2)=='s')) { match("$set"); { if ((LA(1)=='T') && (LA(2)=='e')) { match("Text"); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { mWS(false); break; } case '(': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } match('('); mTEXT_ARG(true); a2=_returnToken; match(')'); String t; t = "text.setLength(_begin); text.append("+a2.getText()+")"; text.setLength(_begin); text.append(t); } else if ((LA(1)=='T') && (LA(2)=='o')) { match("Token"); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { mWS(false); break; } case '(': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } match('('); mTEXT_ARG(true); a3=_returnToken; match(')'); String t="_token = "+a3.getText(); text.setLength(_begin); text.append(t); } else if ((LA(1)=='T') && (LA(2)=='y')) { match("Type"); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { mWS(false); break; } case '(': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } match('('); mTEXT_ARG(true); a4=_returnToken; match(')'); String t="_ttype = "+a4.getText(); text.setLength(_begin); text.append(t); } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } else if ((LA(1)=='$') && 
(LA(2)=='g')) { match("$getText"); text.setLength(_begin); text.append("new String(text.getBuffer(),_begin,text.length()-_begin)"); } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mCOMMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = COMMENT; int _saveIndex; if ((LA(1)=='/') && (LA(2)=='/')) { mSL_COMMENT(false); } else if ((LA(1)=='/') && (LA(2)=='*')) { mML_COMMENT(false); } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mSTRING(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = STRING; int _saveIndex; match('"'); { _loop478: do { if ((LA(1)=='\\')) { mESC(false); } else if ((_tokenSet_7.member(LA(1)))) { matchNot('"'); } else { break _loop478; } } while (true); } match('"'); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mCHAR(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = CHAR; int _saveIndex; match('\''); { if ((LA(1)=='\\')) { mESC(false); } else if ((_tokenSet_8.member(LA(1)))) { matchNot('\''); } else { throw new 
NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } match('\''); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mTREE(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = TREE; int _saveIndex; Token t=null; Token t2=null; StringBuffer buf = new StringBuffer(); int n=0; Vector terms = new Vector(10); _saveIndex=text.length(); match('('); text.setLength(_saveIndex); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case '"': case '#': case '(': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': case '[': case '_': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } _saveIndex=text.length(); mTREE_ELEMENT(true); text.setLength(_saveIndex); t=_returnToken; terms.appendElement(t.getText()); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case ')': case ',': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } { _loop407: do { if ((LA(1)==',')) { _saveIndex=text.length(); match(','); 
text.setLength(_saveIndex); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case '"': case '#': case '(': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': case '[': case '_': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } _saveIndex=text.length(); mTREE_ELEMENT(true); text.setLength(_saveIndex); t2=_returnToken; terms.appendElement(t2.getText()); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case ')': case ',': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } } else { break _loop407; } } while (true); } text.setLength(_begin); text.append(generator.getASTCreateString(terms)); _saveIndex=text.length(); match(')'); text.setLength(_saveIndex); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mID(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = ID; int _saveIndex; { switch ( LA(1)) { case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 
'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { matchRange('a','z'); break; } case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': { matchRange('A','Z'); break; } case '_': { match('_'); break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } { _loop464: do { if ((_tokenSet_9.member(LA(1))) && (true) && (true)) { { switch ( LA(1)) { case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { matchRange('a','z'); break; } case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': { matchRange('A','Z'); break; } case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': { matchRange('0','9'); break; } case '_': { match('_'); break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } } else { break _loop464; } } while (true); } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mWS(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = WS; int _saveIndex; { int _cnt498=0; 
_loop498: do { if ((LA(1)=='\r') && (LA(2)=='\n') && (true)) { match('\r'); match('\n'); newline(); } else if ((LA(1)==' ') && (true) && (true)) { match(' '); } else if ((LA(1)=='\t') && (true) && (true)) { match('\t'); } else if ((LA(1)=='\r') && (true) && (true)) { match('\r'); newline(); } else if ((LA(1)=='\n') && (true) && (true)) { match('\n'); newline(); } else { if ( _cnt498>=1 ) { break _loop498; } else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());} } _cnt498++; } while (true); } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mVAR_ASSIGN(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = VAR_ASSIGN; int _saveIndex; match('='); // inform the code generator that an assignment was done to // AST root for the rule if invoker set refRuleRoot. 
if ( LA(1)!='=' && transInfo!=null && transInfo.refRuleRoot!=null ) { transInfo.assignToRoot=true; } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mAST_CONSTRUCTOR(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = AST_CONSTRUCTOR; int _saveIndex; Token x=null; Token y=null; Token z=null; _saveIndex=text.length(); match('['); text.setLength(_saveIndex); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case '"': case '#': case '(': case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': case '[': case '_': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } _saveIndex=text.length(); mAST_CTOR_ELEMENT(true); text.setLength(_saveIndex); x=_returnToken; { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case ',': case ']': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } { if ((LA(1)==',') && (_tokenSet_10.member(LA(2))) && ((LA(3) >= 
'\u0003' && LA(3) <= '\u00ff'))) { _saveIndex=text.length(); match(','); text.setLength(_saveIndex); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case '"': case '#': case '(': case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': case '[': case '_': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } _saveIndex=text.length(); mAST_CTOR_ELEMENT(true); text.setLength(_saveIndex); y=_returnToken; { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case ',': case ']': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } } else if ((LA(1)==','||LA(1)==']') && (true) && (true)) { } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } { switch ( LA(1)) { case ',': { _saveIndex=text.length(); match(','); text.setLength(_saveIndex); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case '"': case '#': case '(': case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 
'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': case '[': case '_': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } _saveIndex=text.length(); mAST_CTOR_ELEMENT(true); text.setLength(_saveIndex); z=_returnToken; { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case ']': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } break; } case ']': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } _saveIndex=text.length(); match(']'); text.setLength(_saveIndex); String args = x.getText(); if ( y!=null ) { args += ","+y.getText(); } if ( z!=null ) { args += ","+z.getText(); } text.setLength(_begin); text.append(generator.getASTCreateString(null,args)); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mTEXT_ARG(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = TEXT_ARG; int _saveIndex; { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { mWS(false); break; } case '"': case '$': case '\'': case '+': case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case 
'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': case '_': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } { int _cnt438=0; _loop438: do { if ((_tokenSet_11.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && (true)) { mTEXT_ARG_ELEMENT(false); { if ((_tokenSet_4.member(LA(1))) && (_tokenSet_12.member(LA(2))) && (true)) { mWS(false); } else if ((_tokenSet_12.member(LA(1))) && (true) && (true)) { } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } else { if ( _cnt438>=1 ) { break _loop438; } else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());} } _cnt438++; } while (true); } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mTREE_ELEMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = TREE_ELEMENT; int _saveIndex; Token id=null; boolean was_mapped; switch ( LA(1)) { case '(': { mTREE(false); break; } case '[': { mAST_CONSTRUCTOR(false); break; } case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': 
case 'W': case 'X': case 'Y': case 'Z': case '_': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { mID_ELEMENT(false); break; } case '"': { mSTRING(false); break; } default: if ((LA(1)=='#') && (LA(2)=='(')) { _saveIndex=text.length(); match('#'); text.setLength(_saveIndex); mTREE(false); } else if ((LA(1)=='#') && (LA(2)=='[')) { _saveIndex=text.length(); match('#'); text.setLength(_saveIndex); mAST_CONSTRUCTOR(false); } else if ((LA(1)=='#') && (_tokenSet_3.member(LA(2)))) { _saveIndex=text.length(); match('#'); text.setLength(_saveIndex); was_mapped=mID_ELEMENT(true); id=_returnToken; // RK: I have a queer feeling that this maptreeid is redundant if( ! was_mapped ) { String t = generator.mapTreeId(id.getText(), null); text.setLength(_begin); text.append(t); } } else if ((LA(1)=='#') && (LA(2)=='#')) { match("##"); String t = currentRule.getRuleName()+"_AST"; text.setLength(_begin); text.append(t); } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } /** An ID_ELEMENT can be a func call, array ref, simple var, * or AST label ref. 
*/ protected final boolean mID_ELEMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { boolean mapped=false; int _ttype; Token _token=null; int _begin=text.length(); _ttype = ID_ELEMENT; int _saveIndex; Token id=null; mID(true); id=_returnToken; { if ((_tokenSet_4.member(LA(1))) && (_tokenSet_13.member(LA(2))) && (true)) { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); } else if ((_tokenSet_13.member(LA(1))) && (true) && (true)) { } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } { switch ( LA(1)) { case '(': { match('('); { if ((_tokenSet_4.member(LA(1))) && (_tokenSet_14.member(LA(2))) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); } else if ((_tokenSet_14.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && (true)) { } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } { switch ( LA(1)) { case '"': case '#': case '\'': case '(': case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': case '[': case '_': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { mARG(false); { _loop426: do { if ((LA(1)==',')) { match(','); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case '"': case '#': case '\'': case '(': case '0': case '1': 
case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': case '[': case '_': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } mARG(false); } else { break _loop426; } } while (true); } break; } case '\t': case '\n': case '\r': case ' ': case ')': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case ')': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } match(')'); break; } case '[': { { int _cnt431=0; _loop431: do { if ((LA(1)=='[')) { match('['); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case '"': case '#': case '\'': case '(': case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': case '[': case '_': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 
'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } mARG(false); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case ']': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } match(']'); } else { if ( _cnt431>=1 ) { break _loop431; } else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());} } _cnt431++; } while (true); } break; } case '.': { match('.'); mID_ELEMENT(false); break; } case '\t': case '\n': case '\r': case ' ': case ')': case '*': case '+': case ',': case '-': case '/': case '=': case ']': { mapped = true; String t = generator.mapTreeId(id.getText(), transInfo); text.setLength(_begin); text.append(t); { if (((_tokenSet_15.member(LA(1))) && (_tokenSet_16.member(LA(2))) && (true))&&(transInfo!=null && transInfo.refRuleRoot!=null)) { { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { mWS(false); break; } case '=': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } mVAR_ASSIGN(false); } else if ((_tokenSet_17.member(LA(1))) && (true) && (true)) { } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; return mapped; } /** The arguments of a #[...] constructor are text, token type, * or a tree. 
*/ protected final void mAST_CTOR_ELEMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = AST_CTOR_ELEMENT; int _saveIndex; if ((LA(1)=='"') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) { mSTRING(false); } else if ((_tokenSet_18.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && (true)) { mTREE_ELEMENT(false); } else if (((LA(1) >= '0' && LA(1) <= '9'))) { mINT(false); } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mINT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = INT; int _saveIndex; { int _cnt489=0; _loop489: do { if (((LA(1) >= '0' && LA(1) <= '9'))) { mDIGIT(false); } else { if ( _cnt489>=1 ) { break _loop489; } else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());} } _cnt489++; } while (true); } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mARG(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = ARG; int _saveIndex; { switch ( LA(1)) { case '\'': { mCHAR(false); break; } case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': { mINT_OR_FLOAT(false); break; } default: if ((_tokenSet_18.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && ((LA(3) >= 
'\u0003' && LA(3) <= '\u00ff'))) { mTREE_ELEMENT(false); } else if ((LA(1)=='"') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) { mSTRING(false); } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } { _loop459: do { if ((_tokenSet_19.member(LA(1))) && (_tokenSet_20.member(LA(2))) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) { { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { mWS(false); break; } case '*': case '+': case '-': case '/': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } { switch ( LA(1)) { case '+': { match('+'); break; } case '-': { match('-'); break; } case '*': { match('*'); break; } case '/': { match('/'); break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { mWS(false); break; } case '"': case '#': case '\'': case '(': case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': case '[': case '_': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } mARG(false); } else { break _loop459; } } while (true); } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new 
String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mTEXT_ARG_ELEMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = TEXT_ARG_ELEMENT; int _saveIndex; switch ( LA(1)) { case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': case '_': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { mTEXT_ARG_ID_ELEMENT(false); break; } case '"': { mSTRING(false); break; } case '\'': { mCHAR(false); break; } case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': { mINT_OR_FLOAT(false); break; } case '$': { mTEXT_ITEM(false); break; } case '+': { match('+'); break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mTEXT_ARG_ID_ELEMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = TEXT_ARG_ID_ELEMENT; int _saveIndex; Token id=null; mID(true); id=_returnToken; { if ((_tokenSet_4.member(LA(1))) && (_tokenSet_21.member(LA(2))) && (true)) { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); } else if ((_tokenSet_21.member(LA(1))) && (true) && (true)) { } else { throw new 
NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } { switch ( LA(1)) { case '(': { match('('); { if ((_tokenSet_4.member(LA(1))) && (_tokenSet_22.member(LA(2))) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); } else if ((_tokenSet_22.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && (true)) { } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } { _loop447: do { if ((_tokenSet_23.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) { mTEXT_ARG(false); { _loop446: do { if ((LA(1)==',')) { match(','); mTEXT_ARG(false); } else { break _loop446; } } while (true); } } else { break _loop447; } } while (true); } { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case ')': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } match(')'); break; } case '[': { { int _cnt452=0; _loop452: do { if ((LA(1)=='[')) { match('['); { if ((_tokenSet_4.member(LA(1))) && (_tokenSet_23.member(LA(2))) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); } else if ((_tokenSet_23.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) { } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } mTEXT_ARG(false); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case ']': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } match(']'); } else { if ( _cnt452>=1 ) { break _loop452; } else {throw new 
NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());} } _cnt452++; } while (true); } break; } case '.': { match('.'); mTEXT_ARG_ID_ELEMENT(false); break; } case '\t': case '\n': case '\r': case ' ': case '"': case '$': case '\'': case ')': case '+': case ',': case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': case ']': case '_': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mINT_OR_FLOAT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = INT_OR_FLOAT; int _saveIndex; { int _cnt492=0; _loop492: do { if (((LA(1) >= '0' && LA(1) <= '9')) && (_tokenSet_24.member(LA(2))) && (true)) { mDIGIT(false); } else { if ( _cnt492>=1 ) { break _loop492; } else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());} } _cnt492++; } while (true); } { if ((LA(1)=='L') && (_tokenSet_25.member(LA(2))) && (true)) { match('L'); } else if ((LA(1)=='l') && (_tokenSet_25.member(LA(2))) && (true)) { match('l'); } else if ((LA(1)=='.')) { match('.'); { _loop495: do { if (((LA(1) >= 
'0' && LA(1) <= '9')) && (_tokenSet_25.member(LA(2))) && (true)) { mDIGIT(false); } else { break _loop495; } } while (true); } } else if ((_tokenSet_25.member(LA(1))) && (true) && (true)) { } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mSL_COMMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = SL_COMMENT; int _saveIndex; match("//"); { _loop469: do { // nongreedy exit test if ((LA(1)=='\n'||LA(1)=='\r') && (true) && (true)) break _loop469; if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && (true)) { matchNot(EOF_CHAR); } else { break _loop469; } } while (true); } { if ((LA(1)=='\r') && (LA(2)=='\n') && (true)) { match("\r\n"); } else if ((LA(1)=='\n')) { match('\n'); } else if ((LA(1)=='\r') && (true) && (true)) { match('\r'); } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } newline(); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mML_COMMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = ML_COMMENT; int _saveIndex; match("/*"); { _loop473: do { // nongreedy exit test if ((LA(1)=='*') && (LA(2)=='/') && (true)) break _loop473; if ((LA(1)=='\r') && (LA(2)=='\n') && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) { match('\r'); match('\n'); newline(); } else if ((LA(1)=='\r') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) 
&& ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) { match('\r'); newline(); } else if ((LA(1)=='\n') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) { match('\n'); newline(); } else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) { matchNot(EOF_CHAR); } else { break _loop473; } } while (true); } match("*/"); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mESC(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = ESC; int _saveIndex; match('\\'); { switch ( LA(1)) { case 'n': { match('n'); break; } case 'r': { match('r'); break; } case 't': { match('t'); break; } case 'b': { match('b'); break; } case 'f': { match('f'); break; } case '"': { match('"'); break; } case '\'': { match('\''); break; } case '\\': { match('\\'); break; } case '0': case '1': case '2': case '3': { { matchRange('0','3'); } { if (((LA(1) >= '0' && LA(1) <= '9')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && (true)) { mDIGIT(false); { if (((LA(1) >= '0' && LA(1) <= '9')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && (true)) { mDIGIT(false); } else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && (true) && (true)) { } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && (true) && (true)) { } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } break; } case '4': case '5': case '6': case '7': { { matchRange('4','7'); } { if (((LA(1) >= '0' && LA(1) <= '9')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && (true)) { mDIGIT(false); } else if 
(((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && (true) && (true)) { } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mDIGIT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = DIGIT; int _saveIndex; matchRange('0','9'); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } private static final long[] mk_tokenSet_0() { long[] data = new long[8]; data[0]=-103079215112L; for (int i = 1; i<=3; i++) { data[i]=-1L; } return data; } public static final BitSet _tokenSet_0 = new BitSet(mk_tokenSet_0()); private static final long[] mk_tokenSet_1() { long[] data = new long[8]; data[0]=-145135534866440L; for (int i = 1; i<=3; i++) { data[i]=-1L; } return data; } public static final BitSet _tokenSet_1 = new BitSet(mk_tokenSet_1()); private static final long[] mk_tokenSet_2() { long[] data = new long[8]; data[0]=-141407503262728L; for (int i = 1; i<=3; i++) { data[i]=-1L; } return data; } public static final BitSet _tokenSet_2 = new BitSet(mk_tokenSet_2()); private static final long[] mk_tokenSet_3() { long[] data = { 0L, 576460745995190270L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_3 = new BitSet(mk_tokenSet_3()); private static final long[] mk_tokenSet_4() { long[] data = { 4294977024L, 0L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_4 = new BitSet(mk_tokenSet_4()); private static final long[] mk_tokenSet_5() { 
long[] data = { 1103806604800L, 0L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_5 = new BitSet(mk_tokenSet_5()); private static final long[] mk_tokenSet_6() { long[] data = { 287959436729787904L, 576460745995190270L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_6 = new BitSet(mk_tokenSet_6()); private static final long[] mk_tokenSet_7() { long[] data = new long[8]; data[0]=-17179869192L; data[1]=-268435457L; for (int i = 2; i<=3; i++) { data[i]=-1L; } return data; } public static final BitSet _tokenSet_7 = new BitSet(mk_tokenSet_7()); private static final long[] mk_tokenSet_8() { long[] data = new long[8]; data[0]=-549755813896L; data[1]=-268435457L; for (int i = 2; i<=3; i++) { data[i]=-1L; } return data; } public static final BitSet _tokenSet_8 = new BitSet(mk_tokenSet_8()); private static final long[] mk_tokenSet_9() { long[] data = { 287948901175001088L, 576460745995190270L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_9 = new BitSet(mk_tokenSet_9()); private static final long[] mk_tokenSet_10() { long[] data = { 287950056521213440L, 576460746129407998L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_10 = new BitSet(mk_tokenSet_10()); private static final long[] mk_tokenSet_11() { long[] data = { 287958332923183104L, 576460745995190270L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_11 = new BitSet(mk_tokenSet_11()); private static final long[] mk_tokenSet_12() { long[] data = { 287978128427460096L, 576460746532061182L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_12 = new BitSet(mk_tokenSet_12()); private static final long[] mk_tokenSet_13() { long[] data = { 2306123388973753856L, 671088640L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_13 = new BitSet(mk_tokenSet_13()); private static final long[] mk_tokenSet_14() { long[] data = { 287952805300282880L, 576460746129407998L, 0L, 0L, 0L}; return data; } public static final 
BitSet _tokenSet_14 = new BitSet(mk_tokenSet_14()); private static final long[] mk_tokenSet_15() { long[] data = { 2305843013508670976L, 0L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_15 = new BitSet(mk_tokenSet_15()); private static final long[] mk_tokenSet_16() { long[] data = { 2306051920717948416L, 536870912L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_16 = new BitSet(mk_tokenSet_16()); private static final long[] mk_tokenSet_17() { long[] data = { 208911504254464L, 536870912L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_17 = new BitSet(mk_tokenSet_17()); private static final long[] mk_tokenSet_18() { long[] data = { 1151051235328L, 576460746129407998L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_18 = new BitSet(mk_tokenSet_18()); private static final long[] mk_tokenSet_19() { long[] data = { 189120294954496L, 0L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_19 = new BitSet(mk_tokenSet_19()); private static final long[] mk_tokenSet_20() { long[] data = { 288139722277004800L, 576460746129407998L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_20 = new BitSet(mk_tokenSet_20()); private static final long[] mk_tokenSet_21() { long[] data = { 288049596683265536L, 576460746666278910L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_21 = new BitSet(mk_tokenSet_21()); private static final long[] mk_tokenSet_22() { long[] data = { 287960536241415680L, 576460745995190270L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_22 = new BitSet(mk_tokenSet_22()); private static final long[] mk_tokenSet_23() { long[] data = { 287958337218160128L, 576460745995190270L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_23 = new BitSet(mk_tokenSet_23()); private static final long[] mk_tokenSet_24() { long[] data = { 288228817078593024L, 576460746532061182L, 0L, 0L, 0L}; return data; } public static final BitSet 
_tokenSet_24 = new BitSet(mk_tokenSet_24()); private static final long[] mk_tokenSet_25() { long[] data = { 288158448334415360L, 576460746532061182L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_25 = new BitSet(mk_tokenSet_25()); } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/actions/java/ActionLexerTokenTypes.java000066400000000000000000000012261161462365500306060ustar00rootroot00000000000000// $ANTLR : "action.g" -> "ActionLexer.java"$ package antlr.actions.java; public interface ActionLexerTokenTypes { int EOF = 1; int NULL_TREE_LOOKAHEAD = 3; int ACTION = 4; int STUFF = 5; int AST_ITEM = 6; int TEXT_ITEM = 7; int TREE = 8; int TREE_ELEMENT = 9; int AST_CONSTRUCTOR = 10; int AST_CTOR_ELEMENT = 11; int ID_ELEMENT = 12; int TEXT_ARG = 13; int TEXT_ARG_ELEMENT = 14; int TEXT_ARG_ID_ELEMENT = 15; int ARG = 16; int ID = 17; int VAR_ASSIGN = 18; int COMMENT = 19; int SL_COMMENT = 20; int ML_COMMENT = 21; int CHAR = 22; int STRING = 23; int ESC = 24; int DIGIT = 25; int INT = 26; int INT_OR_FLOAT = 27; int WS = 28; } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/actions/java/action.g000066400000000000000000000203371161462365500251310ustar00rootroot00000000000000header { package antlr.actions.java; } { import java.io.StringReader; import antlr.collections.impl.Vector; import antlr.*; } /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/actions/java/action.g#1 $ */ /** Perform the following translations: AST related translations ## -> currentRule_AST #(x,y,z) -> codeGenerator.getASTCreateString(vector-of(x,y,z)) #[x] -> codeGenerator.getASTCreateString(x) #x -> codeGenerator.mapTreeId(x) Inside context of #(...), you can ref (x,y,z), [x], and x as shortcuts. 
Text related translations $append(x) -> text.append(x) $setText(x) -> text.setLength(_begin); text.append(x) $getText -> new String(text.getBuffer(),_begin,text.length()-_begin) $setToken(x) -> _token = x $setType(x) -> _ttype = x $FOLLOW(r) -> FOLLOW set name for rule r (optional arg) $FIRST(r) -> FIRST set name for rule r (optional arg) */ class ActionLexer extends Lexer; options { k=3; charVocabulary='\3'..'\377'; testLiterals=false; interactive=true; } { protected RuleBlock currentRule; protected CodeGenerator generator; protected int lineOffset = 0; private Tool antlrTool; // The ANTLR tool ActionTransInfo transInfo; public ActionLexer( String s, RuleBlock currentRule, CodeGenerator generator, ActionTransInfo transInfo) { this(new StringReader(s)); this.currentRule = currentRule; this.generator = generator; this.transInfo = transInfo; } public void setLineOffset(int lineOffset) { // this.lineOffset = lineOffset; setLine(lineOffset); } public void setTool(Tool tool) { this.antlrTool = tool; } public void reportError(RecognitionException e) { antlrTool.error("Syntax error in action: "+e,getFilename(),getLine(),getColumn()); } public void reportError(String s) { antlrTool.error(s,getFilename(),getLine(),getColumn()); } public void reportWarning(String s) { if ( getFilename()==null ) { antlrTool.warning(s); } else { antlrTool.warning(s,getFilename(),getLine(), getColumn()); } } } // rules are protected because we don't care about nextToken(). public ACTION : ( STUFF | AST_ITEM | TEXT_ITEM )+ ; // stuff in between #(...) and #id items protected STUFF : COMMENT | STRING | CHAR | "\r\n" {newline();} | '\r' {newline();} | '\n' {newline();} | '/' ~('/'|'*') // non-comment start '/' // | ( ~('/'|'\n'|'\r'|'$'|'#'|'"'|'\'') )+ | ~('/'|'\n'|'\r'|'$'|'#'|'"'|'\'') ; protected AST_ITEM : '#'! t:TREE | '#'! id:ID { String idt = id.getText(); String var = generator.mapTreeId(idt,transInfo); if ( var!=null ) { $setText(var); } } (WS)? ( options {greedy=true;} : VAR_ASSIGN )? 
| '#'! ctor:AST_CONSTRUCTOR | "##" { String r=currentRule.getRuleName()+"_AST"; $setText(r); if ( transInfo!=null ) { transInfo.refRuleRoot=r; // we ref root of tree } } (WS)? ( options {greedy=true;} : VAR_ASSIGN )? ; protected TEXT_ITEM : "$append" (WS)? '(' a1:TEXT_ARG ')' { String t = "text.append("+a1.getText()+")"; $setText(t); } | "$set" ( "Text" (WS)? '(' a2:TEXT_ARG ')' { String t; t = "text.setLength(_begin); text.append("+a2.getText()+")"; $setText(t); } | "Token" (WS)? '(' a3:TEXT_ARG ')' { String t="_token = "+a3.getText(); $setText(t); } | "Type" (WS)? '(' a4:TEXT_ARG ')' { String t="_ttype = "+a4.getText(); $setText(t); } ) | "$getText" { $setText("new String(text.getBuffer(),_begin,text.length()-_begin)"); } | "$FOLLOW" ( (WS)? '(' a5:TEXT_ARG ')' )? { String rule = currentRule.getRuleName(); if ( a5!=null ) { rule = a5.getText(); } String setName = generator.getFOLLOWBitSet(rule, 1); // System.out.println("FOLLOW("+rule+")="+setName); if ( setName==null ) { reportError("$FOLLOW("+rule+")"+ ": unknown rule or bad lookahead computation"); } else { $setText(setName); } } | "$FIRST" ( (WS)? '(' a6:TEXT_ARG ')' )? { String rule = currentRule.getRuleName(); if ( a6!=null ) { rule = a6.getText(); } String setName = generator.getFIRSTBitSet(rule, 1); // System.out.println("FIRST("+rule+")="+setName); if ( setName==null ) { reportError("$FIRST("+rule+")"+ ": unknown rule or bad lookahead computation"); } else { $setText(setName); } } ; protected TREE! { StringBuffer buf = new StringBuffer(); int n=0; Vector terms = new Vector(10); } : '(' (WS)? t:TREE_ELEMENT {terms.appendElement(t.getText());} (WS)? ( ',' (WS)? t2:TREE_ELEMENT {terms.appendElement(t2.getText());} (WS)? )* {$setText(generator.getASTCreateString(terms));} ')' ; protected TREE_ELEMENT { boolean was_mapped; } : '#'! TREE | '#'! AST_CONSTRUCTOR | '#'! was_mapped=id:ID_ELEMENT { // RK: I have a queer feeling that this maptreeid is redundant if( ! 
was_mapped ) { String t = generator.mapTreeId(id.getText(), null); $setText(t); } } | "##" {String t = currentRule.getRuleName()+"_AST"; $setText(t);} | TREE | AST_CONSTRUCTOR | ID_ELEMENT | STRING ; protected AST_CONSTRUCTOR! : '[' (WS)? x:AST_CTOR_ELEMENT (WS)? (',' (WS)? y:AST_CTOR_ELEMENT (WS)? )? (',' (WS)? z:AST_CTOR_ELEMENT (WS)? )? ']' { String args = x.getText(); if ( y!=null ) { args += ","+y.getText(); } if ( z!=null ) { args += ","+z.getText(); } $setText(generator.getASTCreateString(null,args)); } ; /** The arguments of a #[...] constructor are text, token type, * or a tree. */ protected AST_CTOR_ELEMENT : STRING | INT | TREE_ELEMENT ; /** An ID_ELEMENT can be a func call, array ref, simple var, * or AST label ref. */ protected ID_ELEMENT returns [boolean mapped=false] : id:ID (options {greedy=true;}:WS!)? ( '(' (options {greedy=true;}:WS!)? ( ARG (',' (WS!)? ARG)* )? (WS!)? ')' // method call | ( '[' (WS!)? ARG (WS!)? ']' )+ // array reference | '.' ID_ELEMENT | /* could be a token reference or just a user var */ { mapped = true; String t = generator.mapTreeId(id.getText(), transInfo); $setText(t); } // if #rule referenced, check for assignment ( options {greedy=true;} : {transInfo!=null && transInfo.refRuleRoot!=null}? (WS)? VAR_ASSIGN )? ) ; protected TEXT_ARG : (WS)? ( TEXT_ARG_ELEMENT (options {greedy=true;}:WS)? )+ ; protected TEXT_ARG_ELEMENT : TEXT_ARG_ID_ELEMENT | STRING | CHAR | INT_OR_FLOAT | TEXT_ITEM | '+' ; protected TEXT_ARG_ID_ELEMENT : id:ID (options {greedy=true;}:WS!)? ( '(' (options {greedy=true;}:WS!)? ( TEXT_ARG (',' TEXT_ARG)* )* (WS!)? ')' // method call | ( '[' (WS!)? TEXT_ARG (WS!)? ']' )+ // array reference | '.' TEXT_ARG_ID_ELEMENT | ) ; protected ARG : ( TREE_ELEMENT | STRING | CHAR | INT_OR_FLOAT ) (options {greedy=true;} : (WS)? ( '+'| '-' | '*' | '/' ) (WS)? 
ARG )* ; protected ID : ('a'..'z'|'A'..'Z'|'_') (options {greedy=true;} : ('a'..'z'|'A'..'Z'|'0'..'9'|'_'))* ; protected VAR_ASSIGN : '=' { // inform the code generator that an assignment was done to // AST root for the rule if invoker set refRuleRoot. if ( LA(1)!='=' && transInfo!=null && transInfo.refRuleRoot!=null ) { transInfo.assignToRoot=true; } } ; protected COMMENT : SL_COMMENT | ML_COMMENT ; protected SL_COMMENT : "//" (options {greedy=false;}:.)* ('\n'|"\r\n"|'\r') {newline();} ; protected ML_COMMENT : "/*" ( options {greedy=false;} : '\r' '\n' {newline();} | '\r' {newline();} | '\n' {newline();} | . )* "*/" ; protected CHAR : '\'' ( ESC | ~'\'' ) '\'' ; protected STRING : '"' (ESC|~'"')* '"' ; protected ESC : '\\' ( 'n' | 'r' | 't' | 'b' | 'f' | '"' | '\'' | '\\' | ('0'..'3') ( options {greedy=true;} : DIGIT ( options {greedy=true;} : DIGIT )? )? | ('4'..'7') (options {greedy=true;}:DIGIT)? ) ; protected DIGIT : '0'..'9' ; protected INT : (DIGIT)+ ; protected INT_OR_FLOAT : (options {greedy=true;}:DIGIT)+ ( options {greedy=true;} : '.' (options {greedy=true;}:DIGIT)* | 'L' | 'l' )? 
; protected WS : ( options {greedy=true;} : ' ' | '\t' | '\r' '\n' {newline();} | '\r' {newline();} | '\n' {newline();} )+ ; nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/actions/python/000077500000000000000000000000001161462365500240775ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/actions/python/ActionLexer.java000066400000000000000000002013321161462365500271600ustar00rootroot00000000000000// $ANTLR 2.7.5RC1 (20041124-137): "action.g" -> "ActionLexer.java"$ package antlr.actions.python; import java.io.InputStream; import antlr.TokenStreamException; import antlr.TokenStreamIOException; import antlr.TokenStreamRecognitionException; import antlr.CharStreamException; import antlr.CharStreamIOException; import antlr.ANTLRException; import java.io.Reader; import java.util.Hashtable; import antlr.CharScanner; import antlr.InputBuffer; import antlr.ByteBuffer; import antlr.CharBuffer; import antlr.Token; import antlr.CommonToken; import antlr.RecognitionException; import antlr.NoViableAltForCharException; import antlr.MismatchedCharException; import antlr.TokenStream; import antlr.ANTLRHashString; import antlr.LexerSharedInputState; import antlr.collections.impl.BitSet; import antlr.SemanticException; import java.io.StringReader; import antlr.collections.impl.Vector; import antlr.*; /** Perform the following translations: AST related translations ## -> currentRule_AST #(x,y,z) -> codeGenerator.getASTCreateString(vector-of(x,y,z)) #[x] -> codeGenerator.getASTCreateString(x) #x -> codeGenerator.mapTreeId(x) Inside context of #(...), you can ref (x,y,z), [x], and x as shortcuts. 
Text related translations $append(x) -> self.text.append(x) $setText(x) -> self.text.setLength(_begin) self.text.append(x) $getText -> self.text.getString(_begin) $setToken(x) -> _token = x $setType(x) -> _ttype = x $FOLLOW(r) -> FOLLOW set name for rule r (optional arg) $FIRST(r) -> FIRST set name for rule r (optional arg) experimental: $newline, $nl -> self.newline() $skip -> _ttype = SKIP */ public class ActionLexer extends antlr.CharScanner implements ActionLexerTokenTypes, TokenStream { protected RuleBlock currentRule; protected CodeGenerator generator; protected int lineOffset = 0; private Tool antlrTool; // The ANTLR tool ActionTransInfo transInfo; public ActionLexer( String s, RuleBlock currentRule, CodeGenerator generator, ActionTransInfo transInfo) { this(new StringReader(s)); this.currentRule = currentRule; this.generator = generator; this.transInfo = transInfo; } public void setLineOffset(int lineOffset) { // this.lineOffset = lineOffset; setLine(lineOffset); } public void setTool(Tool tool) { this.antlrTool = tool; } public void reportError(RecognitionException e) { antlrTool.error( "Syntax error in action: "+e, getFilename(),getLine(),getColumn()); } public void reportError(String s) { antlrTool.error(s,getFilename(),getLine(),getColumn()); } public void reportWarning(String s) { if ( getFilename()==null ) { antlrTool.warning(s); } else { antlrTool.warning(s,getFilename(),getLine(), getColumn()); } } public ActionLexer(InputStream in) { this(new ByteBuffer(in)); } public ActionLexer(Reader in) { this(new CharBuffer(in)); } public ActionLexer(InputBuffer ib) { this(new LexerSharedInputState(ib)); } public ActionLexer(LexerSharedInputState state) { super(state); caseSensitiveLiterals = true; setCaseSensitive(true); literals = new Hashtable(); } public Token nextToken() throws TokenStreamException { Token theRetToken=null; tryAgain: for (;;) { Token _token = null; int _ttype = Token.INVALID_TYPE; resetText(); try { // for char stream error handling try { 
// for lexical error handling if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff'))) { mACTION(true); theRetToken=_returnToken; } else { if (LA(1)==EOF_CHAR) {uponEOF(); _returnToken = makeToken(Token.EOF_TYPE);} else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());} } if ( _returnToken==null ) continue tryAgain; // found SKIP token _ttype = _returnToken.getType(); _returnToken.setType(_ttype); return _returnToken; } catch (RecognitionException e) { throw new TokenStreamRecognitionException(e); } } catch (CharStreamException cse) { if ( cse instanceof CharStreamIOException ) { throw new TokenStreamIOException(((CharStreamIOException)cse).io); } else { throw new TokenStreamException(cse.getMessage()); } } } } public final void mACTION(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = ACTION; int _saveIndex; { int _cnt3=0; _loop3: do { switch ( LA(1)) { case '#': { mAST_ITEM(false); break; } case '$': { mTEXT_ITEM(false); break; } default: if ((_tokenSet_0.member(LA(1)))) { mSTUFF(false); } else { if ( _cnt3>=1 ) { break _loop3; } else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());} } } _cnt3++; } while (true); } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mSTUFF(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = STUFF; int _saveIndex; switch ( LA(1)) { case '"': { mSTRING(false); break; } case '\'': { mCHAR(false); break; } case '\n': { match('\n'); newline(); break; } default: if ((LA(1)=='/') && (LA(2)=='*'||LA(2)=='/')) { mCOMMENT(false); } else if ((LA(1)=='\r') && (LA(2)=='\n') && (true)) { 
match("\r\n"); newline(); } else if ((LA(1)=='/') && (_tokenSet_1.member(LA(2)))) { match('/'); { match(_tokenSet_1); } } else if ((LA(1)=='\r') && (true) && (true)) { match('\r'); newline(); } else if ((_tokenSet_2.member(LA(1)))) { { match(_tokenSet_2); } } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mAST_ITEM(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = AST_ITEM; int _saveIndex; Token t=null; Token id=null; Token ctor=null; if ((LA(1)=='#') && (LA(2)=='(')) { _saveIndex=text.length(); match('#'); text.setLength(_saveIndex); mTREE(true); t=_returnToken; } else if ((LA(1)=='#') && (_tokenSet_3.member(LA(2)))) { _saveIndex=text.length(); match('#'); text.setLength(_saveIndex); mID(true); id=_returnToken; String idt = id.getText(); String var = generator.mapTreeId(idt,transInfo); if ( var!=null ) { text.setLength(_begin); text.append(var); } { if ((_tokenSet_4.member(LA(1))) && (true) && (true)) { mWS(false); } else { } } { if ((LA(1)=='=') && (true) && (true)) { mVAR_ASSIGN(false); } else { } } } else if ((LA(1)=='#') && (LA(2)=='[')) { _saveIndex=text.length(); match('#'); text.setLength(_saveIndex); mAST_CONSTRUCTOR(true); ctor=_returnToken; } else if ((LA(1)=='#') && (LA(2)=='#')) { match("##"); String r=currentRule.getRuleName()+"_AST"; text.setLength(_begin); text.append(r); if ( transInfo!=null ) { transInfo.refRuleRoot=r; // we ref root of tree } { if ((_tokenSet_4.member(LA(1))) && (true) && (true)) { mWS(false); } else { } } { if ((LA(1)=='=') && (true) && (true)) { mVAR_ASSIGN(false); } else { } } } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), 
getLine(), getColumn()); } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mTEXT_ITEM(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = TEXT_ITEM; int _saveIndex; Token a1=null; Token a2=null; Token a3=null; Token a4=null; Token a5=null; Token a6=null; if ((LA(1)=='$') && (LA(2)=='s') && (LA(3)=='e')) { match("$set"); { if ((LA(1)=='T') && (LA(2)=='e')) { match("Text"); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { mWS(false); break; } case '(': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } match('('); mTEXT_ARG(true); a2=_returnToken; match(')'); String t; t = "self.text.setLength(_begin) ; self.text.append("+a2.getText()+")"; text.setLength(_begin); text.append(t); } else if ((LA(1)=='T') && (LA(2)=='o')) { match("Token"); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { mWS(false); break; } case '(': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } match('('); mTEXT_ARG(true); a3=_returnToken; match(')'); String t="_token = "+a3.getText(); text.setLength(_begin); text.append(t); } else if ((LA(1)=='T') && (LA(2)=='y')) { match("Type"); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { mWS(false); break; } case '(': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } match('('); mTEXT_ARG(true); a4=_returnToken; match(')'); String t="_ttype = "+a4.getText(); text.setLength(_begin); text.append(t); } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } else if ((LA(1)=='$') && (LA(2)=='F') 
&& (LA(3)=='O')) { match("$FOLLOW"); { if ((_tokenSet_5.member(LA(1))) && (_tokenSet_6.member(LA(2))) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) { { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { mWS(false); break; } case '(': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } match('('); mTEXT_ARG(true); a5=_returnToken; match(')'); } else { } } String rule = currentRule.getRuleName(); if ( a5!=null ) { rule = a5.getText(); } String setName = generator.getFOLLOWBitSet(rule, 1); if ( setName==null ) { reportError("$FOLLOW("+rule+")"+ ": unknown rule or bad lookahead computation"); } else { text.setLength(_begin); text.append(setName); } } else if ((LA(1)=='$') && (LA(2)=='F') && (LA(3)=='I')) { match("$FIRST"); { if ((_tokenSet_5.member(LA(1))) && (_tokenSet_6.member(LA(2))) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) { { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { mWS(false); break; } case '(': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } match('('); mTEXT_ARG(true); a6=_returnToken; match(')'); } else { } } String rule = currentRule.getRuleName(); if ( a6!=null ) { rule = a6.getText(); } String setName = generator.getFIRSTBitSet(rule, 1); if ( setName==null ) { reportError("$FIRST("+rule+")"+ ": unknown rule or bad lookahead computation"); } else { text.setLength(_begin); text.append(setName); } } else if ((LA(1)=='$') && (LA(2)=='s') && (LA(3)=='k')) { match("$skip"); text.setLength(_begin); text.append("_ttype = SKIP"); } else if ((LA(1)=='$') && (LA(2)=='a')) { match("$append"); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { mWS(false); break; } case '(': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } match('('); mTEXT_ARG(true); a1=_returnToken; match(')'); String t = 
"self.text.append("+a1.getText()+")"; text.setLength(_begin); text.append(t); } else if ((LA(1)=='$') && (LA(2)=='g')) { match("$getText"); text.setLength(_begin); text.append("self.text.getString(_begin)"); } else if ((LA(1)=='$') && (LA(2)=='n')) { { if ((LA(1)=='$') && (LA(2)=='n') && (LA(3)=='l')) { match("$nl"); } else if ((LA(1)=='$') && (LA(2)=='n') && (LA(3)=='e')) { match("$newline"); } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } text.setLength(_begin); text.append("self.newline()"); } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mCOMMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = COMMENT; int _saveIndex; { if ((LA(1)=='/') && (LA(2)=='/')) { mSL_COMMENT(false); } else if ((LA(1)=='/') && (LA(2)=='*')) { mML_COMMENT(false); } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mSTRING(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = STRING; int _saveIndex; match('"'); { _loop104: do { if ((LA(1)=='\\')) { mESC(false); } else if ((_tokenSet_7.member(LA(1)))) { matchNot('"'); } else { break _loop104; } } while (true); } match('"'); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new 
String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mCHAR(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = CHAR; int _saveIndex; match('\''); { if ((LA(1)=='\\')) { mESC(false); } else if ((_tokenSet_8.member(LA(1)))) { matchNot('\''); } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } match('\''); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mTREE(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = TREE; int _saveIndex; Token t=null; Token t2=null; StringBuffer buf = new StringBuffer(); int n=0; Vector terms = new Vector(10); _saveIndex=text.length(); match('('); text.setLength(_saveIndex); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case '"': case '#': case '(': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': case '[': case '_': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } _saveIndex=text.length(); mTREE_ELEMENT(true); text.setLength(_saveIndex); 
t=_returnToken; terms.appendElement(t.getText()); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case ')': case ',': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } { _loop29: do { if ((LA(1)==',')) { _saveIndex=text.length(); match(','); text.setLength(_saveIndex); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case '"': case '#': case '(': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': case '[': case '_': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } _saveIndex=text.length(); mTREE_ELEMENT(true); text.setLength(_saveIndex); t2=_returnToken; terms.appendElement(t2.getText()); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case ')': case ',': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } } else { break _loop29; } } while (true); } text.setLength(_begin); text.append(generator.getASTCreateString(terms)); _saveIndex=text.length(); match(')'); text.setLength(_saveIndex); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, 
text.length()-_begin)); } _returnToken = _token; } protected final void mID(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = ID; int _saveIndex; { switch ( LA(1)) { case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { matchRange('a','z'); break; } case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': { matchRange('A','Z'); break; } case '_': { match('_'); break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } { _loop86: do { if ((_tokenSet_9.member(LA(1))) && (true) && (true)) { { switch ( LA(1)) { case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { matchRange('a','z'); break; } case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': { matchRange('A','Z'); break; } case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': { matchRange('0','9'); break; } case '_': { match('_'); break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } } else { break _loop86; } } while (true); } if ( 
_createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mWS(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = WS; int _saveIndex; { int _cnt124=0; _loop124: do { if ((LA(1)=='\r') && (LA(2)=='\n') && (true)) { match('\r'); match('\n'); newline(); } else if ((LA(1)==' ') && (true) && (true)) { match(' '); } else if ((LA(1)=='\t') && (true) && (true)) { match('\t'); } else if ((LA(1)=='\r') && (true) && (true)) { match('\r'); newline(); } else if ((LA(1)=='\n') && (true) && (true)) { match('\n'); newline(); } else { if ( _cnt124>=1 ) { break _loop124; } else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());} } _cnt124++; } while (true); } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mVAR_ASSIGN(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = VAR_ASSIGN; int _saveIndex; match('='); // inform the code generator that an assignment was done to // AST root for the rule if invoker set refRuleRoot. 
if ( LA(1)!='=' && transInfo!=null && transInfo.refRuleRoot!=null ) { transInfo.assignToRoot=true; } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mAST_CONSTRUCTOR(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = AST_CONSTRUCTOR; int _saveIndex; Token x=null; Token y=null; Token z=null; _saveIndex=text.length(); match('['); text.setLength(_saveIndex); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case '"': case '#': case '(': case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': case '[': case '_': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } _saveIndex=text.length(); mAST_CTOR_ELEMENT(true); text.setLength(_saveIndex); x=_returnToken; { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case ',': case ']': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } { if ((LA(1)==',') && (_tokenSet_10.member(LA(2))) && ((LA(3) >= 
'\u0003' && LA(3) <= '\u00ff'))) { _saveIndex=text.length(); match(','); text.setLength(_saveIndex); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case '"': case '#': case '(': case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': case '[': case '_': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } _saveIndex=text.length(); mAST_CTOR_ELEMENT(true); text.setLength(_saveIndex); y=_returnToken; { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case ',': case ']': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } } else if ((LA(1)==','||LA(1)==']') && (true) && (true)) { } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } { switch ( LA(1)) { case ',': { _saveIndex=text.length(); match(','); text.setLength(_saveIndex); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case '"': case '#': case '(': case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 
'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': case '[': case '_': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } _saveIndex=text.length(); mAST_CTOR_ELEMENT(true); text.setLength(_saveIndex); z=_returnToken; { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case ']': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } break; } case ']': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } _saveIndex=text.length(); match(']'); text.setLength(_saveIndex); String args = x.getText(); if ( y!=null ) { args += ","+y.getText(); } if ( z!=null ) { args += ","+z.getText(); } text.setLength(_begin); text.append(generator.getASTCreateString(null,args)); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mTEXT_ARG(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = TEXT_ARG; int _saveIndex; { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { mWS(false); break; } case '"': case '$': case '\'': case '+': case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case 
'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': case '_': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } { int _cnt60=0; _loop60: do { if ((_tokenSet_11.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && (true)) { mTEXT_ARG_ELEMENT(false); { if ((_tokenSet_4.member(LA(1))) && (_tokenSet_12.member(LA(2))) && (true)) { mWS(false); } else if ((_tokenSet_12.member(LA(1))) && (true) && (true)) { } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } else { if ( _cnt60>=1 ) { break _loop60; } else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());} } _cnt60++; } while (true); } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mTREE_ELEMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = TREE_ELEMENT; int _saveIndex; Token id=null; boolean was_mapped; switch ( LA(1)) { case '(': { mTREE(false); break; } case '[': { mAST_CONSTRUCTOR(false); break; } case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 
'W': case 'X': case 'Y': case 'Z': case '_': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { mID_ELEMENT(false); break; } case '"': { mSTRING(false); break; } default: if ((LA(1)=='#') && (LA(2)=='(')) { _saveIndex=text.length(); match('#'); text.setLength(_saveIndex); mTREE(false); } else if ((LA(1)=='#') && (LA(2)=='[')) { _saveIndex=text.length(); match('#'); text.setLength(_saveIndex); mAST_CONSTRUCTOR(false); } else if ((LA(1)=='#') && (_tokenSet_3.member(LA(2)))) { _saveIndex=text.length(); match('#'); text.setLength(_saveIndex); was_mapped=mID_ELEMENT(true); id=_returnToken; // RK: I have a queer feeling that this maptreeid is redundant if( ! was_mapped ) { String t = generator.mapTreeId(id.getText(), null); text.setLength(_begin); text.append(t); } } else if ((LA(1)=='#') && (LA(2)=='#')) { match("##"); String t = currentRule.getRuleName()+"_AST"; text.setLength(_begin); text.append(t); } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } /** An ID_ELEMENT can be a func call, array ref, simple var, * or AST label ref. 
*/ protected final boolean mID_ELEMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { boolean mapped=false; int _ttype; Token _token=null; int _begin=text.length(); _ttype = ID_ELEMENT; int _saveIndex; Token id=null; mID(true); id=_returnToken; { if ((_tokenSet_4.member(LA(1))) && (_tokenSet_13.member(LA(2))) && (true)) { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); } else if ((_tokenSet_13.member(LA(1))) && (true) && (true)) { } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } { switch ( LA(1)) { case '(': { match('('); { if ((_tokenSet_4.member(LA(1))) && (_tokenSet_14.member(LA(2))) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); } else if ((_tokenSet_14.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && (true)) { } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } { switch ( LA(1)) { case '"': case '#': case '\'': case '(': case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': case '[': case '_': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { mARG(false); { _loop48: do { if ((LA(1)==',')) { match(','); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case '"': case '#': case '\'': case '(': case '0': case '1': 
case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': case '[': case '_': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } mARG(false); } else { break _loop48; } } while (true); } break; } case '\t': case '\n': case '\r': case ' ': case ')': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case ')': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } match(')'); break; } case '[': { { int _cnt53=0; _loop53: do { if ((LA(1)=='[')) { match('['); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case '"': case '#': case '\'': case '(': case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': case '[': case '_': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 
'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } mARG(false); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case ']': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } match(']'); } else { if ( _cnt53>=1 ) { break _loop53; } else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());} } _cnt53++; } while (true); } break; } case '.': { match('.'); mID_ELEMENT(false); break; } case '\t': case '\n': case '\r': case ' ': case ')': case '*': case '+': case ',': case '-': case '/': case '=': case ']': { mapped = true; String t = generator.mapTreeId(id.getText(), transInfo); text.setLength(_begin); text.append(t); { if (((_tokenSet_15.member(LA(1))) && (_tokenSet_16.member(LA(2))) && (true))&&(transInfo!=null && transInfo.refRuleRoot!=null)) { { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { mWS(false); break; } case '=': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } mVAR_ASSIGN(false); } else if ((_tokenSet_17.member(LA(1))) && (true) && (true)) { } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; return mapped; } /** The arguments of a #[...] constructor are text, token type, * or a tree. 
*/ protected final void mAST_CTOR_ELEMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = AST_CTOR_ELEMENT; int _saveIndex; if ((LA(1)=='"') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) { mSTRING(false); } else if ((_tokenSet_18.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && (true)) { mTREE_ELEMENT(false); } else if (((LA(1) >= '0' && LA(1) <= '9'))) { mINT(false); } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mINT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = INT; int _saveIndex; { int _cnt115=0; _loop115: do { if (((LA(1) >= '0' && LA(1) <= '9'))) { mDIGIT(false); } else { if ( _cnt115>=1 ) { break _loop115; } else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());} } _cnt115++; } while (true); } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mARG(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = ARG; int _saveIndex; { switch ( LA(1)) { case '\'': { mCHAR(false); break; } case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': { mINT_OR_FLOAT(false); break; } default: if ((_tokenSet_18.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && ((LA(3) >= 
'\u0003' && LA(3) <= '\u00ff'))) { mTREE_ELEMENT(false); } else if ((LA(1)=='"') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) { mSTRING(false); } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } { _loop81: do { if ((_tokenSet_19.member(LA(1))) && (_tokenSet_20.member(LA(2))) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) { { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { mWS(false); break; } case '*': case '+': case '-': case '/': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } { switch ( LA(1)) { case '+': { match('+'); break; } case '-': { match('-'); break; } case '*': { match('*'); break; } case '/': { match('/'); break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { mWS(false); break; } case '"': case '#': case '\'': case '(': case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': case '[': case '_': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } mARG(false); } else { break _loop81; } } while (true); } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new 
String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mTEXT_ARG_ELEMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = TEXT_ARG_ELEMENT; int _saveIndex; switch ( LA(1)) { case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': case '_': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { mTEXT_ARG_ID_ELEMENT(false); break; } case '"': { mSTRING(false); break; } case '\'': { mCHAR(false); break; } case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': { mINT_OR_FLOAT(false); break; } case '$': { mTEXT_ITEM(false); break; } case '+': { match('+'); break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mTEXT_ARG_ID_ELEMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = TEXT_ARG_ID_ELEMENT; int _saveIndex; Token id=null; mID(true); id=_returnToken; { if ((_tokenSet_4.member(LA(1))) && (_tokenSet_21.member(LA(2))) && (true)) { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); } else if ((_tokenSet_21.member(LA(1))) && (true) && (true)) { } else { throw new 
NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } { switch ( LA(1)) { case '(': { match('('); { if ((_tokenSet_4.member(LA(1))) && (_tokenSet_22.member(LA(2))) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); } else if ((_tokenSet_22.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && (true)) { } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } { _loop69: do { if ((_tokenSet_23.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) { mTEXT_ARG(false); { _loop68: do { if ((LA(1)==',')) { match(','); mTEXT_ARG(false); } else { break _loop68; } } while (true); } } else { break _loop69; } } while (true); } { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case ')': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } match(')'); break; } case '[': { { int _cnt74=0; _loop74: do { if ((LA(1)=='[')) { match('['); { if ((_tokenSet_4.member(LA(1))) && (_tokenSet_23.member(LA(2))) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); } else if ((_tokenSet_23.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) { } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } mTEXT_ARG(false); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case ']': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } match(']'); } else { if ( _cnt74>=1 ) { break _loop74; } else {throw new 
NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());} } _cnt74++; } while (true); } break; } case '.': { match('.'); mTEXT_ARG_ID_ELEMENT(false); break; } case '\t': case '\n': case '\r': case ' ': case '"': case '$': case '\'': case ')': case '+': case ',': case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': case ']': case '_': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mINT_OR_FLOAT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = INT_OR_FLOAT; int _saveIndex; { int _cnt118=0; _loop118: do { if (((LA(1) >= '0' && LA(1) <= '9')) && (_tokenSet_24.member(LA(2))) && (true)) { mDIGIT(false); } else { if ( _cnt118>=1 ) { break _loop118; } else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());} } _cnt118++; } while (true); } { if ((LA(1)=='L') && (_tokenSet_25.member(LA(2))) && (true)) { match('L'); } else if ((LA(1)=='l') && (_tokenSet_25.member(LA(2))) && (true)) { match('l'); } else if ((LA(1)=='.')) { match('.'); { _loop121: do { if (((LA(1) >= '0' 
&& LA(1) <= '9')) && (_tokenSet_25.member(LA(2))) && (true)) { mDIGIT(false); } else { break _loop121; } } while (true); } } else if ((_tokenSet_25.member(LA(1))) && (true) && (true)) { } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mSL_COMMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = SL_COMMENT; int _saveIndex; match("//"); /* rewrite comment symbol */ text.setLength(_begin); text.append("#"); { _loop92: do { // nongreedy exit test if ((LA(1)=='\n'||LA(1)=='\r') && (true) && (true)) break _loop92; if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && (true)) { matchNot(EOF_CHAR); } else { break _loop92; } } while (true); } // do nothing { if ((LA(1)=='\r') && (LA(2)=='\n') && (true)) { match("\r\n"); } else if ((LA(1)=='\n')) { match('\n'); } else if ((LA(1)=='\r') && (true) && (true)) { match('\r'); } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } newline(); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mML_COMMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = ML_COMMENT; int _saveIndex; match("/*"); /* rewrite comment symbol */ text.setLength(_begin); text.append("#"); { _loop99: do { // nongreedy exit test if ((LA(1)=='*') && (LA(2)=='/') && (true)) break _loop99; if ((LA(1)=='\r') && (LA(2)=='\n') && ((LA(3) 
>= '\u0003' && LA(3) <= '\u00ff'))) { match('\r'); match('\n'); _saveIndex=text.length(); mIGNWS(false); text.setLength(_saveIndex); newline(); text.append("# "); } else if ((LA(1)=='\r') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) { match('\r'); _saveIndex=text.length(); mIGNWS(false); text.setLength(_saveIndex); newline(); text.append("# "); } else if ((LA(1)=='\n') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) { match('\n'); _saveIndex=text.length(); mIGNWS(false); text.setLength(_saveIndex); newline(); text.append("# "); } else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) { matchNot(EOF_CHAR); } else { break _loop99; } } while (true); } /* force a newline (MK: should actually be the same newline as * was matched earlier in the block comment*/ text.append("\n"); _saveIndex=text.length(); match("*/"); text.setLength(_saveIndex); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mIGNWS(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = IGNWS; int _saveIndex; { _loop96: do { if ((LA(1)==' ') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) { match(' '); } else if ((LA(1)=='\t') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) { match('\t'); } else { break _loop96; } } while (true); } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mESC(boolean _createToken) throws 
RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = ESC; int _saveIndex; match('\\'); { switch ( LA(1)) { case 'n': { match('n'); break; } case 'r': { match('r'); break; } case 't': { match('t'); break; } case 'b': { match('b'); break; } case 'f': { match('f'); break; } case '"': { match('"'); break; } case '\'': { match('\''); break; } case '\\': { match('\\'); break; } case '0': case '1': case '2': case '3': { { matchRange('0','3'); } { if (((LA(1) >= '0' && LA(1) <= '9')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && (true)) { mDIGIT(false); { if (((LA(1) >= '0' && LA(1) <= '9')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && (true)) { mDIGIT(false); } else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && (true) && (true)) { } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && (true) && (true)) { } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } break; } case '4': case '5': case '6': case '7': { { matchRange('4','7'); } { if (((LA(1) >= '0' && LA(1) <= '9')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && (true)) { mDIGIT(false); } else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && (true) && (true)) { } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mDIGIT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = DIGIT; int _saveIndex; 
matchRange('0','9'); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } private static final long[] mk_tokenSet_0() { long[] data = new long[8]; data[0]=-103079215112L; for (int i = 1; i<=3; i++) { data[i]=-1L; } return data; } public static final BitSet _tokenSet_0 = new BitSet(mk_tokenSet_0()); private static final long[] mk_tokenSet_1() { long[] data = new long[8]; data[0]=-145135534866440L; for (int i = 1; i<=3; i++) { data[i]=-1L; } return data; } public static final BitSet _tokenSet_1 = new BitSet(mk_tokenSet_1()); private static final long[] mk_tokenSet_2() { long[] data = new long[8]; data[0]=-141407503262728L; for (int i = 1; i<=3; i++) { data[i]=-1L; } return data; } public static final BitSet _tokenSet_2 = new BitSet(mk_tokenSet_2()); private static final long[] mk_tokenSet_3() { long[] data = { 0L, 576460745995190270L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_3 = new BitSet(mk_tokenSet_3()); private static final long[] mk_tokenSet_4() { long[] data = { 4294977024L, 0L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_4 = new BitSet(mk_tokenSet_4()); private static final long[] mk_tokenSet_5() { long[] data = { 1103806604800L, 0L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_5 = new BitSet(mk_tokenSet_5()); private static final long[] mk_tokenSet_6() { long[] data = { 287959436729787904L, 576460745995190270L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_6 = new BitSet(mk_tokenSet_6()); private static final long[] mk_tokenSet_7() { long[] data = new long[8]; data[0]=-17179869192L; data[1]=-268435457L; for (int i = 2; i<=3; i++) { data[i]=-1L; } return data; } public static final BitSet _tokenSet_7 = new BitSet(mk_tokenSet_7()); private static final long[] mk_tokenSet_8() { long[] data = new long[8]; data[0]=-549755813896L; 
data[1]=-268435457L; for (int i = 2; i<=3; i++) { data[i]=-1L; } return data; } public static final BitSet _tokenSet_8 = new BitSet(mk_tokenSet_8()); private static final long[] mk_tokenSet_9() { long[] data = { 287948901175001088L, 576460745995190270L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_9 = new BitSet(mk_tokenSet_9()); private static final long[] mk_tokenSet_10() { long[] data = { 287950056521213440L, 576460746129407998L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_10 = new BitSet(mk_tokenSet_10()); private static final long[] mk_tokenSet_11() { long[] data = { 287958332923183104L, 576460745995190270L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_11 = new BitSet(mk_tokenSet_11()); private static final long[] mk_tokenSet_12() { long[] data = { 287978128427460096L, 576460746532061182L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_12 = new BitSet(mk_tokenSet_12()); private static final long[] mk_tokenSet_13() { long[] data = { 2306123388973753856L, 671088640L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_13 = new BitSet(mk_tokenSet_13()); private static final long[] mk_tokenSet_14() { long[] data = { 287952805300282880L, 576460746129407998L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_14 = new BitSet(mk_tokenSet_14()); private static final long[] mk_tokenSet_15() { long[] data = { 2305843013508670976L, 0L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_15 = new BitSet(mk_tokenSet_15()); private static final long[] mk_tokenSet_16() { long[] data = { 2306051920717948416L, 536870912L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_16 = new BitSet(mk_tokenSet_16()); private static final long[] mk_tokenSet_17() { long[] data = { 208911504254464L, 536870912L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_17 = new BitSet(mk_tokenSet_17()); private static final long[] mk_tokenSet_18() { 
long[] data = { 1151051235328L, 576460746129407998L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_18 = new BitSet(mk_tokenSet_18()); private static final long[] mk_tokenSet_19() { long[] data = { 189120294954496L, 0L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_19 = new BitSet(mk_tokenSet_19()); private static final long[] mk_tokenSet_20() { long[] data = { 288139722277004800L, 576460746129407998L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_20 = new BitSet(mk_tokenSet_20()); private static final long[] mk_tokenSet_21() { long[] data = { 288049596683265536L, 576460746666278910L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_21 = new BitSet(mk_tokenSet_21()); private static final long[] mk_tokenSet_22() { long[] data = { 287960536241415680L, 576460745995190270L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_22 = new BitSet(mk_tokenSet_22()); private static final long[] mk_tokenSet_23() { long[] data = { 287958337218160128L, 576460745995190270L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_23 = new BitSet(mk_tokenSet_23()); private static final long[] mk_tokenSet_24() { long[] data = { 288228817078593024L, 576460746532061182L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_24 = new BitSet(mk_tokenSet_24()); private static final long[] mk_tokenSet_25() { long[] data = { 288158448334415360L, 576460746532061182L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_25 = new BitSet(mk_tokenSet_25()); } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/actions/python/ActionLexerTokenTypes.java000066400000000000000000000013021161462365500312010ustar00rootroot00000000000000// $ANTLR 2.7.5RC1 (20041124-137): "action.g" -> "ActionLexer.java"$ package antlr.actions.python; public interface ActionLexerTokenTypes { int EOF = 1; int NULL_TREE_LOOKAHEAD = 3; int ACTION = 4; int STUFF = 5; int AST_ITEM = 6; int TEXT_ITEM = 7; 
int TREE = 8; int TREE_ELEMENT = 9; int AST_CONSTRUCTOR = 10; int AST_CTOR_ELEMENT = 11; int ID_ELEMENT = 12; int TEXT_ARG = 13; int TEXT_ARG_ELEMENT = 14; int TEXT_ARG_ID_ELEMENT = 15; int ARG = 16; int ID = 17; int VAR_ASSIGN = 18; int COMMENT = 19; int SL_COMMENT = 20; int IGNWS = 21; int ML_COMMENT = 22; int CHAR = 23; int STRING = 24; int ESC = 25; int DIGIT = 26; int INT = 27; int INT_OR_FLOAT = 28; int WS = 29; } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/actions/python/CodeLexer.java000066400000000000000000000222031161462365500266130ustar00rootroot00000000000000// $ANTLR 2.7.5RC1 (20041124-137): "code.g" -> "CodeLexer.java"$ package antlr.actions.python; import java.io.InputStream; import antlr.TokenStreamException; import antlr.TokenStreamIOException; import antlr.TokenStreamRecognitionException; import antlr.CharStreamException; import antlr.CharStreamIOException; import antlr.ANTLRException; import java.io.Reader; import java.util.Hashtable; import antlr.CharScanner; import antlr.InputBuffer; import antlr.ByteBuffer; import antlr.CharBuffer; import antlr.Token; import antlr.CommonToken; import antlr.RecognitionException; import antlr.NoViableAltForCharException; import antlr.MismatchedCharException; import antlr.TokenStream; import antlr.ANTLRHashString; import antlr.LexerSharedInputState; import antlr.collections.impl.BitSet; import antlr.SemanticException; import java.io.StringReader; import antlr.collections.impl.Vector; import antlr.*; public class CodeLexer extends antlr.CharScanner implements CodeLexerTokenTypes, TokenStream { protected int lineOffset = 0; private Tool antlrTool; // The ANTLR tool public CodeLexer ( String s, String fname, int line, Tool tool ) { this(new StringReader(s)); setLine(line); setFilename(fname); this.antlrTool = tool; } public void setLineOffset(int lineOffset) { setLine(lineOffset); } public void reportError(RecognitionException e) { antlrTool.error( "Syntax error in action: "+e, 
getFilename(),getLine(),getColumn()); } public void reportError(String s) { antlrTool.error(s,getFilename(),getLine(),getColumn()); } public void reportWarning(String s) { if ( getFilename()==null ) { antlrTool.warning(s); } else { antlrTool.warning(s,getFilename(),getLine(), getColumn()); } } public CodeLexer(InputStream in) { this(new ByteBuffer(in)); } public CodeLexer(Reader in) { this(new CharBuffer(in)); } public CodeLexer(InputBuffer ib) { this(new LexerSharedInputState(ib)); } public CodeLexer(LexerSharedInputState state) { super(state); caseSensitiveLiterals = true; setCaseSensitive(true); literals = new Hashtable(); } public Token nextToken() throws TokenStreamException { Token theRetToken=null; tryAgain: for (;;) { Token _token = null; int _ttype = Token.INVALID_TYPE; resetText(); try { // for char stream error handling try { // for lexical error handling { mACTION(true); theRetToken=_returnToken; } if ( _returnToken==null ) continue tryAgain; // found SKIP token _ttype = _returnToken.getType(); _returnToken.setType(_ttype); return _returnToken; } catch (RecognitionException e) { throw new TokenStreamRecognitionException(e); } } catch (CharStreamException cse) { if ( cse instanceof CharStreamIOException ) { throw new TokenStreamIOException(((CharStreamIOException)cse).io); } else { throw new TokenStreamException(cse.getMessage()); } } } } public final void mACTION(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = ACTION; int _saveIndex; { _loop3: do { if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff'))) { mSTUFF(false); } else { break _loop3; } } while (true); } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mSTUFF(boolean _createToken) throws RecognitionException, CharStreamException, 
TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = STUFF; int _saveIndex; if ((LA(1)=='/') && (LA(2)=='*'||LA(2)=='/')) { mCOMMENT(false); } else if ((LA(1)=='\r') && (LA(2)=='\n')) { match("\r\n"); newline(); } else if ((LA(1)=='/') && (_tokenSet_0.member(LA(2)))) { match('/'); { match(_tokenSet_0); } } else if ((LA(1)=='\r') && (true)) { match('\r'); newline(); } else if ((LA(1)=='\n')) { match('\n'); newline(); } else if ((_tokenSet_1.member(LA(1)))) { { match(_tokenSet_1); } } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mCOMMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = COMMENT; int _saveIndex; if ((LA(1)=='/') && (LA(2)=='/')) { mSL_COMMENT(false); } else if ((LA(1)=='/') && (LA(2)=='*')) { mML_COMMENT(false); } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mSL_COMMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = SL_COMMENT; int _saveIndex; _saveIndex=text.length(); match("//"); text.setLength(_saveIndex); /* rewrite comment symbol */ text.append("#"); { _loop10: do { // nongreedy exit test if ((LA(1)=='\n'||LA(1)=='\r') && (true)) break _loop10; if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) { matchNot(EOF_CHAR); } else { 
break _loop10; } } while (true); } { if ((LA(1)=='\r') && (LA(2)=='\n')) { match("\r\n"); } else if ((LA(1)=='\n')) { match('\n'); } else if ((LA(1)=='\r') && (true)) { match('\r'); } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } newline(); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mML_COMMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = ML_COMMENT; int _saveIndex; int offset = 0; _saveIndex=text.length(); match("/*"); text.setLength(_saveIndex); /* rewrite comment symbol */ text.append("#"); { _loop17: do { // nongreedy exit test if ((LA(1)=='*') && (LA(2)=='/')) break _loop17; if ((LA(1)=='\r') && (LA(2)=='\n')) { match('\r'); match('\n'); _saveIndex=text.length(); mIGNWS(false); text.setLength(_saveIndex); newline(); text.append("# "); } else if ((LA(1)=='\r') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) { match('\r'); _saveIndex=text.length(); mIGNWS(false); text.setLength(_saveIndex); newline(); text.append("# "); } else if ((LA(1)=='\n') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) { match('\n'); _saveIndex=text.length(); mIGNWS(false); text.setLength(_saveIndex); newline(); text.append("# "); } else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) { matchNot(EOF_CHAR); } else { break _loop17; } } while (true); } /* force a newline */ text.append("\n"); _saveIndex=text.length(); match("*/"); text.setLength(_saveIndex); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mIGNWS(boolean _createToken) throws 
RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = IGNWS; int _saveIndex; { _loop14: do { if ((LA(1)==' ') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) { match(' '); } else if ((LA(1)=='\t') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) { match('\t'); } else { break _loop14; } } while (true); } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } private static final long[] mk_tokenSet_0() { long[] data = new long[8]; data[0]=-145135534866440L; for (int i = 1; i<=3; i++) { data[i]=-1L; } return data; } public static final BitSet _tokenSet_0 = new BitSet(mk_tokenSet_0()); private static final long[] mk_tokenSet_1() { long[] data = new long[8]; data[0]=-140737488364552L; for (int i = 1; i<=3; i++) { data[i]=-1L; } return data; } public static final BitSet _tokenSet_1 = new BitSet(mk_tokenSet_1()); } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/actions/python/CodeLexerTokenTypes.java000066400000000000000000000004451161462365500306450ustar00rootroot00000000000000// $ANTLR 2.7.5RC1 (20041124-137): "code.g" -> "CodeLexer.java"$ package antlr.actions.python; public interface CodeLexerTokenTypes { int EOF = 1; int NULL_TREE_LOOKAHEAD = 3; int ACTION = 4; int STUFF = 5; int COMMENT = 6; int SL_COMMENT = 7; int IGNWS = 8; int ML_COMMENT = 9; } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/actions/python/action.g000066400000000000000000000232751161462365500255350ustar00rootroot00000000000000// This file is part of PyANTLR. See LICENSE.txt for license // details..........Copyright (C) Wolfgang Haefelinger, 2004. 
// // $Id$ header { package antlr.actions.python; } { import java.io.StringReader; import antlr.collections.impl.Vector; import antlr.*; } /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id$ */ /** Perform the following translations: AST related translations ## -> currentRule_AST #(x,y,z) -> codeGenerator.getASTCreateString(vector-of(x,y,z)) #[x] -> codeGenerator.getASTCreateString(x) #x -> codeGenerator.mapTreeId(x) Inside context of #(...), you can ref (x,y,z), [x], and x as shortcuts. Text related translations $append(x) -> self.text.append(x) $setText(x) -> self.text.setLength(_begin) self.text.append(x) $getText -> self.text.getString(_begin) $setToken(x) -> _token = x $setType(x) -> _ttype = x $FOLLOW(r) -> FOLLOW set name for rule r (optional arg) $FIRST(r) -> FIRST set name for rule r (optional arg) experimental: $newline, $nl -> self.newline() $skip -> _ttype = SKIP */ class ActionLexer extends Lexer; options { k=3; charVocabulary='\3'..'\377'; testLiterals=false; interactive=true; } { protected RuleBlock currentRule; protected CodeGenerator generator; protected int lineOffset = 0; private Tool antlrTool; // The ANTLR tool ActionTransInfo transInfo; public ActionLexer( String s, RuleBlock currentRule, CodeGenerator generator, ActionTransInfo transInfo) { this(new StringReader(s)); this.currentRule = currentRule; this.generator = generator; this.transInfo = transInfo; } public void setLineOffset(int lineOffset) { // this.lineOffset = lineOffset; setLine(lineOffset); } public void setTool(Tool tool) { this.antlrTool = tool; } public void reportError(RecognitionException e) { antlrTool.error( "Syntax error in action: "+e, getFilename(),getLine(),getColumn()); } public void reportError(String s) { antlrTool.error(s,getFilename(),getLine(),getColumn()); } public void reportWarning(String s) { if ( getFilename()==null ) { antlrTool.warning(s); } else { 
antlrTool.warning(s,getFilename(),getLine(), getColumn()); } } } // rules are protected because we don't care about nextToken(). public ACTION : ( STUFF | AST_ITEM | TEXT_ITEM )+ ; // stuff in between #(...) and #id items protected STUFF : COMMENT | STRING | CHAR | "\r\n" {newline();} | '\r' {newline();} | '\n' {newline();} | '/' ~('/'|'*') // non-comment start '/' | ~('/'|'\n'|'\r'|'$'|'#'|'"'|'\'') ; protected AST_ITEM : '#'! t:TREE | '#'! id:ID { String idt = id.getText(); String var = generator.mapTreeId(idt,transInfo); if ( var!=null ) { $setText(var); } } (WS)? ( options {greedy=true;} : VAR_ASSIGN )? | '#'! ctor:AST_CONSTRUCTOR | "##" { String r=currentRule.getRuleName()+"_AST"; $setText(r); if ( transInfo!=null ) { transInfo.refRuleRoot=r; // we ref root of tree } } (WS)? ( options {greedy=true;} : VAR_ASSIGN )? ; protected TEXT_ITEM : "$append" (WS)? '(' a1:TEXT_ARG ')' { String t = "self.text.append("+a1.getText()+")"; $setText(t); } | "$set" ( "Text" (WS)? '(' a2:TEXT_ARG ')' { String t; t = "self.text.setLength(_begin) ; self.text.append("+a2.getText()+")"; $setText(t); } | "Token" (WS)? '(' a3:TEXT_ARG ')' { String t="_token = "+a3.getText(); $setText(t); } | "Type" (WS)? '(' a4:TEXT_ARG ')' { String t="_ttype = "+a4.getText(); $setText(t); } ) | "$getText" { $setText("self.text.getString(_begin)"); } | "$FOLLOW" ( (WS)? '(' a5:TEXT_ARG ')' )? { String rule = currentRule.getRuleName(); if ( a5!=null ) { rule = a5.getText(); } String setName = generator.getFOLLOWBitSet(rule, 1); if ( setName==null ) { reportError("$FOLLOW("+rule+")"+ ": unknown rule or bad lookahead computation"); } else { $setText(setName); } } | "$FIRST" ( (WS)? '(' a6:TEXT_ARG ')' )? 
{ String rule = currentRule.getRuleName(); if ( a6!=null ) { rule = a6.getText(); } String setName = generator.getFIRSTBitSet(rule, 1); if ( setName==null ) { reportError("$FIRST("+rule+")"+ ": unknown rule or bad lookahead computation"); } else { $setText(setName); } } | "$skip" { $setText("_ttype = SKIP"); } | ( "$nl" | "$newline" ) { $setText("self.newline()"); } ; protected TREE! { StringBuffer buf = new StringBuffer(); int n=0; Vector terms = new Vector(10); } : '(' (WS)? t:TREE_ELEMENT {terms.appendElement(t.getText());} (WS)? ( ',' (WS)? t2:TREE_ELEMENT {terms.appendElement(t2.getText());} (WS)? )* {$setText(generator.getASTCreateString(terms));} ')' ; protected TREE_ELEMENT { boolean was_mapped; } : '#'! TREE | '#'! AST_CONSTRUCTOR | '#'! was_mapped=id:ID_ELEMENT { // RK: I have a queer feeling that this maptreeid is redundant if( ! was_mapped ) { String t = generator.mapTreeId(id.getText(), null); $setText(t); } } | "##" {String t = currentRule.getRuleName()+"_AST"; $setText(t);} | TREE | AST_CONSTRUCTOR | ID_ELEMENT | STRING ; protected AST_CONSTRUCTOR! : '[' (WS)? x:AST_CTOR_ELEMENT (WS)? (',' (WS)? y:AST_CTOR_ELEMENT (WS)? )? (',' (WS)? z:AST_CTOR_ELEMENT (WS)? )? ']' { String args = x.getText(); if ( y!=null ) { args += ","+y.getText(); } if ( z!=null ) { args += ","+z.getText(); } $setText(generator.getASTCreateString(null,args)); } ; /** The arguments of a #[...] constructor are text, token type, * or a tree. */ protected AST_CTOR_ELEMENT : STRING | INT | TREE_ELEMENT ; /** An ID_ELEMENT can be a func call, array ref, simple var, * or AST label ref. */ protected ID_ELEMENT returns [boolean mapped=false] : id:ID (options {greedy=true;}:WS!)? ( '(' (options {greedy=true;}:WS!)? ( ARG (',' (WS!)? ARG)* )? (WS!)? ')' // method call | ( '[' (WS!)? ARG (WS!)? ']' )+ // array reference | '.' 
ID_ELEMENT | /* could be a token reference or just a user var */ { mapped = true; String t = generator.mapTreeId(id.getText(), transInfo); $setText(t); } // if #rule referenced, check for assignment ( options {greedy=true;} : {transInfo!=null && transInfo.refRuleRoot!=null}? (WS)? VAR_ASSIGN )? ) ; protected TEXT_ARG : (WS)? ( TEXT_ARG_ELEMENT (options {greedy=true;}:WS)? )+ ; protected TEXT_ARG_ELEMENT : TEXT_ARG_ID_ELEMENT | STRING | CHAR | INT_OR_FLOAT | TEXT_ITEM | '+' ; protected TEXT_ARG_ID_ELEMENT : id:ID (options {greedy=true;}:WS!)? ( '(' (options {greedy=true;}:WS!)? ( TEXT_ARG (',' TEXT_ARG)* )* (WS!)? ')' // method call | ( '[' (WS!)? TEXT_ARG (WS!)? ']' )+ // array reference | '.' TEXT_ARG_ID_ELEMENT | ) ; protected ARG : ( TREE_ELEMENT | STRING | CHAR | INT_OR_FLOAT ) (options {greedy=true;} : (WS)? ( '+'| '-' | '*' | '/' ) (WS)? ARG )* ; protected ID : ('a'..'z'|'A'..'Z'|'_') (options {greedy=true;} : ('a'..'z'|'A'..'Z'|'0'..'9'|'_'))* ; protected VAR_ASSIGN : '=' { // inform the code generator that an assignment was done to // AST root for the rule if invoker set refRuleRoot. if ( LA(1)!='=' && transInfo!=null && transInfo.refRuleRoot!=null ) { transInfo.assignToRoot=true; } } ; protected COMMENT : (SL_COMMENT | ML_COMMENT) { } ; protected SL_COMMENT : "//" { /* rewrite comment symbol */ $setText("#"); } ( options {greedy=false;}:. )* { // do nothing } ('\n'|"\r\n"|'\r') { newline(); } ; protected IGNWS : ( ' ' | '\t' )* ; protected ML_COMMENT : "/*" { /* rewrite comment symbol */ $setText("#"); } ( options {greedy=false;} : '\r' '\n' IGNWS! { newline(); $append("# "); } | '\r' IGNWS! { newline(); $append("# "); } | '\n' IGNWS! { newline(); $append("# "); } | . )* { /* force a newline (MK: should actually be the same newline as * was matched earlier in the block comment*/ $append("\n"); } "*/"! 
; protected CHAR : '\'' ( ESC | ~'\'' ) '\'' ; protected STRING : '"' (ESC|~'"')* '"' ; protected ESC : '\\' ( 'n' | 'r' | 't' | 'b' | 'f' | '"' | '\'' | '\\' | ('0'..'3') ( options {greedy=true;} : DIGIT ( options {greedy=true;} : DIGIT )? )? | ('4'..'7') (options {greedy=true;}:DIGIT)? ) ; protected DIGIT : '0'..'9' ; protected INT : (DIGIT)+ ; protected INT_OR_FLOAT : (options {greedy=true;}:DIGIT)+ ( options {greedy=true;} : '.' (options {greedy=true;}:DIGIT)* | 'L' | 'l' )? ; protected WS : ( options {greedy=true;} : ' ' | '\t' | '\r' '\n' {newline();} | '\r' {newline();} | '\n' {newline();} )+ ; nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/actions/python/code.g000066400000000000000000000051351161462365500251650ustar00rootroot00000000000000// This file is part of PyANTLR. See LICENSE.txt for license // details..........Copyright (C) Wolfgang Haefelinger, 2004. // // $Id$ header { package antlr.actions.python; } { import java.io.StringReader; import antlr.collections.impl.Vector; import antlr.*; } class CodeLexer extends Lexer; options { k=2; charVocabulary='\3'..'\377'; testLiterals=false; interactive=true; } { protected int lineOffset = 0; private Tool antlrTool; // The ANTLR tool public CodeLexer ( String s, String fname, int line, Tool tool ) { this(new StringReader(s)); setLine(line); setFilename(fname); this.antlrTool = tool; } public void setLineOffset(int lineOffset) { setLine(lineOffset); } public void reportError(RecognitionException e) { antlrTool.error( "Syntax error in action: "+e, getFilename(),getLine(),getColumn()); } public void reportError(String s) { antlrTool.error(s,getFilename(),getLine(),getColumn()); } public void reportWarning(String s) { if ( getFilename()==null ) { antlrTool.warning(s); } else { antlrTool.warning(s,getFilename(),getLine(), getColumn()); } } } // rules are protected because we don't care about nextToken(). public ACTION : ( STUFF )* ; // stuff in between #(...) 
and #id items protected STUFF : COMMENT | "\r\n" { newline(); } | '\r' { newline(); } | '\n' { newline(); } | '/' ~('/'|'*') // non-comment start '/' | ~('/'|'\n'|'\r') ; protected COMMENT : SL_COMMENT | ML_COMMENT ; protected SL_COMMENT : "//"! { /* rewrite comment symbol */ $append("#"); } ( options {greedy=false;}:. )* ('\n'|"\r\n"|'\r') { newline(); } ; protected IGNWS : ( ' ' | '\t' )* ; protected ML_COMMENT { int offset = 0; } : "/*"! { /* rewrite comment symbol */ $append("#"); } ( options {greedy=false;} : '\r' '\n' IGNWS! { newline(); $append("# "); } | '\r' IGNWS! { newline(); $append("# "); } | '\n' IGNWS! { newline(); $append("# "); } | . )* { /* force a newline */ $append("\n"); } "*/"! ; nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/antlr.g000066400000000000000000000515001161462365500224070ustar00rootroot00000000000000header { package antlr; } { import java.util.Enumeration; import java.io.DataInputStream; import java.io.InputStream; import java.io.FileInputStream; import java.io.IOException; } /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/antlr.g#1 $ */ class ANTLRParser extends Parser; options { exportVocab=ANTLR; defaultErrorHandler=false; k=2; } tokens { "tokens"; } { private static final boolean DEBUG_PARSER = false; ANTLRGrammarParseBehavior behavior; Tool antlrTool; protected int blockNesting= -1; public ANTLRParser( TokenBuffer tokenBuf, ANTLRGrammarParseBehavior behavior_, Tool tool_ ) { super(tokenBuf, 1); tokenNames = _tokenNames; behavior = behavior_; antlrTool = tool_; } public void reportError(String s) { antlrTool.error(s, getFilename(), -1, -1); } public void reportError(RecognitionException e) { reportError(e, e.getErrorMessage()); } public void reportError(RecognitionException e, String s) { antlrTool.error(s, e.getFilename(), e.getLine(), e.getColumn()); } public void 
reportWarning(String s) { antlrTool.warning(s, getFilename(), -1, -1); } private boolean lastInRule() throws TokenStreamException { if ( blockNesting==0 && (LA(1)==SEMI || LA(1)==LITERAL_exception || LA(1)==OR) ) { return true; } return false; } private void checkForMissingEndRule(Token label) { if ( label.getColumn()==1 ) { antlrTool.warning("did you forget to terminate previous rule?", getFilename(), label.getLine(), label.getColumn()); } } } grammar : ( { n = null; // RK: prevent certain orders of header actions // overwriting eachother. } "header" (n:STRING_LITERAL)? h:ACTION { // store the header action // FIXME: 'n' should be checked for validity behavior.refHeaderAction(n,h); } )* ( fileOptionsSpec )? ( classDef )* EOF ; exception catch [RecognitionException ex] { reportError(ex, "rule grammar trapped:\n"+ex.toString()); consumeUntil(EOF); } classDef {String doc=null;} : ( a:ACTION { behavior.refPreambleAction(a);} )? ( d:DOC_COMMENT {doc=d.getText();} )? ( ("lexclass" | "class" id "extends" "Lexer" ) => lexerSpec[doc] | ( "class" id "extends" "TreeParser" ) => treeParserSpec[doc] | parserSpec[doc] ) rules { behavior.endGrammar(); } ; exception catch [RecognitionException ex] { if ( ex instanceof NoViableAltException ) { NoViableAltException e = (NoViableAltException)ex; // RK: These probably generate inconsequent error messages... // have to see how this comes out.. 
if ( e.token.getType()==DOC_COMMENT ) { reportError(ex, "JAVADOC comments may only prefix rules and grammars"); } else { reportError(ex, "rule classDef trapped:\n"+ex.toString()); } } else { reportError(ex, "rule classDef trapped:\n"+ex.toString()); } behavior.abortGrammar(); boolean consuming = true; // consume everything until the next class definition or EOF while (consuming) { consume(); switch(LA(1)) { case LITERAL_class: case LITERAL_lexclass: case EOF: consuming = false; break; } } } fileOptionsSpec { Token idTok; Token value; } : OPTIONS ( idTok = id ASSIGN value = optionValue { behavior.setFileOption(idTok, value,getInputState().filename); } SEMI )* RCURLY ; parserOptionsSpec { Token idTok; Token value; } : OPTIONS ( idTok = id ASSIGN value = optionValue { behavior.setGrammarOption(idTok, value); } SEMI )* RCURLY ; treeParserOptionsSpec { Token idTok; Token value; } : OPTIONS ( idTok = id ASSIGN value = optionValue { behavior.setGrammarOption(idTok, value); } SEMI )* RCURLY ; lexerOptionsSpec { Token idTok; Token value; BitSet b; } : OPTIONS ( // Special case for vocabulary option because it has a bit-set "charVocabulary" ASSIGN b = charSet SEMI { behavior.setCharVocabulary(b); } | idTok = id ASSIGN value = optionValue { behavior.setGrammarOption(idTok, value); } SEMI )* RCURLY ; subruleOptionsSpec { Token idTok; Token value; } : OPTIONS ( idTok = id ASSIGN value = optionValue { behavior.setSubruleOption(idTok, value); } SEMI )* RCURLY ; // optionValue returns a Token which may be one of several things: // STRING_LITERAL -- a quoted string // CHAR_LITERAL -- a single quoted character // INT -- an integer // RULE_REF or TOKEN_REF -- an identifier optionValue returns [ Token retval ] { retval = null; } : retval = qualifiedID | sl:STRING_LITERAL { retval = sl; } | cl:CHAR_LITERAL { retval = cl; } | il:INT { retval = il; } ; charSet returns [ BitSet b ] { b = null; BitSet tmpSet = null; } : // TODO: generate a bit set b = setBlockElement ( OR tmpSet = 
setBlockElement { b.orInPlace(tmpSet); } )* ; setBlockElement returns [ BitSet b ] { b = null; int rangeMin = 0; } : c1:CHAR_LITERAL { rangeMin = ANTLRLexer.tokenTypeForCharLiteral(c1.getText()); b = BitSet.of(rangeMin); } ( RANGE c2:CHAR_LITERAL { int rangeMax = ANTLRLexer.tokenTypeForCharLiteral(c2.getText()); if (rangeMax < rangeMin) { antlrTool.error("Malformed range line ", getFilename(), c1.getLine(), c1.getColumn()); } for (int i = rangeMin+1; i <= rangeMax; i++) { b.add(i); } } )? ; tokensSpec : TOKENS ( ( {s1=null;} t1:TOKEN_REF ( ASSIGN s1:STRING_LITERAL )? {behavior.defineToken(t1, s1);} (tokensSpecOptions[t1])? | s3:STRING_LITERAL {behavior.defineToken(null, s3);} (tokensSpecOptions[s3])? ) SEMI )+ RCURLY ; tokensSpecOptions[Token t] { Token o=null, v=null; } : OPEN_ELEMENT_OPTION o=id ASSIGN v=optionValue {behavior.refTokensSpecElementOption(t,o,v);} ( SEMI o=id ASSIGN v=optionValue {behavior.refTokensSpecElementOption(t,o,v);} )* CLOSE_ELEMENT_OPTION ; superClass returns [String sup] {sup=null;} : LPAREN { sup = LT(1).getText(); sup = StringUtils.stripFrontBack(sup, "\"", "\""); } (STRING_LITERAL) RPAREN ; parserSpec[String doc] { Token idTok; String sup=null; } : "class" idTok = id ( "extends" "Parser" (sup=superClass)? | { antlrTool.warning("use 'class X extends Parser'", getFilename(), idTok.getLine(), idTok.getColumn()); // System.out.println("warning: line " + // idTok.getLine() + ": use 'class X extends Parser'"); } ) {behavior.startParser(getFilename(), idTok, sup, doc);} SEMI (parserOptionsSpec)? { behavior.endOptions(); } (tokensSpec)? ( a:ACTION {behavior.refMemberAction(a);} )? 
; lexerSpec[String doc] { Token idTok; String sup=null; } : ( lc:"lexclass" idTok = id { antlrTool.warning("lexclass' is deprecated; use 'class X extends Lexer'", getFilename(), lc.getLine(), lc.getColumn()); // System.out.println("warning: line " + lc.getLine() + ": 'lexclass' is deprecated; use 'class X extends Lexer'"); } | "class" idTok = id "extends" "Lexer" (sup=superClass)? ) {behavior.startLexer(getFilename(), idTok,sup,doc);} SEMI (lexerOptionsSpec)? { behavior.endOptions(); } (tokensSpec)? ( a:ACTION {behavior.refMemberAction(a);} )? ; treeParserSpec[String doc] { Token idTok; String sup=null; } : "class" idTok = id "extends" "TreeParser" (sup=superClass)? {behavior.startTreeWalker(getFilename(), idTok,sup,doc);} SEMI (treeParserOptionsSpec)? { behavior.endOptions(); } (tokensSpec)? ( a:ACTION {behavior.refMemberAction(a);} )? ; rules : ( options { // limitation of appox LL(k) says ambig upon // DOC_COMMENT TOKEN_REF, but that's an impossible sequence warnWhenFollowAmbig=false; } : rule )+ ; rule { String access="public"; Token idTok; String doc=null; boolean ruleAutoGen = true; blockNesting = -1; // block increments, so -1 to make rule at level 0 } : ( d:DOC_COMMENT {doc=d.getText();} )? ( p1:"protected" {access=p1.getText();} | p2:"public" {access=p2.getText();} | p3:"private" {access=p3.getText();} )? idTok = id ( BANG { ruleAutoGen = false; } )? { behavior.defineRuleName(idTok, access, ruleAutoGen, doc); } ( aa:ARG_ACTION { behavior.refArgAction(aa); } )? ( "returns" rt:ARG_ACTION { behavior.refReturnAction(rt); } )? ( throwsSpec )? ( ruleOptionsSpec )? (a:ACTION {behavior.refInitAction(a);})? COLON block SEMI ( exceptionGroup )? 
{behavior.endRule(idTok.getText());} ; /* // // for now, syntax error in rule aborts the whole grammar // exception catch [ParserException ex] { behavior.abortRule(idTok); behavior.hasError(); // Consume until something that looks like end of a rule consume(); while (LA(1) != SEMI && LA(1) != EOF) { consume(); } consume(); } */ ruleOptionsSpec { Token idTok; Token value; } : OPTIONS ( idTok = id ASSIGN value = optionValue { behavior.setRuleOption(idTok, value); } SEMI )* RCURLY ; throwsSpec { String t=null; Token a,b; } : "throws" a=id {t=a.getText();} ( COMMA b=id {t+=","+b.getText();} )* { behavior.setUserExceptions(t); } ; block : {blockNesting++;} alternative ( OR alternative )* {blockNesting--;} ; alternative { boolean altAutoGen = true; } : (BANG { altAutoGen=false;} )? {behavior.beginAlt(altAutoGen);} ( element )* ( exceptionSpecNoLabel )? {behavior.endAlt();} ; exceptionGroup : { behavior.beginExceptionGroup(); } ( exceptionSpec )+ { behavior.endExceptionGroup(); } ; exceptionSpec { Token labelAction = null; } : "exception" ( aa:ARG_ACTION { labelAction = aa; } )? { behavior.beginExceptionSpec(labelAction); } ( exceptionHandler )* { behavior.endExceptionSpec(); } ; exceptionSpecNoLabel : "exception" { behavior.beginExceptionSpec(null); } ( exceptionHandler )* { behavior.endExceptionSpec(); } ; exceptionHandler { Token exType; Token exName; } : "catch" a1:ARG_ACTION a2:ACTION { behavior.refExceptionHandler(a1, a2); } ; element : elementNoOptionSpec (elementOptionSpec)? ; elementOptionSpec { Token o=null, v=null; } : OPEN_ELEMENT_OPTION o=id ASSIGN v=optionValue {behavior.refElementOption(o,v);} ( SEMI o=id ASSIGN v=optionValue {behavior.refElementOption(o,v);} )* CLOSE_ELEMENT_OPTION ; elementNoOptionSpec { Token label = null; Token assignId = null; Token args = null; int autoGen = GrammarElement.AUTO_GEN_NONE; } : assignId=id ASSIGN ( label=id COLON {checkForMissingEndRule(label);} )? ( rr:RULE_REF ( aa:ARG_ACTION { args=aa; } )? 
( BANG { autoGen = GrammarElement.AUTO_GEN_BANG; } )? { behavior.refRule(assignId, rr, label, args, autoGen); } | // this syntax only valid for lexer tr:TOKEN_REF ( aa2:ARG_ACTION { args=aa2; } )? { behavior.refToken(assignId, tr, label, args, false, autoGen, lastInRule()); } ) | (label=id COLON {checkForMissingEndRule(label);} )? ( r2:RULE_REF ( aa3:ARG_ACTION { args=aa3; } )? ( BANG { autoGen = GrammarElement.AUTO_GEN_BANG; } )? { behavior.refRule(assignId, r2, label, args, autoGen); } | range [label] | terminal [label] | NOT_OP ( notTerminal[label] | ebnf[label,true] ) | ebnf[label,false] ) | a:ACTION { behavior.refAction(a);} | p:SEMPRED { behavior.refSemPred(p);} | tree ; tree : lp:TREE_BEGIN { behavior.beginTree(lp); } rootNode {behavior.beginChildList();} ( element )+ {behavior.endChildList();} RPAREN { behavior.endTree(); } ; rootNode { Token label = null; } : (label=id COLON {checkForMissingEndRule(label);} )? terminal[label] // | range[null] ; ebnf [ Token label, boolean not ] : lp:LPAREN {behavior.beginSubRule(label, lp, not);} ( // 2nd alt and optional branch ambig due to // linear approx LL(2) issue. COLON ACTION // matched correctly in 2nd alt. options { warnWhenFollowAmbig = false; } : subruleOptionsSpec ( aa:ACTION {behavior.refInitAction(aa);} )? COLON | ab:ACTION {behavior.refInitAction(ab);} COLON )? block RPAREN ( ( QUESTION{behavior.optionalSubRule();} | STAR {behavior.zeroOrMoreSubRule();} | PLUS {behavior.oneOrMoreSubRule();} )? ( BANG {behavior.noASTSubRule(); } )? | IMPLIES {behavior.synPred();} ) {behavior.endSubRule();} ; ast_type_spec returns [ int autoGen ] { autoGen = GrammarElement.AUTO_GEN_NONE; } : ( CARET { autoGen = GrammarElement.AUTO_GEN_CARET; } | BANG { autoGen = GrammarElement.AUTO_GEN_BANG; } )? ; range [ Token label ] { Token trLeft=null; Token trRight=null; int autoGen=GrammarElement.AUTO_GEN_NONE; } : crLeft:CHAR_LITERAL RANGE crRight:CHAR_LITERAL ( BANG { autoGen = GrammarElement.AUTO_GEN_BANG; } )? 
{ behavior.refCharRange(crLeft, crRight, label, autoGen, lastInRule()); } | (t:TOKEN_REF{trLeft=t;}|u:STRING_LITERAL{trLeft=u;}) RANGE (v:TOKEN_REF{trRight=v;}|w:STRING_LITERAL{trRight=w;}) autoGen = ast_type_spec { behavior.refTokenRange(trLeft, trRight, label, autoGen, lastInRule()); } ; terminal [ Token label ] { int autoGen=GrammarElement.AUTO_GEN_NONE; Token args=null; } : cl:CHAR_LITERAL ( BANG { autoGen = GrammarElement.AUTO_GEN_BANG; } )? {behavior.refCharLiteral(cl, label, false, autoGen, lastInRule());} | tr:TOKEN_REF autoGen = ast_type_spec // Args are only valid for lexer ( aa:ARG_ACTION { args=aa; } )? { behavior.refToken(null, tr, label, args, false, autoGen, lastInRule()); } | sl:STRING_LITERAL autoGen = ast_type_spec {behavior.refStringLiteral(sl, label, autoGen, lastInRule());} | wi:WILDCARD autoGen = ast_type_spec {behavior.refWildcard(wi, label, autoGen);} ; notTerminal [ Token label ] { int autoGen=GrammarElement.AUTO_GEN_NONE; } : cl:CHAR_LITERAL ( BANG { autoGen = GrammarElement.AUTO_GEN_BANG; } )? {behavior.refCharLiteral(cl, label, true, autoGen, lastInRule());} | tr:TOKEN_REF autoGen = ast_type_spec {behavior.refToken(null, tr, label, null, true, autoGen, lastInRule());} ; /** Match a.b.c.d qualified ids; WILDCARD here is overloaded as * id separator; that is, I need a reference to the '.' token. */ qualifiedID returns [Token qidTok=null] { StringBuffer buf = new StringBuffer(30); Token a; } : a=id {buf.append(a.getText());} ( WILDCARD a=id {buf.append('.'); buf.append(a.getText());} )* { // can use either TOKEN_REF or RULE_REF; should // really create a QID or something instead. 
qidTok = new CommonToken(TOKEN_REF, buf.toString()); qidTok.setLine(a.getLine()); } ; id returns [ Token idTok ] { idTok = null; } : a:TOKEN_REF {idTok = a;} | b:RULE_REF {idTok = b;} ; class ANTLRLexer extends Lexer; options { k=2; exportVocab=ANTLR; testLiterals=false; interactive=true; charVocabulary='\003'..'\377'; } tokens { "options"; } { /**Convert 'c' to an integer char value. */ public static int escapeCharValue(String cs) { //System.out.println("escapeCharValue("+cs+")"); if ( cs.charAt(1)!='\\' ) return 0; switch ( cs.charAt(2) ) { case 'b' : return '\b'; case 'r' : return '\r'; case 't' : return '\t'; case 'n' : return '\n'; case 'f' : return '\f'; case '"' : return '\"'; case '\'' :return '\''; case '\\' :return '\\'; case 'u' : // Unicode char if (cs.length() != 8) { return 0; } else { return Character.digit(cs.charAt(3), 16) * 16 * 16 * 16 + Character.digit(cs.charAt(4), 16) * 16 * 16 + Character.digit(cs.charAt(5), 16) * 16 + Character.digit(cs.charAt(6), 16); } case '0' : case '1' : case '2' : case '3' : if ( cs.length()>5 && Character.isDigit(cs.charAt(4)) ) { return (cs.charAt(2)-'0')*8*8 + (cs.charAt(3)-'0')*8 + (cs.charAt(4)-'0'); } if ( cs.length()>4 && Character.isDigit(cs.charAt(3)) ) { return (cs.charAt(2)-'0')*8 + (cs.charAt(3)-'0'); } return cs.charAt(2)-'0'; case '4' : case '5' : case '6' : case '7' : if ( cs.length()>4 && Character.isDigit(cs.charAt(3)) ) { return (cs.charAt(2)-'0')*8 + (cs.charAt(3)-'0'); } return cs.charAt(2)-'0'; default : return 0; } } public static int tokenTypeForCharLiteral(String lit) { if ( lit.length()>3 ) { // does char contain escape? return escapeCharValue(lit); } else { return lit.charAt(1); } } } WS : ( /* '\r' '\n' can be matched in one alternative or by matching '\r' in one iteration and '\n' in another. I am trying to handle any flavor of newline that comes in, but the language that allows both "\r\n" and "\r" and "\n" to all be valid newline is ambiguous. 
Consequently, the resulting grammar must be ambiguous. I'm shutting this warning off. */ options { generateAmbigWarnings=false; } : ' ' | '\t' | '\r' '\n' {newline();} | '\r' {newline();} | '\n' {newline();} ) { $setType(Token.SKIP); } ; COMMENT : ( SL_COMMENT | t:ML_COMMENT {$setType(t.getType());} ) {if ( _ttype != DOC_COMMENT ) $setType(Token.SKIP);} ; protected SL_COMMENT : "//" ( ~('\n'|'\r') )* ( /* '\r' '\n' can be matched in one alternative or by matching '\r' and then in the next token. The language that allows both "\r\n" and "\r" and "\n" to all be valid newline is ambiguous. Consequently, the resulting grammar must be ambiguous. I'm shutting this warning off. */ options { generateAmbigWarnings=false; } : '\r' '\n' | '\r' | '\n' ) { newline(); } ; protected ML_COMMENT : "/*" ( { LA(2)!='/' }? '*' {$setType(DOC_COMMENT);} | ) ( /* '\r' '\n' can be matched in one alternative or by matching '\r' and then in the next token. The language that allows both "\r\n" and "\r" and "\n" to all be valid newline is ambiguous. Consequently, the resulting grammar must be ambiguous. I'm shutting this warning off. */ options { greedy=false; // make it exit upon "*/" generateAmbigWarnings=false; // shut off newline errors } : '\r' '\n' {newline();} | '\r' {newline();} | '\n' {newline();} | ~('\n'|'\r') )* "*/" ; OPEN_ELEMENT_OPTION : '<' ; CLOSE_ELEMENT_OPTION : '>' ; COMMA : ','; QUESTION : '?' ; TREE_BEGIN : "#(" ; LPAREN: '(' ; RPAREN: ')' ; COLON : ':' ; STAR: '*' ; PLUS: '+' ; ASSIGN : '=' ; IMPLIES : "=>" ; SEMI: ';' ; CARET : '^' ; BANG : '!' ; OR : '|' ; WILDCARD : '.' ; RANGE : ".." ; NOT_OP : '~' ; RCURLY: '}' ; CHAR_LITERAL : '\'' (ESC|~'\'') '\'' ; STRING_LITERAL : '"' (ESC|~'"')* '"' ; protected ESC : '\\' ( 'n' | 'r' | 't' | 'b' | 'f' | 'w' | 'a' | '"' | '\'' | '\\' | ('0'..'3') ( options { warnWhenFollowAmbig = false; }: '0'..'7' ( options { warnWhenFollowAmbig = false; }: '0'..'7' )? )? | ('4'..'7') ( options { warnWhenFollowAmbig = false; }: '0'..'7' )? 
| 'u' XDIGIT XDIGIT XDIGIT XDIGIT ) ; protected DIGIT : '0'..'9' ; protected XDIGIT : '0' .. '9' | 'a' .. 'f' | 'A' .. 'F' ; INT : ('0'..'9')+ ; ARG_ACTION : NESTED_ARG_ACTION { setText(StringUtils.stripFrontBack(getText(), "[", "]")); } ; protected NESTED_ARG_ACTION : '[' ( /* '\r' '\n' can be matched in one alternative or by matching '\r' and then '\n' in the next iteration. */ options { generateAmbigWarnings=false; // shut off newline errors } : NESTED_ARG_ACTION | '\r' '\n' {newline();} | '\r' {newline();} | '\n' {newline();} | CHAR_LITERAL | STRING_LITERAL | ~']' )* ']' ; ACTION {int actionLine=getLine(); int actionColumn = getColumn(); } : NESTED_ACTION ( '?' {_ttype = SEMPRED;} )? { if ( _ttype==ACTION ) { setText(StringUtils.stripFrontBack(getText(), "{", "}")); } else { setText(StringUtils.stripFrontBack(getText(), "{", "}?")); } CommonToken t = new CommonToken(_ttype,$getText); t.setLine(actionLine); // set action line to start t.setColumn(actionColumn); $setToken(t); } ; protected NESTED_ACTION : '{' ( options { greedy = false; // exit upon '}' } : ( options { generateAmbigWarnings = false; // shut off newline warning } : '\r' '\n' {newline();} | '\r' {newline();} | '\n' {newline();} ) | NESTED_ACTION | CHAR_LITERAL | COMMENT | STRING_LITERAL | . )* '}' ; TOKEN_REF options { testLiterals = true; } : 'A'..'Z' ( // scarf as many letters/numbers as you can options { warnWhenFollowAmbig=false; } : 'a'..'z'|'A'..'Z'|'_'|'0'..'9' )* ; // we get a warning here when looking for options '{', but it works right RULE_REF { int t=0; } : t=INTERNAL_RULE_REF {_ttype=t;} ( {t==LITERAL_options}? WS_LOOP ('{' {_ttype = OPTIONS;})? | {t==LITERAL_tokens}? WS_LOOP ('{' {_ttype = TOKENS;})? 
| ) ; protected WS_LOOP : ( // grab as much WS as you can options { greedy=true; } : WS | COMMENT )* ; protected INTERNAL_RULE_REF returns [int t] { t = RULE_REF; } : 'a'..'z' ( // scarf as many letters/numbers as you can options { warnWhenFollowAmbig=false; } : 'a'..'z'|'A'..'Z'|'_'|'0'..'9' )* {t = testLiteralsTable(t);} ; protected WS_OPT : (WS)? ; nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/antlr.javaproj000066400000000000000000000663331161462365500240070ustar00rootroot00000000000000 {88CC221B-28AB-4AF9-90A3-A021B613DC35} COMPILE ActionElement.java COMPILE actions\cpp\ActionLexer.java COMPILE actions\cpp\ActionLexerTokenTypes.java COMPILE actions\csharp\ActionLexer.java COMPILE actions\csharp\ActionLexerTokenTypes.java COMPILE actions\java\ActionLexer.java COMPILE actions\java\ActionLexerTokenTypes.java COMPILE actions\python\ActionLexer.java COMPILE actions\python\ActionLexerTokenTypes.java COMPILE actions\python\CodeLexer.java COMPILE actions\python\CodeLexerTokenTypes.java COMPILE ActionTransInfo.java COMPILE Alternative.java COMPILE AlternativeBlock.java COMPILE AlternativeElement.java COMPILE ANTLRError.java COMPILE ANTLRException.java COMPILE ANTLRGrammarParseBehavior.java COMPILE ANTLRHashString.java COMPILE ANTLRLexer.java COMPILE ANTLRParser.java COMPILE ANTLRStringBuffer.java COMPILE ANTLRTokdefLexer.java COMPILE ANTLRTokdefParser.java COMPILE ANTLRTokdefParserTokenTypes.java COMPILE ANTLRTokenTypes.java COMPILE ASTFactory.java COMPILE ASTIterator.java COMPILE ASTNULLType.java COMPILE ASTPair.java COMPILE ASTVisitor.java COMPILE BaseAST.java COMPILE BlockContext.java COMPILE BlockEndElement.java COMPILE BlockWithImpliedExitPath.java COMPILE build\ANTLR.java COMPILE build\StreamScarfer.java COMPILE build\Tool.java COMPILE ByteBuffer.java COMPILE CharBuffer.java COMPILE CharFormatter.java COMPILE CharLiteralElement.java COMPILE CharQueue.java COMPILE CharRangeElement.java COMPILE CharScanner.java COMPILE CharStreamException.java COMPILE 
CharStreamIOException.java COMPILE CodeGenerator.java COMPILE collections\AST.java COMPILE collections\ASTEnumeration.java COMPILE collections\Enumerator.java COMPILE collections\impl\ASTArray.java COMPILE collections\impl\ASTEnumerator.java COMPILE collections\impl\BitSet.java COMPILE collections\impl\IndexedVector.java COMPILE collections\impl\IntRange.java COMPILE collections\impl\LLCell.java COMPILE collections\impl\LLEnumeration.java COMPILE collections\impl\LList.java COMPILE collections\impl\Vector.java COMPILE collections\impl\VectorEnumeration.java COMPILE collections\impl\VectorEnumerator.java COMPILE collections\List.java COMPILE collections\Stack.java COMPILE CommonAST.java COMPILE CommonASTWithHiddenTokens.java COMPILE CommonHiddenStreamToken.java COMPILE CommonToken.java COMPILE CppBlockFinishingInfo.java COMPILE CppCharFormatter.java COMPILE CppCodeGenerator.java COMPILE CSharpBlockFinishingInfo.java COMPILE CSharpCharFormatter.java COMPILE CSharpCodeGenerator.java COMPILE CSharpNameSpace.java COMPILE debug\DebuggingCharScanner.java COMPILE debug\DebuggingInputBuffer.java COMPILE debug\DebuggingParser.java COMPILE debug\Event.java COMPILE debug\GuessingEvent.java COMPILE debug\InputBufferAdapter.java COMPILE debug\InputBufferEvent.java COMPILE debug\InputBufferEventSupport.java COMPILE debug\InputBufferListener.java COMPILE debug\InputBufferReporter.java COMPILE debug\ListenerBase.java COMPILE debug\LLkDebuggingParser.java COMPILE debug\MessageAdapter.java COMPILE debug\MessageEvent.java COMPILE debug\MessageListener.java COMPILE debug\misc\ASTFrame.java COMPILE debug\misc\JTreeASTModel.java COMPILE debug\misc\JTreeASTPanel.java COMPILE debug\NewLineEvent.java COMPILE debug\NewLineListener.java COMPILE debug\ParserAdapter.java COMPILE debug\ParserController.java COMPILE debug\ParserEventSupport.java COMPILE debug\ParserListener.java COMPILE debug\ParserMatchAdapter.java COMPILE debug\ParserMatchEvent.java COMPILE debug\ParserMatchListener.java 
COMPILE debug\ParserReporter.java COMPILE debug\ParserTokenAdapter.java COMPILE debug\ParserTokenEvent.java COMPILE debug\ParserTokenListener.java COMPILE debug\ParseTreeDebugParser.java COMPILE debug\SemanticPredicateAdapter.java COMPILE debug\SemanticPredicateEvent.java COMPILE debug\SemanticPredicateListener.java COMPILE debug\SyntacticPredicateAdapter.java COMPILE debug\SyntacticPredicateEvent.java COMPILE debug\SyntacticPredicateListener.java COMPILE debug\TraceAdapter.java COMPILE debug\TraceEvent.java COMPILE debug\TraceListener.java COMPILE debug\Tracer.java COMPILE DefaultFileLineFormatter.java COMPILE DefaultToolErrorHandler.java COMPILE DefineGrammarSymbols.java COMPILE DiagnosticCodeGenerator.java COMPILE DocBookCodeGenerator.java COMPILE DumpASTVisitor.java COMPILE ExceptionHandler.java COMPILE ExceptionSpec.java COMPILE FileCopyException.java COMPILE FileLineFormatter.java COMPILE Grammar.java COMPILE GrammarAnalyzer.java COMPILE GrammarAtom.java COMPILE GrammarElement.java COMPILE GrammarSymbol.java COMPILE HTMLCodeGenerator.java COMPILE ImportVocabTokenManager.java COMPILE InputBuffer.java COMPILE JavaBlockFinishingInfo.java COMPILE JavaCharFormatter.java COMPILE JavaCodeGenerator.java COMPILE LexerGrammar.java COMPILE LexerSharedInputState.java COMPILE LLkAnalyzer.java COMPILE LLkGrammarAnalyzer.java COMPILE LLkParser.java COMPILE Lookahead.java COMPILE MakeGrammar.java COMPILE MismatchedCharException.java COMPILE MismatchedTokenException.java COMPILE NameSpace.java COMPILE NoViableAltException.java COMPILE NoViableAltForCharException.java COMPILE OneOrMoreBlock.java COMPILE Parser.java COMPILE ParserGrammar.java COMPILE ParserSharedInputState.java COMPILE ParseTree.java COMPILE ParseTreeRule.java COMPILE ParseTreeToken.java COMPILE preprocessor\Grammar.java COMPILE preprocessor\GrammarFile.java COMPILE preprocessor\Hierarchy.java COMPILE preprocessor\Option.java COMPILE preprocessor\Preprocessor.java COMPILE preprocessor\PreprocessorLexer.java 
COMPILE preprocessor\PreprocessorTokenTypes.java COMPILE preprocessor\Rule.java COMPILE preprocessor\Tool.java COMPILE PreservingFileWriter.java COMPILE PythonBlockFinishingInfo.java COMPILE PythonCharFormatter.java COMPILE PythonCodeGenerator.java COMPILE RecognitionException.java COMPILE RuleBlock.java COMPILE RuleEndElement.java COMPILE RuleRefElement.java COMPILE RuleSymbol.java COMPILE SemanticException.java COMPILE SimpleTokenManager.java COMPILE StringLiteralElement.java COMPILE StringLiteralSymbol.java COMPILE StringUtils.java COMPILE SynPredBlock.java COMPILE Token.java COMPILE TokenBuffer.java COMPILE TokenManager.java COMPILE TokenQueue.java COMPILE TokenRangeElement.java COMPILE TokenRefElement.java COMPILE TokenStream.java COMPILE TokenStreamBasicFilter.java COMPILE TokenStreamException.java COMPILE TokenStreamHiddenTokenFilter.java COMPILE TokenStreamIOException.java COMPILE TokenStreamRecognitionException.java COMPILE TokenStreamRetryException.java COMPILE TokenStreamRewriteEngine.java COMPILE TokenStreamSelector.java COMPILE TokenSymbol.java COMPILE TokenWithIndex.java COMPILE Tool.java COMPILE ToolErrorHandler.java COMPILE TreeBlockContext.java COMPILE TreeElement.java COMPILE TreeParser.java COMPILE TreeParserSharedInputState.java COMPILE TreeSpecifierNode.java COMPILE TreeWalkerGrammar.java COMPILE Version.java COMPILE WildcardElement.java COMPILE ZeroOrMoreBlock.java antlr 1.5 nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/antlr.sln000066400000000000000000000015271161462365500227610ustar00rootroot00000000000000Microsoft Visual Studio Solution File, Format Version 9.00 Project("{85B0C3CB-4F8C-465B-A944-62ABB0F7F898}") = "antlr", "antlr.javaproj", "{88CC221B-28AB-4AF9-90A3-A021B613DC35}" EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution Debug|Any CPU = Debug|Any CPU Release|Any CPU = Release|Any CPU EndGlobalSection GlobalSection(ProjectConfigurationPlatforms) = postSolution 
{88CC221B-28AB-4AF9-90A3-A021B613DC35}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {88CC221B-28AB-4AF9-90A3-A021B613DC35}.Debug|Any CPU.Build.0 = Debug|Any CPU {88CC221B-28AB-4AF9-90A3-A021B613DC35}.Release|Any CPU.ActiveCfg = Release|Any CPU {88CC221B-28AB-4AF9-90A3-A021B613DC35}.Release|Any CPU.Build.0 = Release|Any CPU EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE EndGlobalSection EndGlobal nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/build/000077500000000000000000000000001161462365500222155ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/build/ANTLR.java000066400000000000000000000063711161462365500237470ustar00rootroot00000000000000package antlr.build; import java.io.*; /** Simple class that uses build.Tool to compile ANTLR's Java stuff */ public class ANTLR { public static String compiler = "javac"; public static String jarName = "antlr.jar"; public static String root = "."; public static String[] srcdir = { "antlr", "antlr/actions/cpp", "antlr/actions/java", "antlr/actions/csharp", "antlr/collections", "antlr/collections/impl", "antlr/debug", "antlr/debug/misc", "antlr/preprocessor" }; public ANTLR() { compiler = System.getProperty("antlr.build.compiler", compiler); root = System.getProperty("antlr.build.root", root); } public String getName() { return "ANTLR"; } /** Build ANTLR. 
action on cmd-line matches method name */ public void build(Tool tool) { if ( !rootIsValidANTLRDir(tool) ) { return; } // run ANTLR on its own .g files tool.antlr(root+"/antlr/antlr.g"); tool.antlr(root+"/antlr/tokdef.g"); tool.antlr(root+"/antlr/preprocessor/preproc.g"); tool.antlr(root+"/antlr/actions/java/action.g"); tool.antlr(root+"/antlr/actions/cpp/action.g"); tool.antlr(root+"/antlr/actions/csharp/action.g"); for (int i=0; iMageLang Institute */ public interface Stack { public int height(); public Object pop() throws NoSuchElementException; public void push(Object o); public Object top() throws NoSuchElementException; } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/collections/impl/000077500000000000000000000000001161462365500243755ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/collections/impl/ASTArray.java000066400000000000000000000013321161462365500266650ustar00rootroot00000000000000package antlr.collections.impl; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/collections/impl/ASTArray.java#1 $ */ import antlr.collections.AST; /** ASTArray is a class that allows ANTLR to * generate code that can create and initialize an array * in one expression, like: * (new ASTArray(3)).add(x).add(y).add(z) */ public class ASTArray { public int size = 0; public AST[] array; public ASTArray(int capacity) { array = new AST[capacity]; } public ASTArray add(AST node) { array[size++] = node; return this; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/collections/impl/ASTEnumerator.java000066400000000000000000000021111161462365500277240ustar00rootroot00000000000000package antlr.collections.impl; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: 
//depot/code/org.antlr/release/antlr-2.7.5/antlr/collections/impl/ASTEnumerator.java#1 $ */ import antlr.collections.impl.Vector; import antlr.collections.ASTEnumeration; import antlr.collections.AST; import java.util.NoSuchElementException; public class ASTEnumerator implements antlr.collections.ASTEnumeration { /** The list of root nodes for subtrees that match */ VectorEnumerator nodes; int i = 0; public ASTEnumerator(Vector v) { nodes = new VectorEnumerator(v); } public boolean hasMoreNodes() { synchronized (nodes) { return i <= nodes.vector.lastElement; } } public antlr.collections.AST nextNode() { synchronized (nodes) { if (i <= nodes.vector.lastElement) { return (AST)nodes.vector.data[i++]; } throw new NoSuchElementException("ASTEnumerator"); } } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/collections/impl/BitSet.java000066400000000000000000000344521161462365500264420ustar00rootroot00000000000000package antlr.collections.impl; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/collections/impl/BitSet.java#1 $ */ import antlr.CharFormatter; /**A BitSet to replace java.util.BitSet. * Primary differences are that most set operators return new sets * as opposed to oring and anding "in place". Further, a number of * operations were added. I cannot contain a BitSet because there * is no way to access the internal bits (which I need for speed) * and, because it is final, I cannot subclass to add functionality. * Consider defining set degree. Without access to the bits, I must * call a method n times to test the ith bit...ack! * * Also seems like or() from util is wrong when size of incoming set is bigger * than this.bits.length. * * @author Terence Parr * @author
Pete Wells */ public class BitSet implements Cloneable { protected final static int BITS = 64; // number of bits / long protected final static int NIBBLE = 4; protected final static int LOG_BITS = 6; // 2^6 == 64 /* We will often need to do a mod operator (i mod nbits). Its * turns out that, for powers of two, this mod operation is * same as (i & (nbits-1)). Since mod is slow, we use a * precomputed mod mask to do the mod instead. */ protected final static int MOD_MASK = BITS - 1; /** The actual data bits */ protected long bits[]; /** Construct a bitset of size one word (64 bits) */ public BitSet() { this(BITS); } /** Construction from a static array of longs */ public BitSet(long[] bits_) { bits = bits_; } /** Construct a bitset given the size * @param nbits The size of the bitset in bits */ public BitSet(int nbits) { bits = new long[((nbits - 1) >> LOG_BITS) + 1]; } /** or this element into this set (grow as necessary to accommodate) */ public void add(int el) { //System.out.println("add("+el+")"); int n = wordNumber(el); //System.out.println("word number is "+n); //System.out.println("bits.length "+bits.length); if (n >= bits.length) { growToInclude(el); } bits[n] |= bitMask(el); } public BitSet and(BitSet a) { BitSet s = (BitSet)this.clone(); s.andInPlace(a); return s; } public void andInPlace(BitSet a) { int min = Math.min(bits.length, a.bits.length); for (int i = min - 1; i >= 0; i--) { bits[i] &= a.bits[i]; } // clear all bits in this not present in a (if this bigger than a). 
for (int i = min; i < bits.length; i++) { bits[i] = 0; } } private final static long bitMask(int bitNumber) { int bitPosition = bitNumber & MOD_MASK; // bitNumber mod BITS return 1L << bitPosition; } public void clear() { for (int i = bits.length - 1; i >= 0; i--) { bits[i] = 0; } } public void clear(int el) { int n = wordNumber(el); if (n >= bits.length) { // grow as necessary to accommodate growToInclude(el); } bits[n] &= ~bitMask(el); } public Object clone() { BitSet s; try { s = (BitSet)super.clone(); s.bits = new long[bits.length]; System.arraycopy(bits, 0, s.bits, 0, bits.length); } catch (CloneNotSupportedException e) { throw new InternalError(); } return s; } public int degree() { int deg = 0; for (int i = bits.length - 1; i >= 0; i--) { long word = bits[i]; if (word != 0L) { for (int bit = BITS - 1; bit >= 0; bit--) { if ((word & (1L << bit)) != 0) { deg++; } } } } return deg; } /** code "inherited" from java.util.BitSet */ public boolean equals(Object obj) { if ((obj != null) && (obj instanceof BitSet)) { BitSet set = (BitSet)obj; int n = Math.min(bits.length, set.bits.length); for (int i = n; i-- > 0;) { if (bits[i] != set.bits[i]) { return false; } } if (bits.length > n) { for (int i = bits.length; i-- > n;) { if (bits[i] != 0) { return false; } } } else if (set.bits.length > n) { for (int i = set.bits.length; i-- > n;) { if (set.bits[i] != 0) { return false; } } } return true; } return false; } /** Find ranges in a set element array. @param elems The array of * elements representing the set, usually from Bit Set.toArray(). * @return Vector of ranges. 
*/ public static Vector getRanges(int[] elems) { if (elems.length == 0) { return null; } int begin = elems[0]; int end = elems[elems.length - 1]; if (elems.length <= 2) { // Not enough elements for a range expression return null; } Vector ranges = new Vector(5); // look for ranges for (int i = 0; i < elems.length - 2; i++) { int lastInRange; lastInRange = elems.length - 1; for (int j = i + 1; j < elems.length; j++) { if (elems[j] != elems[j - 1] + 1) { lastInRange = j - 1; break; } } // found a range if (lastInRange - i > 2) { ranges.appendElement(new IntRange(elems[i], elems[lastInRange])); } } return ranges; } /** * Grows the set to a larger number of bits. * @param bit element that must fit in set */ public void growToInclude(int bit) { int newSize = Math.max(bits.length << 1, numWordsToHold(bit)); long newbits[] = new long[newSize]; System.arraycopy(bits, 0, newbits, 0, bits.length); bits = newbits; } public boolean member(int el) { int n = wordNumber(el); if (n >= bits.length) return false; return (bits[n] & bitMask(el)) != 0; } public boolean nil() { for (int i = bits.length - 1; i >= 0; i--) { if (bits[i] != 0) return false; } return true; } public BitSet not() { BitSet s = (BitSet)this.clone(); s.notInPlace(); return s; } public void notInPlace() { for (int i = bits.length - 1; i >= 0; i--) { bits[i] = ~bits[i]; } } /** complement bits in the range 0..maxBit. 
*/ public void notInPlace(int maxBit) { notInPlace(0, maxBit); } /** complement bits in the range minBit..maxBit.*/ public void notInPlace(int minBit, int maxBit) { // make sure that we have room for maxBit growToInclude(maxBit); for (int i = minBit; i <= maxBit; i++) { int n = wordNumber(i); bits[n] ^= bitMask(i); } } private final int numWordsToHold(int el) { return (el >> LOG_BITS) + 1; } public static BitSet of(int el) { BitSet s = new BitSet(el + 1); s.add(el); return s; } /** return this | a in a new set */ public BitSet or(BitSet a) { BitSet s = (BitSet)this.clone(); s.orInPlace(a); return s; } public void orInPlace(BitSet a) { // If this is smaller than a, grow this first if (a.bits.length > bits.length) { setSize(a.bits.length); } int min = Math.min(bits.length, a.bits.length); for (int i = min - 1; i >= 0; i--) { bits[i] |= a.bits[i]; } } // remove this element from this set public void remove(int el) { int n = wordNumber(el); if (n >= bits.length) { growToInclude(el); } bits[n] &= ~bitMask(el); } /** * Sets the size of a set. * @param nwords how many words the new set should be */ private void setSize(int nwords) { long newbits[] = new long[nwords]; int n = Math.min(nwords, bits.length); System.arraycopy(bits, 0, newbits, 0, n); bits = newbits; } public int size() { return bits.length << LOG_BITS; // num words * bits per word } /** return how much space is being used by the bits array not * how many actually have member bits on. */ public int lengthInLongWords() { return bits.length; } /**Is this contained within a? */ public boolean subset(BitSet a) { if (a == null || !(a instanceof BitSet)) return false; return this.and(a).equals(this); } /**Subtract the elements of 'a' from 'this' in-place. * Basically, just turn off all bits of 'this' that are in 'a'. 
*/ public void subtractInPlace(BitSet a) { if (a == null) return; // for all words of 'a', turn off corresponding bits of 'this' for (int i = 0; i < bits.length && i < a.bits.length; i++) { bits[i] &= ~a.bits[i]; } } public int[] toArray() { int[] elems = new int[degree()]; int en = 0; for (int i = 0; i < (bits.length << LOG_BITS); i++) { if (member(i)) { elems[en++] = i; } } return elems; } public long[] toPackedArray() { return bits; } public String toString() { return toString(","); } /** Transform a bit set into a string by formatting each element as an integer * @separator The string to put in between elements * @return A commma-separated list of values */ public String toString(String separator) { String str = ""; for (int i = 0; i < (bits.length << LOG_BITS); i++) { if (member(i)) { if (str.length() > 0) { str += separator; } str = str + i; } } return str; } /** Transform a bit set into a string of characters. * @separator The string to put in between elements * @param formatter An object implementing the CharFormatter interface. * @return A commma-separated list of character constants. */ public String toString(String separator, CharFormatter formatter) { String str = ""; for (int i = 0; i < (bits.length << LOG_BITS); i++) { if (member(i)) { if (str.length() > 0) { str += separator; } str = str + formatter.literalChar(i); } } return str; } /**Create a string representation where instead of integer elements, the * ith element of vocabulary is displayed instead. Vocabulary is a Vector * of Strings. * @separator The string to put in between elements * @return A commma-separated list of character constants. 
*/ public String toString(String separator, Vector vocabulary) { if (vocabulary == null) { return toString(separator); } String str = ""; for (int i = 0; i < (bits.length << LOG_BITS); i++) { if (member(i)) { if (str.length() > 0) { str += separator; } if (i >= vocabulary.size()) { str += ""; } else if (vocabulary.elementAt(i) == null) { str += "<" + i + ">"; } else { str += (String)vocabulary.elementAt(i); } } } return str; } /** * Dump a comma-separated list of the words making up the bit set. * Split each 64 bit number into two more manageable 32 bit numbers. * This generates a comma-separated list of C++-like unsigned long constants. */ public String toStringOfHalfWords() { String s = new String(); for (int i = 0; i < bits.length; i++) { if (i != 0) s += ", "; long tmp = bits[i]; tmp &= 0xFFFFFFFFL; s += (tmp + "UL"); s += ", "; tmp = bits[i] >>> 32; tmp &= 0xFFFFFFFFL; s += (tmp + "UL"); } return s; } /** * Dump a comma-separated list of the words making up the bit set. * This generates a comma-separated list of Java-like long int constants. */ public String toStringOfWords() { String s = new String(); for (int i = 0; i < bits.length; i++) { if (i != 0) s += ", "; s += (bits[i] + "L"); } return s; } /** Print out the bit set but collapse char ranges. 
*/ public String toStringWithRanges(String separator, CharFormatter formatter) { String str = ""; int[] elems = this.toArray(); if (elems.length == 0) { return ""; } // look for ranges int i = 0; while (i < elems.length) { int lastInRange; lastInRange = 0; for (int j = i + 1; j < elems.length; j++) { if (elems[j] != elems[j - 1] + 1) { break; } lastInRange = j; } // found a range if (str.length() > 0) { str += separator; } if (lastInRange - i >= 2) { str += formatter.literalChar(elems[i]); str += ".."; str += formatter.literalChar(elems[lastInRange]); i = lastInRange; // skip past end of range for next range } else { // no range, just print current char and move on str += formatter.literalChar(elems[i]); } i++; } return str; } private final static int wordNumber(int bit) { return bit >> LOG_BITS; // bit / BITS } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/collections/impl/IndexedVector.java000066400000000000000000000040051161462365500300020ustar00rootroot00000000000000package antlr.collections.impl; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/collections/impl/IndexedVector.java#1 $ */ import java.util.Hashtable; import java.util.Enumeration; import antlr.collections.impl.Vector; /** * A simple indexed vector: a normal vector except that you must * specify a key when adding an element. This allows fast lookup * and allows the order of specification to be preserved. */ public class IndexedVector { protected Vector elements; protected Hashtable index; /** * IndexedVector constructor comment. */ public IndexedVector() { elements = new Vector(10); index = new Hashtable(10); } /** * IndexedVector constructor comment. 
* @param size int */ public IndexedVector(int size) { elements = new Vector(size); index = new Hashtable(size); } public synchronized void appendElement(Object key, Object value) { elements.appendElement(value); index.put(key, value); } /** * Returns the element at the specified index. * @param index the index of the desired element * @exception ArrayIndexOutOfBoundsException If an invalid * index was given. */ public Object elementAt(int i) { return elements.elementAt(i); } public Enumeration elements() { return elements.elements(); } public Object getElement(Object key) { Object o = index.get(key); return o; } /** remove element referred to by key NOT value; return false if not found. */ public synchronized boolean removeElement(Object key) { Object value = index.get(key); if (value == null) { return false; } index.remove(key); elements.removeElement(value); return false; } public int size() { return elements.size(); } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/collections/impl/IntRange.java000066400000000000000000000007551161462365500267560ustar00rootroot00000000000000package antlr.collections.impl; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/collections/impl/IntRange.java#1 $ */ public class IntRange { int begin, end; public IntRange(int begin, int end) { this.begin = begin; this.end = end; } public String toString() { return begin + ".." 
+ end; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/collections/impl/LLCell.java000066400000000000000000000011771161462365500263550ustar00rootroot00000000000000package antlr.collections.impl; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/collections/impl/LLCell.java#1 $ */ /**A linked list cell, which contains a ref to the object and next cell. * The data,next members are public to this class, but not outside the * collections.impl package. * * @author Terence Parr * MageLang Institute */ class LLCell { Object data; LLCell next; public LLCell(Object o) { data = o; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/collections/impl/LLEnumeration.java000066400000000000000000000027551161462365500277670ustar00rootroot00000000000000package antlr.collections.impl; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/collections/impl/LLEnumeration.java#1 $ */ import antlr.collections.List; import antlr.collections.Stack; import java.util.Enumeration; import java.util.NoSuchElementException; import antlr.collections.impl.LLCell; /**An enumeration of a LList. Maintains a cursor through the list. * bad things would happen if the list changed via another thread * while we were walking this list. */ final class LLEnumeration implements Enumeration { LLCell cursor; LList list; /**Create an enumeration attached to a LList*/ public LLEnumeration(LList l) { list = l; cursor = list.head; } /** Return true/false depending on whether there are more * elements to enumerate. */ public boolean hasMoreElements() { if (cursor != null) return true; else return false; } /**Get the next element in the enumeration. 
Destructive in that * the returned element is removed from the enumeration. This * does not affect the list itself. * @return the next object in the enumeration. */ public Object nextElement() { if (!hasMoreElements()) throw new NoSuchElementException(); LLCell p = cursor; cursor = cursor.next; return p.data; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/collections/impl/LList.java000066400000000000000000000071531161462365500262750ustar00rootroot00000000000000package antlr.collections.impl; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/collections/impl/LList.java#1 $ */ import antlr.collections.List; import antlr.collections.Stack; import java.util.Enumeration; import java.util.NoSuchElementException; import antlr.collections.impl.LLCell; /**A Linked List Implementation (not thread-safe for simplicity) * (adds to the tail) (has an enumeration) */ public class LList implements List, Stack { protected LLCell head = null, tail = null; protected int length = 0; /** Add an object to the end of the list. * @param o the object to add */ public void add(Object o) { append(o); } /** Append an object to the end of the list. * @param o the object to append */ public void append(Object o) { LLCell n = new LLCell(o); if (length == 0) { head = tail = n; length = 1; } else { tail.next = n; tail = n; length++; } } /**Delete the object at the head of the list. * @return the object found at the head of the list. * @exception NoSuchElementException if the list is empty. */ protected Object deleteHead() throws NoSuchElementException { if (head == null) throw new NoSuchElementException(); Object o = head.data; head = head.next; length--; return o; } /**Get the ith element in the list. * @param i the index (from 0) of the requested element. 
* @return the object at index i * NoSuchElementException is thrown if i out of range */ public Object elementAt(int i) throws NoSuchElementException { int j = 0; for (LLCell p = head; p != null; p = p.next) { if (i == j) return p.data; j++; } throw new NoSuchElementException(); } /**Return an enumeration of the list elements */ public Enumeration elements() { return new LLEnumeration(this); } /** How high is the stack? */ public int height() { return length; } /** Answers whether or not an object is contained in the list * @param o the object to test for inclusion. * @return true if object is contained else false. */ public boolean includes(Object o) { for (LLCell p = head; p != null; p = p.next) { if (p.data.equals(o)) return true; } return false; } // The next two methods make LLQueues and LLStacks easier. /** Insert an object at the head of the list. * @param o the object to add */ protected void insertHead(Object o) { LLCell c = head; head = new LLCell(o); head.next = c; length++; if (tail == null) tail = head; } /**Return the length of the list.*/ public int length() { return length; } /** Pop the top element of the stack off. * @return the top of stack that was popped off. * @exception NoSuchElementException if the stack is empty. */ public Object pop() throws NoSuchElementException { Object o = deleteHead(); return o; } // Satisfy the Stack interface now. /** Push an object onto the stack. 
* @param o the object to push */ public void push(Object o) { insertHead(o); } public Object top() throws NoSuchElementException { if (head == null) throw new NoSuchElementException(); return head.data; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/collections/impl/Vector.java000066400000000000000000000063531161462365500265110ustar00rootroot00000000000000package antlr.collections.impl; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/collections/impl/Vector.java#1 $ */ import java.util.Enumeration; import java.util.NoSuchElementException; import antlr.collections.Enumerator; public class Vector implements Cloneable { protected Object[] data; protected int lastElement = -1; public Vector() { this(10); } public Vector(int size) { data = new Object[size]; } public synchronized void appendElement(Object o) { ensureCapacity(lastElement + 2); data[++lastElement] = o; } /** * Returns the current capacity of the vector. */ public int capacity() { return data.length; } public Object clone() { Vector v = null; try { v = (Vector)super.clone(); } catch (CloneNotSupportedException e) { System.err.println("cannot clone Vector.super"); return null; } v.data = new Object[size()]; System.arraycopy(data, 0, v.data, 0, size()); return v; } /** * Returns the element at the specified index. * @param index the index of the desired element * @exception ArrayIndexOutOfBoundsException If an invalid * index was given. 
*/ public synchronized Object elementAt(int i) { if (i >= data.length) { throw new ArrayIndexOutOfBoundsException(i + " >= " + data.length); } if (i < 0) { throw new ArrayIndexOutOfBoundsException(i + " < 0 "); } return data[i]; } public synchronized Enumeration elements() { return new VectorEnumerator(this); } public synchronized void ensureCapacity(int minIndex) { if (minIndex + 1 > data.length) { Object oldData[] = data; int n = data.length * 2; if (minIndex + 1 > n) { n = minIndex + 1; } data = new Object[n]; System.arraycopy(oldData, 0, data, 0, oldData.length); } } public synchronized boolean removeElement(Object o) { // find element int i; for (i = 0; i <= lastElement && data[i] != o; i++) { ; } if (i <= lastElement) { // if found it data[i] = null; // kill ref for GC int above = lastElement - i; if (above > 0) { System.arraycopy(data, i + 1, data, i, above); } lastElement--; return true; } else { return false; } } public synchronized void setElementAt(Object obj, int i) { if (i >= data.length) { throw new ArrayIndexOutOfBoundsException(i + " >= " + data.length); } data[i] = obj; // track last element in the vector so we can append things if (i > lastElement) { lastElement = i; } } // return number of slots in the vector; e.g., you can set // the 30th element and size() will return 31. public int size() { return lastElement + 1; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/collections/impl/VectorEnumeration.java000066400000000000000000000017571161462365500307230ustar00rootroot00000000000000package antlr.collections.impl; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/collections/impl/VectorEnumeration.java#1 $ */ import java.util.Enumeration; import java.util.NoSuchElementException; import antlr.collections.Enumerator; // based on java.lang.Vector; returns any null indices between non-null ones. 
class VectorEnumeration implements Enumeration { Vector vector; int i; VectorEnumeration(Vector v) { vector = v; i = 0; } public boolean hasMoreElements() { synchronized (vector) { return i <= vector.lastElement; } } public Object nextElement() { synchronized (vector) { if (i <= vector.lastElement) { return vector.data[i++]; } throw new NoSuchElementException("VectorEnumerator"); } } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/collections/impl/VectorEnumerator.java000066400000000000000000000017541161462365500305530ustar00rootroot00000000000000package antlr.collections.impl; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/collections/impl/VectorEnumerator.java#1 $ */ import java.util.Enumeration; import java.util.NoSuchElementException; import antlr.collections.Enumerator; // based on java.lang.Vector; returns any null indices between non-null ones. 
class VectorEnumerator implements Enumeration { Vector vector; int i; VectorEnumerator(Vector v) { vector = v; i = 0; } public boolean hasMoreElements() { synchronized (vector) { return i <= vector.lastElement; } } public Object nextElement() { synchronized (vector) { if (i <= vector.lastElement) { return vector.data[i++]; } throw new NoSuchElementException("VectorEnumerator"); } } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/debug/000077500000000000000000000000001161462365500222045ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/debug/DebuggingCharScanner.java000066400000000000000000000162001161462365500270510ustar00rootroot00000000000000package antlr.debug; import antlr.*; import antlr.collections.*; import antlr.collections.impl.*; import java.io.*; public abstract class DebuggingCharScanner extends CharScanner implements DebuggingParser { private ParserEventSupport parserEventSupport = new ParserEventSupport(this); private boolean _notDebugMode = false; protected String ruleNames[]; protected String semPredNames[]; public DebuggingCharScanner(InputBuffer cb) { super(cb); } public DebuggingCharScanner(LexerSharedInputState state) { super(state); } public void addMessageListener(MessageListener l) { parserEventSupport.addMessageListener(l); } public void addNewLineListener(NewLineListener l) { parserEventSupport.addNewLineListener(l); } public void addParserListener(ParserListener l) { parserEventSupport.addParserListener(l); } public void addParserMatchListener(ParserMatchListener l) { parserEventSupport.addParserMatchListener(l); } public void addParserTokenListener(ParserTokenListener l) { parserEventSupport.addParserTokenListener(l); } public void addSemanticPredicateListener(SemanticPredicateListener l) { parserEventSupport.addSemanticPredicateListener(l); } public void addSyntacticPredicateListener(SyntacticPredicateListener l) { parserEventSupport.addSyntacticPredicateListener(l); } public void 
addTraceListener(TraceListener l) { parserEventSupport.addTraceListener(l); } public void consume() throws CharStreamException { int la_1 = -99; try {la_1 = LA(1);} catch (CharStreamException ignoreAnIOException) {} super.consume(); parserEventSupport.fireConsume(la_1); } protected void fireEnterRule(int num, int data) { if (isDebugMode()) parserEventSupport.fireEnterRule(num,inputState.guessing,data); } protected void fireExitRule(int num, int ttype) { if (isDebugMode()) parserEventSupport.fireExitRule(num,inputState.guessing, ttype); } protected boolean fireSemanticPredicateEvaluated(int type, int num, boolean condition) { if (isDebugMode()) return parserEventSupport.fireSemanticPredicateEvaluated(type,num,condition,inputState.guessing); else return condition; } protected void fireSyntacticPredicateFailed() { if (isDebugMode()) parserEventSupport.fireSyntacticPredicateFailed(inputState.guessing); } protected void fireSyntacticPredicateStarted() { if (isDebugMode()) parserEventSupport.fireSyntacticPredicateStarted(inputState.guessing); } protected void fireSyntacticPredicateSucceeded() { if (isDebugMode()) parserEventSupport.fireSyntacticPredicateSucceeded(inputState.guessing); } public String getRuleName(int num) { return ruleNames[num]; } public String getSemPredName(int num) { return semPredNames[num]; } public synchronized void goToSleep() { try {wait();} catch (InterruptedException e) { } } public boolean isDebugMode() { return !_notDebugMode; } public char LA(int i) throws CharStreamException { char la = super.LA(i); parserEventSupport.fireLA(i, la); return la; } protected Token makeToken(int t) { // do something with char buffer??? 
// try { // Token tok = (Token)tokenObjectClass.newInstance(); // tok.setType(t); // // tok.setText(getText()); done in generated lexer now // tok.setLine(line); // return tok; // } // catch (InstantiationException ie) { // panic("can't instantiate a Token"); // } // catch (IllegalAccessException iae) { // panic("Token class is not accessible"); // } return super.makeToken(t); } public void match(char c) throws MismatchedCharException, CharStreamException { char la_1 = LA(1); try { super.match(c); parserEventSupport.fireMatch(c, inputState.guessing); } catch (MismatchedCharException e) { if (inputState.guessing == 0) parserEventSupport.fireMismatch(la_1, c, inputState.guessing); throw e; } } public void match(BitSet b) throws MismatchedCharException, CharStreamException { String text = this.text.toString(); char la_1 = LA(1); try { super.match(b); parserEventSupport.fireMatch(la_1, b, text, inputState.guessing); } catch (MismatchedCharException e) { if (inputState.guessing == 0) parserEventSupport.fireMismatch(la_1, b, text, inputState.guessing); throw e; } } public void match(String s) throws MismatchedCharException, CharStreamException { StringBuffer la_s = new StringBuffer(""); int len = s.length(); // peek at the next len worth of characters try { for(int i = 1; i <= len; i++) { la_s.append(super.LA(i)); } } catch(Exception ignoreMe) {} try { super.match(s); parserEventSupport.fireMatch(s, inputState.guessing); } catch (MismatchedCharException e) { if (inputState.guessing == 0) parserEventSupport.fireMismatch(la_s.toString(), s, inputState.guessing); throw e; } } public void matchNot(char c) throws MismatchedCharException, CharStreamException { char la_1 = LA(1); try { super.matchNot(c); parserEventSupport.fireMatchNot(la_1, c, inputState.guessing); } catch (MismatchedCharException e) { if (inputState.guessing == 0) parserEventSupport.fireMismatchNot(la_1, c, inputState.guessing); throw e; } } public void matchRange(char c1, char c2) throws 
MismatchedCharException, CharStreamException { char la_1 = LA(1); try { super.matchRange(c1,c2); parserEventSupport.fireMatch(la_1, ""+c1+c2, inputState.guessing); } catch (MismatchedCharException e) { if (inputState.guessing == 0) parserEventSupport.fireMismatch(la_1, ""+c1+c2, inputState.guessing); throw e; } } public void newline() { super.newline(); parserEventSupport.fireNewLine(getLine()); } public void removeMessageListener(MessageListener l) { parserEventSupport.removeMessageListener(l); } public void removeNewLineListener(NewLineListener l) { parserEventSupport.removeNewLineListener(l); } public void removeParserListener(ParserListener l) { parserEventSupport.removeParserListener(l); } public void removeParserMatchListener(ParserMatchListener l) { parserEventSupport.removeParserMatchListener(l); } public void removeParserTokenListener(ParserTokenListener l) { parserEventSupport.removeParserTokenListener(l); } public void removeSemanticPredicateListener(SemanticPredicateListener l) { parserEventSupport.removeSemanticPredicateListener(l); } public void removeSyntacticPredicateListener(SyntacticPredicateListener l) { parserEventSupport.removeSyntacticPredicateListener(l); } public void removeTraceListener(TraceListener l) { parserEventSupport.removeTraceListener(l); } /** Report exception errors caught in nextToken() */ public void reportError(MismatchedCharException e) { parserEventSupport.fireReportError(e); super.reportError(e); } /** Parser error-reporting function can be overridden in subclass */ public void reportError(String s) { parserEventSupport.fireReportError(s); super.reportError(s); } /** Parser warning-reporting function can be overridden in subclass */ public void reportWarning(String s) { parserEventSupport.fireReportWarning(s); super.reportWarning(s); } public void setDebugMode(boolean value) { _notDebugMode = !value; } public void setupDebugging() { } public synchronized void wakeUp() { notify(); } } 
nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/debug/DebuggingInputBuffer.java000066400000000000000000000032021161462365500271110ustar00rootroot00000000000000package antlr.debug; import antlr.InputBuffer; import antlr.CharStreamException; import java.util.Vector; import java.io.IOException; public class DebuggingInputBuffer extends InputBuffer { private InputBuffer buffer; private InputBufferEventSupport inputBufferEventSupport; private boolean debugMode = true; public DebuggingInputBuffer(InputBuffer buffer) { this.buffer = buffer; inputBufferEventSupport = new InputBufferEventSupport(this); } public void addInputBufferListener(InputBufferListener l) { inputBufferEventSupport.addInputBufferListener(l); } public void consume() { char la = ' '; try {la = buffer.LA(1);} catch (CharStreamException e) {} // vaporize it... buffer.consume(); if (debugMode) inputBufferEventSupport.fireConsume(la); } public void fill(int a) throws CharStreamException { buffer.fill(a); } public Vector getInputBufferListeners() { return inputBufferEventSupport.getInputBufferListeners(); } public boolean isDebugMode() { return debugMode; } public boolean isMarked() { return buffer.isMarked(); } public char LA(int i) throws CharStreamException { char la = buffer.LA(i); if (debugMode) inputBufferEventSupport.fireLA(la,i); return la; } public int mark() { int m = buffer.mark(); inputBufferEventSupport.fireMark(m); return m; } public void removeInputBufferListener(InputBufferListener l) { if (inputBufferEventSupport != null) inputBufferEventSupport.removeInputBufferListener(l); } public void rewind(int mark) { buffer.rewind(mark); inputBufferEventSupport.fireRewind(mark); } public void setDebugMode(boolean value) { debugMode = value; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/debug/DebuggingParser.java000066400000000000000000000002651161462365500261220ustar00rootroot00000000000000package antlr.debug; /** * This type was created in VisualAge. 
*/ public interface DebuggingParser { public String getRuleName(int n); public String getSemPredName(int n); } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/debug/Event.java000066400000000000000000000007271161462365500241360ustar00rootroot00000000000000package antlr.debug; import java.util.EventObject; public abstract class Event extends EventObject { private int type; public Event(Object source) { super(source); } public Event(Object source, int type) { super(source); setType(type); } public int getType() { return type; } void setType(int type) { this.type = type; } /** This should NOT be called from anyone other than ParserEventSupport! */ void setValues(int type) { setType(type); } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/debug/GuessingEvent.java000066400000000000000000000010121161462365500256270ustar00rootroot00000000000000package antlr.debug; public abstract class GuessingEvent extends Event { private int guessing; public GuessingEvent(Object source) { super(source); } public GuessingEvent(Object source, int type) { super(source, type); } public int getGuessing() { return guessing; } void setGuessing(int guessing) { this.guessing = guessing; } /** This should NOT be called from anyone other than ParserEventSupport! */ void setValues(int type, int guessing) { super.setValues(type); setGuessing(guessing); } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/debug/InputBufferAdapter.java000066400000000000000000000011221161462365500265750ustar00rootroot00000000000000package antlr.debug; /** A dummy implementation of a CharBufferListener -- this class is not * meant to be used by itself -- it's meant to be subclassed */ public abstract class InputBufferAdapter implements InputBufferListener { public void doneParsing(TraceEvent e) { } /** * charConsumed method comment. */ public void inputBufferConsume(InputBufferEvent e) { } /** * charLA method comment. 
*/ public void inputBufferLA(InputBufferEvent e) { } public void inputBufferMark(InputBufferEvent e) {} public void inputBufferRewind(InputBufferEvent e) {} public void refresh() { } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/debug/InputBufferEvent.java000066400000000000000000000022401161462365500263000ustar00rootroot00000000000000package antlr.debug; public class InputBufferEvent extends Event { char c; int lookaheadAmount; // amount of lookahead public static final int CONSUME = 0; public static final int LA = 1; public static final int MARK = 2; public static final int REWIND = 3; /** * CharBufferEvent constructor comment. * @param source java.lang.Object */ public InputBufferEvent(Object source) { super(source); } /** * CharBufferEvent constructor comment. * @param source java.lang.Object */ public InputBufferEvent(Object source, int type, char c, int lookaheadAmount) { super(source); setValues(type, c, lookaheadAmount); } public char getChar() { return c; } public int getLookaheadAmount() { return lookaheadAmount; } void setChar(char c) { this.c = c; } void setLookaheadAmount(int la) { this.lookaheadAmount = la; } /** This should NOT be called from anyone other than ParserEventSupport! 
*/ void setValues(int type, char c, int la) { super.setValues(type); setChar(c); setLookaheadAmount(la); } public String toString() { return "CharBufferEvent [" + (getType()==CONSUME?"CONSUME, ":"LA, ")+ getChar() + "," + getLookaheadAmount() + "]"; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/debug/InputBufferEventSupport.java000066400000000000000000000051561161462365500277060ustar00rootroot00000000000000package antlr.debug; import java.util.Vector; import antlr.collections.impl.BitSet; import antlr.RecognitionException; public class InputBufferEventSupport { private Object source; private Vector inputBufferListeners; private InputBufferEvent inputBufferEvent; protected static final int CONSUME=0; protected static final int LA=1; protected static final int MARK=2; protected static final int REWIND=3; public InputBufferEventSupport(Object source) { inputBufferEvent = new InputBufferEvent(source); this.source = source; } public void addInputBufferListener(InputBufferListener l) { if (inputBufferListeners == null) inputBufferListeners = new Vector(); inputBufferListeners.addElement(l); } public void fireConsume(char c) { inputBufferEvent.setValues(InputBufferEvent.CONSUME, c, 0); fireEvents(CONSUME, inputBufferListeners); } public void fireEvent(int type, ListenerBase l) { switch(type) { case CONSUME: ((InputBufferListener)l).inputBufferConsume(inputBufferEvent); break; case LA: ((InputBufferListener)l).inputBufferLA(inputBufferEvent); break; case MARK: ((InputBufferListener)l).inputBufferMark(inputBufferEvent); break; case REWIND: ((InputBufferListener)l).inputBufferRewind(inputBufferEvent); break; default: throw new IllegalArgumentException("bad type "+type+" for fireEvent()"); } } public void fireEvents(int type, Vector listeners) { Vector targets=null; ListenerBase l=null; synchronized (this) { if (listeners == null) return; targets = (Vector)listeners.clone(); } if (targets != null) for (int i = 0; i < targets.size(); i++) { l = 
(ListenerBase)targets.elementAt(i); fireEvent(type, l); } } public void fireLA(char c, int la) { inputBufferEvent.setValues(InputBufferEvent.LA, c, la); fireEvents(LA, inputBufferListeners); } public void fireMark(int pos) { inputBufferEvent.setValues(InputBufferEvent.MARK, ' ', pos); fireEvents(MARK, inputBufferListeners); } public void fireRewind(int pos) { inputBufferEvent.setValues(InputBufferEvent.REWIND, ' ', pos); fireEvents(REWIND, inputBufferListeners); } public Vector getInputBufferListeners() { return inputBufferListeners; } protected void refresh(Vector listeners) { Vector v; synchronized (listeners) { v = (Vector)listeners.clone(); } if (v != null) for (int i = 0; i < v.size(); i++) ((ListenerBase)v.elementAt(i)).refresh(); } public void refreshListeners() { refresh(inputBufferListeners); } public void removeInputBufferListener(InputBufferListener l) { if (inputBufferListeners != null) inputBufferListeners.removeElement(l); } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/debug/InputBufferListener.java000066400000000000000000000004411161462365500270050ustar00rootroot00000000000000package antlr.debug; public interface InputBufferListener extends ListenerBase { public void inputBufferConsume(InputBufferEvent e); public void inputBufferLA(InputBufferEvent e); public void inputBufferMark(InputBufferEvent e); public void inputBufferRewind(InputBufferEvent e); } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/debug/InputBufferReporter.java000066400000000000000000000013121161462365500270200ustar00rootroot00000000000000package antlr.debug; public class InputBufferReporter implements InputBufferListener { /** * doneParsing method comment. */ public void doneParsing(TraceEvent e) { } public void inputBufferChanged(InputBufferEvent e) { System.out.println(e); } /** * charBufferConsume method comment. */ public void inputBufferConsume(InputBufferEvent e) { System.out.println(e); } /** * charBufferLA method comment. 
*/ public void inputBufferLA(InputBufferEvent e) { System.out.println(e); } public void inputBufferMark(InputBufferEvent e) { System.out.println(e); } public void inputBufferRewind(InputBufferEvent e) { System.out.println(e); } /** * refresh method comment. */ public void refresh() { } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/debug/LLkDebuggingParser.java000066400000000000000000000170541161462365500265310ustar00rootroot00000000000000package antlr.debug; import antlr.ParserSharedInputState; import antlr.TokenStreamException; import antlr.LLkParser; import antlr.TokenBuffer; import antlr.TokenStream; import antlr.MismatchedTokenException; import antlr.RecognitionException; import antlr.collections.impl.BitSet; import java.io.IOException; import antlr.TokenStreamException; import antlr.debug.ParserEventSupport; import java.lang.reflect.Constructor; public class LLkDebuggingParser extends LLkParser implements DebuggingParser { protected ParserEventSupport parserEventSupport = new ParserEventSupport(this); private boolean _notDebugMode = false; protected String ruleNames[]; protected String semPredNames[]; public LLkDebuggingParser(int k_) { super(k_); } public LLkDebuggingParser(ParserSharedInputState state, int k_) { super(state, k_); } public LLkDebuggingParser(TokenBuffer tokenBuf, int k_) { super(tokenBuf, k_); } public LLkDebuggingParser(TokenStream lexer, int k_) { super(lexer, k_); } public void addMessageListener(MessageListener l) { parserEventSupport.addMessageListener(l); } public void addParserListener(ParserListener l) { parserEventSupport.addParserListener(l); } public void addParserMatchListener(ParserMatchListener l) { parserEventSupport.addParserMatchListener(l); } public void addParserTokenListener(ParserTokenListener l) { parserEventSupport.addParserTokenListener(l); } public void addSemanticPredicateListener(SemanticPredicateListener l) { parserEventSupport.addSemanticPredicateListener(l); } public void 
addSyntacticPredicateListener(SyntacticPredicateListener l) { parserEventSupport.addSyntacticPredicateListener(l); } public void addTraceListener(TraceListener l) { parserEventSupport.addTraceListener(l); } /**Get another token object from the token stream */ public void consume() throws TokenStreamException { int la_1 = -99; la_1 = LA(1); super.consume(); parserEventSupport.fireConsume(la_1); } protected void fireEnterRule(int num,int data) { if (isDebugMode()) parserEventSupport.fireEnterRule(num,inputState.guessing,data); } protected void fireExitRule(int num,int data) { if (isDebugMode()) parserEventSupport.fireExitRule(num,inputState.guessing,data); } protected boolean fireSemanticPredicateEvaluated(int type, int num, boolean condition) { if (isDebugMode()) return parserEventSupport.fireSemanticPredicateEvaluated(type,num,condition,inputState.guessing); else return condition; } protected void fireSyntacticPredicateFailed() { if (isDebugMode()) parserEventSupport.fireSyntacticPredicateFailed(inputState.guessing); } protected void fireSyntacticPredicateStarted() { if (isDebugMode()) parserEventSupport.fireSyntacticPredicateStarted(inputState.guessing); } protected void fireSyntacticPredicateSucceeded() { if (isDebugMode()) parserEventSupport.fireSyntacticPredicateSucceeded(inputState.guessing); } public String getRuleName(int num) { return ruleNames[num]; } public String getSemPredName(int num) { return semPredNames[num]; } public synchronized void goToSleep() { try {wait();} catch (InterruptedException e) { } } public boolean isDebugMode() { return !_notDebugMode; } public boolean isGuessing() { return inputState.guessing > 0; } /** Return the token type of the ith token of lookahead where i=1 * is the current token being examined by the parser (i.e., it * has not been matched yet). 
*/ public int LA(int i) throws TokenStreamException { int la = super.LA(i); parserEventSupport.fireLA(i, la); return la; } /**Make sure current lookahead symbol matches token type t. * Throw an exception upon mismatch, which is catch by either the * error handler or by the syntactic predicate. */ public void match(int t) throws MismatchedTokenException, TokenStreamException { String text = LT(1).getText(); int la_1 = LA(1); try { super.match(t); parserEventSupport.fireMatch(t, text, inputState.guessing); } catch (MismatchedTokenException e) { if (inputState.guessing == 0) parserEventSupport.fireMismatch(la_1, t, text, inputState.guessing); throw e; } } /**Make sure current lookahead symbol matches the given set * Throw an exception upon mismatch, which is catch by either the * error handler or by the syntactic predicate. */ public void match(BitSet b) throws MismatchedTokenException, TokenStreamException { String text = LT(1).getText(); int la_1 = LA(1); try { super.match(b); parserEventSupport.fireMatch(la_1,b, text, inputState.guessing); } catch (MismatchedTokenException e) { if (inputState.guessing == 0) parserEventSupport.fireMismatch(la_1, b, text, inputState.guessing); throw e; } } public void matchNot(int t) throws MismatchedTokenException, TokenStreamException { String text = LT(1).getText(); int la_1 = LA(1); try { super.matchNot(t); parserEventSupport.fireMatchNot(la_1, t, text, inputState.guessing); } catch (MismatchedTokenException e) { if (inputState.guessing == 0) parserEventSupport.fireMismatchNot(la_1, t, text, inputState.guessing); throw e; } } public void removeMessageListener(MessageListener l) { parserEventSupport.removeMessageListener(l); } public void removeParserListener(ParserListener l) { parserEventSupport.removeParserListener(l); } public void removeParserMatchListener(ParserMatchListener l) { parserEventSupport.removeParserMatchListener(l); } public void removeParserTokenListener(ParserTokenListener l) { 
parserEventSupport.removeParserTokenListener(l); } public void removeSemanticPredicateListener(SemanticPredicateListener l) { parserEventSupport.removeSemanticPredicateListener(l); } public void removeSyntacticPredicateListener(SyntacticPredicateListener l) { parserEventSupport.removeSyntacticPredicateListener(l); } public void removeTraceListener(TraceListener l) { parserEventSupport.removeTraceListener(l); } /** Parser error-reporting function can be overridden in subclass */ public void reportError(RecognitionException ex) { parserEventSupport.fireReportError(ex); super.reportError(ex); } /** Parser error-reporting function can be overridden in subclass */ public void reportError(String s) { parserEventSupport.fireReportError(s); super.reportError(s); } /** Parser warning-reporting function can be overridden in subclass */ public void reportWarning(String s) { parserEventSupport.fireReportWarning(s); super.reportWarning(s); } public void setDebugMode(boolean value) { _notDebugMode = !value; } public void setupDebugging(TokenBuffer tokenBuf) { setupDebugging(null, tokenBuf); } public void setupDebugging(TokenStream lexer) { setupDebugging(lexer, null); } /** User can override to do their own debugging */ protected void setupDebugging(TokenStream lexer, TokenBuffer tokenBuf) { setDebugMode(true); // default parser debug setup is ParseView try { try { Class.forName("javax.swing.JButton"); } catch (ClassNotFoundException e) { System.err.println("Swing is required to use ParseView, but is not present in your CLASSPATH"); System.exit(1); } Class c = Class.forName("antlr.parseview.ParseView"); Constructor constructor = c.getConstructor(new Class[] {LLkDebuggingParser.class, TokenStream.class, TokenBuffer.class}); constructor.newInstance(new Object[] {this, lexer, tokenBuf}); } catch(Exception e) { System.err.println("Error initializing ParseView: "+e); System.err.println("Please report this to Scott Stanchfield, thetick@magelang.com"); System.exit(1); } } public 
synchronized void wakeUp() { notify(); } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/debug/ListenerBase.java000066400000000000000000000002611161462365500254260ustar00rootroot00000000000000package antlr.debug; import java.util.EventListener; public interface ListenerBase extends EventListener { public void doneParsing(TraceEvent e); public void refresh(); } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/debug/MessageAdapter.java000066400000000000000000000003611161462365500257340ustar00rootroot00000000000000package antlr.debug; public class MessageAdapter implements MessageListener { public void doneParsing(TraceEvent e) {} public void refresh() {} public void reportError(MessageEvent e) {} public void reportWarning(MessageEvent e) {} } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/debug/MessageEvent.java000066400000000000000000000013161161462365500254360ustar00rootroot00000000000000package antlr.debug; public class MessageEvent extends Event { private String text; public static int WARNING = 0; public static int ERROR = 1; public MessageEvent(Object source) { super(source); } public MessageEvent(Object source, int type, String text) { super(source); setValues(type,text); } public String getText() { return text; } void setText(String text) { this.text = text; } /** This should NOT be called from anyone other than ParserEventSupport! 
*/ void setValues(int type, String text) { super.setValues(type); setText(text); } public String toString() { return "ParserMessageEvent [" + (getType()==WARNING?"warning,":"error,") + getText() + "]"; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/debug/MessageListener.java000066400000000000000000000002501161462365500261360ustar00rootroot00000000000000package antlr.debug; public interface MessageListener extends ListenerBase { public void reportError(MessageEvent e); public void reportWarning(MessageEvent e); } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/debug/NewLineEvent.java000066400000000000000000000010071161462365500254100ustar00rootroot00000000000000package antlr.debug; public class NewLineEvent extends Event { private int line; public NewLineEvent(Object source) { super(source); } public NewLineEvent(Object source, int line) { super(source); setValues(line); } public int getLine() { return line; } void setLine(int line) { this.line = line; } /** This should NOT be called from anyone other than ParserEventSupport! */ void setValues(int line) { setLine(line); } public String toString() { return "NewLineEvent [" + line + "]"; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/debug/NewLineListener.java000066400000000000000000000001731161462365500261170ustar00rootroot00000000000000package antlr.debug; public interface NewLineListener extends ListenerBase { public void hitNewLine(NewLineEvent e); } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/debug/ParseTreeDebugParser.java000066400000000000000000000066221161462365500270730ustar00rootroot00000000000000package antlr.debug; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html */ import antlr.*; import antlr.collections.impl.BitSet; import java.util.Stack; /** Override the standard matching and rule entry/exit routines * to build parse trees. 
This class is useful for 2.7.3 where * you can specify a superclass like * * class TinyCParser extends Parser(ParseTreeDebugParser); */ public class ParseTreeDebugParser extends LLkParser { /** Each new rule invocation must have it's own subtree. Tokens * are added to the current root so we must have a stack of subtree roots. */ protected Stack currentParseTreeRoot = new Stack(); /** Track most recently created parse subtree so that when parsing * is finished, we can get to the root. */ protected ParseTreeRule mostRecentParseTreeRoot = null; /** For every rule replacement with a production, we bump up count. */ protected int numberOfDerivationSteps = 1; // n replacements plus step 0 public ParseTreeDebugParser(int k_) { super(k_); } public ParseTreeDebugParser(ParserSharedInputState state, int k_) { super(state,k_); } public ParseTreeDebugParser(TokenBuffer tokenBuf, int k_) { super(tokenBuf, k_); } public ParseTreeDebugParser(TokenStream lexer, int k_) { super(lexer,k_); } public ParseTree getParseTree() { return mostRecentParseTreeRoot; } public int getNumberOfDerivationSteps() { return numberOfDerivationSteps; } public void match(int i) throws MismatchedTokenException, TokenStreamException { addCurrentTokenToParseTree(); super.match(i); } public void match(BitSet bitSet) throws MismatchedTokenException, TokenStreamException { addCurrentTokenToParseTree(); super.match(bitSet); } public void matchNot(int i) throws MismatchedTokenException, TokenStreamException { addCurrentTokenToParseTree(); super.matchNot(i); } /** This adds LT(1) to the current parse subtree. Note that the match() * routines add the node before checking for correct match. This means * that, upon mismatched token, there will a token node in the tree * corresponding to where that token was expected. For no viable * alternative errors, no node will be in the tree as nothing was * matched() (the lookahead failed to predict an alternative). 
*/ protected void addCurrentTokenToParseTree() throws TokenStreamException { if (inputState.guessing>0) { return; } ParseTreeRule root = (ParseTreeRule)currentParseTreeRoot.peek(); ParseTreeToken tokenNode = null; if ( LA(1)==Token.EOF_TYPE ) { tokenNode = new ParseTreeToken(new antlr.CommonToken("EOF")); } else { tokenNode = new ParseTreeToken(LT(1)); } root.addChild(tokenNode); } /** Create a rule node, add to current tree, and make it current root */ public void traceIn(String s) throws TokenStreamException { if (inputState.guessing>0) { return; } ParseTreeRule subRoot = new ParseTreeRule(s); if ( currentParseTreeRoot.size()>0 ) { ParseTreeRule oldRoot = (ParseTreeRule)currentParseTreeRoot.peek(); oldRoot.addChild(subRoot); } currentParseTreeRoot.push(subRoot); numberOfDerivationSteps++; } /** Pop current root; back to adding to old root */ public void traceOut(String s) throws TokenStreamException { if (inputState.guessing>0) { return; } mostRecentParseTreeRoot = (ParseTreeRule)currentParseTreeRoot.pop(); } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/debug/ParserAdapter.java000066400000000000000000000015761161462365500256150ustar00rootroot00000000000000package antlr.debug; public class ParserAdapter implements ParserListener { public void doneParsing(TraceEvent e) {} public void enterRule(TraceEvent e) {} public void exitRule(TraceEvent e) {} public void parserConsume(ParserTokenEvent e) {} public void parserLA(ParserTokenEvent e) {} public void parserMatch(ParserMatchEvent e) {} public void parserMatchNot(ParserMatchEvent e) {} public void parserMismatch(ParserMatchEvent e) {} public void parserMismatchNot(ParserMatchEvent e) {} public void refresh() {} public void reportError(MessageEvent e) {} public void reportWarning(MessageEvent e) {} public void semanticPredicateEvaluated(SemanticPredicateEvent e) {} public void syntacticPredicateFailed(SyntacticPredicateEvent e) {} public void syntacticPredicateStarted(SyntacticPredicateEvent e) {} public 
void syntacticPredicateSucceeded(SyntacticPredicateEvent e) {} } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/debug/ParserController.java000066400000000000000000000002521161462365500263460ustar00rootroot00000000000000package antlr.debug; public interface ParserController extends ParserListener { public void checkBreak(); public void setParserEventSupport(ParserEventSupport p); } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/debug/ParserEventSupport.java000066400000000000000000000330741161462365500267110ustar00rootroot00000000000000package antlr.debug; import java.util.Vector; import java.util.Hashtable; import java.util.Enumeration; import antlr.collections.impl.BitSet; import antlr.RecognitionException; /** A class to assist in firing parser events * NOTE: I intentionally _did_not_ synchronize the event firing and * add/remove listener methods. This is because the add/remove should * _only_ be called by the parser at its start/end, and the _same_thread_ * should be performing the parsing. This should help performance a tad... 
*/ public class ParserEventSupport { private Object source; private Hashtable doneListeners; private Vector matchListeners; private Vector messageListeners; private Vector tokenListeners; private Vector traceListeners; private Vector semPredListeners; private Vector synPredListeners; private Vector newLineListeners; private ParserMatchEvent matchEvent; private MessageEvent messageEvent; private ParserTokenEvent tokenEvent; private SemanticPredicateEvent semPredEvent; private SyntacticPredicateEvent synPredEvent; private TraceEvent traceEvent; private NewLineEvent newLineEvent; private ParserController controller; protected static final int CONSUME=0; protected static final int ENTER_RULE=1; protected static final int EXIT_RULE=2; protected static final int LA=3; protected static final int MATCH=4; protected static final int MATCH_NOT=5; protected static final int MISMATCH=6; protected static final int MISMATCH_NOT=7; protected static final int REPORT_ERROR=8; protected static final int REPORT_WARNING=9; protected static final int SEMPRED=10; protected static final int SYNPRED_FAILED=11; protected static final int SYNPRED_STARTED=12; protected static final int SYNPRED_SUCCEEDED=13; protected static final int NEW_LINE=14; protected static final int DONE_PARSING=15; private int ruleDepth = 0; public ParserEventSupport(Object source) { matchEvent = new ParserMatchEvent(source); messageEvent = new MessageEvent(source); tokenEvent = new ParserTokenEvent(source); traceEvent = new TraceEvent(source); semPredEvent = new SemanticPredicateEvent(source); synPredEvent = new SyntacticPredicateEvent(source); newLineEvent = new NewLineEvent(source); this.source = source; } public void addDoneListener(ListenerBase l) { if (doneListeners == null) doneListeners = new Hashtable(); Integer i = (Integer)doneListeners.get(l); int val; if (i != null) val = i.intValue() + 1; else val = 1; doneListeners.put(l, new Integer(val)); } public void addMessageListener(MessageListener l) { if 
(messageListeners == null) messageListeners = new Vector(); messageListeners.addElement(l); addDoneListener(l); } public void addNewLineListener(NewLineListener l) { if (newLineListeners == null) newLineListeners = new Vector(); newLineListeners.addElement(l); addDoneListener(l); } public void addParserListener(ParserListener l) { if (l instanceof ParserController) { ((ParserController)l).setParserEventSupport(this); controller = (ParserController)l; } addParserMatchListener(l); addParserTokenListener(l); addMessageListener(l); addTraceListener(l); addSemanticPredicateListener(l); addSyntacticPredicateListener(l); } public void addParserMatchListener(ParserMatchListener l) { if (matchListeners == null) matchListeners = new Vector(); matchListeners.addElement(l); addDoneListener(l); } public void addParserTokenListener(ParserTokenListener l) { if (tokenListeners == null) tokenListeners = new Vector(); tokenListeners.addElement(l); addDoneListener(l); } public void addSemanticPredicateListener(SemanticPredicateListener l) { if (semPredListeners == null) semPredListeners = new Vector(); semPredListeners.addElement(l); addDoneListener(l); } public void addSyntacticPredicateListener(SyntacticPredicateListener l) { if (synPredListeners == null) synPredListeners = new Vector(); synPredListeners.addElement(l); addDoneListener(l); } public void addTraceListener(TraceListener l) { if (traceListeners == null) traceListeners = new Vector(); traceListeners.addElement(l); addDoneListener(l); } public void fireConsume(int value) { tokenEvent.setValues(ParserTokenEvent.CONSUME, 1, value); fireEvents(CONSUME, tokenListeners); } public void fireDoneParsing() { traceEvent.setValues(TraceEvent.DONE_PARSING, 0,0,0); Hashtable targets=null; // Hashtable targets=doneListeners; ListenerBase l=null; synchronized (this) { if (doneListeners == null) return; targets = (Hashtable)doneListeners.clone(); } if (targets != null) { Enumeration e = targets.keys(); while(e.hasMoreElements()) { l = 
(ListenerBase)e.nextElement(); fireEvent(DONE_PARSING, l); } } if (controller != null) controller.checkBreak(); } public void fireEnterRule(int ruleNum, int guessing, int data) { ruleDepth++; traceEvent.setValues(TraceEvent.ENTER, ruleNum, guessing, data); fireEvents(ENTER_RULE, traceListeners); } public void fireEvent(int type, ListenerBase l) { switch(type) { case CONSUME: ((ParserTokenListener)l).parserConsume(tokenEvent); break; case LA: ((ParserTokenListener)l).parserLA(tokenEvent); break; case ENTER_RULE: ((TraceListener)l).enterRule(traceEvent); break; case EXIT_RULE: ((TraceListener)l).exitRule(traceEvent); break; case MATCH: ((ParserMatchListener)l).parserMatch(matchEvent); break; case MATCH_NOT: ((ParserMatchListener)l).parserMatchNot(matchEvent); break; case MISMATCH: ((ParserMatchListener)l).parserMismatch(matchEvent); break; case MISMATCH_NOT: ((ParserMatchListener)l).parserMismatchNot(matchEvent); break; case SEMPRED: ((SemanticPredicateListener)l).semanticPredicateEvaluated(semPredEvent); break; case SYNPRED_STARTED: ((SyntacticPredicateListener)l).syntacticPredicateStarted(synPredEvent); break; case SYNPRED_FAILED: ((SyntacticPredicateListener)l).syntacticPredicateFailed(synPredEvent); break; case SYNPRED_SUCCEEDED: ((SyntacticPredicateListener)l).syntacticPredicateSucceeded(synPredEvent); break; case REPORT_ERROR: ((MessageListener)l).reportError(messageEvent); break; case REPORT_WARNING: ((MessageListener)l).reportWarning(messageEvent); break; case DONE_PARSING: l.doneParsing(traceEvent); break; case NEW_LINE: ((NewLineListener)l).hitNewLine(newLineEvent); break; default: throw new IllegalArgumentException("bad type "+type+" for fireEvent()"); } } public void fireEvents(int type, Vector listeners) { ListenerBase l=null; if (listeners != null) for (int i = 0; i < listeners.size(); i++) { l = (ListenerBase)listeners.elementAt(i); fireEvent(type, l); } if (controller != null) controller.checkBreak(); } public void fireExitRule(int ruleNum, int 
guessing, int data) { traceEvent.setValues(TraceEvent.EXIT, ruleNum, guessing, data); fireEvents(EXIT_RULE, traceListeners); ruleDepth--; if (ruleDepth == 0) fireDoneParsing(); } public void fireLA(int k, int la) { tokenEvent.setValues(ParserTokenEvent.LA, k, la); fireEvents(LA, tokenListeners); } public void fireMatch(char c, int guessing) { matchEvent.setValues(ParserMatchEvent.CHAR, c, new Character(c), null, guessing, false, true); fireEvents(MATCH, matchListeners); } public void fireMatch(char value, BitSet b, int guessing) { matchEvent.setValues(ParserMatchEvent.CHAR_BITSET, value, b, null, guessing, false, true); fireEvents(MATCH, matchListeners); } public void fireMatch(char value, String target, int guessing) { matchEvent.setValues(ParserMatchEvent.CHAR_RANGE, value, target, null, guessing, false, true); fireEvents(MATCH, matchListeners); } public void fireMatch(int value, BitSet b, String text, int guessing) { matchEvent.setValues(ParserMatchEvent.BITSET, value, b, text, guessing, false, true); fireEvents(MATCH, matchListeners); } public void fireMatch(int n, String text, int guessing) { matchEvent.setValues(ParserMatchEvent.TOKEN, n, new Integer(n), text, guessing, false, true); fireEvents(MATCH, matchListeners); } public void fireMatch(String s, int guessing) { matchEvent.setValues(ParserMatchEvent.STRING, 0, s, null, guessing, false, true); fireEvents(MATCH, matchListeners); } public void fireMatchNot(char value, char n, int guessing) { matchEvent.setValues(ParserMatchEvent.CHAR, value, new Character(n), null, guessing, true, true); fireEvents(MATCH_NOT, matchListeners); } public void fireMatchNot(int value, int n, String text, int guessing) { matchEvent.setValues(ParserMatchEvent.TOKEN, value, new Integer(n), text, guessing, true, true); fireEvents(MATCH_NOT, matchListeners); } public void fireMismatch(char value, char n, int guessing) { matchEvent.setValues(ParserMatchEvent.CHAR, value, new Character(n), null, guessing, false, false); 
fireEvents(MISMATCH, matchListeners); } public void fireMismatch(char value, BitSet b, int guessing) { matchEvent.setValues(ParserMatchEvent.CHAR_BITSET, value, b, null, guessing, false, true); fireEvents(MISMATCH, matchListeners); } public void fireMismatch(char value, String target, int guessing) { matchEvent.setValues(ParserMatchEvent.CHAR_RANGE, value, target, null, guessing, false, true); fireEvents(MISMATCH, matchListeners); } public void fireMismatch(int value, int n, String text, int guessing) { matchEvent.setValues(ParserMatchEvent.TOKEN, value, new Integer(n), text, guessing, false, false); fireEvents(MISMATCH, matchListeners); } public void fireMismatch(int value, BitSet b, String text, int guessing) { matchEvent.setValues(ParserMatchEvent.BITSET, value, b, text, guessing, false, true); fireEvents(MISMATCH, matchListeners); } public void fireMismatch(String value, String text, int guessing) { matchEvent.setValues(ParserMatchEvent.STRING, 0, text, value, guessing, false, true); fireEvents(MISMATCH, matchListeners); } public void fireMismatchNot(char value, char c, int guessing) { matchEvent.setValues(ParserMatchEvent.CHAR, value, new Character(c), null, guessing, true, true); fireEvents(MISMATCH_NOT, matchListeners); } public void fireMismatchNot(int value, int n, String text, int guessing) { matchEvent.setValues(ParserMatchEvent.TOKEN, value, new Integer(n), text, guessing, true, true); fireEvents(MISMATCH_NOT, matchListeners); } public void fireNewLine(int line) { newLineEvent.setValues(line); fireEvents(NEW_LINE, newLineListeners); } public void fireReportError(Exception e) { messageEvent.setValues(MessageEvent.ERROR, e.toString()); fireEvents(REPORT_ERROR, messageListeners); } public void fireReportError(String s) { messageEvent.setValues(MessageEvent.ERROR, s); fireEvents(REPORT_ERROR, messageListeners); } public void fireReportWarning(String s) { messageEvent.setValues(MessageEvent.WARNING, s); fireEvents(REPORT_WARNING, messageListeners); } public 
boolean fireSemanticPredicateEvaluated(int type, int condition, boolean result, int guessing) { semPredEvent.setValues(type, condition, result, guessing); fireEvents(SEMPRED, semPredListeners); return result; } public void fireSyntacticPredicateFailed(int guessing) { synPredEvent.setValues(0, guessing); fireEvents(SYNPRED_FAILED, synPredListeners); } public void fireSyntacticPredicateStarted(int guessing) { synPredEvent.setValues(0, guessing); fireEvents(SYNPRED_STARTED, synPredListeners); } public void fireSyntacticPredicateSucceeded(int guessing) { synPredEvent.setValues(0, guessing); fireEvents(SYNPRED_SUCCEEDED, synPredListeners); } protected void refresh(Vector listeners) { Vector v; synchronized (listeners) { v = (Vector)listeners.clone(); } if (v != null) for (int i = 0; i < v.size(); i++) ((ListenerBase)v.elementAt(i)).refresh(); } public void refreshListeners() { refresh(matchListeners); refresh(messageListeners); refresh(tokenListeners); refresh(traceListeners); refresh(semPredListeners); refresh(synPredListeners); } public void removeDoneListener(ListenerBase l) { if (doneListeners == null) return; Integer i = (Integer)doneListeners.get(l); int val=0; if (i != null) val = i.intValue() - 1; if (val == 0) doneListeners.remove(l); else doneListeners.put(l, new Integer(val)); } public void removeMessageListener(MessageListener l) { if (messageListeners != null) messageListeners.removeElement(l); removeDoneListener(l); } public void removeNewLineListener(NewLineListener l) { if (newLineListeners != null) newLineListeners.removeElement(l); removeDoneListener(l); } public void removeParserListener(ParserListener l) { removeParserMatchListener(l); removeMessageListener(l); removeParserTokenListener(l); removeTraceListener(l); removeSemanticPredicateListener(l); removeSyntacticPredicateListener(l); } public void removeParserMatchListener(ParserMatchListener l) { if (matchListeners != null) matchListeners.removeElement(l); removeDoneListener(l); } public void 
removeParserTokenListener(ParserTokenListener l) { if (tokenListeners != null) tokenListeners.removeElement(l); removeDoneListener(l); } public void removeSemanticPredicateListener(SemanticPredicateListener l) { if (semPredListeners != null) semPredListeners.removeElement(l); removeDoneListener(l); } public void removeSyntacticPredicateListener(SyntacticPredicateListener l) { if (synPredListeners != null) synPredListeners.removeElement(l); removeDoneListener(l); } public void removeTraceListener(TraceListener l) { if (traceListeners != null) traceListeners.removeElement(l); removeDoneListener(l); } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/debug/ParserListener.java000066400000000000000000000003711161462365500260120ustar00rootroot00000000000000package antlr.debug; public interface ParserListener extends SemanticPredicateListener, ParserMatchListener, MessageListener, ParserTokenListener, TraceListener, SyntacticPredicateListener { } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/debug/ParserMatchAdapter.java000066400000000000000000000005531161462365500265640ustar00rootroot00000000000000package antlr.debug; public class ParserMatchAdapter implements ParserMatchListener { public void doneParsing(TraceEvent e) {} public void parserMatch(ParserMatchEvent e) {} public void parserMatchNot(ParserMatchEvent e) {} public void parserMismatch(ParserMatchEvent e) {} public void parserMismatchNot(ParserMatchEvent e) {} public void refresh() {} } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/debug/ParserMatchEvent.java000066400000000000000000000036421161462365500262670ustar00rootroot00000000000000package antlr.debug; public class ParserMatchEvent extends GuessingEvent { // NOTE: for a mismatch on type STRING, the "text" is used as the lookahead // value. 
Normally "value" is this public static int TOKEN=0; public static int BITSET=1; public static int CHAR=2; public static int CHAR_BITSET=3; public static int STRING=4; public static int CHAR_RANGE=5; private boolean inverse; private boolean matched; private Object target; private int value; private String text; public ParserMatchEvent(Object source) { super(source); } public ParserMatchEvent(Object source, int type, int value, Object target, String text, int guessing, boolean inverse, boolean matched) { super(source); setValues(type,value,target,text,guessing,inverse,matched); } public Object getTarget() { return target; } public String getText() { return text; } public int getValue() { return value; } public boolean isInverse() { return inverse; } public boolean isMatched() { return matched; } void setInverse(boolean inverse) { this.inverse = inverse; } void setMatched(boolean matched) { this.matched = matched; } void setTarget(Object target) { this.target = target; } void setText(String text) { this.text = text; } void setValue(int value) { this.value = value; } /** This should NOT be called from anyone other than ParserEventSupport! 
*/ void setValues(int type, int value, Object target, String text, int guessing, boolean inverse, boolean matched) { super.setValues(type, guessing); setValue(value); setTarget(target); setInverse(inverse); setMatched(matched); setText(text); } public String toString() { return "ParserMatchEvent [" + (isMatched()?"ok,":"bad,") + (isInverse()?"NOT ":"") + (getType()==TOKEN?"token,":"bitset,") + getValue() + "," + getTarget() + "," + getGuessing() + "]"; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/debug/ParserMatchListener.java000066400000000000000000000004321161462365500267650ustar00rootroot00000000000000package antlr.debug; public interface ParserMatchListener extends ListenerBase { public void parserMatch(ParserMatchEvent e); public void parserMatchNot(ParserMatchEvent e); public void parserMismatch(ParserMatchEvent e); public void parserMismatchNot(ParserMatchEvent e); } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/debug/ParserReporter.java000066400000000000000000000022231161462365500260250ustar00rootroot00000000000000package antlr.debug; public class ParserReporter extends Tracer implements ParserListener { public void parserConsume(ParserTokenEvent e) { System.out.println(indent+e); } public void parserLA(ParserTokenEvent e) { System.out.println(indent+e); } public void parserMatch(ParserMatchEvent e) { System.out.println(indent+e); } public void parserMatchNot(ParserMatchEvent e) { System.out.println(indent+e); } public void parserMismatch(ParserMatchEvent e) { System.out.println(indent+e); } public void parserMismatchNot(ParserMatchEvent e) { System.out.println(indent+e); } public void reportError(MessageEvent e) { System.out.println(indent+e); } public void reportWarning(MessageEvent e) { System.out.println(indent+e); } public void semanticPredicateEvaluated(SemanticPredicateEvent e) { System.out.println(indent+e); } public void syntacticPredicateFailed(SyntacticPredicateEvent e) { System.out.println(indent+e); } public void 
syntacticPredicateStarted(SyntacticPredicateEvent e) { System.out.println(indent+e); } public void syntacticPredicateSucceeded(SyntacticPredicateEvent e) { System.out.println(indent+e); } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/debug/ParserTokenAdapter.java000066400000000000000000000003761161462365500266130ustar00rootroot00000000000000package antlr.debug; public class ParserTokenAdapter implements ParserTokenListener { public void doneParsing(TraceEvent e) {} public void parserConsume(ParserTokenEvent e) {} public void parserLA(ParserTokenEvent e) {} public void refresh() {} } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/debug/ParserTokenEvent.java000066400000000000000000000017421161462365500263120ustar00rootroot00000000000000package antlr.debug; public class ParserTokenEvent extends Event { private int value; private int amount; public static int LA=0; public static int CONSUME=1; public ParserTokenEvent(Object source) { super(source); } public ParserTokenEvent(Object source, int type, int amount, int value) { super(source); setValues(type,amount,value); } public int getAmount() { return amount; } public int getValue() { return value; } void setAmount(int amount) { this.amount = amount; } void setValue(int value) { this.value = value; } /** This should NOT be called from anyone other than ParserEventSupport! 
*/ void setValues(int type, int amount, int value) { super.setValues(type); setAmount(amount); setValue(value); } public String toString() { if (getType()==LA) return "ParserTokenEvent [LA," + getAmount() + "," + getValue() + "]"; else return "ParserTokenEvent [consume,1," + getValue() + "]"; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/debug/ParserTokenListener.java000066400000000000000000000002611161462365500270110ustar00rootroot00000000000000package antlr.debug; public interface ParserTokenListener extends ListenerBase { public void parserConsume(ParserTokenEvent e); public void parserLA(ParserTokenEvent e); } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/debug/SemanticPredicateAdapter.java000066400000000000000000000003601161462365500277330ustar00rootroot00000000000000package antlr.debug; public class SemanticPredicateAdapter implements SemanticPredicateListener { public void doneParsing(TraceEvent e) {} public void refresh() {} public void semanticPredicateEvaluated(SemanticPredicateEvent e) {} } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/debug/SemanticPredicateEvent.java000066400000000000000000000017201161462365500274350ustar00rootroot00000000000000package antlr.debug; public class SemanticPredicateEvent extends GuessingEvent { public static final int VALIDATING=0; public static final int PREDICTING=1; private int condition; private boolean result; public SemanticPredicateEvent(Object source) { super(source); } public SemanticPredicateEvent(Object source, int type) { super(source, type); } public int getCondition() { return condition; } public boolean getResult() { return result; } void setCondition(int condition) { this.condition = condition; } void setResult(boolean result) { this.result = result; } /** This should NOT be called from anyone other than ParserEventSupport! 
*/ void setValues(int type, int condition, boolean result, int guessing) { super.setValues(type, guessing); setCondition(condition); setResult(result); } public String toString() { return "SemanticPredicateEvent [" + getCondition() + "," + getResult() + "," + getGuessing() + "]"; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/debug/SemanticPredicateListener.java000066400000000000000000000002371161462365500301430ustar00rootroot00000000000000package antlr.debug; public interface SemanticPredicateListener extends ListenerBase { public void semanticPredicateEvaluated(SemanticPredicateEvent e); } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/debug/SyntacticPredicateAdapter.java000066400000000000000000000005751161462365500301410ustar00rootroot00000000000000package antlr.debug; public class SyntacticPredicateAdapter implements SyntacticPredicateListener { public void doneParsing(TraceEvent e) {} public void refresh() {} public void syntacticPredicateFailed(SyntacticPredicateEvent e) {} public void syntacticPredicateStarted(SyntacticPredicateEvent e) {} public void syntacticPredicateSucceeded(SyntacticPredicateEvent e) {} } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/debug/SyntacticPredicateEvent.java000066400000000000000000000007511161462365500276360ustar00rootroot00000000000000package antlr.debug; public class SyntacticPredicateEvent extends GuessingEvent { public SyntacticPredicateEvent(Object source) { super(source); } public SyntacticPredicateEvent(Object source, int type) { super(source, type); } /** This should NOT be called from anyone other than ParserEventSupport! 
*/ void setValues(int type, int guessing) { super.setValues(type, guessing); } public String toString() { return "SyntacticPredicateEvent [" + getGuessing() + "]"; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/debug/SyntacticPredicateListener.java000066400000000000000000000004471161462365500303440ustar00rootroot00000000000000package antlr.debug; public interface SyntacticPredicateListener extends ListenerBase { public void syntacticPredicateFailed(SyntacticPredicateEvent e); public void syntacticPredicateStarted(SyntacticPredicateEvent e); public void syntacticPredicateSucceeded(SyntacticPredicateEvent e); } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/debug/TraceAdapter.java000066400000000000000000000003421161462365500254050ustar00rootroot00000000000000package antlr.debug; public class TraceAdapter implements TraceListener { public void doneParsing(TraceEvent e) {} public void enterRule(TraceEvent e) {} public void exitRule(TraceEvent e) {} public void refresh() {} } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/debug/TraceEvent.java000066400000000000000000000017361161462365500251160ustar00rootroot00000000000000package antlr.debug; public class TraceEvent extends GuessingEvent { private int ruleNum; private int data; public static int ENTER=0; public static int EXIT=1; public static int DONE_PARSING=2; public TraceEvent(Object source) { super(source); } public TraceEvent(Object source, int type, int ruleNum, int guessing, int data) { super(source); setValues(type, ruleNum, guessing, data); } public int getData() { return data; } public int getRuleNum() { return ruleNum; } void setData(int data) { this.data = data; } void setRuleNum(int ruleNum) { this.ruleNum = ruleNum; } /** This should NOT be called from anyone other than ParserEventSupport! 
*/ void setValues(int type, int ruleNum, int guessing, int data) { super.setValues(type, guessing); setRuleNum(ruleNum); setData(data); } public String toString() { return "ParserTraceEvent [" + (getType()==ENTER?"enter,":"exit,") + getRuleNum() + "," + getGuessing() +"]"; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/debug/TraceListener.java000066400000000000000000000002331161462365500256110ustar00rootroot00000000000000package antlr.debug; public interface TraceListener extends ListenerBase { public void enterRule(TraceEvent e); public void exitRule(TraceEvent e); } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/debug/Tracer.java000066400000000000000000000007341161462365500242730ustar00rootroot00000000000000package antlr.debug; public class Tracer extends TraceAdapter implements TraceListener { String indent=""; // TBD: should be StringBuffer protected void dedent() { if (indent.length() < 2) indent = ""; else indent = indent.substring(2); } public void enterRule(TraceEvent e) { System.out.println(indent+e); indent(); } public void exitRule(TraceEvent e) { dedent(); System.out.println(indent+e); } protected void indent() { indent += " "; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/debug/misc/000077500000000000000000000000001161462365500231375ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/debug/misc/ASTFrame.java000066400000000000000000000042521161462365500254070ustar00rootroot00000000000000package antlr.debug.misc; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/debug/misc/ASTFrame.java#1 $ */ import antlr.*; import antlr.collections.AST; import java.awt.*; import java.awt.event.*; import javax.swing.*; import javax.swing.event.*; import javax.swing.tree.*; public class ASTFrame extends JFrame { // The initial width and height of the frame 
static final int WIDTH = 200; static final int HEIGHT = 300; class MyTreeSelectionListener implements TreeSelectionListener { public void valueChanged(TreeSelectionEvent event) { TreePath path = event.getPath(); System.out.println("Selected: " + path.getLastPathComponent()); Object elements[] = path.getPath(); for (int i = 0; i < elements.length; i++) { System.out.print("->" + elements[i]); } System.out.println(); } } public ASTFrame(String lab, AST r) { super(lab); // Create the TreeSelectionListener TreeSelectionListener listener = new MyTreeSelectionListener(); JTreeASTPanel tp = new JTreeASTPanel(new JTreeASTModel(r), null); Container content = getContentPane(); content.add(tp, BorderLayout.CENTER); addWindowListener(new WindowAdapter() { public void windowClosing(WindowEvent e) { Frame f = (Frame)e.getSource(); f.setVisible(false); f.dispose(); // System.exit(0); } }); setSize(WIDTH, HEIGHT); } public static void main(String args[]) { // Create the tree nodes ASTFactory factory = new ASTFactory(); CommonAST r = (CommonAST)factory.create(0, "ROOT"); r.addChild((CommonAST)factory.create(0, "C1")); r.addChild((CommonAST)factory.create(0, "C2")); r.addChild((CommonAST)factory.create(0, "C3")); ASTFrame frame = new ASTFrame("AST JTree Example", r); frame.setVisible(true); } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/debug/misc/JTreeASTModel.java000066400000000000000000000050441161462365500263470ustar00rootroot00000000000000package antlr.debug.misc; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/debug/misc/JTreeASTModel.java#1 $ */ import antlr.collections.AST; import javax.swing.*; import javax.swing.event.*; import javax.swing.tree.*; public class JTreeASTModel implements TreeModel { AST root = null; public JTreeASTModel(AST t) { if (t == null) { throw new IllegalArgumentException("root is null"); } root 
= t; } public void addTreeModelListener(TreeModelListener l) { } public Object getChild(Object parent, int index) { if (parent == null) { return null; } AST p = (AST)parent; AST c = p.getFirstChild(); if (c == null) { throw new ArrayIndexOutOfBoundsException("node has no children"); } int i = 0; while (c != null && i < index) { c = c.getNextSibling(); i++; } return c; } public int getChildCount(Object parent) { if (parent == null) { throw new IllegalArgumentException("root is null"); } AST p = (AST)parent; AST c = p.getFirstChild(); int i = 0; while (c != null) { c = c.getNextSibling(); i++; } return i; } public int getIndexOfChild(Object parent, Object child) { if (parent == null || child == null) { throw new IllegalArgumentException("root or child is null"); } AST p = (AST)parent; AST c = p.getFirstChild(); if (c == null) { throw new ArrayIndexOutOfBoundsException("node has no children"); } int i = 0; while (c != null && c != child) { c = c.getNextSibling(); i++; } if (c == child) { return i; } throw new java.util.NoSuchElementException("node is not a child"); } public Object getRoot() { return root; } public boolean isLeaf(Object node) { if (node == null) { throw new IllegalArgumentException("node is null"); } AST t = (AST)node; return t.getFirstChild() == null; } public void removeTreeModelListener(TreeModelListener l) { } public void valueForPathChanged(TreePath path, Object newValue) { System.out.println("heh, who is calling this mystery method?"); } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/debug/misc/JTreeASTPanel.java000066400000000000000000000020221161462365500263370ustar00rootroot00000000000000package antlr.debug.misc; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/debug/misc/JTreeASTPanel.java#1 $ */ import java.awt.*; import javax.swing.*; import javax.swing.tree.*; import 
javax.swing.event.*; public class JTreeASTPanel extends JPanel { JTree tree; public JTreeASTPanel(TreeModel tm, TreeSelectionListener listener) { // use a layout that will stretch tree to panel size setLayout(new BorderLayout()); // Create tree tree = new JTree(tm); // Change line style tree.putClientProperty("JTree.lineStyle", "Angled"); // Add TreeSelectionListener if (listener != null) tree.addTreeSelectionListener(listener); // Put tree in a scrollable pane's viewport JScrollPane sp = new JScrollPane(); sp.getViewport().add(tree); add(sp, BorderLayout.CENTER); } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/preprocessor/000077500000000000000000000000001161462365500236445ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/preprocessor/Grammar.java000066400000000000000000000232251161462365500261010ustar00rootroot00000000000000package antlr.preprocessor; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/preprocessor/Grammar.java#1 $ */ import antlr.collections.impl.IndexedVector; import java.util.Hashtable; import java.util.Enumeration; import java.io.IOException; class Grammar { protected String name; protected String fileName; // where does it come from? protected String superGrammar; // null if no super class protected String type; // lexer? parser? tree parser? protected IndexedVector rules; // text of rules as they were read in protected IndexedVector options;// rule options protected String tokenSection; // the tokens{} stuff protected String preambleAction;// action right before grammar protected String memberAction; // action inside grammar protected Hierarchy hier; // hierarchy of grammars protected boolean predefined = false; // one of the predefined grammars? 
protected boolean alreadyExpanded = false; protected boolean specifiedVocabulary = false; // found importVocab option? /** if not derived from another grammar, might still specify a non-ANTLR * class to derive from like this "class T extends Parser(MyParserClass);" */ protected String superClass = null; protected String importVocab = null; protected String exportVocab = null; protected antlr.Tool antlrTool; public Grammar(antlr.Tool tool, String name, String superGrammar, IndexedVector rules) { this.name = name; this.superGrammar = superGrammar; this.rules = rules; this.antlrTool = tool; } public void addOption(Option o) { if (options == null) { // if not already there, create it options = new IndexedVector(); } options.appendElement(o.getName(), o); } public void addRule(Rule r) { rules.appendElement(r.getName(), r); } /** Copy all nonoverridden rules, vocabulary, and options into this grammar from * supergrammar chain. The change is made in place; e.g., this grammar's vector * of rules gets bigger. This has side-effects: all grammars on path to * root of hierarchy are expanded also. */ public void expandInPlace() { // if this grammar already expanded, just return if (alreadyExpanded) { return; } // Expand super grammar first (unless it's a predefined or subgrammar of predefined) Grammar superG = getSuperGrammar(); if (superG == null) return; // error (didn't provide superclass) if (exportVocab == null) { // if no exportVocab for this grammar, make it same as grammar name exportVocab = getName(); } if (superG.isPredefined()) return; // can't expand Lexer, Parser, ... superG.expandInPlace(); // expand current grammar now. 
alreadyExpanded = true; // track whether a grammar file needed to have a grammar expanded GrammarFile gf = hier.getFile(getFileName()); gf.setExpanded(true); // Copy rules from supergrammar into this grammar IndexedVector inhRules = superG.getRules(); for (Enumeration e = inhRules.elements(); e.hasMoreElements();) { Rule r = (Rule)e.nextElement(); inherit(r, superG); } // Copy options from supergrammar into this grammar // Modify tokdef options so that they point to dir of enclosing grammar IndexedVector inhOptions = superG.getOptions(); if (inhOptions != null) { for (Enumeration e = inhOptions.elements(); e.hasMoreElements();) { Option o = (Option)e.nextElement(); inherit(o, superG); } } // add an option to load the superGrammar's output vocab if ((options != null && options.getElement("importVocab") == null) || options == null) { // no importVocab found, add one that grabs superG's output vocab Option inputV = new Option("importVocab", superG.exportVocab + ";", this); addOption(inputV); // copy output vocab file to current dir String originatingGrFileName = superG.getFileName(); String path = antlrTool.pathToFile(originatingGrFileName); String superExportVocabFileName = path + superG.exportVocab + antlr.CodeGenerator.TokenTypesFileSuffix + antlr.CodeGenerator.TokenTypesFileExt; String newImportVocabFileName = antlrTool.fileMinusPath(superExportVocabFileName); if (path.equals("." 
+ System.getProperty("file.separator"))) { // don't copy tokdef file onto itself (must be current directory) // System.out.println("importVocab file same dir; leaving as " + superExportVocabFileName); } else { try { antlrTool.copyFile(superExportVocabFileName, newImportVocabFileName); } catch (IOException io) { antlrTool.toolError("cannot find/copy importVocab file " + superExportVocabFileName); return; } } } // copy member action from supergrammar into this grammar inherit(superG.memberAction, superG); } public String getFileName() { return fileName; } public String getName() { return name; } public IndexedVector getOptions() { return options; } public IndexedVector getRules() { return rules; } public Grammar getSuperGrammar() { if (superGrammar == null) return null; Grammar g = (Grammar)hier.getGrammar(superGrammar); return g; } public String getSuperGrammarName() { return superGrammar; } public String getType() { return type; } public void inherit(Option o, Grammar superG) { // do NOT inherit importVocab/exportVocab options under any circumstances if (o.getName().equals("importVocab") || o.getName().equals("exportVocab")) { return; } Option overriddenOption = null; if (options != null) { // do we even have options? overriddenOption = (Option)options.getElement(o.getName()); } // if overridden, do not add to this grammar if (overriddenOption == null) { // not overridden addOption(o); // copy option into this grammar--not overridden } } public void inherit(Rule r, Grammar superG) { // if overridden, do not add to this grammar Rule overriddenRule = (Rule)rules.getElement(r.getName()); if (overriddenRule != null) { // rule is overridden in this grammar. if (!overriddenRule.sameSignature(r)) { // warn if different sig antlrTool.warning("rule " + getName() + "." + overriddenRule.getName() + " has different signature than " + superG.getName() + "." 
+ overriddenRule.getName()); } } else { // not overridden, copy rule into this addRule(r); } } public void inherit(String memberAction, Grammar superG) { if (this.memberAction != null) return; // do nothing if already have member action if (memberAction != null) { // don't have one here, use supergrammar's action this.memberAction = memberAction; } } public boolean isPredefined() { return predefined; } public void setFileName(String f) { fileName = f; } public void setHierarchy(Hierarchy hier) { this.hier = hier; } public void setMemberAction(String a) { memberAction = a; } public void setOptions(IndexedVector options) { this.options = options; } public void setPreambleAction(String a) { preambleAction = a; } public void setPredefined(boolean b) { predefined = b; } public void setTokenSection(String tk) { tokenSection = tk; } public void setType(String t) { type = t; } public String toString() { StringBuffer s = new StringBuffer(10000); if (preambleAction != null) { s.append(preambleAction); } if (superGrammar == null) { return "class " + name + ";"; } if ( superClass!=null ) { // replace with specified superclass not actual grammar // user must make sure that the superclass derives from super grammar class s.append("class " + name + " extends " + superClass + ";"); } else { s.append("class " + name + " extends " + type + ";"); } s.append( System.getProperty("line.separator") + System.getProperty("line.separator")); if (options != null) { s.append(Hierarchy.optionsToString(options)); } if (tokenSection != null) { s.append(tokenSection + "\n"); } if (memberAction != null) { s.append(memberAction + System.getProperty("line.separator")); } for (int i = 0; i < rules.size(); i++) { Rule r = (Rule)rules.elementAt(i); if (!getName().equals(r.enclosingGrammar.getName())) { s.append("// inherited from grammar " + r.enclosingGrammar.getName() + System.getProperty("line.separator")); } s.append(r + System.getProperty("line.separator") + System.getProperty("line.separator")); 
} return s.toString(); } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/preprocessor/GrammarFile.java000066400000000000000000000047521161462365500267050ustar00rootroot00000000000000package antlr.preprocessor; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/preprocessor/GrammarFile.java#1 $ */ import antlr.collections.impl.IndexedVector; import java.util.Enumeration; import java.io.*; /** Stores header action, grammar preamble, file options, and * list of grammars in the file */ public class GrammarFile { protected String fileName; protected String headerAction = ""; protected IndexedVector options; protected IndexedVector grammars; protected boolean expanded = false; // any grammars expanded within? protected antlr.Tool tool; public GrammarFile(antlr.Tool tool, String f) { fileName = f; grammars = new IndexedVector(); this.tool = tool; } public void addGrammar(Grammar g) { grammars.appendElement(g.getName(), g); } public void generateExpandedFile() throws IOException { if (!expanded) { return; // don't generate if nothing got expanded } String expandedFileName = nameForExpandedGrammarFile(this.getName()); // create the new grammar file with expanded grammars PrintWriter expF = tool.openOutputFile(expandedFileName); expF.println(toString()); expF.close(); } public IndexedVector getGrammars() { return grammars; } public String getName() { return fileName; } public String nameForExpandedGrammarFile(String f) { if (expanded) { // strip path to original input, make expanded file in current dir return "expanded" + tool.fileMinusPath(f); } else { return f; } } public void setExpanded(boolean exp) { expanded = exp; } public void addHeaderAction(String a) { headerAction += a + System.getProperty("line.separator"); } public void setOptions(IndexedVector o) { options = o; } public String toString() { String h = 
headerAction == null ? "" : headerAction; String o = options == null ? "" : Hierarchy.optionsToString(options); StringBuffer s = new StringBuffer(10000); s.append(h); s.append(o); for (Enumeration e = grammars.elements(); e.hasMoreElements();) { Grammar g = (Grammar)e.nextElement(); s.append(g.toString()); } return s.toString(); } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/preprocessor/Hierarchy.java000066400000000000000000000122651161462365500264330ustar00rootroot00000000000000package antlr.preprocessor; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/preprocessor/Hierarchy.java#1 $ */ import antlr.collections.impl.IndexedVector; import java.util.Hashtable; import java.util.Enumeration; import java.io.*; import antlr.*; import antlr.preprocessor.Grammar; public class Hierarchy { protected Grammar LexerRoot = null; protected Grammar ParserRoot = null; protected Grammar TreeParserRoot = null; protected Hashtable symbols; // table of grammars protected Hashtable files; // table of grammar files read in protected antlr.Tool antlrTool; public Hierarchy(antlr.Tool tool) { this.antlrTool = tool; LexerRoot = new Grammar(tool, "Lexer", null, null); ParserRoot = new Grammar(tool, "Parser", null, null); TreeParserRoot = new Grammar(tool, "TreeParser", null, null); symbols = new Hashtable(10); files = new Hashtable(10); LexerRoot.setPredefined(true); ParserRoot.setPredefined(true); TreeParserRoot.setPredefined(true); symbols.put(LexerRoot.getName(), LexerRoot); symbols.put(ParserRoot.getName(), ParserRoot); symbols.put(TreeParserRoot.getName(), TreeParserRoot); } public void addGrammar(Grammar gr) { gr.setHierarchy(this); // add grammar to hierarchy symbols.put(gr.getName(), gr); // add grammar to file. 
GrammarFile f = getFile(gr.getFileName()); f.addGrammar(gr); } public void addGrammarFile(GrammarFile gf) { files.put(gf.getName(), gf); } public void expandGrammarsInFile(String fileName) { GrammarFile f = getFile(fileName); for (Enumeration e = f.getGrammars().elements(); e.hasMoreElements();) { Grammar g = (Grammar)e.nextElement(); g.expandInPlace(); } } public Grammar findRoot(Grammar g) { if (g.getSuperGrammarName() == null) { // at root return g; } // return root of super. Grammar sg = g.getSuperGrammar(); if (sg == null) return g; // return this grammar if super missing return findRoot(sg); } public GrammarFile getFile(String fileName) { return (GrammarFile)files.get(fileName); } public Grammar getGrammar(String gr) { return (Grammar)symbols.get(gr); } public static String optionsToString(IndexedVector options) { String s = "options {" + System.getProperty("line.separator"); for (Enumeration e = options.elements(); e.hasMoreElements();) { s += (Option)e.nextElement() + System.getProperty("line.separator"); } s += "}" + System.getProperty("line.separator") + System.getProperty("line.separator"); return s; } public void readGrammarFile(String file) throws FileNotFoundException { Reader grStream = new BufferedReader(new FileReader(file)); addGrammarFile(new GrammarFile(antlrTool, file)); // Create the simplified grammar lexer/parser PreprocessorLexer ppLexer = new PreprocessorLexer(grStream); ppLexer.setFilename(file); Preprocessor pp = new Preprocessor(ppLexer); pp.setTool(antlrTool); pp.setFilename(file); // populate the hierarchy with class(es) read in try { pp.grammarFile(this, file); } catch (TokenStreamException io) { antlrTool.toolError("Token stream error reading grammar(s):\n" + io); } catch (ANTLRException se) { antlrTool.toolError("error reading grammar(s):\n" + se); } } /** Return true if hierarchy is complete, false if not */ public boolean verifyThatHierarchyIsComplete() { boolean complete = true; // Make a pass to ensure all grammars are defined 
for (Enumeration e = symbols.elements(); e.hasMoreElements();) { Grammar c = (Grammar)e.nextElement(); if (c.getSuperGrammarName() == null) { continue; // at root: ignore predefined roots } Grammar superG = c.getSuperGrammar(); if (superG == null) { antlrTool.toolError("grammar " + c.getSuperGrammarName() + " not defined"); complete = false; symbols.remove(c.getName()); // super not defined, kill sub } } if (!complete) return false; // Make another pass to set the 'type' field of each grammar // This makes it easy later to ask a grammar what its type // is w/o having to search hierarchy. for (Enumeration e = symbols.elements(); e.hasMoreElements();) { Grammar c = (Grammar)e.nextElement(); if (c.getSuperGrammarName() == null) { continue; // ignore predefined roots } c.setType(findRoot(c).getName()); } return true; } public antlr.Tool getTool() { return antlrTool; } public void setTool(antlr.Tool antlrTool) { this.antlrTool = antlrTool; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/preprocessor/Option.java000066400000000000000000000020151161462365500257550ustar00rootroot00000000000000package antlr.preprocessor; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/preprocessor/Option.java#1 $ */ import antlr.collections.impl.Vector; class Option { protected String name; protected String rhs; protected Grammar enclosingGrammar; public Option(String n, String rhs, Grammar gr) { name = n; this.rhs = rhs; setEnclosingGrammar(gr); } public Grammar getEnclosingGrammar() { return enclosingGrammar; } public String getName() { return name; } public String getRHS() { return rhs; } public void setEnclosingGrammar(Grammar g) { enclosingGrammar = g; } public void setName(String n) { name = n; } public void setRHS(String rhs) { this.rhs = rhs; } public String toString() { return "\t" + name + "=" + rhs; } } 
nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/preprocessor/Preprocessor.java000066400000000000000000000405411161462365500272010ustar00rootroot00000000000000// $ANTLR : "preproc.g" -> "Preprocessor.java"$ package antlr.preprocessor; import antlr.TokenBuffer; import antlr.TokenStreamException; import antlr.TokenStreamIOException; import antlr.ANTLRException; import antlr.LLkParser; import antlr.Token; import antlr.TokenStream; import antlr.RecognitionException; import antlr.NoViableAltException; import antlr.MismatchedTokenException; import antlr.SemanticException; import antlr.ParserSharedInputState; import antlr.collections.impl.BitSet; import antlr.collections.impl.IndexedVector; import java.util.Hashtable; import antlr.preprocessor.Grammar; public class Preprocessor extends antlr.LLkParser implements PreprocessorTokenTypes { // This chunk of error reporting code provided by Brian Smith private antlr.Tool antlrTool; /** In order to make it so existing subclasses don't break, we won't require * that the antlr.Tool instance be passed as a constructor element. 
Instead, * the antlr.Tool instance should register itself via {@link #initTool(antlr.Tool)} * @throws IllegalStateException if a tool has already been registered * @since 2.7.2 */ public void setTool(antlr.Tool tool) { if (antlrTool == null) { antlrTool = tool; } else { throw new IllegalStateException("antlr.Tool already registered"); } } /** @since 2.7.2 */ protected antlr.Tool getTool() { return antlrTool; } /** Delegates the error message to the tool if any was registered via * {@link #initTool(antlr.Tool)} * @since 2.7.2 */ public void reportError(String s) { if (getTool() != null) { getTool().error(s, getFilename(), -1, -1); } else { super.reportError(s); } } /** Delegates the error message to the tool if any was registered via * {@link #initTool(antlr.Tool)} * @since 2.7.2 */ public void reportError(RecognitionException e) { if (getTool() != null) { getTool().error(e.getErrorMessage(), e.getFilename(), e.getLine(), e.getColumn()); } else { super.reportError(e); } } /** Delegates the warning message to the tool if any was registered via * {@link #initTool(antlr.Tool)} * @since 2.7.2 */ public void reportWarning(String s) { if (getTool() != null) { getTool().warning(s, getFilename(), -1, -1); } else { super.reportWarning(s); } } protected Preprocessor(TokenBuffer tokenBuf, int k) { super(tokenBuf,k); tokenNames = _tokenNames; } public Preprocessor(TokenBuffer tokenBuf) { this(tokenBuf,1); } protected Preprocessor(TokenStream lexer, int k) { super(lexer,k); tokenNames = _tokenNames; } public Preprocessor(TokenStream lexer) { this(lexer,1); } public Preprocessor(ParserSharedInputState state) { super(state,1); tokenNames = _tokenNames; } public final void grammarFile( Hierarchy hier, String file ) throws RecognitionException, TokenStreamException { Token hdr = null; Grammar gr; IndexedVector opt=null; try { // for error handling { _loop265: do { if ((LA(1)==HEADER_ACTION)) { hdr = LT(1); match(HEADER_ACTION); hier.getFile(file).addHeaderAction(hdr.getText()); } 
else { break _loop265; } } while (true); } { switch ( LA(1)) { case OPTIONS_START: { opt=optionSpec(null); break; } case EOF: case ACTION: case LITERAL_class: { break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } { _loop268: do { if ((LA(1)==ACTION||LA(1)==LITERAL_class)) { gr=class_def(file, hier); if ( gr!=null && opt!=null ) { hier.getFile(file).setOptions(opt); } if ( gr!=null ) { gr.setFileName(file); hier.addGrammar(gr); } } else { break _loop268; } } while (true); } match(Token.EOF_TYPE); } catch (RecognitionException ex) { reportError(ex); consume(); consumeUntil(_tokenSet_0); } } public final IndexedVector optionSpec( Grammar gr ) throws RecognitionException, TokenStreamException { IndexedVector options; Token op = null; Token rhs = null; options = new IndexedVector(); try { // for error handling match(OPTIONS_START); { _loop280: do { if ((LA(1)==ID)) { op = LT(1); match(ID); rhs = LT(1); match(ASSIGN_RHS); Option newOp = new Option(op.getText(),rhs.getText(),gr); options.appendElement(newOp.getName(),newOp); if ( gr!=null && op.getText().equals("importVocab") ) { gr.specifiedVocabulary = true; gr.importVocab = rhs.getText(); } else if ( gr!=null && op.getText().equals("exportVocab") ) { // don't want ';' included in outputVocab. // This is heinously inconsistent! Ugh. 
gr.exportVocab = rhs.getText().substring(0,rhs.getText().length()-1); gr.exportVocab = gr.exportVocab.trim(); } } else { break _loop280; } } while (true); } match(RCURLY); } catch (RecognitionException ex) { reportError(ex); consume(); consumeUntil(_tokenSet_1); } return options; } public final Grammar class_def( String file, Hierarchy hier ) throws RecognitionException, TokenStreamException { Grammar gr; Token preamble = null; Token sub = null; Token sup = null; Token tk = null; Token memberA = null; gr=null; IndexedVector rules = new IndexedVector(100); IndexedVector classOptions = null; String sc = null; try { // for error handling { switch ( LA(1)) { case ACTION: { preamble = LT(1); match(ACTION); break; } case LITERAL_class: { break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } match(LITERAL_class); sub = LT(1); match(ID); match(LITERAL_extends); sup = LT(1); match(ID); { switch ( LA(1)) { case SUBRULE_BLOCK: { sc=superClass(); break; } case SEMI: { break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } match(SEMI); gr = (Grammar)hier.getGrammar(sub.getText()); if ( gr!=null ) { // antlr.Tool.toolError("redefinition of grammar "+gr.getName()+" ignored"); gr=null; throw new SemanticException("redefinition of grammar "+sub.getText(), file, sub.getLine(), sub.getColumn()); } else { gr = new Grammar(hier.getTool(), sub.getText(), sup.getText(), rules); gr.superClass=sc; if ( preamble!=null ) { gr.setPreambleAction(preamble.getText()); } } { switch ( LA(1)) { case OPTIONS_START: { classOptions=optionSpec(gr); break; } case ACTION: case ID: case TOKENS_SPEC: case LITERAL_protected: case LITERAL_private: case LITERAL_public: { break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } if ( gr!=null ) { gr.setOptions(classOptions); } { switch ( LA(1)) { case TOKENS_SPEC: { tk = LT(1); match(TOKENS_SPEC); gr.setTokenSection(tk.getText()); break; } case ACTION: case ID: case LITERAL_protected: 
case LITERAL_private: case LITERAL_public: { break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } { switch ( LA(1)) { case ACTION: { memberA = LT(1); match(ACTION); gr.setMemberAction(memberA.getText()); break; } case ID: case LITERAL_protected: case LITERAL_private: case LITERAL_public: { break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } { int _cnt277=0; _loop277: do { if ((_tokenSet_2.member(LA(1)))) { rule(gr); } else { if ( _cnt277>=1 ) { break _loop277; } else {throw new NoViableAltException(LT(1), getFilename());} } _cnt277++; } while (true); } } catch (RecognitionException ex) { reportError(ex); consume(); consumeUntil(_tokenSet_3); } return gr; } public final String superClass() throws RecognitionException, TokenStreamException { String sup; sup=LT(1).getText(); try { // for error handling match(SUBRULE_BLOCK); } catch (RecognitionException ex) { reportError(ex); consume(); consumeUntil(_tokenSet_4); } return sup; } public final void rule( Grammar gr ) throws RecognitionException, TokenStreamException { Token r = null; Token arg = null; Token ret = null; Token init = null; Token blk = null; IndexedVector o = null; // options for rule String vis = null; boolean bang=false; String eg=null, thr=""; try { // for error handling { switch ( LA(1)) { case LITERAL_protected: { match(LITERAL_protected); vis="protected"; break; } case LITERAL_private: { match(LITERAL_private); vis="private"; break; } case LITERAL_public: { match(LITERAL_public); vis="public"; break; } case ID: { break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } r = LT(1); match(ID); { switch ( LA(1)) { case BANG: { match(BANG); bang=true; break; } case ACTION: case OPTIONS_START: case ARG_ACTION: case LITERAL_returns: case RULE_BLOCK: case LITERAL_throws: { break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } { switch ( LA(1)) { case ARG_ACTION: { arg = LT(1); match(ARG_ACTION); break; } 
case ACTION: case OPTIONS_START: case LITERAL_returns: case RULE_BLOCK: case LITERAL_throws: { break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } { switch ( LA(1)) { case LITERAL_returns: { match(LITERAL_returns); ret = LT(1); match(ARG_ACTION); break; } case ACTION: case OPTIONS_START: case RULE_BLOCK: case LITERAL_throws: { break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } { switch ( LA(1)) { case LITERAL_throws: { thr=throwsSpec(); break; } case ACTION: case OPTIONS_START: case RULE_BLOCK: { break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } { switch ( LA(1)) { case OPTIONS_START: { o=optionSpec(null); break; } case ACTION: case RULE_BLOCK: { break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } { switch ( LA(1)) { case ACTION: { init = LT(1); match(ACTION); break; } case RULE_BLOCK: { break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } blk = LT(1); match(RULE_BLOCK); eg=exceptionGroup(); String rtext = blk.getText()+eg; Rule ppr = new Rule(r.getText(),rtext,o,gr); ppr.setThrowsSpec(thr); if ( arg!=null ) { ppr.setArgs(arg.getText()); } if ( ret!=null ) { ppr.setReturnValue(ret.getText()); } if ( init!=null ) { ppr.setInitAction(init.getText()); } if ( bang ) { ppr.setBang(); } ppr.setVisibility(vis); if ( gr!=null ) { gr.addRule(ppr); } } catch (RecognitionException ex) { reportError(ex); consume(); consumeUntil(_tokenSet_5); } } public final String throwsSpec() throws RecognitionException, TokenStreamException { String t; Token a = null; Token b = null; t="throws "; try { // for error handling match(LITERAL_throws); a = LT(1); match(ID); t+=a.getText(); { _loop291: do { if ((LA(1)==COMMA)) { match(COMMA); b = LT(1); match(ID); t+=","+b.getText(); } else { break _loop291; } } while (true); } } catch (RecognitionException ex) { reportError(ex); consume(); consumeUntil(_tokenSet_6); } return t; } public final String 
exceptionGroup() throws RecognitionException, TokenStreamException { String g; String e=null; g=""; try { // for error handling { _loop294: do { if ((LA(1)==LITERAL_exception)) { e=exceptionSpec(); g += e; } else { break _loop294; } } while (true); } } catch (RecognitionException ex) { reportError(ex); consume(); consumeUntil(_tokenSet_5); } return g; } public final String exceptionSpec() throws RecognitionException, TokenStreamException { String es; Token aa = null; String h=null; es = System.getProperty("line.separator")+"exception "; try { // for error handling match(LITERAL_exception); { switch ( LA(1)) { case ARG_ACTION: { aa = LT(1); match(ARG_ACTION); es += aa.getText(); break; } case EOF: case ACTION: case LITERAL_class: case ID: case LITERAL_protected: case LITERAL_private: case LITERAL_public: case LITERAL_exception: case LITERAL_catch: { break; } default: { throw new NoViableAltException(LT(1), getFilename()); } } } { _loop298: do { if ((LA(1)==LITERAL_catch)) { h=exceptionHandler(); es += h; } else { break _loop298; } } while (true); } } catch (RecognitionException ex) { reportError(ex); consume(); consumeUntil(_tokenSet_7); } return es; } public final String exceptionHandler() throws RecognitionException, TokenStreamException { String h; Token a1 = null; Token a2 = null; h=null; try { // for error handling match(LITERAL_catch); a1 = LT(1); match(ARG_ACTION); a2 = LT(1); match(ACTION); h = System.getProperty("line.separator")+ "catch "+a1.getText()+" "+a2.getText(); } catch (RecognitionException ex) { reportError(ex); consume(); consumeUntil(_tokenSet_8); } return h; } public static final String[] _tokenNames = { "<0>", "EOF", "<2>", "NULL_TREE_LOOKAHEAD", "\"tokens\"", "HEADER_ACTION", "SUBRULE_BLOCK", "ACTION", "\"class\"", "ID", "\"extends\"", "SEMI", "TOKENS_SPEC", "OPTIONS_START", "ASSIGN_RHS", "RCURLY", "\"protected\"", "\"private\"", "\"public\"", "BANG", "ARG_ACTION", "\"returns\"", "RULE_BLOCK", "\"throws\"", "COMMA", "\"exception\"", 
"\"catch\"", "ALT", "ELEMENT", "LPAREN", "RPAREN", "ID_OR_KEYWORD", "CURLY_BLOCK_SCARF", "WS", "NEWLINE", "COMMENT", "SL_COMMENT", "ML_COMMENT", "CHAR_LITERAL", "STRING_LITERAL", "ESC", "DIGIT", "XDIGIT" }; private static final long[] mk_tokenSet_0() { long[] data = { 2L, 0L}; return data; } public static final BitSet _tokenSet_0 = new BitSet(mk_tokenSet_0()); private static final long[] mk_tokenSet_1() { long[] data = { 4658050L, 0L}; return data; } public static final BitSet _tokenSet_1 = new BitSet(mk_tokenSet_1()); private static final long[] mk_tokenSet_2() { long[] data = { 459264L, 0L}; return data; } public static final BitSet _tokenSet_2 = new BitSet(mk_tokenSet_2()); private static final long[] mk_tokenSet_3() { long[] data = { 386L, 0L}; return data; } public static final BitSet _tokenSet_3 = new BitSet(mk_tokenSet_3()); private static final long[] mk_tokenSet_4() { long[] data = { 2048L, 0L}; return data; } public static final BitSet _tokenSet_4 = new BitSet(mk_tokenSet_4()); private static final long[] mk_tokenSet_5() { long[] data = { 459650L, 0L}; return data; } public static final BitSet _tokenSet_5 = new BitSet(mk_tokenSet_5()); private static final long[] mk_tokenSet_6() { long[] data = { 4202624L, 0L}; return data; } public static final BitSet _tokenSet_6 = new BitSet(mk_tokenSet_6()); private static final long[] mk_tokenSet_7() { long[] data = { 34014082L, 0L}; return data; } public static final BitSet _tokenSet_7 = new BitSet(mk_tokenSet_7()); private static final long[] mk_tokenSet_8() { long[] data = { 101122946L, 0L}; return data; } public static final BitSet _tokenSet_8 = new BitSet(mk_tokenSet_8()); } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/preprocessor/PreprocessorLexer.java000066400000000000000000001041051161462365500301760ustar00rootroot00000000000000// $ANTLR : "preproc.g" -> "PreprocessorLexer.java"$ package antlr.preprocessor; import java.io.InputStream; import antlr.TokenStreamException; import 
antlr.TokenStreamIOException; import antlr.TokenStreamRecognitionException; import antlr.CharStreamException; import antlr.CharStreamIOException; import antlr.ANTLRException; import java.io.Reader; import java.util.Hashtable; import antlr.CharScanner; import antlr.InputBuffer; import antlr.ByteBuffer; import antlr.CharBuffer; import antlr.Token; import antlr.CommonToken; import antlr.RecognitionException; import antlr.NoViableAltForCharException; import antlr.MismatchedCharException; import antlr.TokenStream; import antlr.ANTLRHashString; import antlr.LexerSharedInputState; import antlr.collections.impl.BitSet; import antlr.SemanticException; public class PreprocessorLexer extends antlr.CharScanner implements PreprocessorTokenTypes, TokenStream { public PreprocessorLexer(InputStream in) { this(new ByteBuffer(in)); } public PreprocessorLexer(Reader in) { this(new CharBuffer(in)); } public PreprocessorLexer(InputBuffer ib) { this(new LexerSharedInputState(ib)); } public PreprocessorLexer(LexerSharedInputState state) { super(state); caseSensitiveLiterals = true; setCaseSensitive(true); literals = new Hashtable(); literals.put(new ANTLRHashString("public", this), new Integer(18)); literals.put(new ANTLRHashString("class", this), new Integer(8)); literals.put(new ANTLRHashString("throws", this), new Integer(23)); literals.put(new ANTLRHashString("catch", this), new Integer(26)); literals.put(new ANTLRHashString("private", this), new Integer(17)); literals.put(new ANTLRHashString("extends", this), new Integer(10)); literals.put(new ANTLRHashString("protected", this), new Integer(16)); literals.put(new ANTLRHashString("returns", this), new Integer(21)); literals.put(new ANTLRHashString("tokens", this), new Integer(4)); literals.put(new ANTLRHashString("exception", this), new Integer(25)); } public Token nextToken() throws TokenStreamException { Token theRetToken=null; tryAgain: for (;;) { Token _token = null; int _ttype = Token.INVALID_TYPE; resetText(); try { // for char 
stream error handling try { // for lexical error handling switch ( LA(1)) { case ':': { mRULE_BLOCK(true); theRetToken=_returnToken; break; } case '\t': case '\n': case '\r': case ' ': { mWS(true); theRetToken=_returnToken; break; } case '/': { mCOMMENT(true); theRetToken=_returnToken; break; } case '{': { mACTION(true); theRetToken=_returnToken; break; } case '"': { mSTRING_LITERAL(true); theRetToken=_returnToken; break; } case '\'': { mCHAR_LITERAL(true); theRetToken=_returnToken; break; } case '!': { mBANG(true); theRetToken=_returnToken; break; } case ';': { mSEMI(true); theRetToken=_returnToken; break; } case ',': { mCOMMA(true); theRetToken=_returnToken; break; } case '}': { mRCURLY(true); theRetToken=_returnToken; break; } case ')': { mRPAREN(true); theRetToken=_returnToken; break; } case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': case '_': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { mID_OR_KEYWORD(true); theRetToken=_returnToken; break; } case '=': { mASSIGN_RHS(true); theRetToken=_returnToken; break; } case '[': { mARG_ACTION(true); theRetToken=_returnToken; break; } default: if ((LA(1)=='(') && (_tokenSet_0.member(LA(2)))) { mSUBRULE_BLOCK(true); theRetToken=_returnToken; } else if ((LA(1)=='(') && (true)) { mLPAREN(true); theRetToken=_returnToken; } else { if (LA(1)==EOF_CHAR) {uponEOF(); _returnToken = makeToken(Token.EOF_TYPE);} else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());} } } if ( _returnToken==null ) continue tryAgain; // found SKIP token _ttype = _returnToken.getType(); 
_ttype = testLiteralsTable(_ttype); _returnToken.setType(_ttype); return _returnToken; } catch (RecognitionException e) { throw new TokenStreamRecognitionException(e); } } catch (CharStreamException cse) { if ( cse instanceof CharStreamIOException ) { throw new TokenStreamIOException(((CharStreamIOException)cse).io); } else { throw new TokenStreamException(cse.getMessage()); } } } } public final void mRULE_BLOCK(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = RULE_BLOCK; int _saveIndex; match(':'); { if ((_tokenSet_1.member(LA(1))) && (_tokenSet_2.member(LA(2)))) { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); } else if ((_tokenSet_2.member(LA(1))) && (true)) { } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } mALT(false); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case ';': case '|': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } { _loop306: do { if ((LA(1)=='|')) { match('|'); { if ((_tokenSet_1.member(LA(1))) && (_tokenSet_2.member(LA(2)))) { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); } else if ((_tokenSet_2.member(LA(1))) && (true)) { } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } mALT(false); { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { _saveIndex=text.length(); mWS(false); text.setLength(_saveIndex); break; } case ';': case '|': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } } else { break _loop306; } } while (true); } match(';'); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new 
String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } public final void mWS(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = WS; int _saveIndex; { int _cnt348=0; _loop348: do { if ((LA(1)==' ') && (true)) { match(' '); } else if ((LA(1)=='\t') && (true)) { match('\t'); } else if ((LA(1)=='\n'||LA(1)=='\r') && (true)) { mNEWLINE(false); } else { if ( _cnt348>=1 ) { break _loop348; } else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());} } _cnt348++; } while (true); } _ttype = Token.SKIP; if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mALT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = ALT; int _saveIndex; { _loop317: do { if ((_tokenSet_3.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) { mELEMENT(false); } else { break _loop317; } } while (true); } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } public final void mSUBRULE_BLOCK(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = SUBRULE_BLOCK; int _saveIndex; match('('); { if ((_tokenSet_1.member(LA(1))) && (_tokenSet_0.member(LA(2)))) { mWS(false); } else if ((_tokenSet_0.member(LA(1))) && (true)) { } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } mALT(false); { _loop312: do { if ((_tokenSet_4.member(LA(1))) && (_tokenSet_0.member(LA(2)))) { { 
switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { mWS(false); break; } case '|': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } match('|'); { if ((_tokenSet_1.member(LA(1))) && (_tokenSet_0.member(LA(2)))) { mWS(false); } else if ((_tokenSet_0.member(LA(1))) && (true)) { } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } mALT(false); } else { break _loop312; } } while (true); } { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { mWS(false); break; } case ')': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } match(')'); { if ((LA(1)=='=') && (LA(2)=='>')) { match("=>"); } else if ((LA(1)=='*') && (true)) { match('*'); } else if ((LA(1)=='+') && (true)) { match('+'); } else if ((LA(1)=='?') && (true)) { match('?'); } else { } } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mELEMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = ELEMENT; int _saveIndex; switch ( LA(1)) { case '/': { mCOMMENT(false); break; } case '{': { mACTION(false); break; } case '"': { mSTRING_LITERAL(false); break; } case '\'': { mCHAR_LITERAL(false); break; } case '(': { mSUBRULE_BLOCK(false); break; } case '\n': case '\r': { mNEWLINE(false); break; } default: if ((_tokenSet_5.member(LA(1)))) { { match(_tokenSet_5); } } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = 
_token; } public final void mCOMMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = COMMENT; int _saveIndex; { if ((LA(1)=='/') && (LA(2)=='/')) { mSL_COMMENT(false); } else if ((LA(1)=='/') && (LA(2)=='*')) { mML_COMMENT(false); } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } _ttype = Token.SKIP; if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } public final void mACTION(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = ACTION; int _saveIndex; match('{'); { _loop378: do { // nongreedy exit test if ((LA(1)=='}') && (true)) break _loop378; if ((LA(1)=='\n'||LA(1)=='\r') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) { mNEWLINE(false); } else if ((LA(1)=='{') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) { mACTION(false); } else if ((LA(1)=='\'') && (_tokenSet_6.member(LA(2)))) { mCHAR_LITERAL(false); } else if ((LA(1)=='/') && (LA(2)=='*'||LA(2)=='/')) { mCOMMENT(false); } else if ((LA(1)=='"') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) { mSTRING_LITERAL(false); } else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) { matchNot(EOF_CHAR); } else { break _loop378; } } while (true); } match('}'); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } public final void mSTRING_LITERAL(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = STRING_LITERAL; int 
_saveIndex; match('"'); { _loop363: do { if ((LA(1)=='\\')) { mESC(false); } else if ((_tokenSet_7.member(LA(1)))) { matchNot('"'); } else { break _loop363; } } while (true); } match('"'); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } public final void mCHAR_LITERAL(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = CHAR_LITERAL; int _saveIndex; match('\''); { if ((LA(1)=='\\')) { mESC(false); } else if ((_tokenSet_8.member(LA(1)))) { matchNot('\''); } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } match('\''); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mNEWLINE(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = NEWLINE; int _saveIndex; { if ((LA(1)=='\r') && (LA(2)=='\n')) { match('\r'); match('\n'); newline(); } else if ((LA(1)=='\r') && (true)) { match('\r'); newline(); } else if ((LA(1)=='\n')) { match('\n'); newline(); } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } public final void mBANG(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = BANG; int _saveIndex; match('!'); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { 
_token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } public final void mSEMI(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = SEMI; int _saveIndex; match(';'); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } public final void mCOMMA(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = COMMA; int _saveIndex; match(','); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } public final void mRCURLY(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = RCURLY; int _saveIndex; match('}'); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } public final void mLPAREN(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = LPAREN; int _saveIndex; match('('); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } public final void mRPAREN(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = RPAREN; int _saveIndex; 
match(')'); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } /** This rule picks off keywords in the lexer that need to be * handled specially. For example, "header" is the start * of the header action (used to distinguish between options * block and an action). We do not want "header" to go back * to the parser as a simple keyword...it must pick off * the action afterwards. */ public final void mID_OR_KEYWORD(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = ID_OR_KEYWORD; int _saveIndex; Token id=null; mID(true); id=_returnToken; _ttype = id.getType(); { if (((_tokenSet_9.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')))&&(id.getText().equals("header"))) { { if ((_tokenSet_1.member(LA(1))) && (_tokenSet_9.member(LA(2)))) { mWS(false); } else if ((_tokenSet_9.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) { } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } { switch ( LA(1)) { case '"': { mSTRING_LITERAL(false); break; } case '\t': case '\n': case '\r': case ' ': case '/': case '{': { break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } { _loop331: do { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { mWS(false); break; } case '/': { mCOMMENT(false); break; } default: { break _loop331; } } } while (true); } mACTION(false); _ttype = HEADER_ACTION; } else if (((_tokenSet_10.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')))&&(id.getText().equals("tokens"))) { { _loop333: do { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { mWS(false); break; } case '/': { mCOMMENT(false); break; } default: { break _loop333; } } } while (true); } 
mCURLY_BLOCK_SCARF(false); _ttype = TOKENS_SPEC; } else if (((_tokenSet_10.member(LA(1))) && (true))&&(id.getText().equals("options"))) { { _loop335: do { switch ( LA(1)) { case '\t': case '\n': case '\r': case ' ': { mWS(false); break; } case '/': { mCOMMENT(false); break; } default: { break _loop335; } } } while (true); } match('{'); _ttype = OPTIONS_START; } else { } } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mID(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = ID; int _saveIndex; { switch ( LA(1)) { case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { matchRange('a','z'); break; } case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': { matchRange('A','Z'); break; } case '_': { match('_'); break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } { _loop342: do { switch ( LA(1)) { case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': { matchRange('a','z'); break; } case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 
'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': { matchRange('A','Z'); break; } case '_': { match('_'); break; } case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': { matchRange('0','9'); break; } default: { break _loop342; } } } while (true); } _ttype = testLiteralsTable(new String(text.getBuffer(),_begin,text.length()-_begin),_ttype); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mCURLY_BLOCK_SCARF(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = CURLY_BLOCK_SCARF; int _saveIndex; match('{'); { _loop338: do { // nongreedy exit test if ((LA(1)=='}') && (true)) break _loop338; if ((LA(1)=='\n'||LA(1)=='\r') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) { mNEWLINE(false); } else if ((LA(1)=='"') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) { mSTRING_LITERAL(false); } else if ((LA(1)=='\'') && (_tokenSet_6.member(LA(2)))) { mCHAR_LITERAL(false); } else if ((LA(1)=='/') && (LA(2)=='*'||LA(2)=='/')) { mCOMMENT(false); } else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) { matchNot(EOF_CHAR); } else { break _loop338; } } while (true); } match('}'); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } public final void mASSIGN_RHS(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = ASSIGN_RHS; int _saveIndex; _saveIndex=text.length(); match('='); text.setLength(_saveIndex); { _loop345: do { // nongreedy 
exit test if ((LA(1)==';') && (true)) break _loop345; if ((LA(1)=='"') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) { mSTRING_LITERAL(false); } else if ((LA(1)=='\'') && (_tokenSet_6.member(LA(2)))) { mCHAR_LITERAL(false); } else if ((LA(1)=='\n'||LA(1)=='\r') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) { mNEWLINE(false); } else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) { matchNot(EOF_CHAR); } else { break _loop345; } } while (true); } match(';'); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mSL_COMMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = SL_COMMENT; int _saveIndex; match("//"); { _loop355: do { // nongreedy exit test if ((LA(1)=='\n'||LA(1)=='\r') && (true)) break _loop355; if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) { matchNot(EOF_CHAR); } else { break _loop355; } } while (true); } mNEWLINE(false); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mML_COMMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = ML_COMMENT; int _saveIndex; match("/*"); { _loop358: do { // nongreedy exit test if ((LA(1)=='*') && (LA(2)=='/')) break _loop358; if ((LA(1)=='\n'||LA(1)=='\r') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) { mNEWLINE(false); } else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) { matchNot(EOF_CHAR); } else { break _loop358; } } while (true); } 
match("*/"); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mESC(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = ESC; int _saveIndex; match('\\'); { switch ( LA(1)) { case 'n': { match('n'); break; } case 'r': { match('r'); break; } case 't': { match('t'); break; } case 'b': { match('b'); break; } case 'f': { match('f'); break; } case 'w': { match('w'); break; } case 'a': { match('a'); break; } case '"': { match('"'); break; } case '\'': { match('\''); break; } case '\\': { match('\\'); break; } case '0': case '1': case '2': case '3': { { matchRange('0','3'); } { if (((LA(1) >= '0' && LA(1) <= '9')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) { mDIGIT(false); { if (((LA(1) >= '0' && LA(1) <= '9')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) { mDIGIT(false); } else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && (true)) { } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && (true)) { } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } break; } case '4': case '5': case '6': case '7': { { matchRange('4','7'); } { if (((LA(1) >= '0' && LA(1) <= '9')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) { mDIGIT(false); } else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && (true)) { } else { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } break; } case 'u': { match('u'); mXDIGIT(false); mXDIGIT(false); mXDIGIT(false); mXDIGIT(false); break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } } if ( _createToken && _token==null && 
_ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mDIGIT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = DIGIT; int _saveIndex; matchRange('0','9'); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } protected final void mXDIGIT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = XDIGIT; int _saveIndex; switch ( LA(1)) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': { matchRange('0','9'); break; } case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': { matchRange('a','f'); break; } case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': { matchRange('A','F'); break; } default: { throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn()); } } if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } public final void mARG_ACTION(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException { int _ttype; Token _token=null; int _begin=text.length(); _ttype = ARG_ACTION; int _saveIndex; match('['); { _loop375: do { // nongreedy exit test if ((LA(1)==']') && (true)) break _loop375; if ((LA(1)=='[') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) { mARG_ACTION(false); } else if ((LA(1)=='\n'||LA(1)=='\r') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) { mNEWLINE(false); } else if ((LA(1)=='\'') && (_tokenSet_6.member(LA(2)))) { 
mCHAR_LITERAL(false); } else if ((LA(1)=='"') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) { mSTRING_LITERAL(false); } else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) { matchNot(EOF_CHAR); } else { break _loop375; } } while (true); } match(']'); if ( _createToken && _token==null && _ttype!=Token.SKIP ) { _token = makeToken(_ttype); _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin)); } _returnToken = _token; } private static final long[] mk_tokenSet_0() { long[] data = new long[8]; data[0]=-576460752303423496L; for (int i = 1; i<=3; i++) { data[i]=-1L; } return data; } public static final BitSet _tokenSet_0 = new BitSet(mk_tokenSet_0()); private static final long[] mk_tokenSet_1() { long[] data = { 4294977024L, 0L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_1 = new BitSet(mk_tokenSet_1()); private static final long[] mk_tokenSet_2() { long[] data = new long[8]; data[0]=-2199023255560L; for (int i = 1; i<=3; i++) { data[i]=-1L; } return data; } public static final BitSet _tokenSet_2 = new BitSet(mk_tokenSet_2()); private static final long[] mk_tokenSet_3() { long[] data = new long[8]; data[0]=-576462951326679048L; for (int i = 1; i<=3; i++) { data[i]=-1L; } return data; } public static final BitSet _tokenSet_3 = new BitSet(mk_tokenSet_3()); private static final long[] mk_tokenSet_4() { long[] data = { 4294977024L, 1152921504606846976L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_4 = new BitSet(mk_tokenSet_4()); private static final long[] mk_tokenSet_5() { long[] data = new long[8]; data[0]=-576605355262354440L; data[1]=-576460752303423489L; for (int i = 2; i<=3; i++) { data[i]=-1L; } return data; } public static final BitSet _tokenSet_5 = new BitSet(mk_tokenSet_5()); private static final long[] mk_tokenSet_6() { long[] data = new long[8]; data[0]=-549755813896L; for (int i = 1; i<=3; i++) { data[i]=-1L; } return data; } public static final BitSet 
_tokenSet_6 = new BitSet(mk_tokenSet_6()); private static final long[] mk_tokenSet_7() { long[] data = new long[8]; data[0]=-17179869192L; data[1]=-268435457L; for (int i = 2; i<=3; i++) { data[i]=-1L; } return data; } public static final BitSet _tokenSet_7 = new BitSet(mk_tokenSet_7()); private static final long[] mk_tokenSet_8() { long[] data = new long[8]; data[0]=-549755813896L; data[1]=-268435457L; for (int i = 2; i<=3; i++) { data[i]=-1L; } return data; } public static final BitSet _tokenSet_8 = new BitSet(mk_tokenSet_8()); private static final long[] mk_tokenSet_9() { long[] data = { 140758963201536L, 576460752303423488L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_9 = new BitSet(mk_tokenSet_9()); private static final long[] mk_tokenSet_10() { long[] data = { 140741783332352L, 576460752303423488L, 0L, 0L, 0L}; return data; } public static final BitSet _tokenSet_10 = new BitSet(mk_tokenSet_10()); } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/preprocessor/PreprocessorTokenTypes.java000066400000000000000000000017621161462365500312310ustar00rootroot00000000000000// $ANTLR : "preproc.g" -> "Preprocessor.java"$ package antlr.preprocessor; public interface PreprocessorTokenTypes { int EOF = 1; int NULL_TREE_LOOKAHEAD = 3; int LITERAL_tokens = 4; int HEADER_ACTION = 5; int SUBRULE_BLOCK = 6; int ACTION = 7; int LITERAL_class = 8; int ID = 9; int LITERAL_extends = 10; int SEMI = 11; int TOKENS_SPEC = 12; int OPTIONS_START = 13; int ASSIGN_RHS = 14; int RCURLY = 15; int LITERAL_protected = 16; int LITERAL_private = 17; int LITERAL_public = 18; int BANG = 19; int ARG_ACTION = 20; int LITERAL_returns = 21; int RULE_BLOCK = 22; int LITERAL_throws = 23; int COMMA = 24; int LITERAL_exception = 25; int LITERAL_catch = 26; int ALT = 27; int ELEMENT = 28; int LPAREN = 29; int RPAREN = 30; int ID_OR_KEYWORD = 31; int CURLY_BLOCK_SCARF = 32; int WS = 33; int NEWLINE = 34; int COMMENT = 35; int SL_COMMENT = 36; int ML_COMMENT = 37; int 
CHAR_LITERAL = 38; int STRING_LITERAL = 39; int ESC = 40; int DIGIT = 41; int XDIGIT = 42; } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/preprocessor/Rule.java000066400000000000000000000102111161462365500254110ustar00rootroot00000000000000package antlr.preprocessor; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/preprocessor/Rule.java#1 $ */ import antlr.collections.impl.IndexedVector; import java.util.Hashtable; import java.util.Enumeration; class Rule { protected String name; protected String block; protected String args; protected String returnValue; protected String throwsSpec; protected String initAction; protected IndexedVector options; protected String visibility; protected Grammar enclosingGrammar; protected boolean bang = false; public Rule(String n, String b, IndexedVector options, Grammar gr) { name = n; block = b; this.options = options; setEnclosingGrammar(gr); } public String getArgs() { return args; } public boolean getBang() { return bang; } public String getName() { return name; } public String getReturnValue() { return returnValue; } public String getVisibility() { return visibility; } /** If 'rule' narrows the visible of 'this', return true; * For example, 'this' is public and 'rule' is private, * true is returned. You cannot narrow the vis. of * a rule. 
*/ public boolean narrowerVisibility(Rule rule) { if (visibility.equals("public")) { if (!rule.equals("public")) { return true; // everything narrower than public } return false; } else if (visibility.equals("protected")) { if (rule.equals("private")) { return true; // private narrower than protected } return false; } else if (visibility.equals("private")) { return false; // nothing is narrower than private } return false; } /** Two rules have the same signature if they have: * same name * same return value * same args * I do a simple string compare now, but later * the type could be pulled out so it is insensitive * to names of args etc... */ public boolean sameSignature(Rule rule) { boolean nSame = true; boolean aSame = true; boolean rSame = true; nSame = name.equals(rule.getName()); if (args != null) { aSame = args.equals(rule.getArgs()); } if (returnValue != null) { rSame = returnValue.equals(rule.getReturnValue()); } return nSame && aSame && rSame; } public void setArgs(String a) { args = a; } public void setBang() { bang = true; } public void setEnclosingGrammar(Grammar g) { enclosingGrammar = g; } public void setInitAction(String a) { initAction = a; } public void setOptions(IndexedVector options) { this.options = options; } public void setReturnValue(String ret) { returnValue = ret; } public void setThrowsSpec(String t) { throwsSpec = t; } public void setVisibility(String v) { visibility = v; } public String toString() { String s = ""; String retString = returnValue == null ? "" : "returns " + returnValue; String argString = args == null ? "" : args; String bang = getBang() ? "!" : ""; s += visibility == null ? 
"" : visibility + " "; s += name + bang + argString + " " + retString + throwsSpec; if (options != null) { s += System.getProperty("line.separator") + "options {" + System.getProperty("line.separator"); for (Enumeration e = options.elements(); e.hasMoreElements();) { s += (Option)e.nextElement() + System.getProperty("line.separator"); } s += "}" + System.getProperty("line.separator"); } if (initAction != null) { s += initAction + System.getProperty("line.separator"); } s += block; return s; } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/preprocessor/Tool.java000066400000000000000000000113461161462365500254310ustar00rootroot00000000000000package antlr.preprocessor; /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/preprocessor/Tool.java#1 $ */ import java.io.*; import antlr.collections.impl.Vector; import java.util.Enumeration; /** Tester for the preprocessor */ public class Tool { protected Hierarchy theHierarchy; protected String grammarFileName; protected String[] args; protected int nargs; // how many args in new args list protected Vector grammars; protected antlr.Tool antlrTool; public Tool(antlr.Tool t, String[] args) { antlrTool = t; processArguments(args); } public static void main(String[] args) { antlr.Tool antlrTool = new antlr.Tool(); Tool theTool = new Tool(antlrTool, args); theTool.preprocess(); String[] a = theTool.preprocessedArgList(); for (int i = 0; i < a.length; i++) { System.out.print(" " + a[i]); } System.out.println(); } public boolean preprocess() { if (grammarFileName == null) { antlrTool.toolError("no grammar file specified"); return false; } if (grammars != null) { theHierarchy = new Hierarchy(antlrTool); for (Enumeration e = grammars.elements(); e.hasMoreElements();) { String f = (String)e.nextElement(); try { theHierarchy.readGrammarFile(f); } catch (FileNotFoundException fe) { 
antlrTool.toolError("file " + f + " not found"); return false; } } } // do the actual inheritance stuff boolean complete = theHierarchy.verifyThatHierarchyIsComplete(); if (!complete) return false; theHierarchy.expandGrammarsInFile(grammarFileName); GrammarFile gf = theHierarchy.getFile(grammarFileName); String expandedFileName = gf.nameForExpandedGrammarFile(grammarFileName); // generate the output file if necessary if (expandedFileName.equals(grammarFileName)) { args[nargs++] = grammarFileName; // add to argument list } else { try { gf.generateExpandedFile(); // generate file to feed ANTLR args[nargs++] = antlrTool.getOutputDirectory() + System.getProperty("file.separator") + expandedFileName; // add to argument list } catch (IOException io) { antlrTool.toolError("cannot write expanded grammar file " + expandedFileName); return false; } } return true; } /** create new arg list with correct length to pass to ANTLR */ public String[] preprocessedArgList() { String[] a = new String[nargs]; System.arraycopy(args, 0, a, 0, nargs); args = a; return args; } /** Process -glib options and grammar file. Create a new args list * that does not contain the -glib option. The grammar file name * might be modified and, hence, is not added yet to args list. 
*/ private void processArguments(String[] incomingArgs) { this.nargs = 0; this.args = new String[incomingArgs.length]; for (int i = 0; i < incomingArgs.length; i++) { if ( incomingArgs[i].length() == 0 ) { antlrTool.warning("Zero length argument ignoring..."); continue; } if (incomingArgs[i].equals("-glib")) { // if on a pc and they use a '/', warn them if (File.separator.equals("\\") && incomingArgs[i].indexOf('/') != -1) { antlrTool.warning("-glib cannot deal with '/' on a PC: use '\\'; ignoring..."); } else { grammars = antlrTool.parseSeparatedList(incomingArgs[i + 1], ';'); i++; } } else if (incomingArgs[i].equals("-o")) { args[this.nargs++] = incomingArgs[i]; if (i + 1 >= incomingArgs.length) { antlrTool.error("missing output directory with -o option; ignoring"); } else { i++; args[this.nargs++] = incomingArgs[i]; antlrTool.setOutputDirectory(incomingArgs[i]); } } else if (incomingArgs[i].charAt(0) == '-') { args[this.nargs++] = incomingArgs[i]; } else { // Must be the grammar file grammarFileName = incomingArgs[i]; if (grammars == null) { grammars = new Vector(10); } grammars.appendElement(grammarFileName); // process it too if ((i + 1) < incomingArgs.length) { antlrTool.warning("grammar file must be last; ignoring other arguments..."); break; } } } } } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/preprocessor/preproc.g000066400000000000000000000216731161462365500254770ustar00rootroot00000000000000header { package antlr.preprocessor; } /* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/preprocessor/preproc.g#1 $ */ { import antlr.collections.impl.IndexedVector; import java.util.Hashtable; import antlr.preprocessor.Grammar; } class Preprocessor extends Parser; options { k=1; interactive=true; } tokens { "tokens"; } { // This chunk of error reporting code provided by Brian Smith private antlr.Tool antlrTool; 
/** In order to make it so existing subclasses don't break, we won't require * that the antlr.Tool instance be passed as a constructor element. Instead, * the antlr.Tool instance should register itself via {@link #initTool(antlr.Tool)} * @throws IllegalStateException if a tool has already been registered * @since 2.7.2 */ public void setTool(antlr.Tool tool) { if (antlrTool == null) { antlrTool = tool; } else { throw new IllegalStateException("antlr.Tool already registered"); } } /** @since 2.7.2 */ protected antlr.Tool getTool() { return antlrTool; } /** Delegates the error message to the tool if any was registered via * {@link #initTool(antlr.Tool)} * @since 2.7.2 */ public void reportError(String s) { if (getTool() != null) { getTool().error(s, getFilename(), -1, -1); } else { super.reportError(s); } } /** Delegates the error message to the tool if any was registered via * {@link #initTool(antlr.Tool)} * @since 2.7.2 */ public void reportError(RecognitionException e) { if (getTool() != null) { getTool().error(e.getErrorMessage(), e.getFilename(), e.getLine(), e.getColumn()); } else { super.reportError(e); } } /** Delegates the warning message to the tool if any was registered via * {@link #initTool(antlr.Tool)} * @since 2.7.2 */ public void reportWarning(String s) { if (getTool() != null) { getTool().warning(s, getFilename(), -1, -1); } else { super.reportWarning(s); } } } grammarFile[Hierarchy hier, String file] { Grammar gr; IndexedVector opt=null; } : ( hdr:HEADER_ACTION { hier.getFile(file).addHeaderAction(hdr.getText()); } )* ( opt=optionSpec[null] )? 
( gr=class_def[file, hier] { if ( gr!=null && opt!=null ) { hier.getFile(file).setOptions(opt); } if ( gr!=null ) { gr.setFileName(file); hier.addGrammar(gr); } } )* EOF ; superClass returns [String sup] {sup=LT(1).getText();} : SUBRULE_BLOCK // looks like ("mypackage.MyParserClass") ; class_def[String file, Hierarchy hier] returns [Grammar gr] { gr=null; IndexedVector rules = new IndexedVector(100); IndexedVector classOptions = null; String sc = null; } : ( preamble:ACTION )? "class" sub:ID "extends" sup:ID (sc=superClass)? SEMI { gr = (Grammar)hier.getGrammar(sub.getText()); if ( gr!=null ) { // antlr.Tool.toolError("redefinition of grammar "+gr.getName()+" ignored"); gr=null; throw new SemanticException("redefinition of grammar "+sub.getText(), file, sub.getLine(), sub.getColumn()); } else { gr = new Grammar(hier.getTool(), sub.getText(), sup.getText(), rules); gr.superClass=sc; if ( preamble!=null ) { gr.setPreambleAction(preamble.getText()); } } } ( classOptions = optionSpec[gr] )? { if ( gr!=null ) { gr.setOptions(classOptions); } } ( tk:TOKENS_SPEC {gr.setTokenSection(tk.getText());} )? ( memberA:ACTION {gr.setMemberAction(memberA.getText());} )? ( rule[gr] )+ ; optionSpec[Grammar gr] returns [IndexedVector options] { options = new IndexedVector(); } : OPTIONS_START ( op:ID rhs:ASSIGN_RHS { Option newOp = new Option(op.getText(),rhs.getText(),gr); options.appendElement(newOp.getName(),newOp); if ( gr!=null && op.getText().equals("importVocab") ) { gr.specifiedVocabulary = true; gr.importVocab = rhs.getText(); } else if ( gr!=null && op.getText().equals("exportVocab") ) { // don't want ';' included in outputVocab. // This is heinously inconsistent! Ugh. 
gr.exportVocab = rhs.getText().substring(0,rhs.getText().length()-1); gr.exportVocab = gr.exportVocab.trim(); } } )* // {gr.fixupVocabOptionsForInheritance();} RCURLY ; rule[Grammar gr] { IndexedVector o = null; // options for rule String vis = null; boolean bang=false; String eg=null, thr=""; } : ( "protected" {vis="protected";} | "private" {vis="private";} | "public" {vis="public";} )? r:ID ( BANG {bang=true;} )? ( arg:ARG_ACTION )? ( "returns" ret:ARG_ACTION )? ( thr=throwsSpec )? ( o = optionSpec[null] )? ( init:ACTION )? blk:RULE_BLOCK eg=exceptionGroup { String rtext = blk.getText()+eg; Rule ppr = new Rule(r.getText(),rtext,o,gr); ppr.setThrowsSpec(thr); if ( arg!=null ) { ppr.setArgs(arg.getText()); } if ( ret!=null ) { ppr.setReturnValue(ret.getText()); } if ( init!=null ) { ppr.setInitAction(init.getText()); } if ( bang ) { ppr.setBang(); } ppr.setVisibility(vis); if ( gr!=null ) { gr.addRule(ppr); } } ; throwsSpec returns [String t] {t="throws ";} : "throws" a:ID {t+=a.getText();} ( COMMA b:ID {t+=","+b.getText();} )* ; exceptionGroup returns [String g] {String e=null; g="";} : ( e=exceptionSpec {g += e;} )* ; exceptionSpec returns [String es] { String h=null; es = System.getProperty("line.separator")+"exception "; } : "exception" ( aa:ARG_ACTION {es += aa.getText();} )? ( h=exceptionHandler {es += h;} )* ; exceptionHandler returns [String h] {h=null;} : "catch" a1:ARG_ACTION a2:ACTION {h = System.getProperty("line.separator")+ "catch "+a1.getText()+" "+a2.getText();} ; class PreprocessorLexer extends Lexer; options { k=2; charVocabulary = '\3'..'\377'; // common ASCII interactive=true; } RULE_BLOCK : ':' (options {greedy=true;}:WS!)? ALT (options {greedy=true;}:WS!)? ( '|' (options {greedy=true;}:WS!)? ALT (options {greedy=true;}:WS!)? )* ';' ; SUBRULE_BLOCK : '(' (options {greedy=true;}:WS)? ALT ( options {greedy=true;} : (WS)? '|' (options {greedy=true;}:WS)? ALT )* (WS)? ')' ( options {greedy=true;} : '*' | '+' | '?' | "=>" )? 
; protected ALT : (options {greedy=true;} : ELEMENT)* ; protected ELEMENT : COMMENT | ACTION | STRING_LITERAL | CHAR_LITERAL | SUBRULE_BLOCK | NEWLINE | ~('\n' | '\r' | '(' | ')' | '/' | '{' | '"' | '\'' | ';') ; BANG: '!' ; SEMI: ';' ; COMMA: ',' ; RCURLY : '}' ; LPAREN : '(' ; RPAREN : ')' ; /** This rule picks off keywords in the lexer that need to be * handled specially. For example, "header" is the start * of the header action (used to distinguish between options * block and an action). We do not want "header" to go back * to the parser as a simple keyword...it must pick off * the action afterwards. */ ID_OR_KEYWORD : id:ID {$setType(id.getType());} ( {id.getText().equals("header")}? (options {greedy=true;}:WS)? (STRING_LITERAL)? (WS|COMMENT)* ACTION {$setType(HEADER_ACTION);} | {id.getText().equals("tokens")}? (WS|COMMENT)* CURLY_BLOCK_SCARF {$setType(TOKENS_SPEC);} | {id.getText().equals("options")}? (WS|COMMENT)* '{' {$setType(OPTIONS_START);} )? ; protected CURLY_BLOCK_SCARF : '{' ( options {greedy=false;} : NEWLINE | STRING_LITERAL | CHAR_LITERAL | COMMENT | . )* '}' ; protected ID options { testLiterals=true; } : ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'_'|'0'..'9')* ; ASSIGN_RHS : '='! ( options {greedy=false;} : STRING_LITERAL | CHAR_LITERAL | NEWLINE | . )* ';' ; WS : ( options {greedy=true;} : ' ' | '\t' | NEWLINE )+ {$setType(Token.SKIP);} ; protected NEWLINE : ( options { generateAmbigWarnings=false; } : '\r' '\n' {newline();} | '\r' {newline();} | '\n' {newline();} ) ; COMMENT : ( SL_COMMENT | ML_COMMENT ) {$setType(Token.SKIP);} ; protected SL_COMMENT : "//" (options {greedy=false;}:.)* NEWLINE ; protected ML_COMMENT : "/*" ( options {greedy=false;} : NEWLINE | . )* "*/" ; CHAR_LITERAL : '\'' (ESC|~'\'') '\'' ; STRING_LITERAL : '"' (ESC|~'"')* '"' ; protected ESC : '\\' ( 'n' | 'r' | 't' | 'b' | 'f' | 'w' | 'a' | '"' | '\'' | '\\' | ('0'..'3') ( options {greedy=true;} : DIGIT ( options {greedy=true;} : DIGIT )? )? 
| ('4'..'7') (options {greedy=true;}:DIGIT)? | 'u' XDIGIT XDIGIT XDIGIT XDIGIT ) ; protected DIGIT : '0'..'9' ; protected XDIGIT : '0' .. '9' | 'a' .. 'f' | 'A' .. 'F' ; ARG_ACTION : '[' ( options { greedy=false; } : ARG_ACTION | NEWLINE | CHAR_LITERAL | STRING_LITERAL | . )* ']' ; ACTION : '{' ( options { greedy=false; } : NEWLINE | ACTION | CHAR_LITERAL | COMMENT | STRING_LITERAL | . )* '}' ; nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/antlr/tokdef.g000066400000000000000000000100331161462365500225370ustar00rootroot00000000000000/* ANTLR Translator Generator * Project led by Terence Parr at http://www.jGuru.com * Software rights: http://www.antlr.org/license.html * * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/tokdef.g#1 $ */ header { package antlr; } /** Simple lexer/parser for reading token definition files in support of the import/export vocab option for grammars. */ class ANTLRTokdefParser extends Parser; options { k=3; interactive=true; } { // This chunk of error reporting code provided by Brian Smith private antlr.Tool antlrTool; /** In order to make it so existing subclasses don't break, we won't require * that the antlr.Tool instance be passed as a constructor element. 
Instead, * the antlr.Tool instance should register itself via {@link #initTool(antlr.Tool)} * @throws IllegalStateException if a tool has already been registered * @since 2.7.2 */ public void setTool(antlr.Tool tool) { if (antlrTool == null) { antlrTool = tool; } else { throw new IllegalStateException("antlr.Tool already registered"); } } /** @since 2.7.2 */ protected antlr.Tool getTool() { return antlrTool; } /** Delegates the error message to the tool if any was registered via * {@link #initTool(antlr.Tool)} * @since 2.7.2 */ public void reportError(String s) { if (getTool() != null) { getTool().error(s, getFilename(), -1, -1); } else { super.reportError(s); } } /** Delegates the error message to the tool if any was registered via * {@link #initTool(antlr.Tool)} * @since 2.7.2 */ public void reportError(RecognitionException e) { if (getTool() != null) { getTool().error(e.getErrorMessage(), e.getFilename(), e.getLine(), e.getColumn()); } else { super.reportError(e); } } /** Delegates the warning message to the tool if any was registered via * {@link #initTool(antlr.Tool)} * @since 2.7.2 */ public void reportWarning(String s) { if (getTool() != null) { getTool().warning(s, getFilename(), -1, -1); } else { super.reportWarning(s); } } } file [ImportVocabTokenManager tm] : name:ID (line[tm])*; line [ImportVocabTokenManager tm] { Token t=null; Token s=null; } : ( s1:STRING {s = s1;} | lab:ID {t = lab;} ASSIGN s2:STRING {s = s2;} | id:ID {t=id;} LPAREN para:STRING RPAREN | id2:ID {t=id2;} ) ASSIGN i:INT { Integer value = Integer.valueOf(i.getText()); // if literal found, define as a string literal if ( s!=null ) { tm.define(s.getText(), value.intValue()); // if label, then label the string and map label to token symbol also if ( t!=null ) { StringLiteralSymbol sl = (StringLiteralSymbol) tm.getTokenSymbol(s.getText()); sl.setLabel(t.getText()); tm.mapToTokenSymbol(t.getText(), sl); } } // define token (not a literal) else if ( t!=null ) { tm.define(t.getText(), 
value.intValue()); if ( para!=null ) { TokenSymbol ts = tm.getTokenSymbol(t.getText()); ts.setParaphrase( para.getText() ); } } } ; class ANTLRTokdefLexer extends Lexer; options { k=2; testLiterals=false; interactive=true; charVocabulary='\003'..'\377'; } WS : ( ' ' | '\t' | '\r' ('\n')? {newline();} | '\n' {newline();} ) { _ttype = Token.SKIP; } ; SL_COMMENT : "//" (~('\n'|'\r'))* ('\n'|'\r'('\n')?) { _ttype = Token.SKIP; newline(); } ; ML_COMMENT : "/*" ( '\n' { newline(); } | '*' ~'/' | ~'*' )* "*/" { _ttype = Token.SKIP; } ; LPAREN : '(' ; RPAREN : ')' ; ASSIGN : '=' ; STRING : '"' (ESC|~'"')* '"' ; protected ESC : '\\' ( 'n' | 'r' | 't' | 'b' | 'f' | '"' | '\'' | '\\' | ('0'..'3') ( DIGIT (DIGIT)? )? | ('4'..'7') (DIGIT)? | 'u' XDIGIT XDIGIT XDIGIT XDIGIT ) ; protected DIGIT : '0'..'9' ; protected XDIGIT : '0' .. '9' | 'a' .. 'f' | 'A' .. 'F' ; ID : ('a'..'z'|'A'..'Z') ('a'..'z'|'A'..'Z'|'_'|'0'..'9')* ; INT : (DIGIT)+ ; nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/config.log000066400000000000000000000740031161462365500217520ustar00rootroot00000000000000This file contains any messages produced by compilers while running configure, to aid debugging if configure makes a mistake. It was created by antlr configure 2.7.5, which was generated by GNU Autoconf 2.59. Invocation command line was $ ./configure ## --------- ## ## Platform. ## ## --------- ## hostname = bambuntu uname -m = i686 uname -r = 2.6.10-5-k7 uname -s = Linux uname -v = #1 Tue Apr 5 12:56:05 UTC 2005 /usr/bin/uname -p = unknown /bin/uname -X = unknown /bin/arch = i686 /usr/bin/arch -k = unknown /usr/convex/getsysinfo = unknown hostinfo = unknown /bin/machine = unknown /usr/bin/oslevel = unknown /bin/universe = unknown PATH: /usr/local/bin PATH: /usr/local/sbin PATH: /sbin PATH: /usr/sbin PATH: /bin PATH: /usr/bin PATH: /usr/bin/X11 PATH: /usr/games PATH: /usr/local/mono/bin ## ----------- ## ## Core tests. 
## ## ----------- ## configure:1974: checking build system type configure:1992: result: i686-pc-linux-gnu configure:2000: checking host system type configure:2014: result: i686-pc-linux-gnu configure:2027: checking whether this is Cygwin configure:2034: result: no configure:2039: checking whether this is MinGW configure:2046: result: no configure:2073: checking for cygpath configure:2106: result: no configure:2177: checking for make configure:2179: result: /usr/bin/make configure:2186: checking for gmake configure:2188: result: no configure:2186: checking for gnumake configure:2188: result: no configure:2156: checking for /bin/make configure:2163: result: no configure:2156: checking for /usr/bin/make configure:2160: result: yes configure:2219: checking whether /usr/bin/make is GNU make configure:2223: result: yes configure:2252: checking for make configure:2270: found /usr/bin/make configure:2282: result: /usr/bin/make configure:2357: checking for java configure:2359: result: /usr/bin/java configure:2366: checking for gij configure:2368: result: no configure:2451: checking for jikes configure:2453: result: no configure:2442: checking for javac configure:2444: result: /usr/bin/javac configure:2451: checking for gcj configure:2453: result: no configure:2527: checking for jar configure:2529: result: /usr/bin/jar configure:2761: checking for tlib configure:2763: result: no configure:2761: checking for lib configure:2763: result: no configure:2752: checking for ar configure:2754: result: /usr/bin/ar configure:2731: checking for /usr/bin/ar configure:2735: result: yes configure:2861: checking for g++ configure:2877: found /usr/bin/g++ configure:2887: result: g++ configure:2903: checking for C++ compiler version configure:2906: g++ --version &5 g++ (GCC) 3.3.5 (Debian 1:3.3.5-8ubuntu2) Copyright (C) 2003 Free Software Foundation, Inc. This is free software; see the source for copying conditions. 
There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. configure:2909: $? = 0 configure:2911: g++ -v &5 Reading specs from /usr/lib/gcc-lib/i486-linux/3.3.5/specs Configured with: ../src/configure -v --enable-languages=c,c++,java,f77,pascal,objc,ada,treelang --prefix=/usr --mandir=/usr/share/man --infodir=/usr/share/info --with-gxx-include-dir=/usr/include/c++/3.3 --enable-shared --with-system-zlib --enable-nls --without-included-gettext --enable-__cxa_atexit --enable-clocale=gnu --enable-debug --enable-java-gc=boehm --enable-java-awt=xlib --enable-objc-gc i486-linux Thread model: posix gcc version 3.3.5 (Debian 1:3.3.5-8ubuntu2) configure:2914: $? = 0 configure:2916: g++ -V &5 g++: `-V' option must have argument configure:2919: $? = 1 configure:2942: checking for C++ compiler default output file name configure:2945: g++ conftest.cc >&5 configure:2948: $? = 0 configure:2994: result: a.out configure:2999: checking whether the C++ compiler works configure:3005: ./a.out configure:3008: $? = 0 configure:3025: result: yes configure:3032: checking whether we are cross compiling configure:3034: result: no configure:3037: checking for suffix of executables configure:3039: g++ -o conftest conftest.cc >&5 configure:3042: $? = 0 configure:3067: result: configure:3073: checking for suffix of object files configure:3094: g++ -c conftest.cc >&5 configure:3097: $? = 0 configure:3119: result: o configure:3123: checking whether we are using the GNU C++ compiler configure:3147: g++ -c conftest.cc >&5 configure:3153: $? = 0 configure:3157: test -z || test ! -s conftest.err configure:3160: $? = 0 configure:3163: test -s conftest.o configure:3166: $? = 0 configure:3179: result: yes configure:3185: checking whether g++ accepts -g configure:3206: g++ -c -g conftest.cc >&5 configure:3212: $? = 0 configure:3216: test -z || test ! -s conftest.err configure:3219: $? = 0 configure:3222: test -s conftest.o configure:3225: $? 
= 0 configure:3236: result: yes configure:3278: g++ -c -g -O2 conftest.cc >&5 configure:3284: $? = 0 configure:3288: test -z || test ! -s conftest.err configure:3291: $? = 0 configure:3294: test -s conftest.o configure:3297: $? = 0 configure:3323: g++ -c -g -O2 conftest.cc >&5 conftest.cc: In function `int main()': conftest.cc:13: error: `exit' undeclared (first use this function) conftest.cc:13: error: (Each undeclared identifier is reported only once for each function it appears in.) configure:3329: $? = 1 configure: failed program was: | /* confdefs.h. */ | | #define PACKAGE_NAME "antlr" | #define PACKAGE_TARNAME "antlr" | #define PACKAGE_VERSION "2.7.5" | #define PACKAGE_STRING "antlr 2.7.5" | #define PACKAGE_BUGREPORT "" | /* end confdefs.h. */ | | int | main () | { | exit (42); | ; | return 0; | } configure:3278: g++ -c -g -O2 conftest.cc >&5 configure:3284: $? = 0 configure:3288: test -z || test ! -s conftest.err configure:3291: $? = 0 configure:3294: test -s conftest.o configure:3297: $? = 0 configure:3323: g++ -c -g -O2 conftest.cc >&5 configure:3329: $? = 0 configure:3333: test -z || test ! -s conftest.err configure:3336: $? = 0 configure:3339: test -s conftest.o configure:3342: $? = 0 configure:3417: checking for cc configure:3433: found /usr/bin/cc configure:3443: result: cc configure:3464: checking for C compiler version configure:3467: cc --version &5 cc (GCC) 3.3.5 (Debian 1:3.3.5-8ubuntu2) Copyright (C) 2003 Free Software Foundation, Inc. This is free software; see the source for copying conditions. There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. configure:3470: $? 
= 0 configure:3472: cc -v &5 Reading specs from /usr/lib/gcc-lib/i486-linux/3.3.5/specs Configured with: ../src/configure -v --enable-languages=c,c++,java,f77,pascal,objc,ada,treelang --prefix=/usr --mandir=/usr/share/man --infodir=/usr/share/info --with-gxx-include-dir=/usr/include/c++/3.3 --enable-shared --with-system-zlib --enable-nls --without-included-gettext --enable-__cxa_atexit --enable-clocale=gnu --enable-debug --enable-java-gc=boehm --enable-java-awt=xlib --enable-objc-gc i486-linux Thread model: posix gcc version 3.3.5 (Debian 1:3.3.5-8ubuntu2) configure:3475: $? = 0 configure:3477: cc -V &5 cc: `-V' option must have argument configure:3480: $? = 1 configure:3483: checking whether we are using the GNU C compiler configure:3507: cc -c conftest.c >&5 configure:3513: $? = 0 configure:3517: test -z || test ! -s conftest.err configure:3520: $? = 0 configure:3523: test -s conftest.o configure:3526: $? = 0 configure:3539: result: yes configure:3545: checking whether cc accepts -g configure:3566: cc -c -g conftest.c >&5 configure:3572: $? = 0 configure:3576: test -z || test ! -s conftest.err configure:3579: $? = 0 configure:3582: test -s conftest.o configure:3585: $? = 0 configure:3596: result: yes configure:3613: checking for cc option to accept ANSI C configure:3683: cc -c -g -O2 conftest.c >&5 configure:3689: $? = 0 configure:3693: test -z || test ! -s conftest.err configure:3696: $? = 0 configure:3699: test -s conftest.o configure:3702: $? = 0 configure:3720: result: none needed configure:3738: cc -c -g -O2 conftest.c >&5 conftest.c:2: error: syntax error before "me" configure:3744: $? = 1 configure: failed program was: | #ifndef __cplusplus | choke me | #endif configure:4042: checking how to run the C preprocessor configure:4077: cc -E conftest.c configure:4083: $? = 0 configure:4115: cc -E conftest.c conftest.c:12:28: ac_nonexistent.h: No such file or directory configure:4121: $? = 1 configure: failed program was: | /* confdefs.h. 
*/ | | #define PACKAGE_NAME "antlr" | #define PACKAGE_TARNAME "antlr" | #define PACKAGE_VERSION "2.7.5" | #define PACKAGE_STRING "antlr 2.7.5" | #define PACKAGE_BUGREPORT "" | #ifdef __cplusplus | extern "C" void std::exit (int) throw (); using std::exit; | #endif | /* end confdefs.h. */ | #include configure:4160: result: cc -E configure:4184: cc -E conftest.c configure:4190: $? = 0 configure:4222: cc -E conftest.c conftest.c:12:28: ac_nonexistent.h: No such file or directory configure:4228: $? = 1 configure: failed program was: | /* confdefs.h. */ | | #define PACKAGE_NAME "antlr" | #define PACKAGE_TARNAME "antlr" | #define PACKAGE_VERSION "2.7.5" | #define PACKAGE_STRING "antlr 2.7.5" | #define PACKAGE_BUGREPORT "" | #ifdef __cplusplus | extern "C" void std::exit (int) throw (); using std::exit; | #endif | /* end confdefs.h. */ | #include configure:4272: checking for egrep configure:4282: result: grep -E configure:4287: checking for ANSI C header files configure:4312: cc -c -g -O2 conftest.c >&5 configure:4318: $? = 0 configure:4322: test -z || test ! -s conftest.err configure:4325: $? = 0 configure:4328: test -s conftest.o configure:4331: $? = 0 configure:4420: cc -o conftest -g -O2 conftest.c >&5 configure:4423: $? = 0 configure:4425: ./conftest configure:4428: $? = 0 configure:4443: result: yes configure:4467: checking for sys/types.h configure:4483: cc -c -g -O2 conftest.c >&5 configure:4489: $? = 0 configure:4493: test -z || test ! -s conftest.err configure:4496: $? = 0 configure:4499: test -s conftest.o configure:4502: $? = 0 configure:4513: result: yes configure:4467: checking for sys/stat.h configure:4483: cc -c -g -O2 conftest.c >&5 configure:4489: $? = 0 configure:4493: test -z || test ! -s conftest.err configure:4496: $? = 0 configure:4499: test -s conftest.o configure:4502: $? = 0 configure:4513: result: yes configure:4467: checking for stdlib.h configure:4483: cc -c -g -O2 conftest.c >&5 configure:4489: $? = 0 configure:4493: test -z || test ! 
-s conftest.err configure:4496: $? = 0 configure:4499: test -s conftest.o configure:4502: $? = 0 configure:4513: result: yes configure:4467: checking for string.h configure:4483: cc -c -g -O2 conftest.c >&5 configure:4489: $? = 0 configure:4493: test -z || test ! -s conftest.err configure:4496: $? = 0 configure:4499: test -s conftest.o configure:4502: $? = 0 configure:4513: result: yes configure:4467: checking for memory.h configure:4483: cc -c -g -O2 conftest.c >&5 configure:4489: $? = 0 configure:4493: test -z || test ! -s conftest.err configure:4496: $? = 0 configure:4499: test -s conftest.o configure:4502: $? = 0 configure:4513: result: yes configure:4467: checking for strings.h configure:4483: cc -c -g -O2 conftest.c >&5 configure:4489: $? = 0 configure:4493: test -z || test ! -s conftest.err configure:4496: $? = 0 configure:4499: test -s conftest.o configure:4502: $? = 0 configure:4513: result: yes configure:4467: checking for inttypes.h configure:4483: cc -c -g -O2 conftest.c >&5 configure:4489: $? = 0 configure:4493: test -z || test ! -s conftest.err configure:4496: $? = 0 configure:4499: test -s conftest.o configure:4502: $? = 0 configure:4513: result: yes configure:4467: checking for stdint.h configure:4483: cc -c -g -O2 conftest.c >&5 configure:4489: $? = 0 configure:4493: test -z || test ! -s conftest.err configure:4496: $? = 0 configure:4499: test -s conftest.o configure:4502: $? = 0 configure:4513: result: yes configure:4467: checking for unistd.h configure:4483: cc -c -g -O2 conftest.c >&5 configure:4489: $? = 0 configure:4493: test -z || test ! -s conftest.err configure:4496: $? = 0 configure:4499: test -s conftest.o configure:4502: $? = 0 configure:4513: result: yes configure:4531: checking for stdlib.h configure:4536: result: yes configure:4531: checking for unistd.h configure:4536: result: yes configure:4678: checking for an ANSI C-conforming const configure:4745: cc -c -g -O2 conftest.c >&5 configure:4751: $? 
= 0 configure:4755: test -z || test ! -s conftest.err configure:4758: $? = 0 configure:4761: test -s conftest.o configure:4764: $? = 0 configure:4775: result: yes configure:4785: checking for inline configure:4806: cc -c -g -O2 conftest.c >&5 configure:4812: $? = 0 configure:4816: test -z || test ! -s conftest.err configure:4819: $? = 0 configure:4822: test -s conftest.o configure:4825: $? = 0 configure:4837: result: inline configure:4863: checking for stdlib.h configure:4868: result: yes configure:5008: checking for GNU libc compatible malloc configure:5037: cc -o conftest -g -O2 conftest.c >&5 configure:5040: $? = 0 configure:5042: ./conftest configure:5045: $? = 0 configure:5059: result: yes configure:5093: checking for strcasecmp configure:5150: cc -o conftest -g -O2 conftest.c >&5 configure:5156: $? = 0 configure:5160: test -z || test ! -s conftest.err configure:5163: $? = 0 configure:5166: test -s conftest configure:5169: $? = 0 configure:5181: result: yes configure:5200: checking for doxygen configure:5231: result: doxygen configure:5255: checking for a BSD-compatible install configure:5310: result: /usr/bin/install -c configure:5362: checking for ranlib configure:5378: found /usr/bin/ranlib configure:5389: result: ranlib configure:5404: checking for mkdir configure:5422: found /bin/mkdir configure:5435: result: /bin/mkdir configure:5444: checking for rm configure:5462: found /bin/rm configure:5475: result: /bin/rm configure:5527: checking for gnutar configure:5529: result: no configure:5518: checking for tar configure:5520: result: /bin/tar configure:5568: checking for /bin/touch configure:5572: result: yes configure:5568: checking for /usr/bin/touch configure:5572: result: yes configure:5589: checking for touch configure:5591: result: /bin/touch configure:5589: checking for touch configure:5591: result: /usr/bin/touch configure:5627: checking for chmod configure:5645: found /bin/chmod configure:5658: result: /bin/chmod configure:5667: checking for sed 
configure:5685: found /bin/sed configure:5698: result: /bin/sed configure:5707: checking for cat configure:5725: found /bin/cat configure:5738: result: /bin/cat configure:5747: checking for grep configure:5765: found /bin/grep configure:5778: result: /bin/grep configure:5832: checking for python configure:5834: result: /usr/bin/python configure:5960: checking for cscc configure:5962: result: no configure:5951: checking for mcs configure:5953: result: /usr/local/mono/bin/mcs configure:5960: checking for csc configure:5962: result: no configure:5930: checking for /usr/local/bin/cscc configure:5937: result: no configure:5930: checking for /usr/local/bin/mcs configure:5937: result: no configure:5930: checking for /opt/bin/cscc configure:5937: result: no configure:5930: checking for /opt/bin/mcs configure:5937: result: no configure:6112: checking for /usr/local/mono/bin/mono configure:6116: result: yes configure:6133: checking for mono configure:6135: result: /usr/local/mono/bin/mono configure:6112: checking for /usr/local/bin/mono configure:6119: result: no configure:6112: checking for /opt/bin/mono configure:6119: result: no configure:6643: creating ./config.status ## ---------------------- ## ## Running config.status. ## ## ---------------------- ## This file was extended by antlr config.status 2.7.5, which was generated by GNU Autoconf 2.59. 
Invocation command line was CONFIG_FILES = CONFIG_HEADERS = CONFIG_LINKS = CONFIG_COMMANDS = $ ./config.status on bambuntu config.status:888: creating scripts/config.vars config.status:888: creating scripts/config.deps config.status:888: creating scripts/config.make config.status:888: creating antlr/Version.java config.status:888: creating scripts/python.sh config.status:888: creating scripts/pyantlr.sh config.status:888: creating lib/python/Makefile config.status:888: creating examples/python/Makefile config.status:888: creating examples/python/asn1/Makefile config.status:888: creating examples/python/ASTsupport/Makefile config.status:888: creating examples/python/calc/Makefile config.status:888: creating examples/python/columns/Makefile config.status:888: creating examples/python/exprAST/Makefile config.status:888: creating examples/python/filter/Makefile config.status:888: creating examples/python/filterWithRule/Makefile config.status:888: creating examples/python/heteroAST/Makefile config.status:888: creating examples/python/HTML/Makefile config.status:888: creating examples/python/IDL/Makefile config.status:888: creating examples/python/imagNodeAST/Makefile config.status:888: creating examples/python/includeFile/Makefile config.status:888: creating examples/python/inherit.tinyc/Makefile config.status:888: creating examples/python/java/Makefile config.status:888: creating examples/python/lexerTester/Makefile config.status:888: creating examples/python/lexRewrite/Makefile config.status:888: creating examples/python/linkChecker/Makefile config.status:888: creating examples/python/multiLexer/Makefile config.status:888: creating examples/python/multiParser/Makefile config.status:888: creating examples/python/parseBinary/Makefile config.status:888: creating examples/python/pascal/Makefile config.status:888: creating examples/python/cpp/Makefile config.status:888: creating examples/python/preserveWhiteSpace/Makefile config.status:888: creating 
examples/python/tinybasic/Makefile config.status:888: creating examples/python/tinyc/Makefile config.status:888: creating examples/python/transform/Makefile config.status:888: creating examples/python/treewalk/Makefile config.status:888: creating examples/python/unicode/Makefile config.status:888: creating examples/python/unicode.IDENTs/Makefile config.status:888: creating examples/python/xml/Makefile config.status:888: creating scripts/csc.sh config.status:888: creating lib/csharp/Makefile config.status:888: creating lib/csharp/src/Makefile config.status:888: creating lib/csharp/ASTFrame/Makefile config.status:888: creating examples/csharp/ASTsupport/Makefile config.status:888: creating examples/csharp/HTML/Makefile config.status:888: creating examples/csharp/IDL/Makefile config.status:888: creating examples/csharp/ParseTreeDebug/Makefile config.status:888: creating examples/csharp/TokenStreamRewrite/Makefile config.status:888: creating examples/csharp/calc/Makefile config.status:888: creating examples/csharp/columns/Makefile config.status:888: creating examples/csharp/exprAST/Makefile config.status:888: creating examples/csharp/filter/Makefile config.status:888: creating examples/csharp/filterWithRule/Makefile config.status:888: creating examples/csharp/heteroAST/Makefile config.status:888: creating examples/csharp/java/Makefile config.status:888: creating examples/csharp/multiLexer/Makefile config.status:888: creating examples/csharp/parseBinary/Makefile config.status:888: creating examples/csharp/preserveWhiteSpace/Makefile config.status:888: creating examples/csharp/tinyc/Makefile config.status:888: creating examples/csharp/unicode/Makefile config.status:888: creating examples/csharp/Makefile config.status:888: creating scripts/java.sh config.status:888: creating scripts/jar.sh config.status:888: creating scripts/javac.sh config.status:888: creating scripts/antlr.sh config.status:888: creating scripts/cxx.sh config.status:888: creating scripts/link.sh 
config.status:888: creating scripts/c.sh config.status:888: creating scripts/lib.sh config.status:888: creating scripts/cpp.sh config.status:888: creating lib/cpp/Makefile config.status:888: creating lib/cpp/antlr/Makefile config.status:888: creating lib/cpp/src/Makefile config.status:888: creating examples/cpp/Makefile config.status:888: creating examples/cpp/ASTsupport/Makefile config.status:888: creating examples/cpp/calc/Makefile config.status:888: creating examples/cpp/exprAST/Makefile config.status:888: creating examples/cpp/filter/Makefile config.status:888: creating examples/cpp/filterWithRule/Makefile config.status:888: creating examples/cpp/flexLexer/Makefile config.status:888: creating examples/cpp/HTML/Makefile config.status:888: creating examples/cpp/IDL/Makefile config.status:888: creating examples/cpp/imagNodeAST/Makefile config.status:888: creating examples/cpp/includeFile/Makefile config.status:888: creating examples/cpp/inherit.tinyc/Makefile config.status:888: creating examples/cpp/java/Makefile config.status:888: creating examples/cpp/lexRewrite/Makefile config.status:888: creating examples/cpp/multiLexer/Makefile config.status:888: creating examples/cpp/multiParser/Makefile config.status:888: creating examples/cpp/parseBinary/Makefile config.status:888: creating examples/cpp/preserveWhiteSpace/Makefile config.status:888: creating examples/cpp/tinyc/Makefile config.status:888: creating examples/cpp/tokenStreamRewrite/Makefile config.status:888: creating examples/cpp/transform/Makefile config.status:888: creating examples/cpp/treewalk/Makefile config.status:888: creating examples/cpp/unicode/Makefile config.status:888: creating lib/Makefile config.status:888: creating examples/Makefile config.status:888: creating doc/Makefile config.status:888: creating Makefile config.status:888: creating scripts/antlr-config config.status:888: creating scripts/run-antlr config.status:888: creating scripts/antlr.spec config.status:888: creating antlr/Makefile 
config.status:888: creating examples/java/ASTsupport/Makefile config.status:888: creating examples/java/HTML/Makefile config.status:888: creating examples/java/IDL/Makefile config.status:888: creating examples/java/calc/Makefile config.status:888: creating examples/java/columns/Makefile config.status:888: creating examples/java/exprAST/Makefile config.status:888: creating examples/java/filter/Makefile config.status:888: creating examples/java/filterWithRule/Makefile config.status:888: creating examples/java/heteroAST/Makefile config.status:888: creating examples/java/imagNodeAST/Makefile config.status:888: creating examples/java/includeFile/Makefile config.status:888: creating examples/java/inherit.tinyc/Makefile config.status:888: creating examples/java/java/Makefile config.status:888: creating examples/java/lexRewrite/Makefile config.status:888: creating examples/java/linkChecker/Makefile config.status:888: creating examples/java/multiLexer/Makefile config.status:888: creating examples/java/parseBinary/Makefile config.status:888: creating examples/java/pascal/Makefile config.status:888: creating examples/java/preserveWhiteSpace/Makefile config.status:888: creating examples/java/tinybasic/Makefile config.status:888: creating examples/java/tinyc/Makefile config.status:888: creating examples/java/transform/Makefile config.status:888: creating examples/java/treewalk/Makefile config.status:888: creating examples/java/unicode.IDENTs/Makefile config.status:888: creating examples/java/unicode/Makefile config.status:888: creating examples/java/xml/Makefile config.status:888: creating examples/java/Makefile config.status:888: creating examples/java/cpp/Makefile ## ---------------- ## ## Cache variables. 
## ## ---------------- ## ac_cv_build=i686-pc-linux-gnu ac_cv_build_alias=i686-pc-linux-gnu ac_cv_c_compiler_gnu=yes ac_cv_c_const=yes ac_cv_c_inline=inline ac_cv_cxx_compiler_gnu=yes ac_cv_env_ANTLRFLAGS_set= ac_cv_env_ANTLRFLAGS_value= ac_cv_env_CC_set= ac_cv_env_CC_value= ac_cv_env_CFLAGS_set= ac_cv_env_CFLAGS_value= ac_cv_env_CPPFLAGS_set= ac_cv_env_CPPFLAGS_value= ac_cv_env_CPP_set= ac_cv_env_CPP_value= ac_cv_env_CSHARPCFLAGS_set= ac_cv_env_CSHARPCFLAGS_value= ac_cv_env_CSHARPC_set= ac_cv_env_CSHARPC_value= ac_cv_env_CXXFLAGS_set= ac_cv_env_CXXFLAGS_value= ac_cv_env_CXX_set= ac_cv_env_CXX_value= ac_cv_env_JAVACFLAGS_set= ac_cv_env_JAVACFLAGS_value= ac_cv_env_JAVAC_set= ac_cv_env_JAVAC_value= ac_cv_env_JAVAFLAGS_set= ac_cv_env_JAVAFLAGS_value= ac_cv_env_JAVA_set= ac_cv_env_JAVA_value= ac_cv_env_LDFLAGS_set= ac_cv_env_LDFLAGS_value= ac_cv_env_MAKE_set= ac_cv_env_MAKE_value= ac_cv_env_PYTHONFLAGS_set= ac_cv_env_PYTHONFLAGS_value= ac_cv_env_PYTHON_set= ac_cv_env_PYTHON_value= ac_cv_env_build_alias_set= ac_cv_env_build_alias_value= ac_cv_env_host_alias_set= ac_cv_env_host_alias_value= ac_cv_env_target_alias_set= ac_cv_env_target_alias_value= ac_cv_exeext= ac_cv_func_malloc_0_nonnull=yes ac_cv_func_strcasecmp=yes ac_cv_header_inttypes_h=yes ac_cv_header_memory_h=yes ac_cv_header_stdc=yes ac_cv_header_stdint_h=yes ac_cv_header_stdlib_h=yes ac_cv_header_string_h=yes ac_cv_header_strings_h=yes ac_cv_header_sys_stat_h=yes ac_cv_header_sys_types_h=yes ac_cv_header_unistd_h=yes ac_cv_host=i686-pc-linux-gnu ac_cv_host_alias=i686-pc-linux-gnu ac_cv_objext=o ac_cv_path_CAT=/bin/cat ac_cv_path_CHMOD=/bin/chmod ac_cv_path_DOXYGEN=doxygen ac_cv_path_GREP=/bin/grep ac_cv_path_MKDIR=/bin/mkdir ac_cv_path_RM=/bin/rm ac_cv_path_SED=/bin/sed ac_cv_path_install='/usr/bin/install -c' ac_cv_path_just_make=/usr/bin/make ac_cv_prog_CPP='cc -E' ac_cv_prog_ac_ct_CC=cc ac_cv_prog_ac_ct_CXX=g++ ac_cv_prog_ac_ct_RANLIB=ranlib ac_cv_prog_cc_g=yes ac_cv_prog_cc_stdc= ac_cv_prog_cxx_g=yes 
ac_cv_prog_egrep='grep -E' ## ----------------- ## ## Output variables. ## ## ----------------- ## ANTLR='' ANTLRFLAGS='' ANTLR_ACTION_FILES=' actions/cpp/ActionLexer.java actions/cpp/ActionLexerTokenTypes.java actions/csharp/ActionLexer.java actions/csharp/ActionLexerTokenTypes.java actions/java/ActionLexer.java actions/java/ActionLexerTokenTypes.java actions/python/ActionLexer.java actions/python/ActionLexerTokenTypes.java actions/python/CodeLexer.java actions/python/CodeLexerTokenTypes.java' ANTLR_ANTLR_FILES=' ANTLRParser.java ANTLRTokenTypes.java ANTLRLexer.java' ANTLR_COMPILE_CMD='/bin/sh /home/rodrigob/java/antlr-2.7.5/scripts/antlr.sh' ANTLR_CYGWIN='no' ANTLR_JAR='/home/rodrigob/java/antlr-2.7.5/antlr/antlr.jar' ANTLR_LIB='/home/rodrigob/java/antlr-2.7.5/lib/cpp/src/libantlr.a' ANTLR_MINGW='no' ANTLR_NET='/home/rodrigob/java/antlr-2.7.5/lib/antlr.runtime.dll' ANTLR_PY='/home/rodrigob/java/antlr-2.7.5/lib/python/antlr/python.py' ANTLR_TOKDEF_FILES=' ANTLRTokdefParser.java ANTLRTokdefLexer.java ANTLRTokdefParserTokenTypes.java' ANTLR_WIN32='' ANTLR_WITH_ANTLR_CMD='' ANTLR_WITH_ANTLR_JAR='' AR='/usr/bin/ar' ARFLAGS='' AS='' ASTFRAME_NET='/home/rodrigob/java/antlr-2.7.5/lib/antlr.astframe.dll' BOOTCLASSPATH='' CAT='/bin/cat' CC='cc' CFLAGS='-g -O2' CHMOD='/bin/chmod' CLR='/usr/local/mono/bin/mono' CPP='cc -E' CPPFLAGS='' CSHARPC='/usr/local/mono/bin/mcs' CSHARPCFLAGS='' CSHARP_COMPILE_CMD='/bin/sh /home/rodrigob/java/antlr-2.7.5/scripts/csc.sh' CXX='g++' CXXFLAGS='' CXX_COMPILE_CMD='/bin/sh /home/rodrigob/java/antlr-2.7.5/scripts/cxx.sh' CXX_LIB_CMD='/bin/sh /home/rodrigob/java/antlr-2.7.5/scripts/lib.sh' CXX_LINK_CMD='/bin/sh /home/rodrigob/java/antlr-2.7.5/scripts/link.sh' CYGPATH='' CYGPATH_M='echo' CYGPATH_W='echo' C_COMPILE_CMD='/bin/sh /home/rodrigob/java/antlr-2.7.5/scripts/c.sh' DEBUG='0' DEFS='-DPACKAGE_NAME=\"antlr\" -DPACKAGE_TARNAME=\"antlr\" -DPACKAGE_VERSION=\"2.7.5\" -DPACKAGE_STRING=\"antlr\ 2.7.5\" -DPACKAGE_BUGREPORT=\"\" -DSTDC_HEADERS=1 
-DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DHAVE_MALLOC=1 -DHAVE_STRCASECMP=1 ' DOXYGEN='doxygen' ECHO_C='' ECHO_N='-n' ECHO_T='' EGREP='grep -E' EXEEXT='' GREP='/bin/grep' INSTALL_DATA='${INSTALL} -m 644' INSTALL_PROGRAM='${INSTALL}' INSTALL_SCRIPT='${INSTALL}' JAR='/usr/bin/jar' JARFLAGS='' JAR_CMD='/bin/sh /home/rodrigob/java/antlr-2.7.5/scripts/jar.sh' JAVA='/usr/bin/java' JAVAC='/usr/bin/javac' JAVACFLAGS='' JAVAFLAGS='' JAVA_CMD='/bin/sh /home/rodrigob/java/antlr-2.7.5/scripts/java.sh' JAVA_COMPILE_CMD='/bin/sh /home/rodrigob/java/antlr-2.7.5/scripts/javac.sh' LDFLAGS='' LIBEXT='.a' LIBOBJS='' LIBS='' LTLIBOBJS='' MAKE='/usr/bin/make' MKDIR='/bin/mkdir' OBJEXT='.o' PACKAGE_BUGREPORT='' PACKAGE_NAME='antlr' PACKAGE_STRING='antlr 2.7.5' PACKAGE_TARNAME='antlr' PACKAGE_VERSION='2.7.5' PATCHLEVEL='5' PATH_SEPARATOR=':' PYTHON='/usr/bin/python' PYTHONFLAGS='' RANLIB='ranlib' RM='/bin/rm' SED='/bin/sed' SHELL='/bin/sh' SUBVERSION='7' TAR='/bin/tar' TIMESTAMP='20050517' TOUCH='/bin/touch' VERBOSE='0' VERSION='2' WITH_EXAMPLES='1' abs_this_builddir='/home/rodrigob/java/antlr-2.7.5' ac_ct_CC='cc' ac_ct_CXX='g++' ac_ct_RANLIB='ranlib' antlr_jar='antlr.jar' antlr_lib='libantlr.a' antlr_net='antlr.runtime.dll' antlr_py='python.py' astframe_net='antlr.astframe.dll' bindir='${exec_prefix}/bin' build='i686-pc-linux-gnu' build_alias='' build_cpu='i686' build_os='linux-gnu' build_vendor='pc' cxx='gcc' datadir='${prefix}/share' exec_prefix='${prefix}' host='i686-pc-linux-gnu' host_alias='' host_cpu='i686' host_os='linux-gnu' host_vendor='pc' includedir='${prefix}/include' infodir='${prefix}/info' jar='jar' java='' javac='javac' just_make='/usr/bin/make' libdir='${exec_prefix}/lib' libexecdir='${exec_prefix}/libexec' localstatedir='${prefix}/var' mandir='${prefix}/man' oldincludedir='/usr/include' 
prefix='/usr/local' program_transform_name='s,x,x,' sbindir='${exec_prefix}/sbin' sharedstatedir='${prefix}/com' sysconfdir='${prefix}/etc' target_alias='' ## ------------- ## ## Output files. ## ## ------------- ## stddeps='scripts/config.deps' stdmake='scripts/config.make' stdvars='scripts/config.vars' ## ----------- ## ## confdefs.h. ## ## ----------- ## #define HAVE_INTTYPES_H 1 #define HAVE_MALLOC 1 #define HAVE_MEMORY_H 1 #define HAVE_STDINT_H 1 #define HAVE_STDLIB_H 1 #define HAVE_STDLIB_H 1 #define HAVE_STDLIB_H 1 #define HAVE_STRCASECMP 1 #define HAVE_STRINGS_H 1 #define HAVE_STRING_H 1 #define HAVE_SYS_STAT_H 1 #define HAVE_SYS_TYPES_H 1 #define HAVE_UNISTD_H 1 #define HAVE_UNISTD_H 1 #define PACKAGE_BUGREPORT "" #define PACKAGE_NAME "antlr" #define PACKAGE_STRING "antlr 2.7.5" #define PACKAGE_TARNAME "antlr" #define PACKAGE_VERSION "2.7.5" #define STDC_HEADERS 1 #endif #ifdef __cplusplus extern "C" void std::exit (int) throw (); using std::exit; configure: exit 0 nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/configure000077500000000000000000007251031161462365500217150ustar00rootroot00000000000000#! /bin/sh # Guess values for system-dependent variables and create Makefiles. # Generated by GNU Autoconf 2.59 for antlr 2.7.5. # # Copyright (C) 2003 Free Software Foundation, Inc. # This configure script is free software; the Free Software Foundation # gives unlimited permission to copy, distribute and modify it. ## --------------------- ## ## M4sh Initialization. ## ## --------------------- ## # Be Bourne compatible if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then emulate sh NULLCMD=: # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which # is contrary to our usage. Disable this feature. alias -g '${1+"$@"}'='"$@"' elif test -n "${BASH_VERSION+set}" && (set -o posix) >/dev/null 2>&1; then set -o posix fi DUALCASE=1; export DUALCASE # for MKS sh # Support unset when possible. 
if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then as_unset=unset else as_unset=false fi # Work around bugs in pre-3.0 UWIN ksh. $as_unset ENV MAIL MAILPATH PS1='$ ' PS2='> ' PS4='+ ' # NLS nuisances. for as_var in \ LANG LANGUAGE LC_ADDRESS LC_ALL LC_COLLATE LC_CTYPE LC_IDENTIFICATION \ LC_MEASUREMENT LC_MESSAGES LC_MONETARY LC_NAME LC_NUMERIC LC_PAPER \ LC_TELEPHONE LC_TIME do if (set +x; test -z "`(eval $as_var=C; export $as_var) 2>&1`"); then eval $as_var=C; export $as_var else $as_unset $as_var fi done # Required to use basename. if expr a : '\(a\)' >/dev/null 2>&1; then as_expr=expr else as_expr=false fi if (basename /) >/dev/null 2>&1 && test "X`basename / 2>&1`" = "X/"; then as_basename=basename else as_basename=false fi # Name of the executable. as_me=`$as_basename "$0" || $as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ X"$0" : 'X\(//\)$' \| \ X"$0" : 'X\(/\)$' \| \ . : '\(.\)' 2>/dev/null || echo X/"$0" | sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/; q; } /^X\/\(\/\/\)$/{ s//\1/; q; } /^X\/\(\/\).*/{ s//\1/; q; } s/.*/./; q'` # PATH needs CR, and LINENO needs CR and PATH. # Avoid depending upon Character Ranges. as_cr_letters='abcdefghijklmnopqrstuvwxyz' as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' as_cr_Letters=$as_cr_letters$as_cr_LETTERS as_cr_digits='0123456789' as_cr_alnum=$as_cr_Letters$as_cr_digits # The user is always right. if test "${PATH_SEPARATOR+set}" != set; then echo "#! /bin/sh" >conf$$.sh echo "exit 0" >>conf$$.sh chmod +x conf$$.sh if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then PATH_SEPARATOR=';' else PATH_SEPARATOR=: fi rm -f conf$$.sh fi as_lineno_1=$LINENO as_lineno_2=$LINENO as_lineno_3=`(expr $as_lineno_1 + 1) 2>/dev/null` test "x$as_lineno_1" != "x$as_lineno_2" && test "x$as_lineno_3" = "x$as_lineno_2" || { # Find who we are. Look in the path if we contain no path at all # relative or not. 
case $0 in *[\\/]* ) as_myself=$0 ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break done ;; esac # We did not find ourselves, most probably we were run as `sh COMMAND' # in which case we are not to be found in the path. if test "x$as_myself" = x; then as_myself=$0 fi if test ! -f "$as_myself"; then { echo "$as_me: error: cannot find myself; rerun with an absolute path" >&2 { (exit 1); exit 1; }; } fi case $CONFIG_SHELL in '') as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for as_base in sh bash ksh sh5; do case $as_dir in /*) if ("$as_dir/$as_base" -c ' as_lineno_1=$LINENO as_lineno_2=$LINENO as_lineno_3=`(expr $as_lineno_1 + 1) 2>/dev/null` test "x$as_lineno_1" != "x$as_lineno_2" && test "x$as_lineno_3" = "x$as_lineno_2" ') 2>/dev/null; then $as_unset BASH_ENV || test "${BASH_ENV+set}" != set || { BASH_ENV=; export BASH_ENV; } $as_unset ENV || test "${ENV+set}" != set || { ENV=; export ENV; } CONFIG_SHELL=$as_dir/$as_base export CONFIG_SHELL exec "$CONFIG_SHELL" "$0" ${1+"$@"} fi;; esac done done ;; esac # Create $as_me.lineno as a copy of $as_myself, but with $LINENO # uniformly replaced by the line number. The first 'sed' inserts a # line-number line before each line; the second 'sed' does the real # work. The second script uses 'N' to pair each line-number line # with the numbered line, and appends trailing '-' during # substitution so that $LINENO is not a special case at line end. # (Raja R Harinath suggested sed '=', and Paul Eggert wrote the # second 'sed' script. Blame Lee E. McMahon for sed's syntax. 
:-) sed '=' <$as_myself | sed ' N s,$,-, : loop s,^\(['$as_cr_digits']*\)\(.*\)[$]LINENO\([^'$as_cr_alnum'_]\),\1\2\1\3, t loop s,-$,, s,^['$as_cr_digits']*\n,, ' >$as_me.lineno && chmod +x $as_me.lineno || { echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2 { (exit 1); exit 1; }; } # Don't try to exec as it changes $[0], causing all sort of problems # (the dirname of $[0] is not the place where we might find the # original and so on. Autoconf is especially sensible to this). . ./$as_me.lineno # Exit status is that of the last command. exit } case `echo "testing\c"; echo 1,2,3`,`echo -n testing; echo 1,2,3` in *c*,-n*) ECHO_N= ECHO_C=' ' ECHO_T=' ' ;; *c*,* ) ECHO_N=-n ECHO_C= ECHO_T= ;; *) ECHO_N= ECHO_C='\c' ECHO_T= ;; esac if expr a : '\(a\)' >/dev/null 2>&1; then as_expr=expr else as_expr=false fi rm -f conf$$ conf$$.exe conf$$.file echo >conf$$.file if ln -s conf$$.file conf$$ 2>/dev/null; then # We could just check for DJGPP; but this test a) works b) is more generic # and c) will remain valid once DJGPP supports symlinks (DJGPP 2.04). if test -f conf$$.exe; then # Don't use ln at all; we don't have any links as_ln_s='cp -p' else as_ln_s='ln -s' fi elif ln conf$$.file conf$$ 2>/dev/null; then as_ln_s=ln else as_ln_s='cp -p' fi rm -f conf$$ conf$$.exe conf$$.file if mkdir -p . 2>/dev/null; then as_mkdir_p=: else test -d ./-p && rmdir ./-p as_mkdir_p=false fi as_executable_p="test -f" # Sed expression to map a string onto a valid CPP name. as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" # Sed expression to map a string onto a valid variable name. as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" # IFS # We need space, tab and new line, in precisely that order. as_nl=' ' IFS=" $as_nl" # CDPATH. $as_unset CDPATH # Name of the host. # hostname on some systems (SVR3.2, Linux) returns a bogus exit status, # so uname gets run too. 
ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q` exec 6>&1 # # Initializations. # ac_default_prefix=/usr/local ac_config_libobj_dir=. cross_compiling=no subdirs= MFLAGS= MAKEFLAGS= SHELL=${CONFIG_SHELL-/bin/sh} # Maximum number of lines to put in a shell here document. # This variable seems obsolete. It should probably be removed, and # only ac_max_sed_lines should be used. : ${ac_max_here_lines=38} # Identity of this package. PACKAGE_NAME='antlr' PACKAGE_TARNAME='antlr' PACKAGE_VERSION='2.7.5' PACKAGE_STRING='antlr 2.7.5' PACKAGE_BUGREPORT='' ac_unique_file="LICENSE.txt" # Factoring default headers for most tests. ac_includes_default="\ #include #if HAVE_SYS_TYPES_H # include #endif #if HAVE_SYS_STAT_H # include #endif #if STDC_HEADERS # include # include #else # if HAVE_STDLIB_H # include # endif #endif #if HAVE_STRING_H # if !STDC_HEADERS && HAVE_MEMORY_H # include # endif # include #endif #if HAVE_STRINGS_H # include #endif #if HAVE_INTTYPES_H # include #else # if HAVE_STDINT_H # include # endif #endif #if HAVE_UNISTD_H # include #endif" ac_subst_vars='SHELL PATH_SEPARATOR PACKAGE_NAME PACKAGE_TARNAME PACKAGE_VERSION PACKAGE_STRING PACKAGE_BUGREPORT exec_prefix prefix program_transform_name bindir sbindir libexecdir datadir sysconfdir sharedstatedir localstatedir libdir includedir oldincludedir infodir mandir build_alias host_alias target_alias DEFS ECHO_C ECHO_N ECHO_T LIBS ANTLR_JAR ANTLR_LIB ANTLR_NET ANTLR_PY ASTFRAME_NET antlr_jar antlr_lib antlr_net antlr_py astframe_net ANTLRFLAGS ANTLR ANTLR_ACTION_FILES ANTLR_ANTLR_FILES ANTLR_COMPILE_CMD ANTLR_CYGWIN ANTLR_MINGW ANTLR_TOKDEF_FILES ANTLR_WIN32 ANTLR_WITH_ANTLR_CMD ANTLR_WITH_ANTLR_JAR ARFLAGS AR AS BOOTCLASSPATH CSHARPCFLAGS CSHARPC CSHARP_COMPILE_CMD CLR CXX_COMPILE_CMD CXX_LIB_CMD CXX_LINK_CMD CYGPATH C_COMPILE_CMD DEBUG EXEEXT JARFLAGS JAR JAR_CMD JAVACFLAGS JAVAC JAVAFLAGS JAVA JAVA_CMD JAVA_COMPILE_CMD LIBEXT MAKE OBJEXT PATCHLEVEL PYTHONFLAGS PYTHON SUBVERSION TIMESTAMP TOUCH VERBOSE 
VERSION WITH_EXAMPLES abs_this_builddir cxx jar java javac TAR build build_cpu build_vendor build_os host host_cpu host_vendor host_os CYGPATH_M CYGPATH_W just_make CXX CXXFLAGS LDFLAGS CPPFLAGS ac_ct_CXX CC CFLAGS ac_ct_CC CPP EGREP LIBOBJS DOXYGEN INSTALL_PROGRAM INSTALL_SCRIPT INSTALL_DATA RANLIB ac_ct_RANLIB MKDIR RM CHMOD SED CAT GREP LTLIBOBJS' ac_subst_files='stdvars stddeps stdmake' # Initialize some variables set by options. ac_init_help= ac_init_version=false # The variables have the same names as the options, with # dashes changed to underlines. cache_file=/dev/null exec_prefix=NONE no_create= no_recursion= prefix=NONE program_prefix=NONE program_suffix=NONE program_transform_name=s,x,x, silent= site= srcdir= verbose= x_includes=NONE x_libraries=NONE # Installation directory options. # These are left unexpanded so users can "make install exec_prefix=/foo" # and all the variables that are supposed to be based on exec_prefix # by default will actually change. # Use braces instead of parens because sh, perl, etc. also accept them. bindir='${exec_prefix}/bin' sbindir='${exec_prefix}/sbin' libexecdir='${exec_prefix}/libexec' datadir='${prefix}/share' sysconfdir='${prefix}/etc' sharedstatedir='${prefix}/com' localstatedir='${prefix}/var' libdir='${exec_prefix}/lib' includedir='${prefix}/include' oldincludedir='/usr/include' infodir='${prefix}/info' mandir='${prefix}/man' ac_prev= for ac_option do # If the previous option needs an argument, assign it. if test -n "$ac_prev"; then eval "$ac_prev=\$ac_option" ac_prev= continue fi ac_optarg=`expr "x$ac_option" : 'x[^=]*=\(.*\)'` # Accept the important Cygnus configure options, so we can diagnose typos. 
case $ac_option in -bindir | --bindir | --bindi | --bind | --bin | --bi) ac_prev=bindir ;; -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*) bindir=$ac_optarg ;; -build | --build | --buil | --bui | --bu) ac_prev=build_alias ;; -build=* | --build=* | --buil=* | --bui=* | --bu=*) build_alias=$ac_optarg ;; -cache-file | --cache-file | --cache-fil | --cache-fi \ | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c) ac_prev=cache_file ;; -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \ | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*) cache_file=$ac_optarg ;; --config-cache | -C) cache_file=config.cache ;; -datadir | --datadir | --datadi | --datad | --data | --dat | --da) ac_prev=datadir ;; -datadir=* | --datadir=* | --datadi=* | --datad=* | --data=* | --dat=* \ | --da=*) datadir=$ac_optarg ;; -disable-* | --disable-*) ac_feature=`expr "x$ac_option" : 'x-*disable-\(.*\)'` # Reject names that are not valid shell variable names. expr "x$ac_feature" : ".*[^-_$as_cr_alnum]" >/dev/null && { echo "$as_me: error: invalid feature name: $ac_feature" >&2 { (exit 1); exit 1; }; } ac_feature=`echo $ac_feature | sed 's/-/_/g'` eval "enable_$ac_feature=no" ;; -enable-* | --enable-*) ac_feature=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'` # Reject names that are not valid shell variable names. 
expr "x$ac_feature" : ".*[^-_$as_cr_alnum]" >/dev/null && { echo "$as_me: error: invalid feature name: $ac_feature" >&2 { (exit 1); exit 1; }; } ac_feature=`echo $ac_feature | sed 's/-/_/g'` case $ac_option in *=*) ac_optarg=`echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"`;; *) ac_optarg=yes ;; esac eval "enable_$ac_feature='$ac_optarg'" ;; -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \ | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \ | --exec | --exe | --ex) ac_prev=exec_prefix ;; -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \ | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \ | --exec=* | --exe=* | --ex=*) exec_prefix=$ac_optarg ;; -gas | --gas | --ga | --g) # Obsolete; use --with-gas. with_gas=yes ;; -help | --help | --hel | --he | -h) ac_init_help=long ;; -help=r* | --help=r* | --hel=r* | --he=r* | -hr*) ac_init_help=recursive ;; -help=s* | --help=s* | --hel=s* | --he=s* | -hs*) ac_init_help=short ;; -host | --host | --hos | --ho) ac_prev=host_alias ;; -host=* | --host=* | --hos=* | --ho=*) host_alias=$ac_optarg ;; -includedir | --includedir | --includedi | --included | --include \ | --includ | --inclu | --incl | --inc) ac_prev=includedir ;; -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \ | --includ=* | --inclu=* | --incl=* | --inc=*) includedir=$ac_optarg ;; -infodir | --infodir | --infodi | --infod | --info | --inf) ac_prev=infodir ;; -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*) infodir=$ac_optarg ;; -libdir | --libdir | --libdi | --libd) ac_prev=libdir ;; -libdir=* | --libdir=* | --libdi=* | --libd=*) libdir=$ac_optarg ;; -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \ | --libexe | --libex | --libe) ac_prev=libexecdir ;; -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \ | --libexe=* | --libex=* | --libe=*) libexecdir=$ac_optarg ;; -localstatedir | --localstatedir | --localstatedi | 
--localstated \ | --localstate | --localstat | --localsta | --localst \ | --locals | --local | --loca | --loc | --lo) ac_prev=localstatedir ;; -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \ | --localstate=* | --localstat=* | --localsta=* | --localst=* \ | --locals=* | --local=* | --loca=* | --loc=* | --lo=*) localstatedir=$ac_optarg ;; -mandir | --mandir | --mandi | --mand | --man | --ma | --m) ac_prev=mandir ;; -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*) mandir=$ac_optarg ;; -nfp | --nfp | --nf) # Obsolete; use --without-fp. with_fp=no ;; -no-create | --no-create | --no-creat | --no-crea | --no-cre \ | --no-cr | --no-c | -n) no_create=yes ;; -no-recursion | --no-recursion | --no-recursio | --no-recursi \ | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r) no_recursion=yes ;; -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \ | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \ | --oldin | --oldi | --old | --ol | --o) ac_prev=oldincludedir ;; -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \ | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \ | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*) oldincludedir=$ac_optarg ;; -prefix | --prefix | --prefi | --pref | --pre | --pr | --p) ac_prev=prefix ;; -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*) prefix=$ac_optarg ;; -program-prefix | --program-prefix | --program-prefi | --program-pref \ | --program-pre | --program-pr | --program-p) ac_prev=program_prefix ;; -program-prefix=* | --program-prefix=* | --program-prefi=* \ | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*) program_prefix=$ac_optarg ;; -program-suffix | --program-suffix | --program-suffi | --program-suff \ | --program-suf | --program-su | --program-s) ac_prev=program_suffix ;; -program-suffix=* | --program-suffix=* | --program-suffi=* \ | --program-suff=* 
| --program-suf=* | --program-su=* | --program-s=*) program_suffix=$ac_optarg ;; -program-transform-name | --program-transform-name \ | --program-transform-nam | --program-transform-na \ | --program-transform-n | --program-transform- \ | --program-transform | --program-transfor \ | --program-transfo | --program-transf \ | --program-trans | --program-tran \ | --progr-tra | --program-tr | --program-t) ac_prev=program_transform_name ;; -program-transform-name=* | --program-transform-name=* \ | --program-transform-nam=* | --program-transform-na=* \ | --program-transform-n=* | --program-transform-=* \ | --program-transform=* | --program-transfor=* \ | --program-transfo=* | --program-transf=* \ | --program-trans=* | --program-tran=* \ | --progr-tra=* | --program-tr=* | --program-t=*) program_transform_name=$ac_optarg ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil) silent=yes ;; -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb) ac_prev=sbindir ;; -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \ | --sbi=* | --sb=*) sbindir=$ac_optarg ;; -sharedstatedir | --sharedstatedir | --sharedstatedi \ | --sharedstated | --sharedstate | --sharedstat | --sharedsta \ | --sharedst | --shareds | --shared | --share | --shar \ | --sha | --sh) ac_prev=sharedstatedir ;; -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \ | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \ | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \ | --sha=* | --sh=*) sharedstatedir=$ac_optarg ;; -site | --site | --sit) ac_prev=site ;; -site=* | --site=* | --sit=*) site=$ac_optarg ;; -srcdir | --srcdir | --srcdi | --srcd | --src | --sr) ac_prev=srcdir ;; -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*) srcdir=$ac_optarg ;; -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \ | --syscon | --sysco | --sysc | --sys | --sy) ac_prev=sysconfdir ;; -sysconfdir=* 
| --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \ | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*) sysconfdir=$ac_optarg ;; -target | --target | --targe | --targ | --tar | --ta | --t) ac_prev=target_alias ;; -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*) target_alias=$ac_optarg ;; -v | -verbose | --verbose | --verbos | --verbo | --verb) verbose=yes ;; -version | --version | --versio | --versi | --vers | -V) ac_init_version=: ;; -with-* | --with-*) ac_package=`expr "x$ac_option" : 'x-*with-\([^=]*\)'` # Reject names that are not valid shell variable names. expr "x$ac_package" : ".*[^-_$as_cr_alnum]" >/dev/null && { echo "$as_me: error: invalid package name: $ac_package" >&2 { (exit 1); exit 1; }; } ac_package=`echo $ac_package| sed 's/-/_/g'` case $ac_option in *=*) ac_optarg=`echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"`;; *) ac_optarg=yes ;; esac eval "with_$ac_package='$ac_optarg'" ;; -without-* | --without-*) ac_package=`expr "x$ac_option" : 'x-*without-\(.*\)'` # Reject names that are not valid shell variable names. expr "x$ac_package" : ".*[^-_$as_cr_alnum]" >/dev/null && { echo "$as_me: error: invalid package name: $ac_package" >&2 { (exit 1); exit 1; }; } ac_package=`echo $ac_package | sed 's/-/_/g'` eval "with_$ac_package=no" ;; --x) # Obsolete; use --with-x. 
with_x=yes ;; -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \ | --x-incl | --x-inc | --x-in | --x-i) ac_prev=x_includes ;; -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \ | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*) x_includes=$ac_optarg ;; -x-libraries | --x-libraries | --x-librarie | --x-librari \ | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l) ac_prev=x_libraries ;; -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \ | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*) x_libraries=$ac_optarg ;; -*) { echo "$as_me: error: unrecognized option: $ac_option Try \`$0 --help' for more information." >&2 { (exit 1); exit 1; }; } ;; *=*) ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='` # Reject names that are not valid shell variable names. expr "x$ac_envvar" : ".*[^_$as_cr_alnum]" >/dev/null && { echo "$as_me: error: invalid variable name: $ac_envvar" >&2 { (exit 1); exit 1; }; } ac_optarg=`echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` eval "$ac_envvar='$ac_optarg'" export $ac_envvar ;; *) # FIXME: should be removed in autoconf 3.0. echo "$as_me: WARNING: you should use --build, --host, --target" >&2 expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null && echo "$as_me: WARNING: invalid host type: $ac_option" >&2 : ${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option} ;; esac done if test -n "$ac_prev"; then ac_option=--`echo $ac_prev | sed 's/_/-/g'` { echo "$as_me: error: missing argument to $ac_option" >&2 { (exit 1); exit 1; }; } fi # Be sure to have absolute paths. for ac_var in exec_prefix prefix do eval ac_val=$`echo $ac_var` case $ac_val in [\\/$]* | ?:[\\/]* | NONE | '' ) ;; *) { echo "$as_me: error: expected an absolute directory name for --$ac_var: $ac_val" >&2 { (exit 1); exit 1; }; };; esac done # Be sure to have absolute paths. 
for ac_var in bindir sbindir libexecdir datadir sysconfdir sharedstatedir \ localstatedir libdir includedir oldincludedir infodir mandir do eval ac_val=$`echo $ac_var` case $ac_val in [\\/$]* | ?:[\\/]* ) ;; *) { echo "$as_me: error: expected an absolute directory name for --$ac_var: $ac_val" >&2 { (exit 1); exit 1; }; };; esac done # There might be people who depend on the old broken behavior: `$host' # used to hold the argument of --host etc. # FIXME: To remove some day. build=$build_alias host=$host_alias target=$target_alias # FIXME: To remove some day. if test "x$host_alias" != x; then if test "x$build_alias" = x; then cross_compiling=maybe echo "$as_me: WARNING: If you wanted to set the --build type, don't use --host. If a cross compiler is detected then cross compile mode will be used." >&2 elif test "x$build_alias" != "x$host_alias"; then cross_compiling=yes fi fi ac_tool_prefix= test -n "$host_alias" && ac_tool_prefix=$host_alias- test "$silent" = yes && exec 6>/dev/null # Find the source files, if location was not specified. if test -z "$srcdir"; then ac_srcdir_defaulted=yes # Try the directory containing this script, then its parent. ac_confdir=`(dirname "$0") 2>/dev/null || $as_expr X"$0" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$0" : 'X\(//\)[^/]' \| \ X"$0" : 'X\(//\)$' \| \ X"$0" : 'X\(/\)' \| \ . : '\(.\)' 2>/dev/null || echo X"$0" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/; q; } /^X\(\/\/\)[^/].*/{ s//\1/; q; } /^X\(\/\/\)$/{ s//\1/; q; } /^X\(\/\).*/{ s//\1/; q; } s/.*/./; q'` srcdir=$ac_confdir if test ! -r $srcdir/$ac_unique_file; then srcdir=.. fi else ac_srcdir_defaulted=no fi if test ! -r $srcdir/$ac_unique_file; then if test "$ac_srcdir_defaulted" = yes; then { echo "$as_me: error: cannot find sources ($ac_unique_file) in $ac_confdir or .." 
>&2 { (exit 1); exit 1; }; } else { echo "$as_me: error: cannot find sources ($ac_unique_file) in $srcdir" >&2 { (exit 1); exit 1; }; } fi fi (cd $srcdir && test -r ./$ac_unique_file) 2>/dev/null || { echo "$as_me: error: sources are in $srcdir, but \`cd $srcdir' does not work" >&2 { (exit 1); exit 1; }; } srcdir=`echo "$srcdir" | sed 's%\([^\\/]\)[\\/]*$%\1%'` ac_env_build_alias_set=${build_alias+set} ac_env_build_alias_value=$build_alias ac_cv_env_build_alias_set=${build_alias+set} ac_cv_env_build_alias_value=$build_alias ac_env_host_alias_set=${host_alias+set} ac_env_host_alias_value=$host_alias ac_cv_env_host_alias_set=${host_alias+set} ac_cv_env_host_alias_value=$host_alias ac_env_target_alias_set=${target_alias+set} ac_env_target_alias_value=$target_alias ac_cv_env_target_alias_set=${target_alias+set} ac_cv_env_target_alias_value=$target_alias ac_env_ANTLRFLAGS_set=${ANTLRFLAGS+set} ac_env_ANTLRFLAGS_value=$ANTLRFLAGS ac_cv_env_ANTLRFLAGS_set=${ANTLRFLAGS+set} ac_cv_env_ANTLRFLAGS_value=$ANTLRFLAGS ac_env_MAKE_set=${MAKE+set} ac_env_MAKE_value=$MAKE ac_cv_env_MAKE_set=${MAKE+set} ac_cv_env_MAKE_value=$MAKE ac_env_JAVAC_set=${JAVAC+set} ac_env_JAVAC_value=$JAVAC ac_cv_env_JAVAC_set=${JAVAC+set} ac_cv_env_JAVAC_value=$JAVAC ac_env_JAVACFLAGS_set=${JAVACFLAGS+set} ac_env_JAVACFLAGS_value=$JAVACFLAGS ac_cv_env_JAVACFLAGS_set=${JAVACFLAGS+set} ac_cv_env_JAVACFLAGS_value=$JAVACFLAGS ac_env_JAVA_set=${JAVA+set} ac_env_JAVA_value=$JAVA ac_cv_env_JAVA_set=${JAVA+set} ac_cv_env_JAVA_value=$JAVA ac_env_JAVAFLAGS_set=${JAVAFLAGS+set} ac_env_JAVAFLAGS_value=$JAVAFLAGS ac_cv_env_JAVAFLAGS_set=${JAVAFLAGS+set} ac_cv_env_JAVAFLAGS_value=$JAVAFLAGS ac_env_CXX_set=${CXX+set} ac_env_CXX_value=$CXX ac_cv_env_CXX_set=${CXX+set} ac_cv_env_CXX_value=$CXX ac_env_CXXFLAGS_set=${CXXFLAGS+set} ac_env_CXXFLAGS_value=$CXXFLAGS ac_cv_env_CXXFLAGS_set=${CXXFLAGS+set} ac_cv_env_CXXFLAGS_value=$CXXFLAGS ac_env_LDFLAGS_set=${LDFLAGS+set} ac_env_LDFLAGS_value=$LDFLAGS 
ac_cv_env_LDFLAGS_set=${LDFLAGS+set} ac_cv_env_LDFLAGS_value=$LDFLAGS ac_env_CPPFLAGS_set=${CPPFLAGS+set} ac_env_CPPFLAGS_value=$CPPFLAGS ac_cv_env_CPPFLAGS_set=${CPPFLAGS+set} ac_cv_env_CPPFLAGS_value=$CPPFLAGS ac_env_CC_set=${CC+set} ac_env_CC_value=$CC ac_cv_env_CC_set=${CC+set} ac_cv_env_CC_value=$CC ac_env_CFLAGS_set=${CFLAGS+set} ac_env_CFLAGS_value=$CFLAGS ac_cv_env_CFLAGS_set=${CFLAGS+set} ac_cv_env_CFLAGS_value=$CFLAGS ac_env_CPP_set=${CPP+set} ac_env_CPP_value=$CPP ac_cv_env_CPP_set=${CPP+set} ac_cv_env_CPP_value=$CPP ac_env_PYTHON_set=${PYTHON+set} ac_env_PYTHON_value=$PYTHON ac_cv_env_PYTHON_set=${PYTHON+set} ac_cv_env_PYTHON_value=$PYTHON ac_env_PYTHONFLAGS_set=${PYTHONFLAGS+set} ac_env_PYTHONFLAGS_value=$PYTHONFLAGS ac_cv_env_PYTHONFLAGS_set=${PYTHONFLAGS+set} ac_cv_env_PYTHONFLAGS_value=$PYTHONFLAGS ac_env_CSHARPC_set=${CSHARPC+set} ac_env_CSHARPC_value=$CSHARPC ac_cv_env_CSHARPC_set=${CSHARPC+set} ac_cv_env_CSHARPC_value=$CSHARPC ac_env_CSHARPCFLAGS_set=${CSHARPCFLAGS+set} ac_env_CSHARPCFLAGS_value=$CSHARPCFLAGS ac_cv_env_CSHARPCFLAGS_set=${CSHARPCFLAGS+set} ac_cv_env_CSHARPCFLAGS_value=$CSHARPCFLAGS # # Report the --help message. # if test "$ac_init_help" = "long"; then # Omit some internal or obsolete options to make the list less imposing. # This message is too long to be a string in the A/UX 3.1 sh. cat <<_ACEOF \`configure' configures antlr 2.7.5 to adapt to many kinds of systems. Usage: $0 [OPTION]... [VAR=VALUE]... To assign environment variables (e.g., CC, CFLAGS...), specify them as VAR=VALUE. See below for descriptions of some of the useful variables. Defaults for the options are specified in brackets. Configuration: -h, --help display this help and exit --help=short display options specific to this package --help=recursive display the short help of all the included packages -V, --version display version information and exit -q, --quiet, --silent do not print \`checking...' 
messages --cache-file=FILE cache test results in FILE [disabled] -C, --config-cache alias for \`--cache-file=config.cache' -n, --no-create do not create output files --srcdir=DIR find the sources in DIR [configure dir or \`..'] _ACEOF cat <<_ACEOF Installation directories: --prefix=PREFIX install architecture-independent files in PREFIX [$ac_default_prefix] --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX [PREFIX] By default, \`make install' will install all the files in \`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify an installation prefix other than \`$ac_default_prefix' using \`--prefix', for instance \`--prefix=\$HOME'. For better control, use the options below. Fine tuning of the installation directories: --bindir=DIR user executables [EPREFIX/bin] --sbindir=DIR system admin executables [EPREFIX/sbin] --libexecdir=DIR program executables [EPREFIX/libexec] --datadir=DIR read-only architecture-independent data [PREFIX/share] --sysconfdir=DIR read-only single-machine data [PREFIX/etc] --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com] --localstatedir=DIR modifiable single-machine data [PREFIX/var] --libdir=DIR object code libraries [EPREFIX/lib] --includedir=DIR C header files [PREFIX/include] --oldincludedir=DIR C header files for non-gcc [/usr/include] --infodir=DIR info documentation [PREFIX/info] --mandir=DIR man documentation [PREFIX/man] _ACEOF cat <<\_ACEOF System types: --build=BUILD configure for building on BUILD [guessed] --host=HOST cross-compile to build programs to run on HOST [BUILD] _ACEOF fi if test -n "$ac_init_help"; then case $ac_init_help in short | recursive ) echo "Configuration of antlr 2.7.5:";; esac cat <<\_ACEOF Optional Features: --disable-FEATURE do not include FEATURE (same as --enable-FEATURE=no) --enable-FEATURE[=ARG] include FEATURE [ARG=yes] --enable-java enable or disable ANTLR for Java (enabled) --enable-cxx enable or disable ANTLR for C++ (enabled) 
--enable-python enable or disable ANTLR for Python (enabled). --enable-csharp enable or disable ANTLR for C# (enabled) --enable-verbose turn on verbosity when building package. --enable-debug set debug level - any value greater zero enables a debug version --enable-examples include examples into this configuration (enabled) Optional Packages: --with-PACKAGE[=ARG] use PACKAGE [ARG=yes] --without-PACKAGE do not use PACKAGE (same as --with-PACKAGE=no) --with-antlr-jar=ARG use given file (antlr.jar) to bootstrap --with-antlr-cmd=ARG use given command to compile ANTLR grammar files while bootstrapping.. --bootclasspath=ARG use this option to set bootclasspath when using jikes. ARG is a white space seperated list of absolute file or directory names, typically /opt/jdk1.3/jre/lib/rt.jar. In most cases this option is not requird as configure tries to detect rt.jar itself. If configure fails or detects the wrong boot library you may use this option. Note that this option is only used when using jikes. --with-cxx=ARG given argument will override variable $CXX. For a detailed description of $CXX see below. --with-cxxflags=ARG given argument will override variable $CXXFLAGS. For a detailed description of $CXXFLAGS see below. --with-make=ARG given argument will override variable $MAKE. For a detailed description of $MAKE see below. --with-makeflags=ARG given argument will override variable $MAKEFLAGS. For a detailed description of $MAKEFLAGS see below. --with-java=ARG given argument will override variable $JAVA. For a detailed description of $JAVA see below. --with-javaflags=ARG given argument will override variable $JAVAFLAGS. For a detailed description of $JAVAFLAGS see below. --with-javac=ARG given argument will override variable $JAVAC. For a detailed description of $JAVAC see below. --with-javacflags=ARG given argument will override variable $JAVACFLAGS. For a detailed description of $JAVACFLAGS see below. --with-jar=ARG given argument will override variable $JAR. 
For a detailed description of $JAR see below. --with-jarflags=ARG given argument will override variable $JARFLAGS. For a detailed description of $JARFLAGS see below. --with-python=ARG given argument will override variable $PYTHON. For a detailed description of $PYTHON see below. --with-pythonflags=ARG given argument will override variable $PYTHONFLAGS. For a detailed description of $PYTHONFLAGS see below. --with-csharpc=ARG given argument will override variable $CSHARPC. For a detailed description of $CSHARPC see below. --with-csharpcflags=ARG given argument will override variable $CSHARPCFLAGS. For a detailed description of $CSHARPCFLAGS see below. Some influential environment variables: ANTLRFLAGS Use environment variable ANTLRFLAGS to pass some extra flags to antlr when compiling grammar (*.g) files. MAKE By default we search for "make", "gmake" and "gnumake" in your PATH as well as "/bin/make" and "/usr/bin/make". You may override this search by using enviromnent variable $MAKE. Note that a GNU make is required to build this package. However, when providing your own candidate a check for GNU make is skipped and all bets are on. JAVAC By default we search for "jikes", "javac" and "gcj" in your $PATH on how to comile Java source files. You may override this search by using enviromnent variable $JAVAC. JAVAC may contain a list of candidates, either as absolute path names or as a relative one. In case a relative name is given, a search in $PATH will take place, otherwise the absolute name is tried. JAVACFLAGS Environment variable JAVACFLAGS can be used to change or override all flags required to compile Java source files. Note that JAVACFLAGS understands the following: "+ flag1 flag2 .." append "flag1 flag2 .." to precomputed list "- flag1 flag2 .." prepend "flag1 flag2 .." to precomputed list "= flag1 flag2 .. override with flag1 flag2 ..". 
If there is a need to hardwire additional flags then edit scripts/javac.sh.in and run "CONFIG_FILES=scripts/javac.sh ./config.status" again. JAVA By default we search for "java" and "gij" in your PATH on how to run Java class files. You may override this search by using enviromnent variable $JAVA. JAVA may contain a list of candidates, either as absolute path name or as a relative one. In case of a relative name, a search in $PATH will take place. Otherwise the absolute name will be accepted if existing. JAVAFLAGS Shall contain all flags required to run Java class files. You may override by using environment variable JAVAFLAGS. CXX C++ compiler command CXXFLAGS C++ compiler flags LDFLAGS linker flags, e.g. -L if you have libraries in a nonstandard directory CPPFLAGS C/C++ preprocessor flags, e.g. -I if you have headers in a nonstandard directory CC C compiler command CFLAGS C compiler flags CPP C preprocessor PYTHON By default we search for "python" in $PATH to execute Python files. Override this by providing a list of candidates in environment variable $PYTHON and use whitespace as spereration character. A candidate can be either a relative or absolute path name. In the former case a lookup in $PATH takes place, in the latter, the absolute path name must exist. PYTHONFLAGS Shall contain all flags required to run Python. Override the default by using environment variable $PYTHONFLAGS. CSHARPC By default we search for "cscc", "msc" and "csc" in $PATH to compile C# files. Override this by providing a list of candidates in environment variable $CSHARP and use whitespace as spereration character. A candidate can be either a relative or absolute path name. In the former case a lookup in $PATH takes place, in the latter, the absolute path name must exist. CSHARPCFLAGS Shall contain all flags required to compile a #C file. Override the default by using environment variable $CSHARPCFLAGS. 
Use these variables to override the choices made by `configure' or to help it to find libraries and programs with nonstandard names/locations. _ACEOF fi if test "$ac_init_help" = "recursive"; then # If there are subdirs, report their specific --help. ac_popdir=`pwd` for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue test -d $ac_dir || continue ac_builddir=. if test "$ac_dir" != .; then ac_dir_suffix=/`echo "$ac_dir" | sed 's,^\.[\\/],,'` # A "../" for each directory in $ac_dir_suffix. ac_top_builddir=`echo "$ac_dir_suffix" | sed 's,/[^\\/]*,../,g'` else ac_dir_suffix= ac_top_builddir= fi case $srcdir in .) # No --srcdir option. We are building in place. ac_srcdir=. if test -z "$ac_top_builddir"; then ac_top_srcdir=. else ac_top_srcdir=`echo $ac_top_builddir | sed 's,/$,,'` fi ;; [\\/]* | ?:[\\/]* ) # Absolute path. ac_srcdir=$srcdir$ac_dir_suffix; ac_top_srcdir=$srcdir ;; *) # Relative path. ac_srcdir=$ac_top_builddir$srcdir$ac_dir_suffix ac_top_srcdir=$ac_top_builddir$srcdir ;; esac # Do not use `cd foo && pwd` to compute absolute paths, because # the directories may not exist. case `pwd` in .) ac_abs_builddir="$ac_dir";; *) case "$ac_dir" in .) ac_abs_builddir=`pwd`;; [\\/]* | ?:[\\/]* ) ac_abs_builddir="$ac_dir";; *) ac_abs_builddir=`pwd`/"$ac_dir";; esac;; esac case $ac_abs_builddir in .) ac_abs_top_builddir=${ac_top_builddir}.;; *) case ${ac_top_builddir}. in .) ac_abs_top_builddir=$ac_abs_builddir;; [\\/]* | ?:[\\/]* ) ac_abs_top_builddir=${ac_top_builddir}.;; *) ac_abs_top_builddir=$ac_abs_builddir/${ac_top_builddir}.;; esac;; esac case $ac_abs_builddir in .) ac_abs_srcdir=$ac_srcdir;; *) case $ac_srcdir in .) ac_abs_srcdir=$ac_abs_builddir;; [\\/]* | ?:[\\/]* ) ac_abs_srcdir=$ac_srcdir;; *) ac_abs_srcdir=$ac_abs_builddir/$ac_srcdir;; esac;; esac case $ac_abs_builddir in .) ac_abs_top_srcdir=$ac_top_srcdir;; *) case $ac_top_srcdir in .) 
ac_abs_top_srcdir=$ac_abs_builddir;; [\\/]* | ?:[\\/]* ) ac_abs_top_srcdir=$ac_top_srcdir;; *) ac_abs_top_srcdir=$ac_abs_builddir/$ac_top_srcdir;; esac;; esac cd $ac_dir # Check for guested configure; otherwise get Cygnus style configure. if test -f $ac_srcdir/configure.gnu; then echo $SHELL $ac_srcdir/configure.gnu --help=recursive elif test -f $ac_srcdir/configure; then echo $SHELL $ac_srcdir/configure --help=recursive elif test -f $ac_srcdir/configure.ac || test -f $ac_srcdir/configure.in; then echo $ac_configure --help else echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2 fi cd $ac_popdir done fi test -n "$ac_init_help" && exit 0 if $ac_init_version; then cat <<\_ACEOF antlr configure 2.7.5 generated by GNU Autoconf 2.59 Copyright (C) 2003 Free Software Foundation, Inc. This configure script is free software; the Free Software Foundation gives unlimited permission to copy, distribute and modify it. _ACEOF exit 0 fi exec 5>config.log cat >&5 <<_ACEOF This file contains any messages produced by compilers while running configure, to aid debugging if configure makes a mistake. It was created by antlr $as_me 2.7.5, which was generated by GNU Autoconf 2.59. Invocation command line was $ $0 $@ _ACEOF { cat <<_ASUNAME ## --------- ## ## Platform. 
## ## --------- ## hostname = `(hostname || uname -n) 2>/dev/null | sed 1q` uname -m = `(uname -m) 2>/dev/null || echo unknown` uname -r = `(uname -r) 2>/dev/null || echo unknown` uname -s = `(uname -s) 2>/dev/null || echo unknown` uname -v = `(uname -v) 2>/dev/null || echo unknown` /usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown` /bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown` /bin/arch = `(/bin/arch) 2>/dev/null || echo unknown` /usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown` /usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown` hostinfo = `(hostinfo) 2>/dev/null || echo unknown` /bin/machine = `(/bin/machine) 2>/dev/null || echo unknown` /usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown` /bin/universe = `(/bin/universe) 2>/dev/null || echo unknown` _ASUNAME as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. echo "PATH: $as_dir" done } >&5 cat >&5 <<_ACEOF ## ----------- ## ## Core tests. ## ## ----------- ## _ACEOF # Keep a trace of the command line. # Strip out --no-create and --no-recursion so they do not pile up. # Strip out --silent because we don't want to record it for future runs. # Also quote any args containing shell meta-characters. # Make two passes to allow for proper duplicate-argument suppression. 
ac_configure_args= ac_configure_args0= ac_configure_args1= ac_sep= ac_must_keep_next=false for ac_pass in 1 2 do for ac_arg do case $ac_arg in -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil) continue ;; *" "*|*" "*|*[\[\]\~\#\$\^\&\*\(\)\{\}\\\|\;\<\>\?\"\']*) ac_arg=`echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; esac case $ac_pass in 1) ac_configure_args0="$ac_configure_args0 '$ac_arg'" ;; 2) ac_configure_args1="$ac_configure_args1 '$ac_arg'" if test $ac_must_keep_next = true; then ac_must_keep_next=false # Got value, back to normal. else case $ac_arg in *=* | --config-cache | -C | -disable-* | --disable-* \ | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \ | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \ | -with-* | --with-* | -without-* | --without-* | --x) case "$ac_configure_args0 " in "$ac_configure_args1"*" '$ac_arg' "* ) continue ;; esac ;; -* ) ac_must_keep_next=true ;; esac fi ac_configure_args="$ac_configure_args$ac_sep'$ac_arg'" # Get rid of the leading space. ac_sep=" " ;; esac done done $as_unset ac_configure_args0 || test "${ac_configure_args0+set}" != set || { ac_configure_args0=; export ac_configure_args0; } $as_unset ac_configure_args1 || test "${ac_configure_args1+set}" != set || { ac_configure_args1=; export ac_configure_args1; } # When interrupted or exit'd, cleanup temporary files, and complete # config.log. We remove comments because anyway the quotes in there # would cause problems or look ugly. # WARNING: Be sure not to use single quotes in there, as some shells, # such as our DU 5.0 friend, will then `close' the trap. trap 'exit_status=$? # Save into config.log some information that might help in debugging. { echo cat <<\_ASBOX ## ---------------- ## ## Cache variables. 
## ## ---------------- ## _ASBOX echo # The following way of writing the cache mishandles newlines in values, { (set) 2>&1 | case `(ac_space='"'"' '"'"'; set | grep ac_space) 2>&1` in *ac_space=\ *) sed -n \ "s/'"'"'/'"'"'\\\\'"'"''"'"'/g; s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='"'"'\\2'"'"'/p" ;; *) sed -n \ "s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1=\\2/p" ;; esac; } echo cat <<\_ASBOX ## ----------------- ## ## Output variables. ## ## ----------------- ## _ASBOX echo for ac_var in $ac_subst_vars do eval ac_val=$`echo $ac_var` echo "$ac_var='"'"'$ac_val'"'"'" done | sort echo if test -n "$ac_subst_files"; then cat <<\_ASBOX ## ------------- ## ## Output files. ## ## ------------- ## _ASBOX echo for ac_var in $ac_subst_files do eval ac_val=$`echo $ac_var` echo "$ac_var='"'"'$ac_val'"'"'" done | sort echo fi if test -s confdefs.h; then cat <<\_ASBOX ## ----------- ## ## confdefs.h. ## ## ----------- ## _ASBOX echo sed "/^$/d" confdefs.h | sort echo fi test "$ac_signal" != 0 && echo "$as_me: caught signal $ac_signal" echo "$as_me: exit $exit_status" } >&5 rm -f core *.core && rm -rf conftest* confdefs* conf$$* $ac_clean_files && exit $exit_status ' 0 for ac_signal in 1 2 13 15; do trap 'ac_signal='$ac_signal'; { (exit 1); exit 1; }' $ac_signal done ac_signal=0 # confdefs.h avoids OS command line length limits that DEFS can exceed. rm -rf conftest* confdefs.h # AIX cpp loses on an empty file, so make sure it contains at least a newline. echo >confdefs.h # Predefined preprocessor variables. cat >>confdefs.h <<_ACEOF #define PACKAGE_NAME "$PACKAGE_NAME" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_TARNAME "$PACKAGE_TARNAME" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_VERSION "$PACKAGE_VERSION" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_STRING "$PACKAGE_STRING" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT" _ACEOF # Let the site file select an alternate cache file if it wants to. 
# Prefer explicitly selected file to automatically selected ones. if test -z "$CONFIG_SITE"; then if test "x$prefix" != xNONE; then CONFIG_SITE="$prefix/share/config.site $prefix/etc/config.site" else CONFIG_SITE="$ac_default_prefix/share/config.site $ac_default_prefix/etc/config.site" fi fi for ac_site_file in $CONFIG_SITE; do if test -r "$ac_site_file"; then { echo "$as_me:$LINENO: loading site script $ac_site_file" >&5 echo "$as_me: loading site script $ac_site_file" >&6;} sed 's/^/| /' "$ac_site_file" >&5 . "$ac_site_file" fi done if test -r "$cache_file"; then # Some versions of bash will fail to source /dev/null (special # files actually), so we avoid doing that. if test -f "$cache_file"; then { echo "$as_me:$LINENO: loading cache $cache_file" >&5 echo "$as_me: loading cache $cache_file" >&6;} case $cache_file in [\\/]* | ?:[\\/]* ) . $cache_file;; *) . ./$cache_file;; esac fi else { echo "$as_me:$LINENO: creating cache $cache_file" >&5 echo "$as_me: creating cache $cache_file" >&6;} >$cache_file fi # Check that the precious variables saved in the cache have kept the same # value. 
ac_cache_corrupted=false for ac_var in `(set) 2>&1 | sed -n 's/^ac_env_\([a-zA-Z_0-9]*\)_set=.*/\1/p'`; do eval ac_old_set=\$ac_cv_env_${ac_var}_set eval ac_new_set=\$ac_env_${ac_var}_set eval ac_old_val="\$ac_cv_env_${ac_var}_value" eval ac_new_val="\$ac_env_${ac_var}_value" case $ac_old_set,$ac_new_set in set,) { echo "$as_me:$LINENO: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5 echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;} ac_cache_corrupted=: ;; ,set) { echo "$as_me:$LINENO: error: \`$ac_var' was not set in the previous run" >&5 echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} ac_cache_corrupted=: ;; ,);; *) if test "x$ac_old_val" != "x$ac_new_val"; then { echo "$as_me:$LINENO: error: \`$ac_var' has changed since the previous run:" >&5 echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} { echo "$as_me:$LINENO: former value: $ac_old_val" >&5 echo "$as_me: former value: $ac_old_val" >&2;} { echo "$as_me:$LINENO: current value: $ac_new_val" >&5 echo "$as_me: current value: $ac_new_val" >&2;} ac_cache_corrupted=: fi;; esac # Pass precious variables to config.status. if test "$ac_new_set" = set; then case $ac_new_val in *" "*|*" "*|*[\[\]\~\#\$\^\&\*\(\)\{\}\\\|\;\<\>\?\"\']*) ac_arg=$ac_var=`echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;; *) ac_arg=$ac_var=$ac_new_val ;; esac case " $ac_configure_args " in *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy. 
*) ac_configure_args="$ac_configure_args '$ac_arg'" ;; esac fi done if $ac_cache_corrupted; then { echo "$as_me:$LINENO: error: changes in the environment can compromise the build" >&5 echo "$as_me: error: changes in the environment can compromise the build" >&2;} { { echo "$as_me:$LINENO: error: run \`make distclean' and/or \`rm $cache_file' and start over" >&5 echo "$as_me: error: run \`make distclean' and/or \`rm $cache_file' and start over" >&2;} { (exit 1); exit 1; }; } fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu ac_aux_dir= for ac_dir in scripts $srcdir/scripts; do if test -f $ac_dir/install-sh; then ac_aux_dir=$ac_dir ac_install_sh="$ac_aux_dir/install-sh -c" break elif test -f $ac_dir/install.sh; then ac_aux_dir=$ac_dir ac_install_sh="$ac_aux_dir/install.sh -c" break elif test -f $ac_dir/shtool; then ac_aux_dir=$ac_dir ac_install_sh="$ac_aux_dir/shtool install -c" break fi done if test -z "$ac_aux_dir"; then { { echo "$as_me:$LINENO: error: cannot find install-sh or install.sh in scripts $srcdir/scripts" >&5 echo "$as_me: error: cannot find install-sh or install.sh in scripts $srcdir/scripts" >&2;} { (exit 1); exit 1; }; } fi ac_config_guess="$SHELL $ac_aux_dir/config.guess" ac_config_sub="$SHELL $ac_aux_dir/config.sub" ac_configure="$SHELL $ac_aux_dir/configure" # This should be Cygnus configure. ## This shall be the very first config file. Do not change ## this. ac_config_files="$ac_config_files scripts/config.vars" ac_config_files="$ac_config_files scripts/config.deps" ac_config_files="$ac_config_files scripts/config.make" ac_config_files="$ac_config_files antlr/Version.java" ## ANTLR's core libraries for each supporte language. The variable ## in uppercase letters denotes the absolute name of the library. 
## When in lower cases letters - see below - the variable just ## holds the basename. ## introduce package information as autoconf vars. VERSION=`echo $PACKAGE_VERSION | cut -d . -f 1` SUBVERSION=`echo $PACKAGE_VERSION | cut -d . -f 2` PATCHLEVEL=`echo $PACKAGE_VERSION | cut -d . -f 3` TIMESTAMP=`date +%Y%m%d` ## @abs_this_builddir@ - absolute path to top of build directory. ## According to GNU autoconf we can rely on that there's a proper ## pwd around. abs_this_builddir=`pwd` ## This is how we compile Java files .. JAVA_COMPILE_CMD="/bin/sh $abs_this_builddir/scripts/javac.sh" ## This is how we run Java .. JAVA_CMD="/bin/sh $abs_this_builddir/scripts/java.sh" ## This is how we pack Java (class) files .. JAR_CMD="/bin/sh $abs_this_builddir/scripts/jar.sh" ## And this is how we are going to compile ANTLR grammar files .. ANTLR_COMPILE_CMD="/bin/sh $abs_this_builddir/scripts/antlr.sh" ## This is how we compile CSHARP files .. CSHARP_COMPILE_CMD="/bin/sh $abs_this_builddir/scripts/csc.sh" ## This is how we compile C++ files and how we are going to create ## libantlr.a or antlr.lib etc. .. CXX_COMPILE_CMD="/bin/sh $abs_this_builddir/scripts/cxx.sh" CXX_LIB_CMD="/bin/sh $abs_this_builddir/scripts/lib.sh" CXX_LINK_CMD="/bin/sh $abs_this_builddir/scripts/link.sh" C_COMPILE_CMD="/bin/sh $abs_this_builddir/scripts/c.sh" ANTLR_JAR="$abs_this_builddir/antlr/antlr.jar" ANTLR_NET="$abs_this_builddir/lib/antlr.runtime.dll" ASTFRAME_NET="$abs_this_builddir/lib/antlr.astframe.dll" ANTLR_PY="$abs_this_builddir/lib/python/antlr/python.py" ## Note: values might be overriden in C++ section. OBJEXT=".o" LIBEXT=".a" ANTLR_LIB="$abs_this_builddir/lib/cpp/src/libantlr.a" stdvars="scripts/config.vars" stddeps="scripts/config.deps" stdmake="scripts/config.make" ## ## option --enable-java ## LANG_JAVA=1 # Check whether --enable-java or --disable-java was given. 
if test "${enable_java+set}" = set; then enableval="$enable_java" LANG_JAVA="${enableval}" case "${LANG_JAVA}" in no|0|false) LANG_JAVA=0;; * ) LANG_JAVA=1;; esac fi; ## ## option --enable-cxx ## LANG_CXX=1 # Check whether --enable-cxx or --disable-cxx was given. if test "${enable_cxx+set}" = set; then enableval="$enable_cxx" LANG_CXX="${enableval}" case "${LANG_CXX}" in no|0|false) LANG_CXX=0;; * ) LANG_CXX=1;; esac fi; ## ## option --enable-python ## LANG_PY=1 # Check whether --enable-python or --disable-python was given. if test "${enable_python+set}" = set; then enableval="$enable_python" LANG_PY="${enableval}" case "${LANG_PY}" in no|0|false) LANG_PY=0;; * ) LANG_PY=1;; esac fi; ## ## option --enable-csharp ## LANG_CS=1 # Check whether --enable-csharp or --disable-csharp was given. if test "${enable_csharp+set}" = set; then enableval="$enable_csharp" LANG_CS="${enableval}" case "${LANG_CS}" in no|0|false) LANG_CS=0;; * ) LANG_CS=1;; esac fi; ## ## option --enable-verbose= ## VERBOSE=0 # Check whether --enable-verbose or --disable-verbose was given. if test "${enable_verbose+set}" = set; then enableval="$enable_verbose" VERBOSE="${enableval}" case "${VERBOSE}" in no|0|false) VERBOSE=0;; * ) VERBOSE=1;; esac fi; ## ## option --enable-debug= ## DEBUG=0 # Check whether --enable-debug or --disable-debug was given. if test "${enable_debug+set}" = set; then enableval="$enable_debug" DEBUG="${enableval}" case "${DEBUG}" in no|0|false) DEBUG=0;; * ) DEBUG=1;; esac fi; ## ## option --enable-examples ## WITH_EXAMPLES=1 WITH_EXAMPLES=1 # Check whether --enable-examples or --disable-examples was given. if test "${enable_examples+set}" = set; then enableval="$enable_examples" WITH_EXAMPLES="${enableval}" case "${WITH_EXAMPLES}" in no|0|false) WITH_EXAMPLES=0;; * ) WITH_EXAMPLES=1;; esac fi; ## ## option --with-antlr-jar ## ANTLR_WITH_ANTLR_JAR="" # Check whether --with-antlr-jar or --without-antlr-jar was given. 
if test "${with_antlr_jar+set}" = set; then withval="$with_antlr_jar" if test -n "${ANTLR_WITH_ANTLR_CMD}" ; then opts="--with-antlr-jar,--with-antlr-cmd" { { echo "$as_me:$LINENO: error: this configuration options mutually exclusive: $opts" >&5 echo "$as_me: error: this configuration options mutually exclusive: $opts" >&2;} { (exit 1); exit 1; }; } fi ANTLR_WITH_ANTLR_JAR="${withval}" fi; ## ## option --with-antlr-cmd ## ANTLR_WITH_ANTLR_CMD="" # Check whether --with-antlr-cmd or --without-antlr-cmd was given. if test "${with_antlr_cmd+set}" = set; then withval="$with_antlr_cmd" if test -n "${ANTLR_WITH_ANTLR_JAR}" ; then opts="--with-antlr-jar,--with-antlr-cmd" { { echo "$as_me:$LINENO: error: this configuration options mutually exclusive: $opts" >&5 echo "$as_me: error: this configuration options mutually exclusive: $opts" >&2;} { (exit 1); exit 1; }; } fi ANTLR_WITH_ANTLR_CMD="${withval}" fi; # Check whether --with-bootclasspath or --without-bootclasspath was given. if test "${with_bootclasspath+set}" = set; then withval="$with_bootclasspath" BOOTCLASSPATH="${withval}" fi; # Check whether --with-cxx or --without-cxx was given. if test "${with_cxx+set}" = set; then withval="$with_cxx" CXX="${withval}" fi; # Check whether --with-cxxflags or --without-cxxflags was given. if test "${with_cxxflags+set}" = set; then withval="$with_cxxflags" CXXFLAGS="${withval}" fi; # Check whether --with-make or --without-make was given. if test "${with_make+set}" = set; then withval="$with_make" MAKE="${withval}" fi; # Check whether --with-makeflags or --without-makeflags was given. if test "${with_makeflags+set}" = set; then withval="$with_makeflags" MAKEFLAGS="${withval}" fi; # Check whether --with-java or --without-java was given. if test "${with_java+set}" = set; then withval="$with_java" JAVA="${withval}" fi; # Check whether --with-javaflags or --without-javaflags was given. 
if test "${with_javaflags+set}" = set; then withval="$with_javaflags" JAVAFLAGS="${withval}" fi; # Check whether --with-javac or --without-javac was given. if test "${with_javac+set}" = set; then withval="$with_javac" JAVAC="${withval}" fi; # Check whether --with-javacflags or --without-javacflags was given. if test "${with_javacflags+set}" = set; then withval="$with_javacflags" JAVACFLAGS="${withval}" fi; # Check whether --with-jar or --without-jar was given. if test "${with_jar+set}" = set; then withval="$with_jar" JAR="${withval}" fi; # Check whether --with-jarflags or --without-jarflags was given. if test "${with_jarflags+set}" = set; then withval="$with_jarflags" JARFLAGS="${withval}" fi; # Check whether --with-python or --without-python was given. if test "${with_python+set}" = set; then withval="$with_python" PYTHON="${withval}" fi; # Check whether --with-pythonflags or --without-pythonflags was given. if test "${with_pythonflags+set}" = set; then withval="$with_pythonflags" PYTHONFLAGS="${withval}" fi; # Check whether --with-csharpc or --without-csharpc was given. if test "${with_csharpc+set}" = set; then withval="$with_csharpc" CSHARPC="${withval}" fi; # Check whether --with-csharpcflags or --without-csharpcflags was given. if test "${with_csharpcflags+set}" = set; then withval="$with_csharpcflags" CSHARPCFLAGS="${withval}" fi; #xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx# # S T A R T T E S T S # #xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx# # get host_os set # Make sure we can run config.sub. $ac_config_sub sun4 >/dev/null 2>&1 || { { echo "$as_me:$LINENO: error: cannot run $ac_config_sub" >&5 echo "$as_me: error: cannot run $ac_config_sub" >&2;} { (exit 1); exit 1; }; } echo "$as_me:$LINENO: checking build system type" >&5 echo $ECHO_N "checking build system type... 
$ECHO_C" >&6 if test "${ac_cv_build+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else ac_cv_build_alias=$build_alias test -z "$ac_cv_build_alias" && ac_cv_build_alias=`$ac_config_guess` test -z "$ac_cv_build_alias" && { { echo "$as_me:$LINENO: error: cannot guess build type; you must specify one" >&5 echo "$as_me: error: cannot guess build type; you must specify one" >&2;} { (exit 1); exit 1; }; } ac_cv_build=`$ac_config_sub $ac_cv_build_alias` || { { echo "$as_me:$LINENO: error: $ac_config_sub $ac_cv_build_alias failed" >&5 echo "$as_me: error: $ac_config_sub $ac_cv_build_alias failed" >&2;} { (exit 1); exit 1; }; } fi echo "$as_me:$LINENO: result: $ac_cv_build" >&5 echo "${ECHO_T}$ac_cv_build" >&6 build=$ac_cv_build build_cpu=`echo $ac_cv_build | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\1/'` build_vendor=`echo $ac_cv_build | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\2/'` build_os=`echo $ac_cv_build | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\3/'` echo "$as_me:$LINENO: checking host system type" >&5 echo $ECHO_N "checking host system type... $ECHO_C" >&6 if test "${ac_cv_host+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else ac_cv_host_alias=$host_alias test -z "$ac_cv_host_alias" && ac_cv_host_alias=$ac_cv_build_alias ac_cv_host=`$ac_config_sub $ac_cv_host_alias` || { { echo "$as_me:$LINENO: error: $ac_config_sub $ac_cv_host_alias failed" >&5 echo "$as_me: error: $ac_config_sub $ac_cv_host_alias failed" >&2;} { (exit 1); exit 1; }; } fi echo "$as_me:$LINENO: result: $ac_cv_host" >&5 echo "${ECHO_T}$ac_cv_host" >&6 host=$ac_cv_host host_cpu=`echo $ac_cv_host | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\1/'` host_vendor=`echo $ac_cv_host | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\2/'` host_os=`echo $ac_cv_host | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\3/'` # Detect cygwin or mingw ANTLR_CYGWIN=no ANTLR_MINGW=no echo "$as_me:$LINENO: checking whether this is Cygwin" >&5 echo $ECHO_N "checking whether this is Cygwin... 
$ECHO_C" >&6 case $host_os in *cygwin* ) ANTLR_CYGWIN=yes ;; *) echo "$as_me:$LINENO: result: no" >&5 echo "${ECHO_T}no" >&6 ;; esac echo "$as_me:$LINENO: checking whether this is MinGW" >&5 echo $ECHO_N "checking whether this is MinGW... $ECHO_C" >&6 case $host_os in *mingw* ) ANTLR_MINGW=yes ;; *) echo "$as_me:$LINENO: result: no" >&5 echo "${ECHO_T}no" >&6 ;; esac ## Set common file extensions depending on OS we are running on. ## File extensions depend on C++/C compiler in use. This values ## are just guesses and redefined further below. case "${host_os}" in *mingw*|*cygwin*) OBJEXT=".o" LIBEXT=".a" EXEEXT=".exe" ;; *) OBJEXT=".o" LIBEXT=".a" EXEEXT="" ;; esac ## Test whether we have cygpath test -z "$CYGPATH" && for ac_prog in cygpath$EXEEXT do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 echo "$as_me:$LINENO: checking for $ac_word" >&5 echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 if test "${ac_cv_path_CYGPATH+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else case $CYGPATH in [\\/]* | ?:[\\/]*) ac_cv_path_CYGPATH="$CYGPATH" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_CYGPATH="$as_dir/$ac_word$ac_exec_ext" echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done ;; esac fi CYGPATH=$ac_cv_path_CYGPATH if test -n "$CYGPATH"; then echo "$as_me:$LINENO: result: $CYGPATH" >&5 echo "${ECHO_T}$CYGPATH" >&6 else echo "$as_me:$LINENO: result: no" >&5 echo "${ECHO_T}no" >&6 fi test -n "$CYGPATH" && break done if test -n "$CYGPATH" ; then CYGPATH_M="${CYGPATH} -m" CYGPATH_W="${CYGPATH} -w" else CYGPATH_M="echo" CYGPATH_W="echo" fi #xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx# # MAKE # #xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx# ## Check whether there's a make program around. We search for a ## couple of well know names within $PATH. A user may skip this ## search by providing variable $MAKE. ## @MAKE@ shall contain absolut path name of make program found. ## Search for well known make programs - take user given MAKE ## into account. The result will be a list of valid make prog- ## grams found and will be stored in variable MAKE. user_make="${MAKE}" ax_arg_list="make gmake gnumake /bin/make /usr/bin/make " if test "x${MAKE}" != "x" ; then ax_arg_list="${MAKE}" fi MAKE="" for ac_prog in ${ax_arg_list} ; do set dummy $ac_prog; ac_word=${2} ## if argument is absolute we check whether such a file exists, ## otherwise we lookup PATH. Each hit will be added to main ## variable. case $ac_word in [\\/]* | ?:[\\/]*) echo "$as_me:$LINENO: checking for $ac_word" >&5 echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 if test -f $ac_word ; then MAKE="${MAKE} ${ac_word}" echo "$as_me:$LINENO: result: yes" >&5 echo "${ECHO_T}yes" >&6 else echo "$as_me:$LINENO: result: no" >&5 echo "${ECHO_T}no" >&6 fi ;; *) as_found= as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then MAKE="${MAKE} $as_dir/$ac_word$ac_exec_ext" echo "$as_me:$LINENO: checking for $ac_word" >&5 echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 echo "$as_me:$LINENO: result: $as_dir/$ac_word$ac_exec_ext" >&5 echo "${ECHO_T}$as_dir/$ac_word$ac_exec_ext" >&6 as_found=1 fi done done test "x$as_found" == "x" && { echo "$as_me:$LINENO: checking for $ac_word" >&5 echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 echo "$as_me:$LINENO: result: no" >&5 echo "${ECHO_T}no" >&6 } ;; esac done if test "x${MAKE}" != "x" ; then : else { { echo "$as_me:$LINENO: error: no suitable value has been found for MAKE" >&5 echo "$as_me: error: no suitable value has been found for MAKE" >&2;} { (exit 1); exit 1; }; } fi ## right now we need to have a GNU make around, other makes are ## not supported and likely to fail. if test "x${user_make}" == "x" ; then #Search all the common names for GNU make ax_gnu_make_list="${MAKE}" MAKE= for a in . ${ax_gnu_make_list} ; do if test "$a" == "." ; then continue fi echo "$as_me:$LINENO: checking whether ${a} is GNU make" >&5 echo $ECHO_N "checking whether ${a} is GNU make... $ECHO_C" >&6 if (/bin/sh -c "$a --version" 2> /dev/null | grep GNU 2>&1 > /dev/null ); then MAKE="$a" echo "$as_me:$LINENO: result: yes" >&5 echo "${ECHO_T}yes" >&6 break else echo "$as_me:$LINENO: result: no" >&5 echo "${ECHO_T}no" >&6 fi done ## handle search result if test "x${MAKE}" == "x" ; then : { { echo "$as_me:$LINENO: error: package requires GNU make" >&5 echo "$as_me: error: package requires GNU make" >&2;} { (exit 1); exit 1; }; } else : fi fi ## we lookup 'make' in PATH. If the one found is not the same ## as the configured one we issue a warning message. for ac_prog in make do # Extract the first word of "$ac_prog", so it can be a program name with args. 
set dummy $ac_prog; ac_word=$2 echo "$as_me:$LINENO: checking for $ac_word" >&5 echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 if test "${ac_cv_path_just_make+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else case $just_make in [\\/]* | ?:[\\/]*) ac_cv_path_just_make="$just_make" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_just_make="$as_dir/$ac_word$ac_exec_ext" echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done ;; esac fi just_make=$ac_cv_path_just_make if test -n "$just_make"; then echo "$as_me:$LINENO: result: $just_make" >&5 echo "${ECHO_T}$just_make" >&6 else echo "$as_me:$LINENO: result: no" >&5 echo "${ECHO_T}no" >&6 fi test -n "$just_make" && break done test -n "$just_make" || just_make="%" case "${just_make}" in ${MAKE}) ;; *) ac_config_commands="$ac_config_commands notice" ;; esac #xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx# # JAVA # #xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx# ## @JAVAC@ shall contain absolut path name of javac program and ## similar to CXXFLAGS, @JAVACFLAGS@ shall contain all options ## required to compile JAVA source files. ## @JAVA@ shall contain absolut path name of java program and ## similar to CXXFLAGS, @JAVAFLAGS@ shall contain all options ## required to run JAVA class files. case $LANG_JAVA in 1) ax_arg_list="java gij" if test "x${JAVA}" != "x" ; then ax_arg_list="${JAVA}" fi JAVA="" for ac_prog in ${ax_arg_list} ; do set dummy $ac_prog; ac_word=${2} ## if argument is absolute we check whether such a file exists, ## otherwise we lookup PATH. Each hit will be added to main ## variable. 
case $ac_word in [\\/]* | ?:[\\/]*) echo "$as_me:$LINENO: checking for $ac_word" >&5 echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 if test -f $ac_word ; then JAVA="${JAVA} ${ac_word}" echo "$as_me:$LINENO: result: yes" >&5 echo "${ECHO_T}yes" >&6 else echo "$as_me:$LINENO: result: no" >&5 echo "${ECHO_T}no" >&6 fi ;; *) as_found= as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then JAVA="${JAVA} $as_dir/$ac_word$ac_exec_ext" echo "$as_me:$LINENO: checking for $ac_word" >&5 echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 echo "$as_me:$LINENO: result: $as_dir/$ac_word$ac_exec_ext" >&5 echo "${ECHO_T}$as_dir/$ac_word$ac_exec_ext" >&6 as_found=1 fi done done test "x$as_found" == "x" && { echo "$as_me:$LINENO: checking for $ac_word" >&5 echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 echo "$as_me:$LINENO: result: no" >&5 echo "${ECHO_T}no" >&6 } ;; esac done if test "x${JAVA}" != "x" ; then set x ${JAVA} JAVA="${2}" else LANG_JAVA=0 cat <&5 echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 if test -f $ac_word ; then JAVAC="${JAVAC} ${ac_word}" echo "$as_me:$LINENO: result: yes" >&5 echo "${ECHO_T}yes" >&6 else echo "$as_me:$LINENO: result: no" >&5 echo "${ECHO_T}no" >&6 fi ;; *) as_found= as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then JAVAC="${JAVAC} $as_dir/$ac_word$ac_exec_ext" echo "$as_me:$LINENO: checking for $ac_word" >&5 echo $ECHO_N "checking for $ac_word... 
$ECHO_C" >&6 echo "$as_me:$LINENO: result: $as_dir/$ac_word$ac_exec_ext" >&5 echo "${ECHO_T}$as_dir/$ac_word$ac_exec_ext" >&6 as_found=1 fi done done test "x$as_found" == "x" && { echo "$as_me:$LINENO: checking for $ac_word" >&5 echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 echo "$as_me:$LINENO: result: no" >&5 echo "${ECHO_T}no" >&6 } ;; esac done if test "x${JAVAC}" != "x" ; then set x ${JAVAC} JAVAC="${2}" else LANG_JAVA=0 cat <&5 echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 if test -f $ac_word ; then JAR="${JAR} ${ac_word}" echo "$as_me:$LINENO: result: yes" >&5 echo "${ECHO_T}yes" >&6 else echo "$as_me:$LINENO: result: no" >&5 echo "${ECHO_T}no" >&6 fi ;; *) as_found= as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then JAR="${JAR} $as_dir/$ac_word$ac_exec_ext" echo "$as_me:$LINENO: checking for $ac_word" >&5 echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 echo "$as_me:$LINENO: result: $as_dir/$ac_word$ac_exec_ext" >&5 echo "${ECHO_T}$as_dir/$ac_word$ac_exec_ext" >&6 as_found=1 fi done done test "x$as_found" == "x" && { echo "$as_me:$LINENO: checking for $ac_word" >&5 echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 echo "$as_me:$LINENO: result: no" >&5 echo "${ECHO_T}no" >&6 } ;; esac done if test "x${JAR}" != "x" ; then set x ${JAR} JAR="${2}" else LANG_JAVA=0 cat <&1 | grep -i 'GCC' 2>&1 > /dev/null ) ; then javac=gcj elif (/bin/sh -c "$JAVAC --version" 2>&1 | grep -i 'jikes' 2>&1 > /dev/null ) ; then javac=jikes else javac=javac fi ## Jikes cannot live without having a Java around. Have ## therefore a look into Java installations found for ## a 'rt.jar'. test -n "${BOOTCLASSPATH}" && { for f in ${BOOTCLASSPATH} ; do echo "$as_me:$LINENO: checking bootclasspath \"$f\"" >&5 echo $ECHO_N "checking bootclasspath \"$f\"... 
$ECHO_C" >&6 test -f "${f}" -o -d "${f}" || { echo "$as_me:$LINENO: result: does not exist" >&5 echo "${ECHO_T}does not exist" >&6 { { echo "$as_me:$LINENO: error: =================================================================== Please check arguments given to --with-bootclasspath or \${BOOTCLASSPATH} Each argument must be a valid file or directory. Use whitespace to seperate your args. =================================================================== " >&5 echo "$as_me: error: =================================================================== Please check arguments given to --with-bootclasspath or \${BOOTCLASSPATH} Each argument must be a valid file or directory. Use whitespace to seperate your args. =================================================================== " >&2;} { (exit 1); exit 1; }; } } echo "$as_me:$LINENO: result: good" >&5 echo "${ECHO_T}good" >&6 done } test -z "${BOOTCLASSPATH}" && { case "${javac}" in jikes) BOOTCLASSPATH="" set x ${JAVA} while test $# -gt 1 ; do x="$2" ; shift d=`dirname $x` test -d "$d" || continue d=`(cd $d && cd .. && pwd)` test -d "$d" || continue test -f "$d/jre/lib/rt.jar" && { BOOTCLASSPATH="$d/jre/lib/rt.jar" ## we need to try whether jikes accept .. (tbd) break } test -f "$d/lib/rt.jar" && { BOOTCLASSPATH="$d/lib/rt.jar" ## we need to try whether jikes accept .. (tbd) break } done ## go for some unusual locations (MacOS) test -z "${BOOTCLASSPATH}" && { fwdir=/System/Library/Frameworks/JavaVM.framework/Versions for x in 1.4.1 1.3.1 ; do if test -f "$fwdir/$x/Classes/classes.jar" ; then BOOTCLASSPATH="$fwdir/$x/Classes/classes.jar" break fi done } ## give up in case we can't set. test -z "${BOOTCLASSPATH}" && { { { echo "$as_me:$LINENO: error: Unable to set BOOTCLASSPATH - there is no rt.jar around." >&5 echo "$as_me: error: Unable to set BOOTCLASSPATH - there is no rt.jar around." >&2;} { (exit 1); exit 1; }; } } ;; *) BOOTCLASSPATH="" ;; esac } test -n "${BOOTCLASSPATH}" && { ## Finalize BOOTCLASSPATH. 
Depending on platform join arguments using ## a different seperator. case $build_os in cygwin) sep=";" ;; *) sep=":" ;; esac set x $BOOTCLASSPATH ; shift BOOTCLASSPATH="$1" shift while test $# -gt 0 ; do BOOTCLASSPATH="${BOOTCLASSPATH}${sep}${1}" shift done } ## Use Java first in list. set x ${JAVA} JAVA="${2}" ;; esac #xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx# # C++ # #xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx# case $LANG_CXX in 1) ax_arg_list="tlib lib ar /usr/bin/ar " if test "x${AR}" != "x" ; then ax_arg_list="${AR}" fi AR="" for ac_prog in ${ax_arg_list} ; do set dummy $ac_prog; ac_word=${2} ## if argument is absolute we check whether such a file exists, ## otherwise we lookup PATH. Each hit will be added to main ## variable. case $ac_word in [\\/]* | ?:[\\/]*) echo "$as_me:$LINENO: checking for $ac_word" >&5 echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 if test -f $ac_word ; then AR="${AR} ${ac_word}" echo "$as_me:$LINENO: result: yes" >&5 echo "${ECHO_T}yes" >&6 else echo "$as_me:$LINENO: result: no" >&5 echo "${ECHO_T}no" >&6 fi ;; *) as_found= as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then AR="${AR} $as_dir/$ac_word$ac_exec_ext" echo "$as_me:$LINENO: checking for $ac_word" >&5 echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 echo "$as_me:$LINENO: result: $as_dir/$ac_word$ac_exec_ext" >&5 echo "${ECHO_T}$as_dir/$ac_word$ac_exec_ext" >&6 as_found=1 fi done done test "x$as_found" == "x" && { echo "$as_me:$LINENO: checking for $ac_word" >&5 echo $ECHO_N "checking for $ac_word... 
$ECHO_C" >&6 echo "$as_me:$LINENO: result: no" >&5 echo "${ECHO_T}no" >&6 } ;; esac done if test "x${AR}" != "x" ; then : else { { echo "$as_me:$LINENO: error: no suitable value has been found for AR" >&5 echo "$as_me: error: no suitable value has been found for AR" >&2;} { (exit 1); exit 1; }; } fi ## Try to figure out what C++ compiler shall be used. Note that CC ## clashes on cygwin. While CC is usually SUN's C++ compiler name, ## CC is also present on Cygwin - it's just an alias for gcc. The ## real alias is actually 'cc' but names are searched in non- ## sensitive manner. To solve this problem we use kind of hack ## here and list compilers availabe to known operating systems. case $build_os in cygwin) ## On Cygwin/Microsoft we are aware of Borland C++, Microsoft ## C++ and GNU. cxx_compiler_list="bcc32 cl g++" # FIXME: for bcc32 c_compiler_list="cl gcc" ;; *) ## On other platforms we now HP C++ (aCC), IBM C++ (xlC*) and ## of course GNU. If there's a GNU compiler around we prefer ## GNU. This avoids also a problem with vendors having CC ## a symbolic link to "gcc" instead of "g++". cxx_compiler_list="g++ aCC CC xlC xlC_r cxx c++" # FIXME: for other unix flavours c_compiler_list="cc gcc xlc_r acc" ;; esac ## Find a compiler for me. If compiler is not in list you can al- ## ways override by using environment varialbe CXX. ac_ext=cc ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu if test -n "$ac_tool_prefix"; then for ac_prog in $CCC ${cxx_compiler_list} do # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. set dummy $ac_tool_prefix$ac_prog; ac_word=$2 echo "$as_me:$LINENO: checking for $ac_word" >&5 echo $ECHO_N "checking for $ac_word... 
$ECHO_C" >&6 if test "${ac_cv_prog_CXX+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else if test -n "$CXX"; then ac_cv_prog_CXX="$CXX" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CXX="$ac_tool_prefix$ac_prog" echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done fi fi CXX=$ac_cv_prog_CXX if test -n "$CXX"; then echo "$as_me:$LINENO: result: $CXX" >&5 echo "${ECHO_T}$CXX" >&6 else echo "$as_me:$LINENO: result: no" >&5 echo "${ECHO_T}no" >&6 fi test -n "$CXX" && break done fi if test -z "$CXX"; then ac_ct_CXX=$CXX for ac_prog in $CCC ${cxx_compiler_list} do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 echo "$as_me:$LINENO: checking for $ac_word" >&5 echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 if test "${ac_cv_prog_ac_ct_CXX+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else if test -n "$ac_ct_CXX"; then ac_cv_prog_ac_ct_CXX="$ac_ct_CXX" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_CXX="$ac_prog" echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done fi fi ac_ct_CXX=$ac_cv_prog_ac_ct_CXX if test -n "$ac_ct_CXX"; then echo "$as_me:$LINENO: result: $ac_ct_CXX" >&5 echo "${ECHO_T}$ac_ct_CXX" >&6 else echo "$as_me:$LINENO: result: no" >&5 echo "${ECHO_T}no" >&6 fi test -n "$ac_ct_CXX" && break done test -n "$ac_ct_CXX" || ac_ct_CXX="g++" CXX=$ac_ct_CXX fi # Provide some information about the compiler. 
echo "$as_me:$LINENO:" \ "checking for C++ compiler version" >&5 ac_compiler=`set X $ac_compile; echo $2` { (eval echo "$as_me:$LINENO: \"$ac_compiler --version &5\"") >&5 (eval $ac_compiler --version &5) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } { (eval echo "$as_me:$LINENO: \"$ac_compiler -v &5\"") >&5 (eval $ac_compiler -v &5) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } { (eval echo "$as_me:$LINENO: \"$ac_compiler -V &5\"") >&5 (eval $ac_compiler -V &5) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ int main () { ; return 0; } _ACEOF ac_clean_files_save=$ac_clean_files ac_clean_files="$ac_clean_files a.out a.exe b.out" # Try to create an executable without -o first, disregard a.out. # It will help us diagnose broken compilers, and finding out an intuition # of exeext. echo "$as_me:$LINENO: checking for C++ compiler default output file name" >&5 echo $ECHO_N "checking for C++ compiler default output file name... $ECHO_C" >&6 ac_link_default=`echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'` if { (eval echo "$as_me:$LINENO: \"$ac_link_default\"") >&5 (eval $ac_link_default) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then # Find the output, starting from the most likely. This scheme is # not robust to junk in `.', hence go to wildcards (a.*) only as a last # resort. # Be careful to initialize this variable, since it used to be cached. # Otherwise an old cache value of `no' led to `EXEEXT = no' in a Makefile. ac_cv_exeext= # b.out is created by i960 compilers. 
for ac_file in a_out.exe a.exe conftest.exe a.out conftest a.* conftest.* b.out do test -f "$ac_file" || continue case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.o | *.obj ) ;; conftest.$ac_ext ) # This is the source file. ;; [ab].out ) # We found the default executable, but exeext='' is most # certainly right. break;; *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` # FIXME: I believe we export ac_cv_exeext for Libtool, # but it would be cool to find out if it's true. Does anybody # maintain Libtool? --akim. export ac_cv_exeext break;; * ) break;; esac done else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 { { echo "$as_me:$LINENO: error: C++ compiler cannot create executables See \`config.log' for more details." >&5 echo "$as_me: error: C++ compiler cannot create executables See \`config.log' for more details." >&2;} { (exit 77); exit 77; }; } fi ac_exeext=$ac_cv_exeext echo "$as_me:$LINENO: result: $ac_file" >&5 echo "${ECHO_T}$ac_file" >&6 # Check the compiler produces executables we can run. If not, either # the compiler is broken, or we cross compile. echo "$as_me:$LINENO: checking whether the C++ compiler works" >&5 echo $ECHO_N "checking whether the C++ compiler works... $ECHO_C" >&6 # FIXME: These cross compiler hacks should be removed for Autoconf 3.0 # If not cross compiling, check that we can run a simple program. if test "$cross_compiling" != yes; then if { ac_try='./$ac_file' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then cross_compiling=no else if test "$cross_compiling" = maybe; then cross_compiling=yes else { { echo "$as_me:$LINENO: error: cannot run C++ compiled programs. If you meant to cross compile, use \`--host'. See \`config.log' for more details." >&5 echo "$as_me: error: cannot run C++ compiled programs. If you meant to cross compile, use \`--host'. 
See \`config.log' for more details." >&2;} { (exit 1); exit 1; }; } fi fi fi echo "$as_me:$LINENO: result: yes" >&5 echo "${ECHO_T}yes" >&6 rm -f a.out a.exe conftest$ac_cv_exeext b.out ac_clean_files=$ac_clean_files_save # Check the compiler produces executables we can run. If not, either # the compiler is broken, or we cross compile. echo "$as_me:$LINENO: checking whether we are cross compiling" >&5 echo $ECHO_N "checking whether we are cross compiling... $ECHO_C" >&6 echo "$as_me:$LINENO: result: $cross_compiling" >&5 echo "${ECHO_T}$cross_compiling" >&6 echo "$as_me:$LINENO: checking for suffix of executables" >&5 echo $ECHO_N "checking for suffix of executables... $ECHO_C" >&6 if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 (eval $ac_link) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then # If both `conftest.exe' and `conftest' are `present' (well, observable) # catch `conftest.exe'. For instance with Cygwin, `ls conftest' will # work properly (i.e., refer to `conftest.exe'), while it won't with # `rm'. for ac_file in conftest.exe conftest conftest.*; do test -f "$ac_file" || continue case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.o | *.obj ) ;; *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` export ac_cv_exeext break;; * ) break;; esac done else { { echo "$as_me:$LINENO: error: cannot compute suffix of executables: cannot compile and link See \`config.log' for more details." >&5 echo "$as_me: error: cannot compute suffix of executables: cannot compile and link See \`config.log' for more details." >&2;} { (exit 1); exit 1; }; } fi rm -f conftest$ac_cv_exeext echo "$as_me:$LINENO: result: $ac_cv_exeext" >&5 echo "${ECHO_T}$ac_cv_exeext" >&6 rm -f conftest.$ac_ext EXEEXT=$ac_cv_exeext ac_exeext=$EXEEXT echo "$as_me:$LINENO: checking for suffix of object files" >&5 echo $ECHO_N "checking for suffix of object files... 
$ECHO_C" >&6 if test "${ac_cv_objext+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ int main () { ; return 0; } _ACEOF rm -f conftest.o conftest.obj if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 (eval $ac_compile) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then for ac_file in `(ls conftest.o conftest.obj; ls conftest.*) 2>/dev/null`; do case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg ) ;; *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'` break;; esac done else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 { { echo "$as_me:$LINENO: error: cannot compute suffix of object files: cannot compile See \`config.log' for more details." >&5 echo "$as_me: error: cannot compute suffix of object files: cannot compile See \`config.log' for more details." >&2;} { (exit 1); exit 1; }; } fi rm -f conftest.$ac_cv_objext conftest.$ac_ext fi echo "$as_me:$LINENO: result: $ac_cv_objext" >&5 echo "${ECHO_T}$ac_cv_objext" >&6 OBJEXT=$ac_cv_objext ac_objext=$OBJEXT echo "$as_me:$LINENO: checking whether we are using the GNU C++ compiler" >&5 echo $ECHO_N "checking whether we are using the GNU C++ compiler... $ECHO_C" >&6 if test "${ac_cv_cxx_compiler_gnu+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ int main () { #ifndef __GNUC__ choke me #endif ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 (eval $ac_compile) 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); } && { ac_try='test -z "$ac_cxx_werror_flag" || test ! -s conftest.err' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; } && { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then ac_compiler_gnu=yes else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_compiler_gnu=no fi rm -f conftest.err conftest.$ac_objext conftest.$ac_ext ac_cv_cxx_compiler_gnu=$ac_compiler_gnu fi echo "$as_me:$LINENO: result: $ac_cv_cxx_compiler_gnu" >&5 echo "${ECHO_T}$ac_cv_cxx_compiler_gnu" >&6 GXX=`test $ac_compiler_gnu = yes && echo yes` ac_test_CXXFLAGS=${CXXFLAGS+set} ac_save_CXXFLAGS=$CXXFLAGS CXXFLAGS="-g" echo "$as_me:$LINENO: checking whether $CXX accepts -g" >&5 echo $ECHO_N "checking whether $CXX accepts -g... $ECHO_C" >&6 if test "${ac_cv_prog_cxx_g+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ int main () { ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 (eval $ac_compile) 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { ac_try='test -z "$ac_cxx_werror_flag" || test ! -s conftest.err' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; } && { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); }; }; then ac_cv_prog_cxx_g=yes else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_prog_cxx_g=no fi rm -f conftest.err conftest.$ac_objext conftest.$ac_ext fi echo "$as_me:$LINENO: result: $ac_cv_prog_cxx_g" >&5 echo "${ECHO_T}$ac_cv_prog_cxx_g" >&6 if test "$ac_test_CXXFLAGS" = set; then CXXFLAGS=$ac_save_CXXFLAGS elif test $ac_cv_prog_cxx_g = yes; then if test "$GXX" = yes; then CXXFLAGS="-g -O2" else CXXFLAGS="-g" fi else if test "$GXX" = yes; then CXXFLAGS="-O2" else CXXFLAGS= fi fi for ac_declaration in \ '' \ 'extern "C" void std::exit (int) throw (); using std::exit;' \ 'extern "C" void std::exit (int); using std::exit;' \ 'extern "C" void exit (int) throw ();' \ 'extern "C" void exit (int);' \ 'void exit (int);' do cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_declaration #include int main () { exit (42); ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 (eval $ac_compile) 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { ac_try='test -z "$ac_cxx_werror_flag" || test ! -s conftest.err' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; } && { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then : else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 continue fi rm -f conftest.err conftest.$ac_objext conftest.$ac_ext cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. 
*/ $ac_declaration int main () { exit (42); ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 (eval $ac_compile) 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { ac_try='test -z "$ac_cxx_werror_flag" || test ! -s conftest.err' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; } && { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then break else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi rm -f conftest.err conftest.$ac_objext conftest.$ac_ext done rm -f conftest* if test -n "$ac_declaration"; then echo '#ifdef __cplusplus' >>confdefs.h echo $ac_declaration >>confdefs.h echo '#endif' >>confdefs.h fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu if test -n "$ac_tool_prefix"; then for ac_prog in ${c_compiler_list} do # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. set dummy $ac_tool_prefix$ac_prog; ac_word=$2 echo "$as_me:$LINENO: checking for $ac_word" >&5 echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 if test "${ac_cv_prog_CC+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. 
else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CC="$ac_tool_prefix$ac_prog" echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then echo "$as_me:$LINENO: result: $CC" >&5 echo "${ECHO_T}$CC" >&6 else echo "$as_me:$LINENO: result: no" >&5 echo "${ECHO_T}no" >&6 fi test -n "$CC" && break done fi if test -z "$CC"; then ac_ct_CC=$CC for ac_prog in ${c_compiler_list} do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 echo "$as_me:$LINENO: checking for $ac_word" >&5 echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 if test "${ac_cv_prog_ac_ct_CC+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else if test -n "$ac_ct_CC"; then ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_CC="$ac_prog" echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done fi fi ac_ct_CC=$ac_cv_prog_ac_ct_CC if test -n "$ac_ct_CC"; then echo "$as_me:$LINENO: result: $ac_ct_CC" >&5 echo "${ECHO_T}$ac_ct_CC" >&6 else echo "$as_me:$LINENO: result: no" >&5 echo "${ECHO_T}no" >&6 fi test -n "$ac_ct_CC" && break done CC=$ac_ct_CC fi test -z "$CC" && { { echo "$as_me:$LINENO: error: no acceptable C compiler found in \$PATH See \`config.log' for more details." >&5 echo "$as_me: error: no acceptable C compiler found in \$PATH See \`config.log' for more details." >&2;} { (exit 1); exit 1; }; } # Provide some information about the compiler. 
echo "$as_me:$LINENO:" \ "checking for C compiler version" >&5 ac_compiler=`set X $ac_compile; echo $2` { (eval echo "$as_me:$LINENO: \"$ac_compiler --version &5\"") >&5 (eval $ac_compiler --version &5) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } { (eval echo "$as_me:$LINENO: \"$ac_compiler -v &5\"") >&5 (eval $ac_compiler -v &5) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } { (eval echo "$as_me:$LINENO: \"$ac_compiler -V &5\"") >&5 (eval $ac_compiler -V &5) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } echo "$as_me:$LINENO: checking whether we are using the GNU C compiler" >&5 echo $ECHO_N "checking whether we are using the GNU C compiler... $ECHO_C" >&6 if test "${ac_cv_c_compiler_gnu+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ int main () { #ifndef __GNUC__ choke me #endif ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 (eval $ac_compile) 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; } && { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); }; }; then ac_compiler_gnu=yes else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_compiler_gnu=no fi rm -f conftest.err conftest.$ac_objext conftest.$ac_ext ac_cv_c_compiler_gnu=$ac_compiler_gnu fi echo "$as_me:$LINENO: result: $ac_cv_c_compiler_gnu" >&5 echo "${ECHO_T}$ac_cv_c_compiler_gnu" >&6 GCC=`test $ac_compiler_gnu = yes && echo yes` ac_test_CFLAGS=${CFLAGS+set} ac_save_CFLAGS=$CFLAGS CFLAGS="-g" echo "$as_me:$LINENO: checking whether $CC accepts -g" >&5 echo $ECHO_N "checking whether $CC accepts -g... $ECHO_C" >&6 if test "${ac_cv_prog_cc_g+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ int main () { ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 (eval $ac_compile) 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; } && { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); }; }; then ac_cv_prog_cc_g=yes else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_prog_cc_g=no fi rm -f conftest.err conftest.$ac_objext conftest.$ac_ext fi echo "$as_me:$LINENO: result: $ac_cv_prog_cc_g" >&5 echo "${ECHO_T}$ac_cv_prog_cc_g" >&6 if test "$ac_test_CFLAGS" = set; then CFLAGS=$ac_save_CFLAGS elif test $ac_cv_prog_cc_g = yes; then if test "$GCC" = yes; then CFLAGS="-g -O2" else CFLAGS="-g" fi else if test "$GCC" = yes; then CFLAGS="-O2" else CFLAGS= fi fi echo "$as_me:$LINENO: checking for $CC option to accept ANSI C" >&5 echo $ECHO_N "checking for $CC option to accept ANSI C... $ECHO_C" >&6 if test "${ac_cv_prog_cc_stdc+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else ac_cv_prog_cc_stdc=no ac_save_CC=$CC cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include #include #include #include /* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */ struct buf { int x; }; FILE * (*rcsopen) (struct buf *, struct stat *, int); static char *e (p, i) char **p; int i; { return p[i]; } static char *f (char * (*g) (char **, int), char **p, ...) { char *s; va_list v; va_start (v,p); s = g (p, va_arg (v,int)); va_end (v); return s; } /* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has function prototypes and stuff, but not '\xHH' hex character constants. These don't provoke an error unfortunately, instead are silently treated as 'x'. The following induces an error, until -std1 is added to get proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an array size at least. It's necessary to write '\x00'==0 to get something that's true only with -std1. */ int osf4_cc_array ['\x00' == 0 ? 
1 : -1]; int test (int i, double x); struct s1 {int (*f) (int a);}; struct s2 {int (*f) (double a);}; int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int); int argc; char **argv; int main () { return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]; ; return 0; } _ACEOF # Don't try gcc -ansi; that turns off useful extensions and # breaks some systems' header files. # AIX -qlanglvl=ansi # Ultrix and OSF/1 -std1 # HP-UX 10.20 and later -Ae # HP-UX older versions -Aa -D_HPUX_SOURCE # SVR4 -Xc -D__EXTENSIONS__ for ac_arg in "" -qlanglvl=ansi -std1 -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" do CC="$ac_save_CC $ac_arg" rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 (eval $ac_compile) 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; } && { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then ac_cv_prog_cc_stdc=$ac_arg break else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi rm -f conftest.err conftest.$ac_objext done rm -f conftest.$ac_ext conftest.$ac_objext CC=$ac_save_CC fi case "x$ac_cv_prog_cc_stdc" in x|xno) echo "$as_me:$LINENO: result: none needed" >&5 echo "${ECHO_T}none needed" >&6 ;; *) echo "$as_me:$LINENO: result: $ac_cv_prog_cc_stdc" >&5 echo "${ECHO_T}$ac_cv_prog_cc_stdc" >&6 CC="$CC $ac_cv_prog_cc_stdc" ;; esac # Some people use a C++ compiler to compile C. Since we use `exit', # in C++ we need to declare it. 
In case someone uses the same compiler # for both compiling C and C++ we need to have the C++ compiler decide # the declaration of exit, since it's the most demanding environment. cat >conftest.$ac_ext <<_ACEOF #ifndef __cplusplus choke me #endif _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 (eval $ac_compile) 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; } && { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then for ac_declaration in \ '' \ 'extern "C" void std::exit (int) throw (); using std::exit;' \ 'extern "C" void std::exit (int); using std::exit;' \ 'extern "C" void exit (int) throw ();' \ 'extern "C" void exit (int);' \ 'void exit (int);' do cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_declaration #include int main () { exit (42); ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 (eval $ac_compile) 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); }; } && { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then : else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 continue fi rm -f conftest.err conftest.$ac_objext conftest.$ac_ext cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_declaration int main () { exit (42); ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 (eval $ac_compile) 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; } && { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); }; }; then break else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi rm -f conftest.err conftest.$ac_objext conftest.$ac_ext done rm -f conftest* if test -n "$ac_declaration"; then echo '#ifdef __cplusplus' >>confdefs.h echo $ac_declaration >>confdefs.h echo '#endif' >>confdefs.h fi else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi rm -f conftest.err conftest.$ac_objext conftest.$ac_ext ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu ## just overrule what autoconf figured out - we never asked for ## this anyway. Our handling of compiler options is done below ## in the fine tuning section. CXXFLAGS="" ## 'cxx' shall be the canonical compiler name. For example, gcc ## cl, bcc, CC, etc. Note that this is in general not equal to CXX. ## For example, CYGWIN appears to have c++ as name for g++ and cc ## as alias for gcc. ## CXX is used to call the compiler, 'cxx' shall be used for ## decisions based on compiler in use. cxx="" if test "x$GXX" = xyes; then cxx="gcc" else cxx=`basename $CXX` cxx=`echo ${cxx}|sed 's,\.[^.]*$,,'` fi case ${cxx} in gcc*) cxx='gcc' ;; cl*|CL*) cxx='cl' ## check whether this is Microsoft C++ (tbd) ;; bcc32*|BCC32*) cxx='bcc32' ## check whether this is Borland C++ (tbd) ;; CC*) ## check whether this is SUN C++ (tbd) cxx="CC" ;; xlC*|xlC_r*) cxx="xlC" ## check whether this is IBM C++ (tbd) ;; aCC*) cxx='aCC' ## check whether this is HP C++ (tbd) ;; *) ## unknown compiler - good luck. { echo "$as_me:$LINENO: WARNING: %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% U N K N O W N C++ COMPILER: ${cxx} ============================================================ Compilation is very likely to fail as we are not aware of this compiler yet. 
In case of problems please try to set additional flags by using environment variable CXXFLAGS. If CXXFLAGS does not help you, please edit either ${srcdir}/scripts/cxx.sh.in ; or ${srcdir}/scripts/link.sh.in Those scripts are getting called for compilation of all C++ source code (cxx.sh.in) or for linking binaries (link.sh.in). In very obscure cases, building the library may also fail.If so, please try variable ARFLAGS or edit ${srcdir}/scripts/lib.sh.in ============================================================ *** PLEASE PROVIDE FEEDBACK TO antlr.org - THANK YOU *** %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% " >&5 echo "$as_me: WARNING: %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% U N K N O W N C++ COMPILER: ${cxx} ============================================================ Compilation is very likely to fail as we are not aware of this compiler yet. In case of problems please try to set additional flags by using environment variable CXXFLAGS. If CXXFLAGS does not help you, please edit either ${srcdir}/scripts/cxx.sh.in ; or ${srcdir}/scripts/link.sh.in Those scripts are getting called for compilation of all C++ source code (cxx.sh.in) or for linking binaries (link.sh.in). In very obscure cases, building the library may also fail.If so, please try variable ARFLAGS or edit ${srcdir}/scripts/lib.sh.in ============================================================ *** PLEASE PROVIDE FEEDBACK TO antlr.org - THANK YOU *** %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% " >&2;} ;; esac ##xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx## ## COMPILER TUNING SECTION ## ##==============================================================## ## compiler tuning has basically removed from this configure ## script as it appears not to be handy and practical. All ## compiler flags are set in cxx.sh.in. If there is any ## change required, go there and change. 
## Note that flags given in this file may overrule settings ## given in cxx.sh.in. Therefore, if you "add" flags here, ## put a "+" in front of variable CXXFLAGS. For example, let's ## say you want to add "-g". Then do this: ## ## CXXFLAGS="-g" ## .. ## CXXFLAGS="+ ${CXXFLAGS}" ## ## The addition of "+" CXXFLAGS should be the last action for ## that variable. The net effect is that "-g" will be added to ## flags set in cxx.sh.in. So the result may look like ## gcc -Wall -c -g .. ## ## Similar, put a "-" in front to get "gcc -g -Wall -c .." and ## put nothing or a "=" in front to get "gcc -g ..". ## ## Similar to CXXFLAGS are LDFLAGS and ARFLAGS for linking ## and making a static library. case "${cxx}" in cl|bcc32) OBJEXT=".obj" LIBEXT=".lib" EXEEXT=".exe" ANTLR_LIB="$abs_this_builddir/lib/cpp/src/antlr.lib" ;; *) OBJEXT=".o" ;; esac LDFLAGS= set x ${AR} AR="${2}" ##xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx## ## END COMPILER TUNING SECTION ## ##==============================================================## # Checks for header files. ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu echo "$as_me:$LINENO: checking how to run the C preprocessor" >&5 echo $ECHO_N "checking how to run the C preprocessor... $ECHO_C" >&6 # On Suns, sometimes $CPP names a directory. if test -n "$CPP" && test -d "$CPP"; then CPP= fi if test -z "$CPP"; then if test "${ac_cv_prog_CPP+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else # Double quotes because CPP needs to be expanded for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp" do ac_preproc_ok=false for ac_c_preproc_warn_flag in '' yes do # Use a header file that comes with gcc, so configuring glibc # with a fresh cross-compiler works. # Prefer to if __STDC__ is defined, since # exists even on freestanding compilers. 
# On the NeXT, cc -E runs the code through the compiler's parser, # not just through cpp. "Syntax error" is here to catch this case. cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #ifdef __STDC__ # include #else # include #endif Syntax error _ACEOF if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5 (eval $ac_cpp conftest.$ac_ext) 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } >/dev/null; then if test -s conftest.err; then ac_cpp_err=$ac_c_preproc_warn_flag ac_cpp_err=$ac_cpp_err$ac_c_werror_flag else ac_cpp_err= fi else ac_cpp_err=yes fi if test -z "$ac_cpp_err"; then : else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 # Broken: fails on valid input. continue fi rm -f conftest.err conftest.$ac_ext # OK, works on sane cases. Now check whether non-existent headers # can be detected and how. cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include _ACEOF if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5 (eval $ac_cpp conftest.$ac_ext) 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } >/dev/null; then if test -s conftest.err; then ac_cpp_err=$ac_c_preproc_warn_flag ac_cpp_err=$ac_cpp_err$ac_c_werror_flag else ac_cpp_err= fi else ac_cpp_err=yes fi if test -z "$ac_cpp_err"; then # Broken: success on invalid input. continue else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 # Passes both tests. ac_preproc_ok=: break fi rm -f conftest.err conftest.$ac_ext done # Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. 
rm -f conftest.err conftest.$ac_ext if $ac_preproc_ok; then break fi done ac_cv_prog_CPP=$CPP fi CPP=$ac_cv_prog_CPP else ac_cv_prog_CPP=$CPP fi echo "$as_me:$LINENO: result: $CPP" >&5 echo "${ECHO_T}$CPP" >&6 ac_preproc_ok=false for ac_c_preproc_warn_flag in '' yes do # Use a header file that comes with gcc, so configuring glibc # with a fresh cross-compiler works. # Prefer to if __STDC__ is defined, since # exists even on freestanding compilers. # On the NeXT, cc -E runs the code through the compiler's parser, # not just through cpp. "Syntax error" is here to catch this case. cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #ifdef __STDC__ # include #else # include #endif Syntax error _ACEOF if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5 (eval $ac_cpp conftest.$ac_ext) 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } >/dev/null; then if test -s conftest.err; then ac_cpp_err=$ac_c_preproc_warn_flag ac_cpp_err=$ac_cpp_err$ac_c_werror_flag else ac_cpp_err= fi else ac_cpp_err=yes fi if test -z "$ac_cpp_err"; then : else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 # Broken: fails on valid input. continue fi rm -f conftest.err conftest.$ac_ext # OK, works on sane cases. Now check whether non-existent headers # can be detected and how. cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include _ACEOF if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5 (eval $ac_cpp conftest.$ac_ext) 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); } >/dev/null; then if test -s conftest.err; then ac_cpp_err=$ac_c_preproc_warn_flag ac_cpp_err=$ac_cpp_err$ac_c_werror_flag else ac_cpp_err= fi else ac_cpp_err=yes fi if test -z "$ac_cpp_err"; then # Broken: success on invalid input. continue else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 # Passes both tests. ac_preproc_ok=: break fi rm -f conftest.err conftest.$ac_ext done # Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. rm -f conftest.err conftest.$ac_ext if $ac_preproc_ok; then : else { { echo "$as_me:$LINENO: error: C preprocessor \"$CPP\" fails sanity check See \`config.log' for more details." >&5 echo "$as_me: error: C preprocessor \"$CPP\" fails sanity check See \`config.log' for more details." >&2;} { (exit 1); exit 1; }; } fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu echo "$as_me:$LINENO: checking for egrep" >&5 echo $ECHO_N "checking for egrep... $ECHO_C" >&6 if test "${ac_cv_prog_egrep+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else if echo a | (grep -E '(a|b)') >/dev/null 2>&1 then ac_cv_prog_egrep='grep -E' else ac_cv_prog_egrep='egrep' fi fi echo "$as_me:$LINENO: result: $ac_cv_prog_egrep" >&5 echo "${ECHO_T}$ac_cv_prog_egrep" >&6 EGREP=$ac_cv_prog_egrep echo "$as_me:$LINENO: checking for ANSI C header files" >&5 echo $ECHO_N "checking for ANSI C header files... $ECHO_C" >&6 if test "${ac_cv_header_stdc+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. 
*/ #include #include #include #include int main () { ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 (eval $ac_compile) 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; } && { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then ac_cv_header_stdc=yes else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_header_stdc=no fi rm -f conftest.err conftest.$ac_objext conftest.$ac_ext if test $ac_cv_header_stdc = yes; then # SunOS 4.x string.h does not declare mem*, contrary to ANSI. cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "memchr" >/dev/null 2>&1; then : else ac_cv_header_stdc=no fi rm -f conftest* fi if test $ac_cv_header_stdc = yes; then # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "free" >/dev/null 2>&1; then : else ac_cv_header_stdc=no fi rm -f conftest* fi if test $ac_cv_header_stdc = yes; then # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi. if test "$cross_compiling" = yes; then : else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. 
*/ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include #if ((' ' & 0x0FF) == 0x020) # define ISLOWER(c) ('a' <= (c) && (c) <= 'z') # define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c)) #else # define ISLOWER(c) \ (('a' <= (c) && (c) <= 'i') \ || ('j' <= (c) && (c) <= 'r') \ || ('s' <= (c) && (c) <= 'z')) # define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c)) #endif #define XOR(e, f) (((e) && !(f)) || (!(e) && (f))) int main () { int i; for (i = 0; i < 256; i++) if (XOR (islower (i), ISLOWER (i)) || toupper (i) != TOUPPER (i)) exit(2); exit (0); } _ACEOF rm -f conftest$ac_exeext if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 (eval $ac_link) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { ac_try='./conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then : else echo "$as_me: program exited with status $ac_status" >&5 echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ( exit $ac_status ) ac_cv_header_stdc=no fi rm -f core *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext fi fi fi echo "$as_me:$LINENO: result: $ac_cv_header_stdc" >&5 echo "${ECHO_T}$ac_cv_header_stdc" >&6 if test $ac_cv_header_stdc = yes; then cat >>confdefs.h <<\_ACEOF #define STDC_HEADERS 1 _ACEOF fi # On IRIX 5.3, sys/types and inttypes.h are conflicting. for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \ inttypes.h stdint.h unistd.h do as_ac_Header=`echo "ac_cv_header_$ac_header" | $as_tr_sh` echo "$as_me:$LINENO: checking for $ac_header" >&5 echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6 if eval "test \"\${$as_ac_Header+set}\" = set"; then echo $ECHO_N "(cached) $ECHO_C" >&6 else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. 
*/ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default #include <$ac_header> _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 (eval $ac_compile) 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; } && { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then eval "$as_ac_Header=yes" else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 eval "$as_ac_Header=no" fi rm -f conftest.err conftest.$ac_objext conftest.$ac_ext fi echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_Header'}'`" >&5 echo "${ECHO_T}`eval echo '${'$as_ac_Header'}'`" >&6 if test `eval echo '${'$as_ac_Header'}'` = yes; then cat >>confdefs.h <<_ACEOF #define `echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF fi done for ac_header in stdlib.h unistd.h do as_ac_Header=`echo "ac_cv_header_$ac_header" | $as_tr_sh` if eval "test \"\${$as_ac_Header+set}\" = set"; then echo "$as_me:$LINENO: checking for $ac_header" >&5 echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6 if eval "test \"\${$as_ac_Header+set}\" = set"; then echo $ECHO_N "(cached) $ECHO_C" >&6 fi echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_Header'}'`" >&5 echo "${ECHO_T}`eval echo '${'$as_ac_Header'}'`" >&6 else # Is the header compilable? echo "$as_me:$LINENO: checking $ac_header usability" >&5 echo $ECHO_N "checking $ac_header usability... $ECHO_C" >&6 cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. 
*/ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default #include <$ac_header> _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 (eval $ac_compile) 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; } && { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then ac_header_compiler=yes else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_compiler=no fi rm -f conftest.err conftest.$ac_objext conftest.$ac_ext echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 echo "${ECHO_T}$ac_header_compiler" >&6 # Is the header present? echo "$as_me:$LINENO: checking $ac_header presence" >&5 echo $ECHO_N "checking $ac_header presence... $ECHO_C" >&6 cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include <$ac_header> _ACEOF if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5 (eval $ac_cpp conftest.$ac_ext) 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); } >/dev/null; then if test -s conftest.err; then ac_cpp_err=$ac_c_preproc_warn_flag ac_cpp_err=$ac_cpp_err$ac_c_werror_flag else ac_cpp_err= fi else ac_cpp_err=yes fi if test -z "$ac_cpp_err"; then ac_header_preproc=yes else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_preproc=no fi rm -f conftest.err conftest.$ac_ext echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 echo "${ECHO_T}$ac_header_preproc" >&6 # So? What about this header? case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in yes:no: ) { echo "$as_me:$LINENO: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&5 echo "$as_me: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&2;} { echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the compiler's result" >&5 echo "$as_me: WARNING: $ac_header: proceeding with the compiler's result" >&2;} ac_header_preproc=yes ;; no:yes:* ) { echo "$as_me:$LINENO: WARNING: $ac_header: present but cannot be compiled" >&5 echo "$as_me: WARNING: $ac_header: present but cannot be compiled" >&2;} { echo "$as_me:$LINENO: WARNING: $ac_header: check for missing prerequisite headers?" >&5 echo "$as_me: WARNING: $ac_header: check for missing prerequisite headers?" 
>&2;} { echo "$as_me:$LINENO: WARNING: $ac_header: see the Autoconf documentation" >&5 echo "$as_me: WARNING: $ac_header: see the Autoconf documentation" >&2;} { echo "$as_me:$LINENO: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&5 echo "$as_me: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&2;} { echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5 echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;} { echo "$as_me:$LINENO: WARNING: $ac_header: in the future, the compiler will take precedence" >&5 echo "$as_me: WARNING: $ac_header: in the future, the compiler will take precedence" >&2;} ( cat <<\_ASBOX ## -------------------------------- ## ## Report this to the antlr lists. ## ## -------------------------------- ## _ASBOX ) | sed "s/^/$as_me: WARNING: /" >&2 ;; esac echo "$as_me:$LINENO: checking for $ac_header" >&5 echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6 if eval "test \"\${$as_ac_Header+set}\" = set"; then echo $ECHO_N "(cached) $ECHO_C" >&6 else eval "$as_ac_Header=\$ac_header_preproc" fi echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_Header'}'`" >&5 echo "${ECHO_T}`eval echo '${'$as_ac_Header'}'`" >&6 fi if test `eval echo '${'$as_ac_Header'}'` = yes; then cat >>confdefs.h <<_ACEOF #define `echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF fi done # Checks for typedefs, structures, and compiler characteristics. echo "$as_me:$LINENO: checking for an ANSI C-conforming const" >&5 echo $ECHO_N "checking for an ANSI C-conforming const... $ECHO_C" >&6 if test "${ac_cv_c_const+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ int main () { /* FIXME: Include the comments suggested by Paul. */ #ifndef __cplusplus /* Ultrix mips cc rejects this. 
*/ typedef int charset[2]; const charset x; /* SunOS 4.1.1 cc rejects this. */ char const *const *ccp; char **p; /* NEC SVR4.0.2 mips cc rejects this. */ struct point {int x, y;}; static struct point const zero = {0,0}; /* AIX XL C 1.02.0.0 rejects this. It does not let you subtract one const X* pointer from another in an arm of an if-expression whose if-part is not a constant expression */ const char *g = "string"; ccp = &g + (g ? g-g : 0); /* HPUX 7.0 cc rejects these. */ ++ccp; p = (char**) ccp; ccp = (char const *const *) p; { /* SCO 3.2v4 cc rejects this. */ char *t; char const *s = 0 ? (char *) 0 : (char const *) 0; *t++ = 0; } { /* Someone thinks the Sun supposedly-ANSI compiler will reject this. */ int x[] = {25, 17}; const int *foo = &x[0]; ++foo; } { /* Sun SC1.0 ANSI compiler rejects this -- but not the above. */ typedef const int *iptr; iptr p = 0; ++p; } { /* AIX XL C 1.02.0.0 rejects this saying "k.c", line 2.27: 1506-025 (S) Operand must be a modifiable lvalue. */ struct s { int j; const int *ap[3]; }; struct s *b; b->j = 5; } { /* ULTRIX-32 V3.1 (Rev 9) vcc rejects this */ const int foo = 10; } #endif ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 (eval $ac_compile) 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; } && { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); }; }; then ac_cv_c_const=yes else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_c_const=no fi rm -f conftest.err conftest.$ac_objext conftest.$ac_ext fi echo "$as_me:$LINENO: result: $ac_cv_c_const" >&5 echo "${ECHO_T}$ac_cv_c_const" >&6 if test $ac_cv_c_const = no; then cat >>confdefs.h <<\_ACEOF #define const _ACEOF fi echo "$as_me:$LINENO: checking for inline" >&5 echo $ECHO_N "checking for inline... $ECHO_C" >&6 if test "${ac_cv_c_inline+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else ac_cv_c_inline=no for ac_kw in inline __inline__ __inline; do cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #ifndef __cplusplus typedef int foo_t; static $ac_kw foo_t static_foo () {return 0; } $ac_kw foo_t foo () {return 0; } #endif _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 (eval $ac_compile) 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; } && { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); }; }; then ac_cv_c_inline=$ac_kw; break else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi rm -f conftest.err conftest.$ac_objext conftest.$ac_ext done fi echo "$as_me:$LINENO: result: $ac_cv_c_inline" >&5 echo "${ECHO_T}$ac_cv_c_inline" >&6 case $ac_cv_c_inline in inline | yes) ;; *) case $ac_cv_c_inline in no) ac_val=;; *) ac_val=$ac_cv_c_inline;; esac cat >>confdefs.h <<_ACEOF #ifndef __cplusplus #define inline $ac_val #endif _ACEOF ;; esac # Checks for library functions. for ac_header in stdlib.h do as_ac_Header=`echo "ac_cv_header_$ac_header" | $as_tr_sh` if eval "test \"\${$as_ac_Header+set}\" = set"; then echo "$as_me:$LINENO: checking for $ac_header" >&5 echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6 if eval "test \"\${$as_ac_Header+set}\" = set"; then echo $ECHO_N "(cached) $ECHO_C" >&6 fi echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_Header'}'`" >&5 echo "${ECHO_T}`eval echo '${'$as_ac_Header'}'`" >&6 else # Is the header compilable? echo "$as_me:$LINENO: checking $ac_header usability" >&5 echo $ECHO_N "checking $ac_header usability... $ECHO_C" >&6 cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default #include <$ac_header> _ACEOF rm -f conftest.$ac_objext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 (eval $ac_compile) 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; } && { ac_try='test -s conftest.$ac_objext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); }; }; then ac_header_compiler=yes else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_compiler=no fi rm -f conftest.err conftest.$ac_objext conftest.$ac_ext echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 echo "${ECHO_T}$ac_header_compiler" >&6 # Is the header present? echo "$as_me:$LINENO: checking $ac_header presence" >&5 echo $ECHO_N "checking $ac_header presence... $ECHO_C" >&6 cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include <$ac_header> _ACEOF if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5 (eval $ac_cpp conftest.$ac_ext) 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } >/dev/null; then if test -s conftest.err; then ac_cpp_err=$ac_c_preproc_warn_flag ac_cpp_err=$ac_cpp_err$ac_c_werror_flag else ac_cpp_err= fi else ac_cpp_err=yes fi if test -z "$ac_cpp_err"; then ac_header_preproc=yes else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_preproc=no fi rm -f conftest.err conftest.$ac_ext echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 echo "${ECHO_T}$ac_header_preproc" >&6 # So? What about this header? case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in yes:no: ) { echo "$as_me:$LINENO: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&5 echo "$as_me: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" 
>&2;} { echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the compiler's result" >&5 echo "$as_me: WARNING: $ac_header: proceeding with the compiler's result" >&2;} ac_header_preproc=yes ;; no:yes:* ) { echo "$as_me:$LINENO: WARNING: $ac_header: present but cannot be compiled" >&5 echo "$as_me: WARNING: $ac_header: present but cannot be compiled" >&2;} { echo "$as_me:$LINENO: WARNING: $ac_header: check for missing prerequisite headers?" >&5 echo "$as_me: WARNING: $ac_header: check for missing prerequisite headers?" >&2;} { echo "$as_me:$LINENO: WARNING: $ac_header: see the Autoconf documentation" >&5 echo "$as_me: WARNING: $ac_header: see the Autoconf documentation" >&2;} { echo "$as_me:$LINENO: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&5 echo "$as_me: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&2;} { echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5 echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;} { echo "$as_me:$LINENO: WARNING: $ac_header: in the future, the compiler will take precedence" >&5 echo "$as_me: WARNING: $ac_header: in the future, the compiler will take precedence" >&2;} ( cat <<\_ASBOX ## -------------------------------- ## ## Report this to the antlr lists. ## ## -------------------------------- ## _ASBOX ) | sed "s/^/$as_me: WARNING: /" >&2 ;; esac echo "$as_me:$LINENO: checking for $ac_header" >&5 echo $ECHO_N "checking for $ac_header... 
$ECHO_C" >&6 if eval "test \"\${$as_ac_Header+set}\" = set"; then echo $ECHO_N "(cached) $ECHO_C" >&6 else eval "$as_ac_Header=\$ac_header_preproc" fi echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_Header'}'`" >&5 echo "${ECHO_T}`eval echo '${'$as_ac_Header'}'`" >&6 fi if test `eval echo '${'$as_ac_Header'}'` = yes; then cat >>confdefs.h <<_ACEOF #define `echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF fi done echo "$as_me:$LINENO: checking for GNU libc compatible malloc" >&5 echo $ECHO_N "checking for GNU libc compatible malloc... $ECHO_C" >&6 if test "${ac_cv_func_malloc_0_nonnull+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else if test "$cross_compiling" = yes; then ac_cv_func_malloc_0_nonnull=no else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #if STDC_HEADERS || HAVE_STDLIB_H # include #else char *malloc (); #endif int main () { exit (malloc (0) ? 0 : 1); ; return 0; } _ACEOF rm -f conftest$ac_exeext if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 (eval $ac_link) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { ac_try='./conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); }; }; then ac_cv_func_malloc_0_nonnull=yes else echo "$as_me: program exited with status $ac_status" >&5 echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ( exit $ac_status ) ac_cv_func_malloc_0_nonnull=no fi rm -f core *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext fi fi echo "$as_me:$LINENO: result: $ac_cv_func_malloc_0_nonnull" >&5 echo "${ECHO_T}$ac_cv_func_malloc_0_nonnull" >&6 if test $ac_cv_func_malloc_0_nonnull = yes; then cat >>confdefs.h <<\_ACEOF #define HAVE_MALLOC 1 _ACEOF else cat >>confdefs.h <<\_ACEOF #define HAVE_MALLOC 0 _ACEOF case $LIBOBJS in "malloc.$ac_objext" | \ *" malloc.$ac_objext" | \ "malloc.$ac_objext "* | \ *" malloc.$ac_objext "* ) ;; *) LIBOBJS="$LIBOBJS malloc.$ac_objext" ;; esac cat >>confdefs.h <<\_ACEOF #define malloc rpl_malloc _ACEOF fi for ac_func in strcasecmp do as_ac_var=`echo "ac_cv_func_$ac_func" | $as_tr_sh` echo "$as_me:$LINENO: checking for $ac_func" >&5 echo $ECHO_N "checking for $ac_func... $ECHO_C" >&6 if eval "test \"\${$as_ac_var+set}\" = set"; then echo $ECHO_N "(cached) $ECHO_C" >&6 else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Define $ac_func to an innocuous variant, in case declares $ac_func. For example, HP-UX 11i declares gettimeofday. */ #define $ac_func innocuous_$ac_func /* System header to define __stub macros and hopefully few prototypes, which can conflict with char $ac_func (); below. Prefer to if __STDC__ is defined, since exists even on freestanding compilers. */ #ifdef __STDC__ # include #else # include #endif #undef $ac_func /* Override any gcc2 internal prototype to avoid an error. */ #ifdef __cplusplus extern "C" { #endif /* We use char because int might match the return type of a gcc2 builtin and then its argument prototype would still apply. 
*/ char $ac_func (); /* The GNU C library defines this for functions which it implements to always fail with ENOSYS. Some functions are actually named something starting with __ and the normal name is an alias. */ #if defined (__stub_$ac_func) || defined (__stub___$ac_func) choke me #else char (*f) () = $ac_func; #endif #ifdef __cplusplus } #endif int main () { return f != $ac_func; ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 (eval $ac_link) 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; } && { ac_try='test -s conftest$ac_exeext' { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 (eval $ac_try) 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then eval "$as_ac_var=yes" else echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 eval "$as_ac_var=no" fi rm -f conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_var'}'`" >&5 echo "${ECHO_T}`eval echo '${'$as_ac_var'}'`" >&6 if test `eval echo '${'$as_ac_var'}'` = yes; then cat >>confdefs.h <<_ACEOF #define `echo "HAVE_$ac_func" | $as_tr_cpp` 1 _ACEOF fi done ## Some further specific test required as are using std C++. ## (tbd) ;; esac ## test -z "$DOXYGEN" && # Extract the first word of "doxygen", so it can be a program name with args. set dummy doxygen; ac_word=$2 echo "$as_me:$LINENO: checking for $ac_word" >&5 echo $ECHO_N "checking for $ac_word... 
$ECHO_C" >&6 if test "${ac_cv_path_DOXYGEN+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else case $DOXYGEN in [\\/]* | ?:[\\/]*) ac_cv_path_DOXYGEN="$DOXYGEN" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_DOXYGEN="$as_dir/$ac_word$ac_exec_ext" echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done test -z "$ac_cv_path_DOXYGEN" && ac_cv_path_DOXYGEN="doxygen" ;; esac fi DOXYGEN=$ac_cv_path_DOXYGEN if test -n "$DOXYGEN"; then echo "$as_me:$LINENO: result: $DOXYGEN" >&5 echo "${ECHO_T}$DOXYGEN" >&6 else echo "$as_me:$LINENO: result: no" >&5 echo "${ECHO_T}no" >&6 fi # This seems to convince configure to use an absolute path to the backup # install-sh script. ac_install_sh="$PWD/scripts/install-sh" # Find a good install program. We prefer a C program (faster), # so one script is as good as another. But avoid the broken or # incompatible versions: # SysV /etc/install, /usr/sbin/install # SunOS /usr/etc/install # IRIX /sbin/install # AIX /bin/install # AmigaOS /C/install, which installs bootblocks on floppy discs # AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag # AFS /usr/afsws/bin/install, which mishandles nonexistent args # SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff" # OS/2's system install, which has a completely different semantic # ./install, which can be erroneously created by make from ./install.sh. echo "$as_me:$LINENO: checking for a BSD-compatible install" >&5 echo $ECHO_N "checking for a BSD-compatible install... $ECHO_C" >&6 if test -z "$INSTALL"; then if test "${ac_cv_path_install+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
# Account for people who put trailing slashes in PATH elements. case $as_dir/ in ./ | .// | /cC/* | \ /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \ ?:\\/os2\\/install\\/* | ?:\\/OS2\\/INSTALL\\/* | \ /usr/ucb/* ) ;; *) # OSF1 and SCO ODT 3.0 have their own names for install. # Don't use installbsd from OSF since it installs stuff as root # by default. for ac_prog in ginstall scoinst install; do for ac_exec_ext in '' $ac_executable_extensions; do if $as_executable_p "$as_dir/$ac_prog$ac_exec_ext"; then if test $ac_prog = install && grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then # AIX install. It has an incompatible calling convention. : elif test $ac_prog = install && grep pwplus "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then # program-specific install script used by HP pwplus--don't use. : else ac_cv_path_install="$as_dir/$ac_prog$ac_exec_ext -c" break 3 fi fi done done ;; esac done fi if test "${ac_cv_path_install+set}" = set; then INSTALL=$ac_cv_path_install else # As a last resort, use the slow shell script. We don't cache a # path for INSTALL within a source directory, because that will # break other packages using the cache if that directory is # removed, or if the path is relative. INSTALL=$ac_install_sh fi fi echo "$as_me:$LINENO: result: $INSTALL" >&5 echo "${ECHO_T}$INSTALL" >&6 # Use test -z because SunOS4 sh mishandles braces in ${var-val}. # It thinks the first close brace ends the variable substitution. test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}' test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}' test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644' if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}ranlib", so it can be a program name with args. set dummy ${ac_tool_prefix}ranlib; ac_word=$2 echo "$as_me:$LINENO: checking for $ac_word" >&5 echo $ECHO_N "checking for $ac_word... 
$ECHO_C" >&6 if test "${ac_cv_prog_RANLIB+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else if test -n "$RANLIB"; then ac_cv_prog_RANLIB="$RANLIB" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_RANLIB="${ac_tool_prefix}ranlib" echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done fi fi RANLIB=$ac_cv_prog_RANLIB if test -n "$RANLIB"; then echo "$as_me:$LINENO: result: $RANLIB" >&5 echo "${ECHO_T}$RANLIB" >&6 else echo "$as_me:$LINENO: result: no" >&5 echo "${ECHO_T}no" >&6 fi fi if test -z "$ac_cv_prog_RANLIB"; then ac_ct_RANLIB=$RANLIB # Extract the first word of "ranlib", so it can be a program name with args. set dummy ranlib; ac_word=$2 echo "$as_me:$LINENO: checking for $ac_word" >&5 echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 if test "${ac_cv_prog_ac_ct_RANLIB+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else if test -n "$ac_ct_RANLIB"; then ac_cv_prog_ac_ct_RANLIB="$ac_ct_RANLIB" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_RANLIB="ranlib" echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done test -z "$ac_cv_prog_ac_ct_RANLIB" && ac_cv_prog_ac_ct_RANLIB=":" fi fi ac_ct_RANLIB=$ac_cv_prog_ac_ct_RANLIB if test -n "$ac_ct_RANLIB"; then echo "$as_me:$LINENO: result: $ac_ct_RANLIB" >&5 echo "${ECHO_T}$ac_ct_RANLIB" >&6 else echo "$as_me:$LINENO: result: no" >&5 echo "${ECHO_T}no" >&6 fi RANLIB=$ac_ct_RANLIB else RANLIB="$ac_cv_prog_RANLIB" fi test -z "$MKDIR" && # Extract the first word of "mkdir$EXEEXT", so it can be a program name with args. set dummy mkdir$EXEEXT; ac_word=$2 echo "$as_me:$LINENO: checking for $ac_word" >&5 echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 if test "${ac_cv_path_MKDIR+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else case $MKDIR in [\\/]* | ?:[\\/]*) ac_cv_path_MKDIR="$MKDIR" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_MKDIR="$as_dir/$ac_word$ac_exec_ext" echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done test -z "$ac_cv_path_MKDIR" && ac_cv_path_MKDIR="mkdir$EXEEXT " ;; esac fi MKDIR=$ac_cv_path_MKDIR if test -n "$MKDIR"; then echo "$as_me:$LINENO: result: $MKDIR" >&5 echo "${ECHO_T}$MKDIR" >&6 else echo "$as_me:$LINENO: result: no" >&5 echo "${ECHO_T}no" >&6 fi test -z "$RM" && # Extract the first word of "rm$EXEEXT", so it can be a program name with args. set dummy rm$EXEEXT; ac_word=$2 echo "$as_me:$LINENO: checking for $ac_word" >&5 echo $ECHO_N "checking for $ac_word... 
$ECHO_C" >&6 if test "${ac_cv_path_RM+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else case $RM in [\\/]* | ?:[\\/]*) ac_cv_path_RM="$RM" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_RM="$as_dir/$ac_word$ac_exec_ext" echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done test -z "$ac_cv_path_RM" && ac_cv_path_RM="rm$EXEEXT " ;; esac fi RM=$ac_cv_path_RM if test -n "$RM"; then echo "$as_me:$LINENO: result: $RM" >&5 echo "${ECHO_T}$RM" >&6 else echo "$as_me:$LINENO: result: no" >&5 echo "${ECHO_T}no" >&6 fi ax_arg_list="gnutar tar" if test "x${TAR}" != "x" ; then ax_arg_list="${TAR}" fi TAR="" for ac_prog in ${ax_arg_list} ; do set dummy $ac_prog; ac_word=${2} ## if argument is absolute we check whether such a file exists, ## otherwise we lookup PATH. Each hit will be added to main ## variable. case $ac_word in [\\/]* | ?:[\\/]*) echo "$as_me:$LINENO: checking for $ac_word" >&5 echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 if test -f $ac_word ; then TAR="${TAR} ${ac_word}" echo "$as_me:$LINENO: result: yes" >&5 echo "${ECHO_T}yes" >&6 else echo "$as_me:$LINENO: result: no" >&5 echo "${ECHO_T}no" >&6 fi ;; *) as_found= as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then TAR="${TAR} $as_dir/$ac_word$ac_exec_ext" echo "$as_me:$LINENO: checking for $ac_word" >&5 echo $ECHO_N "checking for $ac_word... 
$ECHO_C" >&6 echo "$as_me:$LINENO: result: $as_dir/$ac_word$ac_exec_ext" >&5 echo "${ECHO_T}$as_dir/$ac_word$ac_exec_ext" >&6 as_found=1 fi done done test "x$as_found" == "x" && { echo "$as_me:$LINENO: checking for $ac_word" >&5 echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 echo "$as_me:$LINENO: result: no" >&5 echo "${ECHO_T}no" >&6 } ;; esac done if test "x${TAR}" != "x" ; then set x ${TAR} TAR="${2}" else { { echo "$as_me:$LINENO: error: no suitable value has been found for TAR" >&5 echo "$as_me: error: no suitable value has been found for TAR" >&2;} { (exit 1); exit 1; }; } fi ax_arg_list="/bin/touch /usr/bin/touch touch" if test "x${TOUCH}" != "x" ; then ax_arg_list="${TOUCH}" fi TOUCH="" for ac_prog in ${ax_arg_list} ; do set dummy $ac_prog; ac_word=${2} ## if argument is absolute we check whether such a file exists, ## otherwise we lookup PATH. Each hit will be added to main ## variable. case $ac_word in [\\/]* | ?:[\\/]*) echo "$as_me:$LINENO: checking for $ac_word" >&5 echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 if test -f $ac_word ; then TOUCH="${TOUCH} ${ac_word}" echo "$as_me:$LINENO: result: yes" >&5 echo "${ECHO_T}yes" >&6 else echo "$as_me:$LINENO: result: no" >&5 echo "${ECHO_T}no" >&6 fi ;; *) as_found= as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then TOUCH="${TOUCH} $as_dir/$ac_word$ac_exec_ext" echo "$as_me:$LINENO: checking for $ac_word" >&5 echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 echo "$as_me:$LINENO: result: $as_dir/$ac_word$ac_exec_ext" >&5 echo "${ECHO_T}$as_dir/$ac_word$ac_exec_ext" >&6 as_found=1 fi done done test "x$as_found" == "x" && { echo "$as_me:$LINENO: checking for $ac_word" >&5 echo $ECHO_N "checking for $ac_word... 
$ECHO_C" >&6 echo "$as_me:$LINENO: result: no" >&5 echo "${ECHO_T}no" >&6 } ;; esac done if test "x${TOUCH}" != "x" ; then set x ${TOUCH} TOUCH="${2}" else { { echo "$as_me:$LINENO: error: no suitable value has been found for TOUCH" >&5 echo "$as_me: error: no suitable value has been found for TOUCH" >&2;} { (exit 1); exit 1; }; } fi test -z "$CHMOD" && # Extract the first word of "chmod$EXEEXT", so it can be a program name with args. set dummy chmod$EXEEXT; ac_word=$2 echo "$as_me:$LINENO: checking for $ac_word" >&5 echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 if test "${ac_cv_path_CHMOD+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else case $CHMOD in [\\/]* | ?:[\\/]*) ac_cv_path_CHMOD="$CHMOD" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_CHMOD="$as_dir/$ac_word$ac_exec_ext" echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done test -z "$ac_cv_path_CHMOD" && ac_cv_path_CHMOD="chmod$EXEEXT " ;; esac fi CHMOD=$ac_cv_path_CHMOD if test -n "$CHMOD"; then echo "$as_me:$LINENO: result: $CHMOD" >&5 echo "${ECHO_T}$CHMOD" >&6 else echo "$as_me:$LINENO: result: no" >&5 echo "${ECHO_T}no" >&6 fi test -z "$SED" && # Extract the first word of "sed$EXEEXT", so it can be a program name with args. set dummy sed$EXEEXT; ac_word=$2 echo "$as_me:$LINENO: checking for $ac_word" >&5 echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 if test "${ac_cv_path_SED+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else case $SED in [\\/]* | ?:[\\/]*) ac_cv_path_SED="$SED" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_SED="$as_dir/$ac_word$ac_exec_ext" echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done test -z "$ac_cv_path_SED" && ac_cv_path_SED="sed$EXEEXT " ;; esac fi SED=$ac_cv_path_SED if test -n "$SED"; then echo "$as_me:$LINENO: result: $SED" >&5 echo "${ECHO_T}$SED" >&6 else echo "$as_me:$LINENO: result: no" >&5 echo "${ECHO_T}no" >&6 fi test -z "$CAT" && # Extract the first word of "cat$EXEEXT", so it can be a program name with args. set dummy cat$EXEEXT; ac_word=$2 echo "$as_me:$LINENO: checking for $ac_word" >&5 echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 if test "${ac_cv_path_CAT+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else case $CAT in [\\/]* | ?:[\\/]*) ac_cv_path_CAT="$CAT" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_CAT="$as_dir/$ac_word$ac_exec_ext" echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done test -z "$ac_cv_path_CAT" && ac_cv_path_CAT="cat$EXEEXT " ;; esac fi CAT=$ac_cv_path_CAT if test -n "$CAT"; then echo "$as_me:$LINENO: result: $CAT" >&5 echo "${ECHO_T}$CAT" >&6 else echo "$as_me:$LINENO: result: no" >&5 echo "${ECHO_T}no" >&6 fi test -z "$GREP" && # Extract the first word of "grep$EXEEXT", so it can be a program name with args. set dummy grep$EXEEXT; ac_word=$2 echo "$as_me:$LINENO: checking for $ac_word" >&5 echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 if test "${ac_cv_path_GREP+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else case $GREP in [\\/]* | ?:[\\/]*) ac_cv_path_GREP="$GREP" # Let the user override the test with a path. 
;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_GREP="$as_dir/$ac_word$ac_exec_ext" echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done test -z "$ac_cv_path_GREP" && ac_cv_path_GREP="grep$EXEEXT " ;; esac fi GREP=$ac_cv_path_GREP if test -n "$GREP"; then echo "$as_me:$LINENO: result: $GREP" >&5 echo "${ECHO_T}$GREP" >&6 else echo "$as_me:$LINENO: result: no" >&5 echo "${ECHO_T}no" >&6 fi #xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx# # PYTHON # #xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx# case $LANG_PY in 1) ax_arg_list="python" if test "x${PYTHON}" != "x" ; then ax_arg_list="${PYTHON}" fi PYTHON="" for ac_prog in ${ax_arg_list} ; do set dummy $ac_prog; ac_word=${2} ## if argument is absolute we check whether such a file exists, ## otherwise we lookup PATH. Each hit will be added to main ## variable. case $ac_word in [\\/]* | ?:[\\/]*) echo "$as_me:$LINENO: checking for $ac_word" >&5 echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 if test -f $ac_word ; then PYTHON="${PYTHON} ${ac_word}" echo "$as_me:$LINENO: result: yes" >&5 echo "${ECHO_T}yes" >&6 else echo "$as_me:$LINENO: result: no" >&5 echo "${ECHO_T}no" >&6 fi ;; *) as_found= as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then PYTHON="${PYTHON} $as_dir/$ac_word$ac_exec_ext" echo "$as_me:$LINENO: checking for $ac_word" >&5 echo $ECHO_N "checking for $ac_word... 
$ECHO_C" >&6 echo "$as_me:$LINENO: result: $as_dir/$ac_word$ac_exec_ext" >&5 echo "${ECHO_T}$as_dir/$ac_word$ac_exec_ext" >&6 as_found=1 fi done done test "x$as_found" == "x" && { echo "$as_me:$LINENO: checking for $ac_word" >&5 echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 echo "$as_me:$LINENO: result: no" >&5 echo "${ECHO_T}no" >&6 } ;; esac done if test "x${PYTHON}" != "x" ; then set x ${PYTHON} PYTHON="${2}" else LANG_PY=0 cat <&5 echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 if test -f $ac_word ; then CSHARPC="${CSHARPC} ${ac_word}" echo "$as_me:$LINENO: result: yes" >&5 echo "${ECHO_T}yes" >&6 else echo "$as_me:$LINENO: result: no" >&5 echo "${ECHO_T}no" >&6 fi ;; *) as_found= as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then CSHARPC="${CSHARPC} $as_dir/$ac_word$ac_exec_ext" echo "$as_me:$LINENO: checking for $ac_word" >&5 echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 echo "$as_me:$LINENO: result: $as_dir/$ac_word$ac_exec_ext" >&5 echo "${ECHO_T}$as_dir/$ac_word$ac_exec_ext" >&6 as_found=1 fi done done test "x$as_found" == "x" && { echo "$as_me:$LINENO: checking for $ac_word" >&5 echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 echo "$as_me:$LINENO: result: no" >&5 echo "${ECHO_T}no" >&6 } ;; esac done if test "x${CSHARPC}" != "x" ; then set x ${CSHARPC} CSHARPC="${2}" else LANG_CS=0 cat <&5 echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 if test -f $ac_word ; then CLR="${CLR} ${ac_word}" echo "$as_me:$LINENO: result: yes" >&5 echo "${ECHO_T}yes" >&6 else echo "$as_me:$LINENO: result: no" >&5 echo "${ECHO_T}no" >&6 fi ;; *) as_found= as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then CLR="${CLR} $as_dir/$ac_word$ac_exec_ext" echo "$as_me:$LINENO: checking for $ac_word" >&5 echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 echo "$as_me:$LINENO: result: $as_dir/$ac_word$ac_exec_ext" >&5 echo "${ECHO_T}$as_dir/$ac_word$ac_exec_ext" >&6 as_found=1 fi done done test "x$as_found" == "x" && { echo "$as_me:$LINENO: checking for $ac_word" >&5 echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 echo "$as_me:$LINENO: result: no" >&5 echo "${ECHO_T}no" >&6 } ;; esac done if test "x${CLR}" != "x" ; then set x ${CLR} CLR="${2}" else LANG_CS=0 cat <&5 echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 if test -f $ac_word ; then CLR="${CLR} ${ac_word}" echo "$as_me:$LINENO: result: yes" >&5 echo "${ECHO_T}yes" >&6 else echo "$as_me:$LINENO: result: no" >&5 echo "${ECHO_T}no" >&6 fi ;; *) as_found= as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then CLR="${CLR} $as_dir/$ac_word$ac_exec_ext" echo "$as_me:$LINENO: checking for $ac_word" >&5 echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 echo "$as_me:$LINENO: result: $as_dir/$ac_word$ac_exec_ext" >&5 echo "${ECHO_T}$as_dir/$ac_word$ac_exec_ext" >&6 as_found=1 fi done done test "x$as_found" == "x" && { echo "$as_me:$LINENO: checking for $ac_word" >&5 echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 echo "$as_me:$LINENO: result: no" >&5 echo "${ECHO_T}no" >&6 } ;; esac done if test "x${CLR}" != "x" ; then set x ${CLR} CLR="${2}" else LANG_CS=0 cat <confcache <<\_ACEOF # This file is a shell script that caches the results of configure # tests run on this system so they can be shared between configure # scripts and configure runs, see configure's option --config-cache. # It is not useful on other systems. 
If it contains results you don't # want to keep, you may remove or edit it. # # config.status only pays attention to the cache file if you give it # the --recheck option to rerun configure. # # `ac_cv_env_foo' variables (set or unset) will be overridden when # loading this file, other *unset* `ac_cv_foo' will be assigned the # following values. _ACEOF # The following way of writing the cache mishandles newlines in values, # but we know of no workaround that is simple, portable, and efficient. # So, don't put newlines in cache variables' values. # Ultrix sh set writes to stderr and can't be redirected directly, # and sets the high bit in the cache file unless we assign to the vars. { (set) 2>&1 | case `(ac_space=' '; set | grep ac_space) 2>&1` in *ac_space=\ *) # `set' does not quote correctly, so add quotes (double-quote # substitution turns \\\\ into \\, and sed turns \\ into \). sed -n \ "s/'/'\\\\''/g; s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p" ;; *) # `set' quotes correctly as required by POSIX, so do not add quotes. sed -n \ "s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1=\\2/p" ;; esac; } | sed ' t clear : clear s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/ t end /^ac_cv_env/!s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/ : end' >>confcache if diff $cache_file confcache >/dev/null 2>&1; then :; else if test -w $cache_file; then test "x$cache_file" != "x/dev/null" && echo "updating cache $cache_file" cat confcache >$cache_file else echo "not updating unwritable cache $cache_file" fi fi rm -f confcache test "x$prefix" = xNONE && prefix=$ac_default_prefix # Let make expand exec_prefix. test "x$exec_prefix" = xNONE && exec_prefix='${prefix}' # VPATH may cause trouble with some makes, so we remove $(srcdir), # ${srcdir} and @srcdir@ from VPATH if srcdir is ".", strip leading and # trailing colons and then remove the whole line if VPATH becomes empty # (actually we leave an empty line to preserve line numbers). 
if test "x$srcdir" = x.; then ac_vpsub='/^[ ]*VPATH[ ]*=/{ s/:*\$(srcdir):*/:/; s/:*\${srcdir}:*/:/; s/:*@srcdir@:*/:/; s/^\([^=]*=[ ]*\):*/\1/; s/:*$//; s/^[^=]*=[ ]*$//; }' fi # Transform confdefs.h into DEFS. # Protect against shell expansion while executing Makefile rules. # Protect against Makefile macro expansion. # # If the first sed substitution is executed (which looks for macros that # take arguments), then we branch to the quote section. Otherwise, # look for a macro that doesn't take arguments. cat >confdef2opt.sed <<\_ACEOF t clear : clear s,^[ ]*#[ ]*define[ ][ ]*\([^ (][^ (]*([^)]*)\)[ ]*\(.*\),-D\1=\2,g t quote s,^[ ]*#[ ]*define[ ][ ]*\([^ ][^ ]*\)[ ]*\(.*\),-D\1=\2,g t quote d : quote s,[ `~#$^&*(){}\\|;'"<>?],\\&,g s,\[,\\&,g s,\],\\&,g s,\$,$$,g p _ACEOF # We use echo to avoid assuming a particular line-breaking character. # The extra dot is to prevent the shell from consuming trailing # line-breaks from the sub-command output. A line-break within # single-quotes doesn't work because, if this script is created in a # platform that uses two characters for line-breaks (e.g., DOS), tr # would break. ac_LF_and_DOT=`echo; echo .` DEFS=`sed -n -f confdef2opt.sed confdefs.h | tr "$ac_LF_and_DOT" ' .'` rm -f confdef2opt.sed ac_libobjs= ac_ltlibobjs= for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue # 1. Remove the extension, and $U if already installed. ac_i=`echo "$ac_i" | sed 's/\$U\././;s/\.o$//;s/\.obj$//'` # 2. Add them. ac_libobjs="$ac_libobjs $ac_i\$U.$ac_objext" ac_ltlibobjs="$ac_ltlibobjs $ac_i"'$U.lo' done LIBOBJS=$ac_libobjs LTLIBOBJS=$ac_ltlibobjs : ${CONFIG_STATUS=./config.status} ac_clean_files_save=$ac_clean_files ac_clean_files="$ac_clean_files $CONFIG_STATUS" { echo "$as_me:$LINENO: creating $CONFIG_STATUS" >&5 echo "$as_me: creating $CONFIG_STATUS" >&6;} cat >$CONFIG_STATUS <<_ACEOF #! $SHELL # Generated by $as_me. # Run this file to recreate the current configuration. 
# Compiler output produced by configure, useful for debugging # configure, is in config.log if it exists. debug=false ac_cs_recheck=false ac_cs_silent=false SHELL=\${CONFIG_SHELL-$SHELL} _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF ## --------------------- ## ## M4sh Initialization. ## ## --------------------- ## # Be Bourne compatible if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then emulate sh NULLCMD=: # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which # is contrary to our usage. Disable this feature. alias -g '${1+"$@"}'='"$@"' elif test -n "${BASH_VERSION+set}" && (set -o posix) >/dev/null 2>&1; then set -o posix fi DUALCASE=1; export DUALCASE # for MKS sh # Support unset when possible. if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then as_unset=unset else as_unset=false fi # Work around bugs in pre-3.0 UWIN ksh. $as_unset ENV MAIL MAILPATH PS1='$ ' PS2='> ' PS4='+ ' # NLS nuisances. for as_var in \ LANG LANGUAGE LC_ADDRESS LC_ALL LC_COLLATE LC_CTYPE LC_IDENTIFICATION \ LC_MEASUREMENT LC_MESSAGES LC_MONETARY LC_NAME LC_NUMERIC LC_PAPER \ LC_TELEPHONE LC_TIME do if (set +x; test -z "`(eval $as_var=C; export $as_var) 2>&1`"); then eval $as_var=C; export $as_var else $as_unset $as_var fi done # Required to use basename. if expr a : '\(a\)' >/dev/null 2>&1; then as_expr=expr else as_expr=false fi if (basename /) >/dev/null 2>&1 && test "X`basename / 2>&1`" = "X/"; then as_basename=basename else as_basename=false fi # Name of the executable. as_me=`$as_basename "$0" || $as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ X"$0" : 'X\(//\)$' \| \ X"$0" : 'X\(/\)$' \| \ . : '\(.\)' 2>/dev/null || echo X/"$0" | sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/; q; } /^X\/\(\/\/\)$/{ s//\1/; q; } /^X\/\(\/\).*/{ s//\1/; q; } s/.*/./; q'` # PATH needs CR, and LINENO needs CR and PATH. # Avoid depending upon Character Ranges. 
as_cr_letters='abcdefghijklmnopqrstuvwxyz' as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' as_cr_Letters=$as_cr_letters$as_cr_LETTERS as_cr_digits='0123456789' as_cr_alnum=$as_cr_Letters$as_cr_digits # The user is always right. if test "${PATH_SEPARATOR+set}" != set; then echo "#! /bin/sh" >conf$$.sh echo "exit 0" >>conf$$.sh chmod +x conf$$.sh if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then PATH_SEPARATOR=';' else PATH_SEPARATOR=: fi rm -f conf$$.sh fi as_lineno_1=$LINENO as_lineno_2=$LINENO as_lineno_3=`(expr $as_lineno_1 + 1) 2>/dev/null` test "x$as_lineno_1" != "x$as_lineno_2" && test "x$as_lineno_3" = "x$as_lineno_2" || { # Find who we are. Look in the path if we contain no path at all # relative or not. case $0 in *[\\/]* ) as_myself=$0 ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break done ;; esac # We did not find ourselves, most probably we were run as `sh COMMAND' # in which case we are not to be found in the path. if test "x$as_myself" = x; then as_myself=$0 fi if test ! -f "$as_myself"; then { { echo "$as_me:$LINENO: error: cannot find myself; rerun with an absolute path" >&5 echo "$as_me: error: cannot find myself; rerun with an absolute path" >&2;} { (exit 1); exit 1; }; } fi case $CONFIG_SHELL in '') as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for as_base in sh bash ksh sh5; do case $as_dir in /*) if ("$as_dir/$as_base" -c ' as_lineno_1=$LINENO as_lineno_2=$LINENO as_lineno_3=`(expr $as_lineno_1 + 1) 2>/dev/null` test "x$as_lineno_1" != "x$as_lineno_2" && test "x$as_lineno_3" = "x$as_lineno_2" ') 2>/dev/null; then $as_unset BASH_ENV || test "${BASH_ENV+set}" != set || { BASH_ENV=; export BASH_ENV; } $as_unset ENV || test "${ENV+set}" != set || { ENV=; export ENV; } CONFIG_SHELL=$as_dir/$as_base export CONFIG_SHELL exec "$CONFIG_SHELL" "$0" ${1+"$@"} fi;; esac done done ;; esac # Create $as_me.lineno as a copy of $as_myself, but with $LINENO # uniformly replaced by the line number. The first 'sed' inserts a # line-number line before each line; the second 'sed' does the real # work. The second script uses 'N' to pair each line-number line # with the numbered line, and appends trailing '-' during # substitution so that $LINENO is not a special case at line end. # (Raja R Harinath suggested sed '=', and Paul Eggert wrote the # second 'sed' script. Blame Lee E. McMahon for sed's syntax. :-) sed '=' <$as_myself | sed ' N s,$,-, : loop s,^\(['$as_cr_digits']*\)\(.*\)[$]LINENO\([^'$as_cr_alnum'_]\),\1\2\1\3, t loop s,-$,, s,^['$as_cr_digits']*\n,, ' >$as_me.lineno && chmod +x $as_me.lineno || { { echo "$as_me:$LINENO: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&5 echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2;} { (exit 1); exit 1; }; } # Don't try to exec as it changes $[0], causing all sort of problems # (the dirname of $[0] is not the place where we might find the # original and so on. Autoconf is especially sensible to this). . ./$as_me.lineno # Exit status is that of the last command. 
exit } case `echo "testing\c"; echo 1,2,3`,`echo -n testing; echo 1,2,3` in *c*,-n*) ECHO_N= ECHO_C=' ' ECHO_T=' ' ;; *c*,* ) ECHO_N=-n ECHO_C= ECHO_T= ;; *) ECHO_N= ECHO_C='\c' ECHO_T= ;; esac if expr a : '\(a\)' >/dev/null 2>&1; then as_expr=expr else as_expr=false fi rm -f conf$$ conf$$.exe conf$$.file echo >conf$$.file if ln -s conf$$.file conf$$ 2>/dev/null; then # We could just check for DJGPP; but this test a) works b) is more generic # and c) will remain valid once DJGPP supports symlinks (DJGPP 2.04). if test -f conf$$.exe; then # Don't use ln at all; we don't have any links as_ln_s='cp -p' else as_ln_s='ln -s' fi elif ln conf$$.file conf$$ 2>/dev/null; then as_ln_s=ln else as_ln_s='cp -p' fi rm -f conf$$ conf$$.exe conf$$.file if mkdir -p . 2>/dev/null; then as_mkdir_p=: else test -d ./-p && rmdir ./-p as_mkdir_p=false fi as_executable_p="test -f" # Sed expression to map a string onto a valid CPP name. as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" # Sed expression to map a string onto a valid variable name. as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" # IFS # We need space, tab and new line, in precisely that order. as_nl=' ' IFS=" $as_nl" # CDPATH. $as_unset CDPATH exec 6>&1 # Open the log real soon, to keep \$[0] and so on meaningful, and to # report actual input values of CONFIG_FILES etc. instead of their # values after options handling. Logging --version etc. is OK. exec 5>>config.log { echo sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX ## Running $as_me. ## _ASBOX } >&5 cat >&5 <<_CSEOF This file was extended by antlr $as_me 2.7.5, which was generated by GNU Autoconf 2.59. Invocation command line was CONFIG_FILES = $CONFIG_FILES CONFIG_HEADERS = $CONFIG_HEADERS CONFIG_LINKS = $CONFIG_LINKS CONFIG_COMMANDS = $CONFIG_COMMANDS $ $0 $@ _CSEOF echo "on `(hostname || uname -n) 2>/dev/null | sed 1q`" >&5 echo >&5 _ACEOF # Files that config.status was made for. 
if test -n "$ac_config_files"; then echo "config_files=\"$ac_config_files\"" >>$CONFIG_STATUS fi if test -n "$ac_config_headers"; then echo "config_headers=\"$ac_config_headers\"" >>$CONFIG_STATUS fi if test -n "$ac_config_links"; then echo "config_links=\"$ac_config_links\"" >>$CONFIG_STATUS fi if test -n "$ac_config_commands"; then echo "config_commands=\"$ac_config_commands\"" >>$CONFIG_STATUS fi cat >>$CONFIG_STATUS <<\_ACEOF ac_cs_usage="\ \`$as_me' instantiates files from templates according to the current configuration. Usage: $0 [OPTIONS] [FILE]... -h, --help print this help, then exit -V, --version print version number, then exit -q, --quiet do not print progress messages -d, --debug don't remove temporary files --recheck update $as_me by reconfiguring in the same conditions --file=FILE[:TEMPLATE] instantiate the configuration file FILE Configuration files: $config_files Configuration commands: $config_commands Report bugs to ." _ACEOF cat >>$CONFIG_STATUS <<_ACEOF ac_cs_version="\\ antlr config.status 2.7.5 configured by $0, generated by GNU Autoconf 2.59, with options \\"`echo "$ac_configure_args" | sed 's/[\\""\`\$]/\\\\&/g'`\\" Copyright (C) 2003 Free Software Foundation, Inc. This config.status script is free software; the Free Software Foundation gives unlimited permission to copy, distribute and modify it." srcdir=$srcdir INSTALL="$INSTALL" _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF # If no file are specified by the user, then we need to provide default # value. By we need to know if files were specified by the user. ac_need_defaults=: while test $# != 0 do case $1 in --*=*) ac_option=`expr "x$1" : 'x\([^=]*\)='` ac_optarg=`expr "x$1" : 'x[^=]*=\(.*\)'` ac_shift=: ;; -*) ac_option=$1 ac_optarg=$2 ac_shift=shift ;; *) # This is not an option, so the user has probably given explicit # arguments. ac_option=$1 ac_need_defaults=false;; esac case $ac_option in # Handling of the options. 
_ACEOF cat >>$CONFIG_STATUS <<\_ACEOF -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) ac_cs_recheck=: ;; --version | --vers* | -V ) echo "$ac_cs_version"; exit 0 ;; --he | --h) # Conflict between --help and --header { { echo "$as_me:$LINENO: error: ambiguous option: $1 Try \`$0 --help' for more information." >&5 echo "$as_me: error: ambiguous option: $1 Try \`$0 --help' for more information." >&2;} { (exit 1); exit 1; }; };; --help | --hel | -h ) echo "$ac_cs_usage"; exit 0 ;; --debug | --d* | -d ) debug=: ;; --file | --fil | --fi | --f ) $ac_shift CONFIG_FILES="$CONFIG_FILES $ac_optarg" ac_need_defaults=false;; --header | --heade | --head | --hea ) $ac_shift CONFIG_HEADERS="$CONFIG_HEADERS $ac_optarg" ac_need_defaults=false;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil | --si | --s) ac_cs_silent=: ;; # This is an error. -*) { { echo "$as_me:$LINENO: error: unrecognized option: $1 Try \`$0 --help' for more information." >&5 echo "$as_me: error: unrecognized option: $1 Try \`$0 --help' for more information." >&2;} { (exit 1); exit 1; }; } ;; *) ac_config_targets="$ac_config_targets $1" ;; esac shift done ac_configure_extra_args= if $ac_cs_silent; then exec 6>/dev/null ac_configure_extra_args="$ac_configure_extra_args --silent" fi _ACEOF cat >>$CONFIG_STATUS <<_ACEOF if \$ac_cs_recheck; then echo "running $SHELL $0 " $ac_configure_args \$ac_configure_extra_args " --no-create --no-recursion" >&6 exec $SHELL $0 $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion fi _ACEOF cat >>$CONFIG_STATUS <<_ACEOF # # INIT-COMMANDS section. # MAKE="${MAKE}" CHMOD=${CHMOD} CHMOD=${CHMOD} CHMOD=${CHMOD} ANTLR_CONFIG_FILES='*' ANTLR_FILE_LIST="${ANTLR_FILE_LIST}" CHMOD="${CHMOD}" _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF for ac_config_target in $ac_config_targets do case "$ac_config_target" in # Handling of arguments. 
"scripts/config.vars" ) CONFIG_FILES="$CONFIG_FILES scripts/config.vars" ;; "scripts/config.deps" ) CONFIG_FILES="$CONFIG_FILES scripts/config.deps" ;; "scripts/config.make" ) CONFIG_FILES="$CONFIG_FILES scripts/config.make" ;; "antlr/Version.java" ) CONFIG_FILES="$CONFIG_FILES antlr/Version.java" ;; "scripts/python.sh" ) CONFIG_FILES="$CONFIG_FILES scripts/python.sh" ;; "scripts/pyantlr.sh" ) CONFIG_FILES="$CONFIG_FILES scripts/pyantlr.sh:scripts/pyinst.sh.in" ;; "lib/python/Makefile" ) CONFIG_FILES="$CONFIG_FILES lib/python/Makefile" ;; "examples/python/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/python/Makefile" ;; "examples/python/asn1/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/python/asn1/Makefile" ;; "examples/python/ASTsupport/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/python/ASTsupport/Makefile" ;; "examples/python/calc/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/python/calc/Makefile" ;; "examples/python/columns/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/python/columns/Makefile" ;; "examples/python/exprAST/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/python/exprAST/Makefile" ;; "examples/python/filter/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/python/filter/Makefile" ;; "examples/python/filterWithRule/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/python/filterWithRule/Makefile" ;; "examples/python/heteroAST/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/python/heteroAST/Makefile" ;; "examples/python/HTML/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/python/HTML/Makefile" ;; "examples/python/IDL/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/python/IDL/Makefile" ;; "examples/python/imagNodeAST/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/python/imagNodeAST/Makefile" ;; "examples/python/includeFile/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/python/includeFile/Makefile" ;; "examples/python/inherit.tinyc/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/python/inherit.tinyc/Makefile" ;; 
"examples/python/java/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/python/java/Makefile" ;; "examples/python/lexerTester/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/python/lexerTester/Makefile" ;; "examples/python/lexRewrite/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/python/lexRewrite/Makefile" ;; "examples/python/linkChecker/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/python/linkChecker/Makefile" ;; "examples/python/multiLexer/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/python/multiLexer/Makefile" ;; "examples/python/multiParser/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/python/multiParser/Makefile" ;; "examples/python/parseBinary/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/python/parseBinary/Makefile" ;; "examples/python/pascal/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/python/pascal/Makefile" ;; "examples/python/cpp/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/python/cpp/Makefile" ;; "examples/python/preserveWhiteSpace/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/python/preserveWhiteSpace/Makefile" ;; "examples/python/tinybasic/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/python/tinybasic/Makefile" ;; "examples/python/tinyc/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/python/tinyc/Makefile" ;; "examples/python/transform/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/python/transform/Makefile" ;; "examples/python/treewalk/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/python/treewalk/Makefile" ;; "examples/python/unicode/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/python/unicode/Makefile" ;; "examples/python/unicode.IDENTs/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/python/unicode.IDENTs/Makefile" ;; "examples/python/xml/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/python/xml/Makefile" ;; "scripts/csc.sh" ) CONFIG_FILES="$CONFIG_FILES scripts/csc.sh" ;; "lib/csharp/Makefile" ) CONFIG_FILES="$CONFIG_FILES lib/csharp/Makefile" ;; "lib/csharp/src/Makefile" ) CONFIG_FILES="$CONFIG_FILES 
lib/csharp/src/Makefile" ;; "lib/csharp/ASTFrame/Makefile" ) CONFIG_FILES="$CONFIG_FILES lib/csharp/ASTFrame/Makefile" ;; "examples/csharp/ASTsupport/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/csharp/ASTsupport/Makefile" ;; "examples/csharp/HTML/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/csharp/HTML/Makefile" ;; "examples/csharp/IDL/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/csharp/IDL/Makefile" ;; "examples/csharp/ParseTreeDebug/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/csharp/ParseTreeDebug/Makefile" ;; "examples/csharp/TokenStreamRewrite/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/csharp/TokenStreamRewrite/Makefile" ;; "examples/csharp/calc/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/csharp/calc/Makefile" ;; "examples/csharp/columns/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/csharp/columns/Makefile" ;; "examples/csharp/exprAST/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/csharp/exprAST/Makefile" ;; "examples/csharp/filter/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/csharp/filter/Makefile" ;; "examples/csharp/filterWithRule/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/csharp/filterWithRule/Makefile" ;; "examples/csharp/heteroAST/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/csharp/heteroAST/Makefile" ;; "examples/csharp/java/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/csharp/java/Makefile" ;; "examples/csharp/multiLexer/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/csharp/multiLexer/Makefile" ;; "examples/csharp/parseBinary/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/csharp/parseBinary/Makefile" ;; "examples/csharp/preserveWhiteSpace/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/csharp/preserveWhiteSpace/Makefile" ;; "examples/csharp/tinyc/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/csharp/tinyc/Makefile" ;; "examples/csharp/unicode/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/csharp/unicode/Makefile" ;; "examples/csharp/Makefile" ) CONFIG_FILES="$CONFIG_FILES 
examples/csharp/Makefile" ;; "scripts/java.sh" ) CONFIG_FILES="$CONFIG_FILES scripts/java.sh" ;; "scripts/jar.sh" ) CONFIG_FILES="$CONFIG_FILES scripts/jar.sh" ;; "scripts/javac.sh" ) CONFIG_FILES="$CONFIG_FILES scripts/javac.sh" ;; "scripts/antlr.sh" ) CONFIG_FILES="$CONFIG_FILES scripts/antlr.sh" ;; "scripts/cxx.sh" ) CONFIG_FILES="$CONFIG_FILES scripts/cxx.sh" ;; "scripts/link.sh" ) CONFIG_FILES="$CONFIG_FILES scripts/link.sh" ;; "scripts/c.sh" ) CONFIG_FILES="$CONFIG_FILES scripts/c.sh" ;; "scripts/lib.sh" ) CONFIG_FILES="$CONFIG_FILES scripts/lib.sh" ;; "scripts/cpp.sh" ) CONFIG_FILES="$CONFIG_FILES scripts/cpp.sh" ;; "lib/cpp/Makefile" ) CONFIG_FILES="$CONFIG_FILES lib/cpp/Makefile" ;; "lib/cpp/antlr/Makefile" ) CONFIG_FILES="$CONFIG_FILES lib/cpp/antlr/Makefile" ;; "lib/cpp/src/Makefile" ) CONFIG_FILES="$CONFIG_FILES lib/cpp/src/Makefile" ;; "examples/cpp/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/cpp/Makefile" ;; "examples/cpp/ASTsupport/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/cpp/ASTsupport/Makefile" ;; "examples/cpp/calc/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/cpp/calc/Makefile" ;; "examples/cpp/exprAST/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/cpp/exprAST/Makefile" ;; "examples/cpp/filter/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/cpp/filter/Makefile" ;; "examples/cpp/filterWithRule/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/cpp/filterWithRule/Makefile" ;; "examples/cpp/flexLexer/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/cpp/flexLexer/Makefile" ;; "examples/cpp/HTML/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/cpp/HTML/Makefile" ;; "examples/cpp/IDL/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/cpp/IDL/Makefile" ;; "examples/cpp/imagNodeAST/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/cpp/imagNodeAST/Makefile" ;; "examples/cpp/includeFile/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/cpp/includeFile/Makefile" ;; "examples/cpp/inherit.tinyc/Makefile" ) CONFIG_FILES="$CONFIG_FILES 
examples/cpp/inherit.tinyc/Makefile" ;; "examples/cpp/java/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/cpp/java/Makefile" ;; "examples/cpp/lexRewrite/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/cpp/lexRewrite/Makefile" ;; "examples/cpp/multiLexer/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/cpp/multiLexer/Makefile" ;; "examples/cpp/multiParser/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/cpp/multiParser/Makefile" ;; "examples/cpp/parseBinary/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/cpp/parseBinary/Makefile" ;; "examples/cpp/preserveWhiteSpace/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/cpp/preserveWhiteSpace/Makefile" ;; "examples/cpp/tinyc/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/cpp/tinyc/Makefile" ;; "examples/cpp/tokenStreamRewrite/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/cpp/tokenStreamRewrite/Makefile" ;; "examples/cpp/transform/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/cpp/transform/Makefile" ;; "examples/cpp/treewalk/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/cpp/treewalk/Makefile" ;; "examples/cpp/unicode/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/cpp/unicode/Makefile" ;; "lib/Makefile" ) CONFIG_FILES="$CONFIG_FILES lib/Makefile" ;; "examples/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/Makefile" ;; "doc/Makefile" ) CONFIG_FILES="$CONFIG_FILES doc/Makefile" ;; "Makefile" ) CONFIG_FILES="$CONFIG_FILES Makefile" ;; "scripts/antlr-config" ) CONFIG_FILES="$CONFIG_FILES scripts/antlr-config" ;; "scripts/run-antlr" ) CONFIG_FILES="$CONFIG_FILES scripts/run-antlr" ;; "scripts/antlr.spec" ) CONFIG_FILES="$CONFIG_FILES scripts/antlr.spec" ;; "antlr/Makefile" ) CONFIG_FILES="$CONFIG_FILES antlr/Makefile" ;; "examples/java/ASTsupport/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/java/ASTsupport/Makefile" ;; "examples/java/HTML/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/java/HTML/Makefile" ;; "examples/java/IDL/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/java/IDL/Makefile" ;; 
"examples/java/calc/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/java/calc/Makefile" ;; "examples/java/columns/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/java/columns/Makefile" ;; "examples/java/exprAST/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/java/exprAST/Makefile" ;; "examples/java/filter/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/java/filter/Makefile" ;; "examples/java/filterWithRule/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/java/filterWithRule/Makefile" ;; "examples/java/heteroAST/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/java/heteroAST/Makefile" ;; "examples/java/imagNodeAST/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/java/imagNodeAST/Makefile" ;; "examples/java/includeFile/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/java/includeFile/Makefile" ;; "examples/java/inherit.tinyc/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/java/inherit.tinyc/Makefile" ;; "examples/java/java/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/java/java/Makefile" ;; "examples/java/lexRewrite/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/java/lexRewrite/Makefile" ;; "examples/java/linkChecker/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/java/linkChecker/Makefile" ;; "examples/java/multiLexer/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/java/multiLexer/Makefile" ;; "examples/java/parseBinary/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/java/parseBinary/Makefile" ;; "examples/java/pascal/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/java/pascal/Makefile" ;; "examples/java/preserveWhiteSpace/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/java/preserveWhiteSpace/Makefile" ;; "examples/java/tinybasic/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/java/tinybasic/Makefile" ;; "examples/java/tinyc/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/java/tinyc/Makefile" ;; "examples/java/transform/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/java/transform/Makefile" ;; "examples/java/treewalk/Makefile" ) 
CONFIG_FILES="$CONFIG_FILES examples/java/treewalk/Makefile" ;; "examples/java/unicode.IDENTs/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/java/unicode.IDENTs/Makefile" ;; "examples/java/unicode/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/java/unicode/Makefile" ;; "examples/java/xml/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/java/xml/Makefile" ;; "examples/java/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/java/Makefile" ;; "examples/java/cpp/Makefile" ) CONFIG_FILES="$CONFIG_FILES examples/java/cpp/Makefile" ;; "${ANTLR_CONFIG_FILES}" ) CONFIG_FILES="$CONFIG_FILES ${ANTLR_CONFIG_FILES}" ;; "notice" ) CONFIG_COMMANDS="$CONFIG_COMMANDS notice" ;; *) { { echo "$as_me:$LINENO: error: invalid argument: $ac_config_target" >&5 echo "$as_me: error: invalid argument: $ac_config_target" >&2;} { (exit 1); exit 1; }; };; esac done # If the user did not use the arguments to specify the items to instantiate, # then the envvar interface is used. Set only those that are not. # We use the long form for the default assignment because of an extremely # bizarre bug on SunOS 4.1.3. if $ac_need_defaults; then test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files test "${CONFIG_COMMANDS+set}" = set || CONFIG_COMMANDS=$config_commands fi # Have a temporary directory for convenience. Make it in the build tree # simply because there is no reason to put it here, and in addition, # creating and moving files from /tmp can sometimes cause problems. # Create a temporary directory, and hook for its removal unless debugging. $debug || { trap 'exit_status=$?; rm -rf $tmp && exit $exit_status' 0 trap '{ (exit 1); exit 1; }' 1 2 13 15 } # Create a (secure) tmp directory for tmp files. { tmp=`(umask 077 && mktemp -d -q "./confstatXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" } || { tmp=./confstat$$-$RANDOM (umask 077 && mkdir $tmp) } || { echo "$me: cannot create a temporary directory in ." 
>&2 { (exit 1); exit 1; } } _ACEOF cat >>$CONFIG_STATUS <<_ACEOF # # CONFIG_FILES section. # # No need to generate the scripts if there are no CONFIG_FILES. # This happens for instance when ./config.status config.h if test -n "\$CONFIG_FILES"; then # Protect against being on the right side of a sed subst in config.status. sed 's/,@/@@/; s/@,/@@/; s/,;t t\$/@;t t/; /@;t t\$/s/[\\\\&,]/\\\\&/g; s/@@/,@/; s/@@/@,/; s/@;t t\$/,;t t/' >\$tmp/subs.sed <<\\CEOF s,@SHELL@,$SHELL,;t t s,@PATH_SEPARATOR@,$PATH_SEPARATOR,;t t s,@PACKAGE_NAME@,$PACKAGE_NAME,;t t s,@PACKAGE_TARNAME@,$PACKAGE_TARNAME,;t t s,@PACKAGE_VERSION@,$PACKAGE_VERSION,;t t s,@PACKAGE_STRING@,$PACKAGE_STRING,;t t s,@PACKAGE_BUGREPORT@,$PACKAGE_BUGREPORT,;t t s,@exec_prefix@,$exec_prefix,;t t s,@prefix@,$prefix,;t t s,@program_transform_name@,$program_transform_name,;t t s,@bindir@,$bindir,;t t s,@sbindir@,$sbindir,;t t s,@libexecdir@,$libexecdir,;t t s,@datadir@,$datadir,;t t s,@sysconfdir@,$sysconfdir,;t t s,@sharedstatedir@,$sharedstatedir,;t t s,@localstatedir@,$localstatedir,;t t s,@libdir@,$libdir,;t t s,@includedir@,$includedir,;t t s,@oldincludedir@,$oldincludedir,;t t s,@infodir@,$infodir,;t t s,@mandir@,$mandir,;t t s,@build_alias@,$build_alias,;t t s,@host_alias@,$host_alias,;t t s,@target_alias@,$target_alias,;t t s,@DEFS@,$DEFS,;t t s,@ECHO_C@,$ECHO_C,;t t s,@ECHO_N@,$ECHO_N,;t t s,@ECHO_T@,$ECHO_T,;t t s,@LIBS@,$LIBS,;t t s,@ANTLR_JAR@,$ANTLR_JAR,;t t s,@ANTLR_LIB@,$ANTLR_LIB,;t t s,@ANTLR_NET@,$ANTLR_NET,;t t s,@ANTLR_PY@,$ANTLR_PY,;t t s,@ASTFRAME_NET@,$ASTFRAME_NET,;t t s,@antlr_jar@,$antlr_jar,;t t s,@antlr_lib@,$antlr_lib,;t t s,@antlr_net@,$antlr_net,;t t s,@antlr_py@,$antlr_py,;t t s,@astframe_net@,$astframe_net,;t t s,@ANTLRFLAGS@,$ANTLRFLAGS,;t t s,@ANTLR@,$ANTLR,;t t s,@ANTLR_ACTION_FILES@,$ANTLR_ACTION_FILES,;t t s,@ANTLR_ANTLR_FILES@,$ANTLR_ANTLR_FILES,;t t s,@ANTLR_COMPILE_CMD@,$ANTLR_COMPILE_CMD,;t t s,@ANTLR_CYGWIN@,$ANTLR_CYGWIN,;t t s,@ANTLR_MINGW@,$ANTLR_MINGW,;t t 
s,@ANTLR_TOKDEF_FILES@,$ANTLR_TOKDEF_FILES,;t t s,@ANTLR_WIN32@,$ANTLR_WIN32,;t t s,@ANTLR_WITH_ANTLR_CMD@,$ANTLR_WITH_ANTLR_CMD,;t t s,@ANTLR_WITH_ANTLR_JAR@,$ANTLR_WITH_ANTLR_JAR,;t t s,@ARFLAGS@,$ARFLAGS,;t t s,@AR@,$AR,;t t s,@AS@,$AS,;t t s,@BOOTCLASSPATH@,$BOOTCLASSPATH,;t t s,@CSHARPCFLAGS@,$CSHARPCFLAGS,;t t s,@CSHARPC@,$CSHARPC,;t t s,@CSHARP_COMPILE_CMD@,$CSHARP_COMPILE_CMD,;t t s,@CLR@,$CLR,;t t s,@CXX_COMPILE_CMD@,$CXX_COMPILE_CMD,;t t s,@CXX_LIB_CMD@,$CXX_LIB_CMD,;t t s,@CXX_LINK_CMD@,$CXX_LINK_CMD,;t t s,@CYGPATH@,$CYGPATH,;t t s,@C_COMPILE_CMD@,$C_COMPILE_CMD,;t t s,@DEBUG@,$DEBUG,;t t s,@EXEEXT@,$EXEEXT,;t t s,@JARFLAGS@,$JARFLAGS,;t t s,@JAR@,$JAR,;t t s,@JAR_CMD@,$JAR_CMD,;t t s,@JAVACFLAGS@,$JAVACFLAGS,;t t s,@JAVAC@,$JAVAC,;t t s,@JAVAFLAGS@,$JAVAFLAGS,;t t s,@JAVA@,$JAVA,;t t s,@JAVA_CMD@,$JAVA_CMD,;t t s,@JAVA_COMPILE_CMD@,$JAVA_COMPILE_CMD,;t t s,@LIBEXT@,$LIBEXT,;t t s,@MAKE@,$MAKE,;t t s,@OBJEXT@,$OBJEXT,;t t s,@PATCHLEVEL@,$PATCHLEVEL,;t t s,@PYTHONFLAGS@,$PYTHONFLAGS,;t t s,@PYTHON@,$PYTHON,;t t s,@SUBVERSION@,$SUBVERSION,;t t s,@TIMESTAMP@,$TIMESTAMP,;t t s,@TOUCH@,$TOUCH,;t t s,@VERBOSE@,$VERBOSE,;t t s,@VERSION@,$VERSION,;t t s,@WITH_EXAMPLES@,$WITH_EXAMPLES,;t t s,@abs_this_builddir@,$abs_this_builddir,;t t s,@cxx@,$cxx,;t t s,@jar@,$jar,;t t s,@java@,$java,;t t s,@javac@,$javac,;t t s,@TAR@,$TAR,;t t s,@build@,$build,;t t s,@build_cpu@,$build_cpu,;t t s,@build_vendor@,$build_vendor,;t t s,@build_os@,$build_os,;t t s,@host@,$host,;t t s,@host_cpu@,$host_cpu,;t t s,@host_vendor@,$host_vendor,;t t s,@host_os@,$host_os,;t t s,@CYGPATH_M@,$CYGPATH_M,;t t s,@CYGPATH_W@,$CYGPATH_W,;t t s,@just_make@,$just_make,;t t s,@CXX@,$CXX,;t t s,@CXXFLAGS@,$CXXFLAGS,;t t s,@LDFLAGS@,$LDFLAGS,;t t s,@CPPFLAGS@,$CPPFLAGS,;t t s,@ac_ct_CXX@,$ac_ct_CXX,;t t s,@CC@,$CC,;t t s,@CFLAGS@,$CFLAGS,;t t s,@ac_ct_CC@,$ac_ct_CC,;t t s,@CPP@,$CPP,;t t s,@EGREP@,$EGREP,;t t s,@LIBOBJS@,$LIBOBJS,;t t s,@DOXYGEN@,$DOXYGEN,;t t s,@INSTALL_PROGRAM@,$INSTALL_PROGRAM,;t t 
s,@INSTALL_SCRIPT@,$INSTALL_SCRIPT,;t t s,@INSTALL_DATA@,$INSTALL_DATA,;t t s,@RANLIB@,$RANLIB,;t t s,@ac_ct_RANLIB@,$ac_ct_RANLIB,;t t s,@MKDIR@,$MKDIR,;t t s,@RM@,$RM,;t t s,@CHMOD@,$CHMOD,;t t s,@SED@,$SED,;t t s,@CAT@,$CAT,;t t s,@GREP@,$GREP,;t t s,@LTLIBOBJS@,$LTLIBOBJS,;t t /@stdvars@/r $stdvars s,@stdvars@,,;t t /@stddeps@/r $stddeps s,@stddeps@,,;t t /@stdmake@/r $stdmake s,@stdmake@,,;t t CEOF _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF # Split the substitutions into bite-sized pieces for seds with # small command number limits, like on Digital OSF/1 and HP-UX. ac_max_sed_lines=48 ac_sed_frag=1 # Number of current file. ac_beg=1 # First line for current file. ac_end=$ac_max_sed_lines # Line after last line for current file. ac_more_lines=: ac_sed_cmds= while $ac_more_lines; do if test $ac_beg -gt 1; then sed "1,${ac_beg}d; ${ac_end}q" $tmp/subs.sed >$tmp/subs.frag else sed "${ac_end}q" $tmp/subs.sed >$tmp/subs.frag fi if test ! -s $tmp/subs.frag; then ac_more_lines=false else # The purpose of the label and of the branching condition is to # speed up the sed processing (if there are no `@' at all, there # is no need to browse any of the substitutions). # These are the two extra sed commands mentioned above. (echo ':t /@[a-zA-Z_][a-zA-Z_0-9]*@/!b' && cat $tmp/subs.frag) >$tmp/subs-$ac_sed_frag.sed if test -z "$ac_sed_cmds"; then ac_sed_cmds="sed -f $tmp/subs-$ac_sed_frag.sed" else ac_sed_cmds="$ac_sed_cmds | sed -f $tmp/subs-$ac_sed_frag.sed" fi ac_sed_frag=`expr $ac_sed_frag + 1` ac_beg=$ac_end ac_end=`expr $ac_end + $ac_max_sed_lines` fi done if test -z "$ac_sed_cmds"; then ac_sed_cmds=cat fi fi # test -n "$CONFIG_FILES" _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF for ac_file in : $CONFIG_FILES; do test "x$ac_file" = x: && continue # Support "outfile[:infile[:infile...]]", defaulting infile="outfile.in". 
case $ac_file in - | *:- | *:-:* ) # input from stdin cat >$tmp/stdin ac_file_in=`echo "$ac_file" | sed 's,[^:]*:,,'` ac_file=`echo "$ac_file" | sed 's,:.*,,'` ;; *:* ) ac_file_in=`echo "$ac_file" | sed 's,[^:]*:,,'` ac_file=`echo "$ac_file" | sed 's,:.*,,'` ;; * ) ac_file_in=$ac_file.in ;; esac # Compute @srcdir@, @top_srcdir@, and @INSTALL@ for subdirectories. ac_dir=`(dirname "$ac_file") 2>/dev/null || $as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$ac_file" : 'X\(//\)[^/]' \| \ X"$ac_file" : 'X\(//\)$' \| \ X"$ac_file" : 'X\(/\)' \| \ . : '\(.\)' 2>/dev/null || echo X"$ac_file" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/; q; } /^X\(\/\/\)[^/].*/{ s//\1/; q; } /^X\(\/\/\)$/{ s//\1/; q; } /^X\(\/\).*/{ s//\1/; q; } s/.*/./; q'` { if $as_mkdir_p; then mkdir -p "$ac_dir" else as_dir="$ac_dir" as_dirs= while test ! -d "$as_dir"; do as_dirs="$as_dir $as_dirs" as_dir=`(dirname "$as_dir") 2>/dev/null || $as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_dir" : 'X\(//\)[^/]' \| \ X"$as_dir" : 'X\(//\)$' \| \ X"$as_dir" : 'X\(/\)' \| \ . : '\(.\)' 2>/dev/null || echo X"$as_dir" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/; q; } /^X\(\/\/\)[^/].*/{ s//\1/; q; } /^X\(\/\/\)$/{ s//\1/; q; } /^X\(\/\).*/{ s//\1/; q; } s/.*/./; q'` done test ! -n "$as_dirs" || mkdir $as_dirs fi || { { echo "$as_me:$LINENO: error: cannot create directory \"$ac_dir\"" >&5 echo "$as_me: error: cannot create directory \"$ac_dir\"" >&2;} { (exit 1); exit 1; }; }; } ac_builddir=. if test "$ac_dir" != .; then ac_dir_suffix=/`echo "$ac_dir" | sed 's,^\.[\\/],,'` # A "../" for each directory in $ac_dir_suffix. ac_top_builddir=`echo "$ac_dir_suffix" | sed 's,/[^\\/]*,../,g'` else ac_dir_suffix= ac_top_builddir= fi case $srcdir in .) # No --srcdir option. We are building in place. ac_srcdir=. if test -z "$ac_top_builddir"; then ac_top_srcdir=. else ac_top_srcdir=`echo $ac_top_builddir | sed 's,/$,,'` fi ;; [\\/]* | ?:[\\/]* ) # Absolute path. 
ac_srcdir=$srcdir$ac_dir_suffix; ac_top_srcdir=$srcdir ;; *) # Relative path. ac_srcdir=$ac_top_builddir$srcdir$ac_dir_suffix ac_top_srcdir=$ac_top_builddir$srcdir ;; esac # Do not use `cd foo && pwd` to compute absolute paths, because # the directories may not exist. case `pwd` in .) ac_abs_builddir="$ac_dir";; *) case "$ac_dir" in .) ac_abs_builddir=`pwd`;; [\\/]* | ?:[\\/]* ) ac_abs_builddir="$ac_dir";; *) ac_abs_builddir=`pwd`/"$ac_dir";; esac;; esac case $ac_abs_builddir in .) ac_abs_top_builddir=${ac_top_builddir}.;; *) case ${ac_top_builddir}. in .) ac_abs_top_builddir=$ac_abs_builddir;; [\\/]* | ?:[\\/]* ) ac_abs_top_builddir=${ac_top_builddir}.;; *) ac_abs_top_builddir=$ac_abs_builddir/${ac_top_builddir}.;; esac;; esac case $ac_abs_builddir in .) ac_abs_srcdir=$ac_srcdir;; *) case $ac_srcdir in .) ac_abs_srcdir=$ac_abs_builddir;; [\\/]* | ?:[\\/]* ) ac_abs_srcdir=$ac_srcdir;; *) ac_abs_srcdir=$ac_abs_builddir/$ac_srcdir;; esac;; esac case $ac_abs_builddir in .) ac_abs_top_srcdir=$ac_top_srcdir;; *) case $ac_top_srcdir in .) ac_abs_top_srcdir=$ac_abs_builddir;; [\\/]* | ?:[\\/]* ) ac_abs_top_srcdir=$ac_top_srcdir;; *) ac_abs_top_srcdir=$ac_abs_builddir/$ac_top_srcdir;; esac;; esac case $INSTALL in [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;; *) ac_INSTALL=$ac_top_builddir$INSTALL ;; esac if test x"$ac_file" != x-; then { echo "$as_me:$LINENO: creating $ac_file" >&5 echo "$as_me: creating $ac_file" >&6;} rm -f "$ac_file" fi # Let's still pretend it is `configure' which instantiates (i.e., don't # use $as_me), people would be surprised to read: # /* config.h. Generated by config.status. */ if test x"$ac_file" = x-; then configure_input= else configure_input="$ac_file. " fi configure_input=$configure_input"Generated from `echo $ac_file_in | sed 's,.*/,,'` by configure." # First look for the input files in the build tree, otherwise in the # src tree. 
ac_file_inputs=`IFS=: for f in $ac_file_in; do case $f in -) echo $tmp/stdin ;; [\\/$]*) # Absolute (can't be DOS-style, as IFS=:) test -f "$f" || { { echo "$as_me:$LINENO: error: cannot find input file: $f" >&5 echo "$as_me: error: cannot find input file: $f" >&2;} { (exit 1); exit 1; }; } echo "$f";; *) # Relative if test -f "$f"; then # Build tree echo "$f" elif test -f "$srcdir/$f"; then # Source tree echo "$srcdir/$f" else # /dev/null tree { { echo "$as_me:$LINENO: error: cannot find input file: $f" >&5 echo "$as_me: error: cannot find input file: $f" >&2;} { (exit 1); exit 1; }; } fi;; esac done` || { (exit 1); exit 1; } _ACEOF cat >>$CONFIG_STATUS <<_ACEOF sed "$ac_vpsub $extrasub _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF :t /@[a-zA-Z_][a-zA-Z_0-9]*@/!b s,@configure_input@,$configure_input,;t t s,@srcdir@,$ac_srcdir,;t t s,@abs_srcdir@,$ac_abs_srcdir,;t t s,@top_srcdir@,$ac_top_srcdir,;t t s,@abs_top_srcdir@,$ac_abs_top_srcdir,;t t s,@builddir@,$ac_builddir,;t t s,@abs_builddir@,$ac_abs_builddir,;t t s,@top_builddir@,$ac_top_builddir,;t t s,@abs_top_builddir@,$ac_abs_top_builddir,;t t s,@INSTALL@,$ac_INSTALL,;t t " $ac_file_inputs | (eval "$ac_sed_cmds") >$tmp/out rm -f $tmp/stdin if test x"$ac_file" != x-; then mv $tmp/out $ac_file else cat $tmp/out rm -f $tmp/out fi # Run the commands associated with the file. 
case $ac_file in scripts/python.sh ) ${CHMOD} a+x scripts/python.sh ;; scripts/pyantlr.sh ) ${CHMOD} a+x scripts/pyantlr.sh ;; scripts/csc.sh ) ${CHMOD} a+x scripts/csc.sh ;; scripts/java.sh ) ${CHMOD} a+x scripts/java.sh ;; scripts/jar.sh ) ${CHMOD} a+x scripts/jar.sh ;; scripts/javac.sh ) ${CHMOD} a+x scripts/javac.sh ;; scripts/antlr.sh ) ${CHMOD} a+x scripts/antlr.sh ;; scripts/cxx.sh ) ${CHMOD} a+x scripts/cxx.sh ;; scripts/link.sh ) ${CHMOD} a+x scripts/link.sh ;; scripts/c.sh ) ${CHMOD} a+x scripts/c.sh ;; scripts/lib.sh ) ${CHMOD} a+x scripts/lib.sh ;; scripts/cpp.sh ) ${CHMOD} a+x scripts/cpp.sh ;; ${ANTLR_CONFIG_FILES} ) ### echo "config.status: chmod a+w ${ac_file} .." ${CHMOD} a+w "${ac_file}" ;; esac done _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF # # CONFIG_COMMANDS section. # for ac_file in : $CONFIG_COMMANDS; do test "x$ac_file" = x: && continue ac_dest=`echo "$ac_file" | sed 's,:.*,,'` ac_source=`echo "$ac_file" | sed 's,[^:]*:,,'` ac_dir=`(dirname "$ac_dest") 2>/dev/null || $as_expr X"$ac_dest" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$ac_dest" : 'X\(//\)[^/]' \| \ X"$ac_dest" : 'X\(//\)$' \| \ X"$ac_dest" : 'X\(/\)' \| \ . : '\(.\)' 2>/dev/null || echo X"$ac_dest" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/; q; } /^X\(\/\/\)[^/].*/{ s//\1/; q; } /^X\(\/\/\)$/{ s//\1/; q; } /^X\(\/\).*/{ s//\1/; q; } s/.*/./; q'` { if $as_mkdir_p; then mkdir -p "$ac_dir" else as_dir="$ac_dir" as_dirs= while test ! -d "$as_dir"; do as_dirs="$as_dir $as_dirs" as_dir=`(dirname "$as_dir") 2>/dev/null || $as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_dir" : 'X\(//\)[^/]' \| \ X"$as_dir" : 'X\(//\)$' \| \ X"$as_dir" : 'X\(/\)' \| \ . : '\(.\)' 2>/dev/null || echo X"$as_dir" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/; q; } /^X\(\/\/\)[^/].*/{ s//\1/; q; } /^X\(\/\/\)$/{ s//\1/; q; } /^X\(\/\).*/{ s//\1/; q; } s/.*/./; q'` done test ! 
-n "$as_dirs" || mkdir $as_dirs fi || { { echo "$as_me:$LINENO: error: cannot create directory \"$ac_dir\"" >&5 echo "$as_me: error: cannot create directory \"$ac_dir\"" >&2;} { (exit 1); exit 1; }; }; } ac_builddir=. if test "$ac_dir" != .; then ac_dir_suffix=/`echo "$ac_dir" | sed 's,^\.[\\/],,'` # A "../" for each directory in $ac_dir_suffix. ac_top_builddir=`echo "$ac_dir_suffix" | sed 's,/[^\\/]*,../,g'` else ac_dir_suffix= ac_top_builddir= fi case $srcdir in .) # No --srcdir option. We are building in place. ac_srcdir=. if test -z "$ac_top_builddir"; then ac_top_srcdir=. else ac_top_srcdir=`echo $ac_top_builddir | sed 's,/$,,'` fi ;; [\\/]* | ?:[\\/]* ) # Absolute path. ac_srcdir=$srcdir$ac_dir_suffix; ac_top_srcdir=$srcdir ;; *) # Relative path. ac_srcdir=$ac_top_builddir$srcdir$ac_dir_suffix ac_top_srcdir=$ac_top_builddir$srcdir ;; esac # Do not use `cd foo && pwd` to compute absolute paths, because # the directories may not exist. case `pwd` in .) ac_abs_builddir="$ac_dir";; *) case "$ac_dir" in .) ac_abs_builddir=`pwd`;; [\\/]* | ?:[\\/]* ) ac_abs_builddir="$ac_dir";; *) ac_abs_builddir=`pwd`/"$ac_dir";; esac;; esac case $ac_abs_builddir in .) ac_abs_top_builddir=${ac_top_builddir}.;; *) case ${ac_top_builddir}. in .) ac_abs_top_builddir=$ac_abs_builddir;; [\\/]* | ?:[\\/]* ) ac_abs_top_builddir=${ac_top_builddir}.;; *) ac_abs_top_builddir=$ac_abs_builddir/${ac_top_builddir}.;; esac;; esac case $ac_abs_builddir in .) ac_abs_srcdir=$ac_srcdir;; *) case $ac_srcdir in .) ac_abs_srcdir=$ac_abs_builddir;; [\\/]* | ?:[\\/]* ) ac_abs_srcdir=$ac_srcdir;; *) ac_abs_srcdir=$ac_abs_builddir/$ac_srcdir;; esac;; esac case $ac_abs_builddir in .) ac_abs_top_srcdir=$ac_top_srcdir;; *) case $ac_top_srcdir in .) 
ac_abs_top_srcdir=$ac_abs_builddir;; [\\/]* | ?:[\\/]* ) ac_abs_top_srcdir=$ac_top_srcdir;; *) ac_abs_top_srcdir=$ac_abs_builddir/$ac_top_srcdir;; esac;; esac { echo "$as_me:$LINENO: executing $ac_dest commands" >&5 echo "$as_me: executing $ac_dest commands" >&6;} case $ac_dest in notice ) { echo "$as_me:$LINENO: --------------------------------------------------------- * WARNING * This package has been configured to be build by using $MAKE It is very likely that just running \"make\" from the command line will fail. Please remember therefore to use the configured version. ========================================================= " >&5 echo "$as_me: --------------------------------------------------------- * WARNING * This package has been configured to be build by using $MAKE It is very likely that just running \"make\" from the command line will fail. Please remember therefore to use the configured version. ========================================================= " >&6;} ;; esac done _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF { (exit 0); exit 0; } _ACEOF chmod +x $CONFIG_STATUS ac_clean_files=$ac_clean_files_save # configure is writing to config.log, and then calls config.status. # config.status does its own redirection, appending to config.log. # Unfortunately, on DOS this fails, as config.log is still kept open # by configure, so config.status won't be able to write to it; its # output is simply discarded. So we exec the FD to /dev/null, # effectively closing config.log, so it can be properly (re)opened and # appended to by config.status. When coming back to configure, we # need to make the FD available again. if test "$no_create" != yes; then ac_cs_success=: ac_config_status_args= test "$silent" = yes && ac_config_status_args="$ac_config_status_args --quiet" exec 5>/dev/null $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false exec 5>>config.log # Use ||, not &&, to avoid exiting from the if with $? 
= 1, which # would make configure fail if this is the last instruction. $ac_cs_success || { (exit 1); exit 1; } fi nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/configure.in000066400000000000000000001065531161462365500223210ustar00rootroot00000000000000dnl --*- sh -*-- ##xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx## ## This file is part of ANTLR. See LICENSE.txt for licence ## ## details. Written by W. Haefelinger ## ## ## ## ...............Copyright (C) Wolfgang Haefelinger, 2004 ## ## ## ##xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx## ## Process this file with autoconf to produce a configure ## script. AC_INIT(antlr, 2.7.5) AC_CONFIG_SRCDIR([LICENSE.txt]) AC_CONFIG_AUX_DIR(scripts) ## This shall be the very first config file. Do not change ## this. AC_CONFIG_FILES([scripts/config.vars]) AC_CONFIG_FILES([scripts/config.deps]) AC_CONFIG_FILES([scripts/config.make]) AC_CONFIG_FILES([antlr/Version.java]) AC_SUBST_FILE([stdvars]) AC_SUBST_FILE([stddeps]) AC_SUBST_FILE([stdmake]) ## ANTLR's core libraries for each supporte language. The variable ## in uppercase letters denotes the absolute name of the library. ## When in lower cases letters - see below - the variable just ## holds the basename. 
AC_SUBST([ANTLR_JAR]) AC_SUBST([ANTLR_LIB]) AC_SUBST([ANTLR_NET]) AC_SUBST([ANTLR_PY]) AC_SUBST([ASTFRAME_NET]) AC_SUBST([antlr_jar]) AC_SUBST([antlr_lib]) AC_SUBST([antlr_net]) AC_SUBST([antlr_py]) AC_SUBST([astframe_net]) AC_SUBST([ANTLRFLAGS]) AC_SUBST([ANTLR]) AC_SUBST([ANTLR_ACTION_FILES]) AC_SUBST([ANTLR_ANTLR_FILES]) AC_SUBST([ANTLR_COMPILE_CMD]) AC_SUBST([ANTLR_CYGWIN]) AC_SUBST([ANTLR_MINGW]) AC_SUBST([ANTLR_TOKDEF_FILES]) AC_SUBST([ANTLR_WIN32]) AC_SUBST([ANTLR_WITH_ANTLR_CMD]) AC_SUBST([ANTLR_WITH_ANTLR_JAR]) AC_SUBST([ARFLAGS]) AC_SUBST([AR]) AC_SUBST([AS]) AC_SUBST([BOOTCLASSPATH]) AC_SUBST([CSHARPCFLAGS]) AC_SUBST([CSHARPC]) AC_SUBST([CSHARP_COMPILE_CMD]) AC_SUBST([CLR]) AC_SUBST([CXX_COMPILE_CMD]) AC_SUBST([CXX_LIB_CMD]) AC_SUBST([CXX_LINK_CMD]) AC_SUBST([CYGPATH]) AC_SUBST([C_COMPILE_CMD]) AC_SUBST([DEBUG]) AC_SUBST([EXEEXT]) AC_SUBST([JARFLAGS]) AC_SUBST([JAR]) AC_SUBST([JAR_CMD]) AC_SUBST([JAVACFLAGS]) AC_SUBST([JAVAC]) AC_SUBST([JAVAFLAGS]) AC_SUBST([JAVA]) AC_SUBST([JAVA_CMD]) AC_SUBST([JAVA_COMPILE_CMD]) AC_SUBST([LIBEXT]) AC_SUBST([MAKE]) AC_SUBST([OBJEXT]) AC_SUBST([PATCHLEVEL]) AC_SUBST([PYTHONFLAGS]) AC_SUBST([PYTHON]) AC_SUBST([SUBVERSION]) AC_SUBST([TIMESTAMP]) AC_SUBST([TOUCH]) AC_SUBST([VERBOSE]) AC_SUBST([VERSION]) AC_SUBST([WITH_EXAMPLES]) AC_SUBST([abs_this_builddir]) AC_SUBST([cxx]) AC_SUBST([jar]) AC_SUBST([java]) AC_SUBST([javac]) AC_SUBST([TAR]) ## introduce package information as autoconf vars. VERSION=`echo $PACKAGE_VERSION | cut -d . -f 1` SUBVERSION=`echo $PACKAGE_VERSION | cut -d . -f 2` PATCHLEVEL=`echo $PACKAGE_VERSION | cut -d . -f 3` TIMESTAMP=`date +%Y%m%d` ## @abs_this_builddir@ - absolute path to top of build directory. ## According to GNU autoconf we can rely on that there's a proper ## pwd around. abs_this_builddir=`pwd` ## This is how we compile Java files .. JAVA_COMPILE_CMD="/bin/sh $abs_this_builddir/scripts/javac.sh" ## This is how we run Java .. 
JAVA_CMD="/bin/sh $abs_this_builddir/scripts/java.sh" ## This is how we pack Java (class) files .. JAR_CMD="/bin/sh $abs_this_builddir/scripts/jar.sh" ## And this is how we are going to compile ANTLR grammar files .. ANTLR_COMPILE_CMD="/bin/sh $abs_this_builddir/scripts/antlr.sh" ## This is how we compile CSHARP files .. CSHARP_COMPILE_CMD="/bin/sh $abs_this_builddir/scripts/csc.sh" ## This is how we compile C++ files and how we are going to create ## libantlr.a or antlr.lib etc. .. CXX_COMPILE_CMD="/bin/sh $abs_this_builddir/scripts/cxx.sh" CXX_LIB_CMD="/bin/sh $abs_this_builddir/scripts/lib.sh" CXX_LINK_CMD="/bin/sh $abs_this_builddir/scripts/link.sh" C_COMPILE_CMD="/bin/sh $abs_this_builddir/scripts/c.sh" ANTLR_JAR="$abs_this_builddir/antlr/antlr.jar" ANTLR_NET="$abs_this_builddir/lib/antlr.runtime.dll" ASTFRAME_NET="$abs_this_builddir/lib/antlr.astframe.dll" ANTLR_PY="$abs_this_builddir/lib/python/antlr/python.py" ## Note: values might be overriden in C++ section. OBJEXT=".o" LIBEXT=".a" ANTLR_LIB="$abs_this_builddir/lib/cpp/src/libantlr.a" stdvars="scripts/config.vars" stddeps="scripts/config.deps" stdmake="scripts/config.make" ## ## option --enable-java ## AX_ARG_ENABLE( [java], [LANG_JAVA], [enable or disable ANTLR for Java (enabled)], [1], ) ## ## option --enable-cxx ## AX_ARG_ENABLE( [cxx], [LANG_CXX], [enable or disable ANTLR for C++ (enabled)], [1], ) ## ## option --enable-python ## AX_ARG_ENABLE( [python], [LANG_PY], [enable or disable ANTLR for Python (enabled).], [1], ) ## ## option --enable-csharp ## AX_ARG_ENABLE( [csharp], [LANG_CS], [enable or disable ANTLR for C# (enabled)], [1], ) ## ## option --enable-verbose= ## AX_ARG_ENABLE( [verbose], [VERBOSE], [turn on verbosity when building package.], [0], ) ## ## option --enable-debug= ## AX_ARG_ENABLE( [debug], [DEBUG], [set debug level - any value greater zero enables a debug version], [0], ) ## ## option --enable-examples ## WITH_EXAMPLES=1 AX_ARG_ENABLE( [examples], [WITH_EXAMPLES], [include 
examples into this configuration (enabled)], [1], ) ## ## option --with-antlr-jar ## ANTLR_WITH_ANTLR_JAR="" AC_ARG_WITH( [antlr-jar], [AC_HELP_STRING( [--with-antlr-jar=ARG], [use given file (antlr.jar) to bootstrap]) ],[ if test -n "${ANTLR_WITH_ANTLR_CMD}" ; then opts="--with-antlr-jar,--with-antlr-cmd" AC_MSG_ERROR( [this configuration options mutually exclusive: $opts]) fi ANTLR_WITH_ANTLR_JAR="${withval}"] ) ## ## option --with-antlr-cmd ## ANTLR_WITH_ANTLR_CMD="" AC_ARG_WITH( [antlr-cmd], [AC_HELP_STRING( [--with-antlr-cmd=ARG], [use given command to compile ANTLR grammar files while bootstrapping..]) ],[ if test -n "${ANTLR_WITH_ANTLR_JAR}" ; then opts="--with-antlr-jar,--with-antlr-cmd" AC_MSG_ERROR( [this configuration options mutually exclusive: $opts]) fi ANTLR_WITH_ANTLR_CMD="${withval}" ] ) AC_ARG_WITH( [bootclasspath], [AC_HELP_STRING( [--bootclasspath=ARG], [use this option to set bootclasspath when using jikes. ARG is a white space seperated list of absolute file or directory names, typically /opt/jdk1.3/jre/lib/rt.jar. In most cases this option is not requird as configure tries to detect rt.jar itself. If configure fails or detects the wrong boot library you may use this option. Note that this option is only used when using jikes. 
]) ],[ BOOTCLASSPATH="${withval}" ] ) AX_ARG_WITH( [cxx], [CXX], ) AX_ARG_WITH( [make], [MAKE], ) AX_ARG_WITH( [java], [JAVA], ) AX_ARG_WITH( [javac], [JAVAC], ) AX_ARG_WITH( [jar], [JAR], ) AX_ARG_WITH( [python], [PYTHON], ) AX_ARG_WITH( [csharpc], [CSHARPC], ) #xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx# # S T A R T T E S T S # #xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx# # get host_os set AC_CANONICAL_HOST # Detect cygwin or mingw ANTLR_CYGWIN=no ANTLR_MINGW=no AC_MSG_CHECKING(whether this is Cygwin) case $host_os in *cygwin* ) ANTLR_CYGWIN=yes ;; *) AC_MSG_RESULT(no) ;; esac AC_MSG_CHECKING(whether this is MinGW) case $host_os in *mingw* ) ANTLR_MINGW=yes ;; *) AC_MSG_RESULT(no) ;; esac ## Set common file extensions depending on OS we are running on. ## File extensions depend on C++/C compiler in use. This values ## are just guesses and redefined further below. case "${host_os}" in *mingw*|*cygwin*) OBJEXT=".o" LIBEXT=".a" EXEEXT=".exe" ;; *) OBJEXT=".o" LIBEXT=".a" EXEEXT="" ;; esac ## Test whether we have cygpath test -z "$CYGPATH" && AC_PATH_PROGS(CYGPATH, cygpath$EXEEXT ) AC_SUBST([CYGPATH_M]) AC_SUBST([CYGPATH_W]) if test -n "$CYGPATH" ; then CYGPATH_M="${CYGPATH} -m" CYGPATH_W="${CYGPATH} -w" else CYGPATH_M="echo" CYGPATH_W="echo" fi AC_ARG_VAR( [ANTLRFLAGS], [Use environment variable ANTLRFLAGS to pass some extra flags to antlr when compiling grammar (*.g) files. ] ) #xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx# # MAKE # #xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx# ## Check whether there's a make program around. We search for a ## couple of well know names within $PATH. A user may skip this ## search by providing variable $MAKE. AC_ARG_VAR( [MAKE], [By default we search for "make", "gmake" and "gnumake" in your PATH as well as "/bin/make" and "/usr/bin/make". You may override this search by using enviromnent variable $MAKE. 
Note that a GNU make is required to build this package. However, when providing your own candidate a check for GNU make is skipped and all bets are on. ] ) ## @MAKE@ shall contain absolut path name of make program found. ## Search for well known make programs - take user given MAKE ## into account. The result will be a list of valid make prog- ## grams found and will be stored in variable MAKE. user_make="${MAKE}" AX_PATH_PROGS( [MAKE], [make gmake gnumake /bin/make /usr/bin/make] ) ## right now we need to have a GNU make around, other makes are ## not supported and likely to fail. if test "x${user_make}" == "x" ; then AX_GNU_MAKE( [MAKE], [AC_MSG_ERROR( [package requires GNU make])] ) fi ## we lookup 'make' in PATH. If the one found is not the same ## as the configured one we issue a warning message. AC_PATH_PROGS([just_make],[make],[%]) case "${just_make}" in ${MAKE}) ;; *) AC_CONFIG_COMMANDS([notice],[ AC_MSG_NOTICE([ --------------------------------------------------------- * WARNING * This package has been configured to be build by using $MAKE It is very likely that just running "make" from the command line will fail. Please remember therefore to use the configured version. ========================================================= ]) ],[ MAKE="${MAKE}" ] ) ;; esac #xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx# # JAVA # #xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx# ## @JAVAC@ shall contain absolut path name of javac program and ## similar to CXXFLAGS, @JAVACFLAGS@ shall contain all options ## required to compile JAVA source files. AC_ARG_VAR( [JAVAC], [By default we search for "jikes", "javac" and "gcj" in your $PATH on how to comile Java source files. You may override this search by using enviromnent variable $JAVAC. JAVAC may contain a list of candidates, either as absolute path names or as a relative one. In case a relative name is given, a search in $PATH will take place, otherwise the absolute name is tried. 
] ) AC_ARG_VAR( [JAVACFLAGS], [Environment variable JAVACFLAGS can be used to change or override all flags required to compile Java source files. Note that JAVACFLAGS understands the following: "+ flag1 flag2 .." append "flag1 flag2 .." to precomputed list "- flag1 flag2 .." prepend "flag1 flag2 .." to precomputed list "= flag1 flag2 .. override with flag1 flag2 ..". If there is a need to hardwire additional flags then edit scripts/javac.sh.in and run "CONFIG_FILES=scripts/javac.sh ./config.status" again. ] ) ## @JAVA@ shall contain absolut path name of java program and ## similar to CXXFLAGS, @JAVAFLAGS@ shall contain all options ## required to run JAVA class files. AC_ARG_VAR( [JAVA], [By default we search for "java" and "gij" in your PATH on how to run Java class files. You may override this search by using enviromnent variable $JAVA. JAVA may contain a list of candidates, either as absolute path name or as a relative one. In case of a relative name, a search in $PATH will take place. Otherwise the absolute name will be accepted if existing. ] ) AC_ARG_VAR( [JAVAFLAGS], [Shall contain all flags required to run Java class files. You may override by using environment variable JAVAFLAGS. ] ) AX_JAVA_PROGS( [JAVA], [java gij], [AX_VAR_HEAD([JAVA])] ) AX_JAVA_PROGS( [JAVAC], [jikes javac gcj], [AX_VAR_HEAD([JAVAC])] ) AX_JAVA_PROGS( [JAR], [jar], [ AX_VAR_HEAD([JAR]) ] ) case $LANG_JAVA in 1) jar="`basename $JAR`" jar="`echo ${jar}|sed 's,\..*$,,'`" ## This macro tries to determine which javac compiler is ## being used. Well known compilers are gcj, jikes and ## javac. A unknown compiler is treated as if javac has ## been given in the very, very naive hope that all ## javac compiler have at least the same options as the ## original, ie. javac. ## If your compiler is not in the list and does not be- ## have like javac, then you need to extend this macro ## by writing a specialized test. AX_WHICH_JAVAC([javac]) ## Jikes cannot live without having a Java around. 
Have ## therefore a look into Java installations found for ## a 'rt.jar'. test -n "${BOOTCLASSPATH}" && { for f in ${BOOTCLASSPATH} ; do AC_MSG_CHECKING([bootclasspath \"$f\"]) test -f "${f}" -o -d "${f}" || { AC_MSG_RESULT([does not exist]) AC_MSG_ERROR( [ =================================================================== Please check arguments given to --with-bootclasspath or \${BOOTCLASSPATH} Each argument must be a valid file or directory. Use whitespace to seperate your args. =================================================================== ]) } AC_MSG_RESULT([good]) done } test -z "${BOOTCLASSPATH}" && { case "${javac}" in jikes) BOOTCLASSPATH="" set x ${JAVA} while test $# -gt 1 ; do x="$2" ; shift d=`dirname $x` test -d "$d" || continue d=`(cd $d && cd .. && pwd)` test -d "$d" || continue test -f "$d/jre/lib/rt.jar" && { BOOTCLASSPATH="$d/jre/lib/rt.jar" ## we need to try whether jikes accept .. (tbd) break } test -f "$d/lib/rt.jar" && { BOOTCLASSPATH="$d/lib/rt.jar" ## we need to try whether jikes accept .. (tbd) break } done ## go for some unusual locations (MacOS) test -z "${BOOTCLASSPATH}" && { fwdir=/System/Library/Frameworks/JavaVM.framework/Versions for x in 1.4.1 1.3.1 ; do if test -f "$fwdir/$x/Classes/classes.jar" ; then BOOTCLASSPATH="$fwdir/$x/Classes/classes.jar" break fi done } ## give up in case we can't set. test -z "${BOOTCLASSPATH}" && { AC_MSG_ERROR( [Unable to set BOOTCLASSPATH - there is no rt.jar around.]) } ;; *) BOOTCLASSPATH="" ;; esac } test -n "${BOOTCLASSPATH}" && { ## Finalize BOOTCLASSPATH. Depending on platform join arguments using ## a different seperator. case $build_os in cygwin) sep=";" ;; *) sep=":" ;; esac set x $BOOTCLASSPATH ; shift BOOTCLASSPATH="$1" shift while test $# -gt 0 ; do BOOTCLASSPATH="${BOOTCLASSPATH}${sep}${1}" shift done } ## Use Java first in list. 
AX_VAR_HEAD([JAVA]) ;; esac #xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx# # C++ # #xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx# case $LANG_CXX in 1) AX_PATH_PROGS( [AR], [tlib lib ar /usr/bin/ar] ) ## Try to figure out what C++ compiler shall be used. Note that CC ## clashes on cygwin. While CC is usually SUN's C++ compiler name, ## CC is also present on Cygwin - it's just an alias for gcc. The ## real alias is actually 'cc' but names are searched in non- ## sensitive manner. To solve this problem we use kind of hack ## here and list compilers availabe to known operating systems. case $build_os in cygwin) ## On Cygwin/Microsoft we are aware of Borland C++, Microsoft ## C++ and GNU. cxx_compiler_list="bcc32 cl g++" # FIXME: for bcc32 c_compiler_list="cl gcc" ;; *) ## On other platforms we now HP C++ (aCC), IBM C++ (xlC*) and ## of course GNU. If there's a GNU compiler around we prefer ## GNU. This avoids also a problem with vendors having CC ## a symbolic link to "gcc" instead of "g++". cxx_compiler_list="g++ aCC CC xlC xlC_r cxx c++" # FIXME: for other unix flavours c_compiler_list="cc gcc xlc_r acc" ;; esac ## Find a compiler for me. If compiler is not in list you can al- ## ways override by using environment varialbe CXX. AC_PROG_CXX([${cxx_compiler_list}]) AC_PROG_CC([${c_compiler_list}]) ## just overrule what autoconf figured out - we never asked for ## this anyway. Our handling of compiler options is done below ## in the fine tuning section. CXXFLAGS="" ## 'cxx' shall be the canonical compiler name. For example, gcc ## cl, bcc, CC, etc. Note that this is in general not equal to CXX. ## For example, CYGWIN appears to have c++ as name for g++ and cc ## as alias for gcc. ## CXX is used to call the compiler, 'cxx' shall be used for ## decisions based on compiler in use. 
cxx="" if test "x$GXX" = xyes; then cxx="gcc" else cxx=`basename $CXX` cxx=`echo ${cxx}|sed 's,\.@<:@^.@:>@*$,,'` fi case ${cxx} in gcc*) cxx='gcc' ;; cl*|CL*) cxx='cl' ## check whether this is Microsoft C++ (tbd) ;; bcc32*|BCC32*) cxx='bcc32' ## check whether this is Borland C++ (tbd) ;; CC*) ## check whether this is SUN C++ (tbd) cxx="CC" ;; xlC*|xlC_r*) cxx="xlC" ## check whether this is IBM C++ (tbd) ;; aCC*) cxx='aCC' ## check whether this is HP C++ (tbd) ;; *) ## unknown compiler - good luck. AX_MSG_UNKOWN_CXX ;; esac ##xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx## ## COMPILER TUNING SECTION ## ##==============================================================## ## compiler tuning has basically removed from this configure ## script as it appears not to be handy and practical. All ## compiler flags are set in cxx.sh.in. If there is any ## change required, go there and change. ## Note that flags given in this file may overrule settings ## given in cxx.sh.in. Therefore, if you "add" flags here, ## put a "+" in front of variable CXXFLAGS. For example, let's ## say you want to add "-g". Then do this: ## ## CXXFLAGS="-g" ## .. ## CXXFLAGS="+ ${CXXFLAGS}" ## ## The addition of "+" CXXFLAGS should be the last action for ## that variable. The net effect is that "-g" will be added to ## flags set in cxx.sh.in. So the result may look like ## gcc -Wall -c -g .. ## ## Similar, put a "-" in front to get "gcc -g -Wall -c .." and ## put nothing or a "=" in front to get "gcc -g ..". ## ## Similar to CXXFLAGS are LDFLAGS and ARFLAGS for linking ## and making a static library. case "${cxx}" in cl|bcc32) OBJEXT=".obj" LIBEXT=".lib" EXEEXT=".exe" ANTLR_LIB="$abs_this_builddir/lib/cpp/src/antlr.lib" ;; *) OBJEXT=".o" ;; esac LDFLAGS= AX_VAR_HEAD([AR]) ##xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx## ## END COMPILER TUNING SECTION ## ##==============================================================## # Checks for header files. 
AC_HEADER_STDC AC_CHECK_HEADERS([stdlib.h unistd.h]) # Checks for typedefs, structures, and compiler characteristics. AC_C_CONST AC_C_INLINE # Checks for library functions. AC_FUNC_MALLOC AC_CHECK_FUNCS([strcasecmp]) ## Some further specific test required as are using std C++. ## (tbd) ;; esac ## test -z "$DOXYGEN" && AC_PATH_PROG(DOXYGEN, doxygen, doxygen, ) # This seems to convince configure to use an absolute path to the backup # install-sh script. ac_install_sh="$PWD/scripts/install-sh" AC_PROG_INSTALL AC_PROG_RANLIB test -z "$MKDIR" && AC_PATH_PROG(MKDIR, mkdir$EXEEXT, mkdir$EXEEXT ) test -z "$RM" && AC_PATH_PROG(RM, rm$EXEEXT, rm$EXEEXT ) AX_PATH_PROGS( [TAR], [gnutar tar], [AX_VAR_HEAD([TAR])] ) AX_PATH_PROGS( [TOUCH], [/bin/touch /usr/bin/touch touch], [AX_VAR_HEAD([TOUCH])] ) test -z "$CHMOD" && AC_PATH_PROG(CHMOD, chmod$EXEEXT, chmod$EXEEXT ) test -z "$SED" && AC_PATH_PROG(SED, sed$EXEEXT, sed$EXEEXT ) test -z "$CAT" && AC_PATH_PROG(CAT, cat$EXEEXT, cat$EXEEXT ) test -z "$GREP" && AC_PATH_PROG(GREP, grep$EXEEXT, grep$EXEEXT ) #xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx# # PYTHON # #xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx# AC_ARG_VAR([PYTHON], [By default we search for "python" in $PATH to execute Python files. Override this by providing a list of candidates in environment variable $PYTHON and use whitespace as spereration character. A candidate can be either a relative or absolute path name. In the former case a lookup in $PATH takes place, in the latter, the absolute path name must exist.]) AC_ARG_VAR([PYTHONFLAGS],[ Shall contain all flags required to run Python. Override the default by using environment variable $PYTHONFLAGS. ]) AX_PYTHON_PROGS( [PYTHON], [python], [AX_VAR_HEAD([PYTHON])] ) case $LANG_PY in 1) # We need a script that wrap Python calls in order to make Python # ANTLR aware. This script needs to be executable. 
AC_CONFIG_FILES( [scripts/python.sh], [${CHMOD} a+x scripts/python.sh], [CHMOD=${CHMOD}] ) AC_CONFIG_FILES( [scripts/pyantlr.sh:scripts/pyinst.sh.in], [${CHMOD} a+x scripts/pyantlr.sh], [CHMOD=${CHMOD}] ) AC_CONFIG_FILES( [lib/python/Makefile] ) # We have a Makefile that loops through all python examples. case $WITH_EXAMPLES in 1 ) AC_CONFIG_FILES( [examples/python/Makefile] ) AC_CONFIG_FILES([ examples/python/asn1/Makefile \ examples/python/ASTsupport/Makefile \ examples/python/calc/Makefile \ examples/python/columns/Makefile \ examples/python/exprAST/Makefile \ examples/python/filter/Makefile \ examples/python/filterWithRule/Makefile \ examples/python/heteroAST/Makefile \ examples/python/HTML/Makefile \ examples/python/IDL/Makefile \ examples/python/imagNodeAST/Makefile \ examples/python/includeFile/Makefile \ examples/python/inherit.tinyc/Makefile \ examples/python/java/Makefile \ examples/python/lexerTester/Makefile \ examples/python/lexRewrite/Makefile \ examples/python/linkChecker/Makefile \ examples/python/multiLexer/Makefile \ examples/python/multiParser/Makefile \ examples/python/parseBinary/Makefile \ examples/python/pascal/Makefile \ examples/python/cpp/Makefile \ examples/python/preserveWhiteSpace/Makefile \ examples/python/tinybasic/Makefile \ examples/python/tinyc/Makefile \ examples/python/transform/Makefile \ examples/python/treewalk/Makefile \ examples/python/unicode/Makefile \ examples/python/unicode.IDENTs/Makefile \ examples/python/xml/Makefile ]) ;; esac ;; esac #xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx# # CSHARP # #xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx# AC_ARG_VAR([CSHARPC], [By default we search for "cscc", "msc" and "csc" in $PATH to compile C# files. Override this by providing a list of candidates in environment variable $CSHARP and use whitespace as spereration character. A candidate can be either a relative or absolute path name. 
In the former case a lookup in $PATH takes place, in the latter, the absolute path name must exist.]) AC_ARG_VAR([CSHARPCFLAGS],[ Shall contain all flags required to compile a #C file. Override the default by using environment variable $CSHARPCFLAGS. ]) AX_CSHARP_PROGS( [CSHARPC], [cscc mcs csc /usr/local/bin/cscc /usr/local/bin/mcs /opt/bin/cscc /opt/bin/mcs], [AX_VAR_HEAD([CSHARPC])] ) ## get the basename of C# compiler. Depending on basename we try to ## decide about the CLR. test -n "${CSHARPC}" && { csharpc=`basename ${CSHARPC}` csharpc_d=`dirname ${CSHARPC}` } case $csharpc in cscc*) AX_CSHARP_PROGS( [CLR], [${csharpc_d}/ilrun ilrun /usr/local/bin/ilrun /opt/bin/ilrun], [AX_VAR_HEAD([CLR])] ) ;; mcs*) AX_CSHARP_PROGS( [CLR], [${csharpc_d}/mono mono /usr/local/bin/mono /opt/bin/mono], [AX_VAR_HEAD([CLR])] ) ;; esac case $LANG_CS in 1) AC_CONFIG_FILES( [scripts/csc.sh], [${CHMOD} a+x scripts/csc.sh], [CHMOD=${CHMOD}] ) AC_CONFIG_FILES([lib/csharp/Makefile]) AC_CONFIG_FILES([lib/csharp/src/Makefile]) AC_CONFIG_FILES([lib/csharp/ASTFrame/Makefile]) # We have a Makefile that loops through all python examples. 
case $WITH_EXAMPLES in 1) AC_CONFIG_FILES([examples/csharp/ASTsupport/Makefile]) AC_CONFIG_FILES([examples/csharp/HTML/Makefile]) AC_CONFIG_FILES([examples/csharp/IDL/Makefile]) AC_CONFIG_FILES([examples/csharp/ParseTreeDebug/Makefile]) AC_CONFIG_FILES([examples/csharp/TokenStreamRewrite/Makefile]) AC_CONFIG_FILES([examples/csharp/calc/Makefile]) AC_CONFIG_FILES([examples/csharp/columns/Makefile]) AC_CONFIG_FILES([examples/csharp/exprAST/Makefile]) AC_CONFIG_FILES([examples/csharp/filter/Makefile]) AC_CONFIG_FILES([examples/csharp/filterWithRule/Makefile]) AC_CONFIG_FILES([examples/csharp/heteroAST/Makefile]) AC_CONFIG_FILES([examples/csharp/java/Makefile]) AC_CONFIG_FILES([examples/csharp/multiLexer/Makefile]) AC_CONFIG_FILES([examples/csharp/parseBinary/Makefile]) AC_CONFIG_FILES([examples/csharp/preserveWhiteSpace/Makefile]) AC_CONFIG_FILES([examples/csharp/tinyc/Makefile]) AC_CONFIG_FILES([examples/csharp/unicode/Makefile]) AC_CONFIG_FILES([examples/csharp/Makefile]) ;; esac ;; esac # We need a script that wrap java calls in order to make Java # ANTLR aware. This script needs to be executable. AC_CONFIG_FILES( [scripts/java.sh], [${CHMOD} a+x scripts/java.sh]) # We need a script that wrap jar calls in order to make Java # ANTLR aware. This script needs to be executable. AC_CONFIG_FILES( [scripts/jar.sh], [${CHMOD} a+x scripts/jar.sh]) # We need a script that wrap javac calls in order to make Javac # ANTLR aware. This script needs to be executable. 
AC_CONFIG_FILES( [scripts/javac.sh], [${CHMOD} a+x scripts/javac.sh]) # We need a script that wraps antlr calls AC_CONFIG_FILES( [scripts/antlr.sh], [${CHMOD} a+x scripts/antlr.sh]) case $LANG_CXX in 1) # We need a script that wraps how we compile C++ AC_CONFIG_FILES([scripts/cxx.sh],[${CHMOD} a+x scripts/cxx.sh]) # # We need a script that wraps how we link C++ AC_CONFIG_FILES([scripts/link.sh],[${CHMOD} a+x scripts/link.sh]) # There's a few C files around so make sure we can compile those as well AC_CONFIG_FILES([scripts/c.sh],[${CHMOD} a+x scripts/c.sh]) # We need a script that wraps how we build a (static?) library AC_CONFIG_FILES([scripts/lib.sh],[${CHMOD} a+x scripts/lib.sh]) # We need a script that wraps how we run the preprocessor AC_CONFIG_FILES([scripts/cpp.sh],[${CHMOD} a+x scripts/cpp.sh]) # C++ library AC_CONFIG_FILES([lib/cpp/Makefile]) AC_CONFIG_FILES([lib/cpp/antlr/Makefile]) AC_CONFIG_FILES([lib/cpp/src/Makefile]) # C++ examples case $WITH_EXAMPLES in 1) AC_CONFIG_FILES([examples/cpp/Makefile]) AC_CONFIG_FILES([examples/cpp/ASTsupport/Makefile]) AC_CONFIG_FILES([examples/cpp/calc/Makefile]) AC_CONFIG_FILES([examples/cpp/exprAST/Makefile]) AC_CONFIG_FILES([examples/cpp/filter/Makefile]) AC_CONFIG_FILES([examples/cpp/filterWithRule/Makefile]) AC_CONFIG_FILES([examples/cpp/flexLexer/Makefile]) AC_CONFIG_FILES([examples/cpp/HTML/Makefile]) AC_CONFIG_FILES([examples/cpp/IDL/Makefile]) AC_CONFIG_FILES([examples/cpp/imagNodeAST/Makefile]) AC_CONFIG_FILES([examples/cpp/includeFile/Makefile]) AC_CONFIG_FILES([examples/cpp/inherit.tinyc/Makefile]) AC_CONFIG_FILES([examples/cpp/java/Makefile]) AC_CONFIG_FILES([examples/cpp/lexRewrite/Makefile]) AC_CONFIG_FILES([examples/cpp/multiLexer/Makefile]) AC_CONFIG_FILES([examples/cpp/multiParser/Makefile]) AC_CONFIG_FILES([examples/cpp/parseBinary/Makefile]) AC_CONFIG_FILES([examples/cpp/preserveWhiteSpace/Makefile]) AC_CONFIG_FILES([examples/cpp/tinyc/Makefile]) 
AC_CONFIG_FILES([examples/cpp/tokenStreamRewrite/Makefile]) AC_CONFIG_FILES([examples/cpp/transform/Makefile]) AC_CONFIG_FILES([examples/cpp/treewalk/Makefile]) AC_CONFIG_FILES([examples/cpp/unicode/Makefile]) ;; esac ;; esac # Makefile to build supplementary libraries .. AC_CONFIG_FILES([lib/Makefile]) case $WITH_EXAMPLES in 1) AC_CONFIG_FILES([examples/Makefile]) ;; esac AC_CONFIG_FILES([doc/Makefile]) AC_CONFIG_FILES([Makefile]) AC_CONFIG_FILES([scripts/antlr-config scripts/run-antlr scripts/antlr.spec]) case $LANG_JAVA in 1) AC_CONFIG_FILES([antlr/Makefile]) case $WITH_EXAMPLES in 1) AC_CONFIG_FILES([examples/java/ASTsupport/Makefile]) AC_CONFIG_FILES([examples/java/HTML/Makefile]) AC_CONFIG_FILES([examples/java/IDL/Makefile]) AC_CONFIG_FILES([examples/java/calc/Makefile]) AC_CONFIG_FILES([examples/java/columns/Makefile]) AC_CONFIG_FILES([examples/java/exprAST/Makefile]) AC_CONFIG_FILES([examples/java/filter/Makefile]) AC_CONFIG_FILES([examples/java/filterWithRule/Makefile]) AC_CONFIG_FILES([examples/java/heteroAST/Makefile]) AC_CONFIG_FILES([examples/java/imagNodeAST/Makefile]) AC_CONFIG_FILES([examples/java/includeFile/Makefile]) AC_CONFIG_FILES([examples/java/inherit.tinyc/Makefile]) AC_CONFIG_FILES([examples/java/java/Makefile]) AC_CONFIG_FILES([examples/java/lexRewrite/Makefile]) AC_CONFIG_FILES([examples/java/linkChecker/Makefile]) AC_CONFIG_FILES([examples/java/multiLexer/Makefile]) AC_CONFIG_FILES([examples/java/parseBinary/Makefile]) AC_CONFIG_FILES([examples/java/pascal/Makefile]) AC_CONFIG_FILES([examples/java/preserveWhiteSpace/Makefile]) AC_CONFIG_FILES([examples/java/tinybasic/Makefile]) AC_CONFIG_FILES([examples/java/tinyc/Makefile]) AC_CONFIG_FILES([examples/java/transform/Makefile]) AC_CONFIG_FILES([examples/java/treewalk/Makefile]) AC_CONFIG_FILES([examples/java/unicode.IDENTs/Makefile]) AC_CONFIG_FILES([examples/java/unicode/Makefile]) AC_CONFIG_FILES([examples/java/xml/Makefile]) AC_CONFIG_FILES([examples/java/Makefile]) 
AC_CONFIG_FILES([examples/java/cpp/Makefile]) ;; esac ANTLR_ACTION_FILES="" ANTLR_ACTION_FILES="${ANTLR_ACTION_FILES} actions/cpp/ActionLexer.java" ANTLR_ACTION_FILES="${ANTLR_ACTION_FILES} actions/cpp/ActionLexerTokenTypes.java" ANTLR_ACTION_FILES="${ANTLR_ACTION_FILES} actions/csharp/ActionLexer.java" ANTLR_ACTION_FILES="${ANTLR_ACTION_FILES} actions/csharp/ActionLexerTokenTypes.java" ANTLR_ACTION_FILES="${ANTLR_ACTION_FILES} actions/java/ActionLexer.java" ANTLR_ACTION_FILES="${ANTLR_ACTION_FILES} actions/java/ActionLexerTokenTypes.java" ANTLR_ACTION_FILES="${ANTLR_ACTION_FILES} actions/python/ActionLexer.java" ANTLR_ACTION_FILES="${ANTLR_ACTION_FILES} actions/python/ActionLexerTokenTypes.java" ANTLR_ACTION_FILES="${ANTLR_ACTION_FILES} actions/python/CodeLexer.java" ANTLR_ACTION_FILES="${ANTLR_ACTION_FILES} actions/python/CodeLexerTokenTypes.java" ANTLR_ANTLR_FILES="" ANTLR_ANTLR_FILES="${ANTLR_ANTLR_FILES} ANTLRParser.java" ANTLR_ANTLR_FILES="${ANTLR_ANTLR_FILES} ANTLRTokenTypes.java" ANTLR_ANTLR_FILES="${ANTLR_ANTLR_FILES} ANTLRLexer.java" ANTLR_TOKDEF_FILES="" ANTLR_TOKDEF_FILES="${ANTLR_TOKDEF_FILES} ANTLRTokdefParser.java" ANTLR_TOKDEF_FILES="${ANTLR_TOKDEF_FILES} ANTLRTokdefLexer.java" ANTLR_TOKDEF_FILES="${ANTLR_TOKDEF_FILES} ANTLRTokdefParserTokenTypes.java" ## This variables can be used in antlr/Makefile file_list="${ANTLR_ACTION_FILES} ${ANTLR_ANTLR_FILES} ${ANTLR_TOKDEF_FILES}" if test "x${file_list}" == "x" ; then : else ANTLR_CONFIG_FILES="" ANTLR_FILE_LIST="" ## iterate over my file list. If a file exists then don't copy ## this file - autoconf's behaviour is to delete existing files. for x in ${file_list} ; do f="antlr/${x}" if test -f "${f}" ; then : else f="${f}:${f}" ANTLR_CONFIG_FILES="${ANTLR_CONFIG_FILES} ${f}" ANTLR_FILE_LIST="${ANTLR_FILE_LIST} antlr/${x}" fi done ## copy files into build directory and make them writeable (in ## case we copy them from our depot. 
The actions necessary here ## to execute a command (chmod) on a list of files is bit ## hackish - it may depend on autoconf version in use (works ## fine for autoconf 2.59). ## The problem is that autoconf takes the file list literally, ## ie. we end up in config.status by something like ## ## case $ac_file in ## .. ## $ANTLR_CONFIG_FILES) chmod a+w .. ;; ## esac ## ## To make this work I'm introducing ANTLR_CONFIG_FILES as kind ## of 'catch-all' variable. The side effect is that every ## file with no explicit action will get a "chmod a+w ..." But ## that should be ok for Makefiles etc. AC_CONFIG_FILES([ ${ANTLR_CONFIG_FILES} ],[ ### echo "config.status: chmod a+w ${ac_file} .." ${CHMOD} a+w "${ac_file}" ],[ ANTLR_CONFIG_FILES='*' ANTLR_FILE_LIST="${ANTLR_FILE_LIST}" CHMOD="${CHMOD}" ] ) fi ;; esac ## compute basename of core libraries antlr_jar=`basename ${ANTLR_JAR}` antlr_net=`basename ${ANTLR_NET}` antlr_lib=`basename ${ANTLR_LIB}` antlr_py=`basename ${ANTLR_PY}` astframe_net=`basename ${ASTFRAME_NET}` test -z "${JAVA}" && { JAVA=java } ### cygwin has no (supported) Java - users are requested to have java ### in their PATH in order to execute "bin/antlr.sh". To support this ### I'm making sure that just the basename is used. case $host_os in *cygwin* |*mingw*|*msys*) AX_BASENAME([JAVA]) ;; esac AC_OUTPUT nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/doc/000077500000000000000000000000001161462365500205435ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/doc/ANTLRException.gif000066400000000000000000000342501161462365500237750ustar00rootroot00000000000000GIF89a!,@(p @*DpaC #J8 D/R{X+߽4⟒O^װ5_&씫o+Ί}Y:G#jʝF*1k+_ΜwFyB 1㱁f7m=)["PРa ȟO_W_LueSTdfzi_gH _}g=ErWء7abٌGceMMw\jm$O9iߋ <SB eO>Ie9BXneRQZYdl (|Nyxډgc&hЙ/Y_"gQ ע_hy2YnY7v(ҹuw=ꫢh㏬X gʕD ٬zwꮽ[њ‰}%&doQi–ⱍ`LWx\:敫*ak吰vg$z[G)?Mz穨=^˟1 gy.isʒaۯξ Vʶ⦅śc^!Lp"b̸+7Z tlc eKI;֖ terV1GэnJLuokdW]NeCw1X /0DY6+n7`Z<n . 
wX1y,uáU1mUh{>Q{,D%Ml[TƘ)RHC;Pd:T,L4^WJu)g#gH9z8 iǾXҒK )um2wi AC|@l4lxA,D [S/Eor!K{j& H w$GM%3dpF$ # CD 1պZdiŏ8>+'^ |‹$w<`H@ שE.^&PXq`'2QqqqQfTj5%Rj@t+ IwtD*Z)5Ǝv$$Z`cxXKJurYfD'Š B^] W+qk-$ZO2phy#dˢ6}FjSqvy,2#+AY>4Ec{f6(CMH,AKuWoEp*8Vz"Gs)նV.nWO\".jM 0%ź񁷻g|CҐkaKjk_>P!8㝉S94q֧[ т;RAL\˚%ZAvD-I]qmңFŻfT{I/ڼ7Nx1hc߀+Jg6.'ŒZiLTs)z׿ֵ{ b;>l,#q-g6Yvb}Qu e*NvMzۛ&w~7OxN#wiea!{7# zn m:Wuuk:wUYG[[ѷBU.p63#Lc]N8sj8mYksfE6rS,{JW,UuR ɘV2e;F{xG7ή GpNtz·Ʈw}|AXr.Ms2;'41>p2 ?o3_n8q_IG2GtՌMIǧ<}h{F-yƒ=暭xʇW*z}(t.yUb!?&=a5OlSz{|r3xy^\CM=fQ %uȲSΔZ>jgBLdqHyA_Ty5C48 8H!cx~tvRQYڷzpF2GYg^X|gGjDu{h{_8XXG=MׁM@J]~V++?#!\Te[qFq]w(v;Hueu vjr+EgwV"5ruUW%,WJg'$Hz΄&_P#}oUuy4!N8gvtE[tw}Q88ac{?z4D|jgI1tfOkT|ܦEwvNe7yNF/}aaD,4G"ڵ,xqX5RkhLxۗ@gAI+SxcxvTX?T%fEu9u9uypr@NǘWӸNK}mghH_Ӌf˜&^Eum2.IGU`IUcz{V'V-!!kT5%${fo4`J#FTo /)q6'hqI])"mFh넒rOfWHihVKwIEJgmThYv}y=rH:x9q)3JVi=]8[$$3ۈY/"t4Zx4كeWgcғ{Vf{2.P[1)rdxӅY3ɑ KIWEw&RUa)~, y\>`H}׆Q鑧UZY)ُ)LQxYvס *84D8:>(t\ćSD|Fih{yy`劐weݗzȚ0g YǚcivylY=:Yu7e~Od))w3VvLɎJwg'71{+TwfŬG{ +[N۹;{Y*[|ښy1$Uiiۦ҇8Z-Ȑ8U.฼ؗ*;sQ4yXFW4C4oJӺW::r%ڣGw Gr+rj HvLU,X\[RwK+?vL;*$A8JKK)IھJW™cӔ6G䑅zg+z:\S@l=xPLyH7*06wHRZ{dn]\?Nkz)ubI"ڟM**mڧKgl`8yI 4:ZbtZ |fNp a&f*:A&_(i?*(Ǥ3*&S,d+|k4K)%2KY͜W1M74f%|'O#L3_UqKuYViAxŸ$a)a9|5"R!<ƻz|'z֋͡ܮ:&]}`~c wȈkx+ ) <˼$Iͦ ǖ3:S6D#irZ:͍= }:5vӨݫJTʁI{܋:nlϜ$QV(* qlu2{,BK bU2O^:Usք9Lm⅖\݉:Wȣgaɱ lŨт;5ULoxLCmx]nѹEҐ\t t*U$64kܬ}x'MȺ} moL҅ Oſ ۻ}ii&ئiB"]<7Jl{؆,ZalA:,"}{`)i+%ߊnMtہLĜ˨J]<[ WF)Ck"["CRa&|ΥG.rL¡<ww)z$Q9ߵ{\Æޡwл<LLrڭkmպ:J=B>ǒn)a38ޮŒH=C̪'נ4%- ['.K)LTBL=^unCDiKN]{T]VdW 9dQ5vft7NWiTMc^ N9cOMW\ݘ`l~Ӡ3׍^TR ̷Up>zA ,ێ2N4T-AnIUVϕ;NS,lCV:}umٽ.梗4{V8T([0,H]1>Mw>5`J8?./ \M^:q"6 nm=oF nn %Jl"ݧ ίfMmn۩QWވMy_O2@ DPB >TDJxF`̸Ǐ AZ<aJWibJ,ksfB9Rd2'N'<*TR.52zSaω'.ŚUV]"Zѯdez j٬j*dRbZ7[zlHhǼ FmԪ׶(}!OlCC T?3Bl6ȘM'H@P ̽w=U֍xr4rixl\uO3]vkɂIs049s@`|^]5? 
ηL3FkNǎ2m<Ƴjoٺ#zo>t!`{?eqgZtQGc0c pD:SpC4Tһ(aDo+J-*I|?JDdL̼R'rĭ\mL?tB4C4Q)ۼNN%#ɳdRKrETTNt-5Og2S9\p I%+U[/5W]uMr-w=kE8|sYO#.2m 7y#vDY CAEs9qT}7`?LTpU/@Z - M88ELq:͈iez_=Q!UpI03]t^65m1Y\G}\F}F!)Vod膅H/ N&d6 H oŪ(e ͛/..4Ѝ^UvWoyoowa%ell;F'n/)#% "ǜH=tÞ(Oguv=vMʣZ mu6d!eq\7gϐDմS^j+Ur˻ז-^½{3%j6>Y6oXJGf5wf?OC$`΁O2v>쬂NV~$ $G#X&B iO{"%P7 B#>ZO?*zHHE4ڍRbCCЄP`VqfYipb X׹. )tO|otc'Ց)jbبdc 9rDd"5E6Ҋ4dxHJVҒE'wS"@zYd#8GVte,a9KU򕶔%. Wʾ/aUBObDfvXd(d49ͼ!1ԢX3@ְl?8DјB!G)ʻQӞgC1*,K&-VMSҙE(-Єҡ }hC!:QW.|BEʋvԣiHi;Ԥ'EiJ;Q/Kc ST6MuSԧ8)p8RըGuȐT6թOjT:Ur*ک?B;hCcuXeo:Y|T.oc"-JqR*$50KϜ7}U2XAu_[]$;XI.Ns+cYi#͑2C[Yn'xULhV]a7ٷvw)lűߚťק2TMlt76gWVe~l$e=|aJ~MK-{Z\5oYfܟ# `RW{ҩNM+[j}_{`y-.w[3kc{!Vx5qw 'GyU~r˦=Iwr@FWmk n;L*ʫvf¼{%xj\~xKnn!'0fcU. kŘ&2ZX);wq9Zrv3HM p/? φflF:W/с#ƃ$Ɣ-:JѩI`KGkZ˫dw0fyI<->=z>kvӂ<;y}!!kAd|Oߝqz fg>\D/g?vX2;#1Zo<, m&> 雚!" <('c8D (! L'@׃Sz(B"02 pk!>dT@ '(@!)< 4&$?L@)?3,C'<4@AR-c5|5; h:s>Dl ACĿ i?ov"CklD Z<Ӧ#CD8=[GH4D@T4=+@ \ï bʗN*O7<(cl#T E!H e f4cŪ´|=YϺF?l9X= yGBµtܻ9s 5T$DȄTȅaȇȈHa<:,{;1M\+H5f*ZNA>WʼnI+$S/,I^U-MYz[-֒c!=E|< ! ]mQ֡gasS3K%x3'ӝ}ܯ&PƝߝ5(ARsP8ܡӍX\DϔY JQTMߪk-I' =-M;U+dn7( \H*t"MDB|H~|Ǐ={YUmW\/+Y<0G:pGL`_}I{i; qTYH5XiC} V JI,ѢauӯzOUt-T㵬;G//IU:nOvc M+&(uZB.3ݑ\V7f]e]Ռ;LݣTOW}#] /PR_9Ƴ_^`gV^6QmmCɖl= eVҸ01^j'L!k-_e߳énX$MXn|9ܱ/A]]hBICSM.iˍE`s?x O.\RWN@D`#Q5eDψNS4D\9j8uL;>؎g@D͞≗6w\;/pRd]S˄w\s$Yh^O!ߧauZrud5CwM=bv2FB=d穆lԚMr+=[pOZ%?\g=miw}2>j9iVƼ&gqf#ܗvuJ|}pjߏ[Tn.jT1>fʼj"o',_&^EFoj]@.]Q떞S2v `JwT^"_u߷GՕ%S+_7m?O7tPzzf4܎OI/,Oa~{HZǗ n&kCd"ӧI\D_ӷfjs'[:2%^CNQAfnECO Z4b>w.G=Re9py֢ b>7~o^w ,hA*,pC,|(qE#^#č/nTX$ʃ"!RL9̘2gҬi&Μ:w'РBcbtɐc¢FU2Et)ʓONR-G&jװbz=ZVôh*ٸrҭ,ۺx+R[UŽ\6w}26’?e5rH;|Lz(ԪWI0Ð's֌س#Oz߱{3>\٠ە;*owG''}sVf;z/Nv|_=T )JUs6 R|mF! 
W 00!& v|`W y(ۇF`q9GoI&RB`$(7y8rQrER8`DȩR]gdY5JdZs;h؁^~R7!Ȧy'/dZב6Y!ڡGv0LQ[%&Bj褎.ʨ )Uފ\`}:G{W'S +^Y`l՛:P ':u*)jJ(2~-ke :ҖVhZ)gg神 c\*zg"̭i\Z03쩴Rߌنa Ybٜ8^,ȲovK0D~A;ۨ>L"}>3LoW" jaOeJc^{3U(xUuwvgg35y F6i+?W&G)p#x+%㡋9%^iŜ|~p2uVc ?<;-X^j`Mnσ7Nߞܠqq>DGW IK1ȅrK4%I ITI-F,:IKt \l=iQMLi-a֥3%U#O]&*οH2Ks W ƸR&^"KZHL(wtST%dզUi)WE8ӡj?4_4)׸uuͣ1 8i #55񅠫fE~DA;VO kY̒5qђvO?-WmF q׻c`_<*s\"2%l p+ xĻVm.tg6ulFZrUwI+W=(حܠ}/|uX䆗-KЀw 8 n!׼kV 0c8FT( -vs+bGmB8@qQ'׊Ȋ71Ծm1ٕPQm*uC'Bc 59"H%֡m!]@Sn$Qz1jf C`xHF3 $ ;pÀ9k[*.󗄦nY(fٸ˝(DJz&&6zi50C#zҟ岈 LZ&H<׼vf8!BL3L+}RgvlTa.ȩQeض$HI]1j;PgQlyTrj[Qhu-M:B6+Ig-yO 5H8Tj;, fVx!{fvv.Sjbc>l+g#tUz4I!ʫbhnG6w&>3ZV{M5Kթ^OTVD=he`^iׯk}tX s;ԿKr[㔷I~,h[1o$3ss;/οy/SNLQ=WF wq{=oiƷ0^ 3W|b/w]7[3$X -~!%\yB4_oC0MeY_QM`XauE6!FIͭTQIȹQqe ۛQ Z.}эZ ӷ,!0=~ }6!ޟ_R2W8I]U-Z]UM J̴i^_ ֡) a\~r 2؏!$Fb Qm ڶ`EIu-= ]ΡVy"]]a>!,΢֢,b,smYc(8-ޢ0G/X$63N=cE`4^#66m#7Y0.8#9c88#;;0΅W6#=֣=YQݣ>#?$;#<d;A $BdB$CBҕ?FDN$EiU$FfF69dC.G$HC~dIIIFK$LƤL$MJM$NNdJdN%OBdMQ;nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/doc/Makefile.in000077500000000000000000000032631161462365500226170ustar00rootroot00000000000000############################################################################### # $Id:$ ############################################################################### ## do not change this value subdir=doc ## get configured vars @stdvars@ ## get configured rules @stdmake@ antlr_doc_DIR = $(datadir)/doc/$(versioneddir) antlr_doc_FILES = \ @abs_top_srcdir@/doc/closure.gif \ @abs_top_srcdir@/doc/cpp-runtime.html \ @abs_top_srcdir@/doc/csharp-runtime.html \ @abs_top_srcdir@/doc/err.html \ @abs_top_srcdir@/doc/glossary.html \ @abs_top_srcdir@/doc/hidden.stream.gif \ @abs_top_srcdir@/doc/index.html \ @abs_top_srcdir@/doc/inheritance.html \ @abs_top_srcdir@/doc/j-guru-blue.jpg \ @abs_top_srcdir@/doc/jguru-logo.gif \ @abs_top_srcdir@/doc/lexer.html \ @abs_top_srcdir@/doc/lexer.to.parser.tokens.gif \ @abs_top_srcdir@/doc/logo.gif \ @abs_top_srcdir@/doc/metalang.html \ @abs_top_srcdir@/doc/optional.gif \ 
@abs_top_srcdir@/doc/options.html \ @abs_top_srcdir@/doc/posclosure.gif \ @abs_top_srcdir@/doc/python-runtime.html \ @abs_top_srcdir@/doc/runtime.html \ @abs_top_srcdir@/doc/sor.html \ @abs_top_srcdir@/doc/stream.perspectives.gif \ @abs_top_srcdir@/doc/stream.selector.gif \ @abs_top_srcdir@/doc/streams.html \ @abs_top_srcdir@/doc/stream.splitter.gif \ @abs_top_srcdir@/doc/subrule.gif \ @abs_top_srcdir@/doc/trees.html \ @abs_top_srcdir@/doc/vocab.html \ $(eol) install :: @$(MKDIR) -p "$(antlr_doc_DIR)" @echo "install doc files .. " @for f in $(antlr_doc_FILES) ; do \ echo "install $${f}" ; \ if test -f "$${f}" ; then \ $(INSTALL) -m 444 "$${f}" "$(antlr_doc_DIR)" ; \ fi ;\ done ## get configured dependencies .. @stddeps@ nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/doc/closure.gif000066400000000000000000000041661161462365500227150ustar00rootroot00000000000000GIF89a,@H*\ȰÇ#JHŋ3JDǏ9t(rɓQ.,ʗ]9SL8 ΁?{ ThQ*H4# 6eTU+!ZItk̈́O.UdD v,ƴXv| 5ݻx+nҷq5 q0aNLpqP:~̘֥mV*'g^Vh)ۍ}hpmԜqGfO;Җb{-ދ6ycG^e}5eZny"mܕir_ dK&ْ!Rf6f᭖G%)ޞ|*蠄jk袌6h!I>馜v_"`:*Iz*b *nԩSq2ץ┝8*WE,MRxUd^U,kJ[kHXg|:B&V\f;ҋpZ%m%{Sj.Fj%{+w;p&;&iݐ<߭O^ _l~+ݴu{p~6<#Z .^Zz~\R+p:sϲ  \0\q0;LhwioKDa Ҏ}6}2~e)2ϭ|߀.Xꮊ/>T_ qN5:+=lo 9k~9=;{᭫:鹛~;:^Xq¡ r`q?]t7ߕ 5v3lby_ꩵAW%5Tk w^;_XJr:鋷s{^f-g|NQ`3腏5BVupΟGM0 g8ûEzŲrCЇԒ ErЃD؀u$g£th;)VbҾ<\8 E,)α@BU%JHF-fNA(F'}jgƨ]r#s(8Dz!lLcpH> %JҍM^ҍNyW DLMXTZ0!t`H:qb08(wPG`6Sd.Gx^f2LcSa9IuqʛCOB=R<7\bҗz4y[IT,GS+]Tbթ*p - r$#1j@դ@ A^&d5ЋЃ[|$ifh$]<M:;nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/doc/cpp-runtime.html000066400000000000000000000320551161462365500237010ustar00rootroot00000000000000 C++ Notes

C++ Notes


The C++ runtime and generated grammars look very much the same as the java ones. There are some subtle differences though, but more on this later.

Building the runtime

The following is a bit unix centric. For Windows some contributed project files can be found in lib/cpp/contrib. These may be slightly outdated.

The runtime files are located in the lib/cpp subdirectory of the ANTLR distribution. Building it is in general done via the toplevel configure script and the Makefile generated by the configure script. Before configuring please read INSTALL.txt in the toplevel directory. The file lib/cpp/README may contain some extra information on specific target machines.

./configure --prefix=/usr/local
make

Installing ANTLR and the runtime is then done by typing

make install
This installs the runtime library libantlr.a in /usr/local/lib and the header files in /usr/local/include/antlr. Two convenience scripts antlr and antlr-config are also installed into /usr/local/bin. The first script takes care of invoking antlr and the other can be used to query the right options for your compiler to build files with antlr.

Using the runtime

Generally you will compile the ANTLR generated files with something similar to:
c++ -c MyParser.cpp -I/usr/local/include
Linking is done with something similar to:
c++ -o MyExec <your .o files> -L/usr/local/lib -lantlr

Getting ANTLR to generate C++

To get ANTLR to generate C++ code you have to add

language="Cpp";
to the global options section. After that things are pretty much the same as in java mode except that a all token and AST classes are wrapped by a reference counting class (this to make live easier (in some ways and much harder in others)). The reference counting class uses
operator->
to reference the object it is wrapping. As a result of this you use -> in C++ mode in stead of the '.' of java. See the examples in examples/cpp for some illustrations.

AST types

New as of ANTLR 2.7.2 is that if you supply the

buildAST=true
option to a parser then you have to set and initialize an ASTFactory for the parser and treewalkers that use the resulting AST.
ASTFactory my_factory;	// generates CommonAST per default..
MyParser parser( some-lexer );
// Do setup from the AST factory repeat this for all parsers using the AST
parser.initializeASTFactory( my_factory );
parser.setASTFactory( &my_factory );

In C++ mode it is also possible to override the AST type used by the code generated by ANTLR. To do this you have to do the following:

  • Define a custom AST class like the following:
    #ifndef __MY_AST_H__
    #define __MY_AST_H__
    
    #include <antlr/CommonAST.hpp>
    
    class MyAST;
    
    typedef ANTLR_USE_NAMESPACE(antlr)ASTRefCount<MyAST> RefMyAST;
    
    /** Custom AST class that adds line numbers to the AST nodes.
     * easily extended with columns. Filenames will take more work since
     * you'll need a custom token class as well (one that contains the
     * filename)
     */
    class MyAST : public ANTLR_USE_NAMESPACE(antlr)CommonAST {
    public:
       // copy constructor
       MyAST( const MyAST& other )
       : CommonAST(other)
       , line(other.line)
       {
       }
       // Default constructor
       MyAST( void ) : CommonAST(), line(0) {}
       virtual ~MyAST( void ) {}
       // get the line number of the node (or try to derive it from the child node
       virtual int getLine( void ) const
       {
          // most of the time the line number is not set if the node is a
          // imaginary one. Usually this means it has a child. Refer to the
          // child line number. Of course this could be extended a bit.
          // based on an example by Peter Morling.
          if ( line != 0 )
             return line;
          if( getFirstChild() )
             return ( RefMyAST(getFirstChild())->getLine() );
          return 0;
       }
       virtual void setLine( int l )
       {
          line = l;
       }
       /** the initialize methods are called by the tree building constructs
        * depending on which version is called the line number is filled in.
        * e.g. a bit depending on how the node is constructed it will have the
        * line number filled in or not (imaginary nodes!).
        */
       virtual void initialize(int t, const ANTLR_USE_NAMESPACE(std)string& txt)
       {
          CommonAST::initialize(t,txt);
          line = 0;
       }
       virtual void initialize( ANTLR_USE_NAMESPACE(antlr)RefToken t )
       {
          CommonAST::initialize(t);
          line = t->getLine();
       }
       virtual void initialize( RefMyAST ast )
       {
          CommonAST::initialize(ANTLR_USE_NAMESPACE(antlr)RefAST(ast));
          line = ast->getLine();
       }
       // for convenience will also work without
       void addChild( RefMyAST c )
       {
          BaseAST::addChild( ANTLR_USE_NAMESPACE(antlr)RefAST(c) );
       }
       // for convenience will also work without
       void setNextSibling( RefMyAST c )
       {
          BaseAST::setNextSibling( ANTLR_USE_NAMESPACE(antlr)RefAST(c) );
       }
       // provide a clone of the node (no sibling/child pointers are copied)
       virtual ANTLR_USE_NAMESPACE(antlr)RefAST clone( void )
       {
          return ANTLR_USE_NAMESPACE(antlr)RefAST(new MyAST(*this));
       }
       static ANTLR_USE_NAMESPACE(antlr)RefAST factory( void )
       {
          return ANTLR_USE_NAMESPACE(antlr)RefAST(RefMyAST(new MyAST()));
       }
    private:
       int line;
    };
    #endif
    
  • Tell ANTLR's C++ codegenerator to use your RefMyAST by including the following in the options section of your grammars:
    ASTLabelType = "RefMyAST";
    
    After that you only need to tell the parser before every invocation of a new instance that it should use the AST factory defined in your class. This is done like this:
    // make factory with default type of MyAST
    ASTFactory my_factory( "MyAST", MyAST::factory );
    My_Parser parser(lexer);
    // make sure the factory knows about all AST types in the parser..
    parser.initializeASTFactory(my_factory);
    // and tell the parser about the factory..
    parser.setASTFactory( &my_factory );
    

    After these steps you can access methods/attributes of (Ref)MyAST directly (without typecasting) in parser/treewalker productions.

    Forgetting to do a setASTFactory results in a nice SIGSEGV or you OS's equivalent. The default constructor of ASTFactory initializes itself to generate CommonAST objects.

    If you use a 'chain' of parsers/treewalkers then you have to make sure they all share the same AST factory. Also if you add new definitions of ASTnodes/tokens in downstream parsers/treewalkers you have to apply the respective initializeASTFactory methods to this factory.

    This all is demonstrated in the examples/cpp/treewalk example.

Using Heterogeneous AST types

This should now (as of 2.7.2) work in C++ mode. With probably some caveats.

The heteroAST example show how to set things up. A short excerpt:

ASTFactory ast_factory;

parser.initializeASTFactory(ast_factory);
parser.setASTFactory(&ast_factory);

A small excerpt from the generated initializeASTFactory method:

void CalcParser::initializeASTFactory( antlr::ASTFactory& factory )
{
   factory.registerFactory(4, "PLUSNode", PLUSNode::factory);
   factory.registerFactory(5, "MULTNode", MULTNode::factory);
   factory.registerFactory(6, "INTNode", INTNode::factory);
   factory.setMaxNodeType(11);
}

After these steps ANTLR should be able to decide what factory to use at what time.

Extra functionality in C++ mode.

In C++ mode ANTLR supports some extra functionality to make life a little easier.

Inserting Code

In C++ mode some extra control is supplied over the places where code can be placed in the gerenated files. These are extensions on the header directive. The syntax is:
header "<identifier>" {  }

identifier where
pre_include_hpp Code is inserted before ANTLR generated includes in the header file.
post_include_hpp Code is inserted after ANTLR generated includes in the header file, but outside any generated namespace specifications.
pre_include_cpp Code is inserted before ANTLR generated includes in the cpp file.
post_include_cpp Code is inserted after ANTLR generated includes in the cpp file, but outside any generated namespace specifications.

Pacifying the preprocessor

Sometimes various tree building constructs with '#' in them clash with the C/C++ preprocessor. ANTLR's preprocessor for actions is slightly extended in C++ mode to alleviate these pains.

NOTE: At some point I plan to replace the '#' by something different that gives less trouble in C++.

The following preprocessor constructs are not touched. (And as a result you cannot use these as labels for AST nodes.

  • if
  • define
  • ifdef
  • ifndef
  • else
  • elif
  • endif
  • warning
  • error
  • ident
  • pragma
  • include

As another extra it's possible to escape '#'-signs with a backslash e.g. "\#". As the action lexer sees these they get translated to simple '#' characters.

A template grammar file for C++

header "pre_include_hpp" {
    // gets inserted before antlr generated includes in the header file
}
header "post_include_hpp" {
    // gets inserted after antlr generated includes in the header file
     // outside any generated namespace specifications
}

header "pre_include_cpp" {
    // gets inserted before the antlr generated includes in the cpp file
}

header "post_include_cpp" {
    // gets inserted after the antlr generated includes in the cpp file
}

header {
    // gets inserted after generated namespace specifications in the header
    // file. But outside the generated class.
}

options {
   language="Cpp";
    namespace="something";      // encapsulate code in this namespace
//  namespaceStd="std";         // cosmetic option to get rid of long defines
                                // in generated code
//  namespaceAntlr="antlr";     // cosmetic option to get rid of long defines
                                // in generated code
    genHashLines = true;        // generated #line's or turn it off.
}

{
   // global stuff in the cpp file
   ...
}
class MyParser extends Parser;
options {
   exportVocab=My;
}
{
   // additional methods and members
   ...
}
... rules ...

{
   // global stuff in the cpp file
   ...
}
class MyLexer extends Lexer;
options {
   exportVocab=My;
}
{
   // additional methods and members
   ...
}
... rules ...

{
   // global stuff in the cpp file
   ...
}
class MyTreeParser extends TreeParser;
options {
   exportVocab=My;
}
{
   // additional methods and members
   ...
}
... rules ...

nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/doc/csharp-runtime.html000077500000000000000000000157321161462365500244050ustar00rootroot00000000000000 Notes for using the ANTLR C# Code Generator

C# Code Generator for ANTLR 2.x

Since the release of ANTLR 2.7.3, it has been possible to generate your Lexers, Parsers and TreeParsers in the ECMA-standard C# language developed by Microsoft. This feature extends the benefits of ANTLR's predicated-LL(k) parsing technology to applications and components running on the Microsoft .NET platform and, the Mono and dotGNU open-source C#/CLI platforms.

To be able to build and use the C# language Lexers, Parsers and TreeParsers, you will need to link to the ANTLR C# runtime library. The C# runtime model is based on the existing runtime models for Java and C++ and is thus immediately familiar. The C# runtime and the Java runtime in particular are very similar although there a number of subtle (and not so subtle) differences. Some of these result from differences in the respective runtime environments.

ANTLR C# support was contributed (and is maintained) by Kunle Odutola, Micheal Jordan and Anthony Oguntimehin.

Building the ANTLR C# Runtime

The ANTLR C# runtime source and build files are located in the lib/csharp subdirectory of the ANTLR distribution. This sub-directory is known as the ANTLR C# runtime directory. The first step in building the ANTLR C# runtime library is to ensure that ANTLR has been properly installed and built. This process is described in the ANTLR Installation Guide that comes with the distribution. Once ANTLR has been properly built, the ANTLR C# runtime can be built using any one of two distinct methods:

  • Using the Microsoft Visual Studio .NET development tool.

    A Visual Studio.NET solution file named antlr.net-runtime-2.7.<X>.sln is provided in the ANTLR C# runtime directory. This allows you to build the ANTLR C# runtime library and test it with a semi-complex grammar. The solution file references three Visual Studio .NET project files:

    • lib/csharp/src/antlr.runtime-2.7.<X>.csproj - for the ANTLR C# runtime library itself (where X is a version number),
    • lib/csharp/ASTFrame/antlr.astframe.csproj - for the ANTLR C# ASTFrame library (used for displaying ASTs) and,
    • examples/csharp/java/JavaParser.csproj - for the Java grammar project located within the ANTLR C# examples directory tree.

  • Using the freely available NAnt build tool.

    A build file named antlr.runtime.build is located in the ANTLR C# runtime directory. To build the ANTLR C# runtime, run

    nant build
    from a command shell in the ANTLR C# runtime directory. You can also run
    nant release
    nant docs
    to build a release version and documentation in lib/csharp/release.

All the example grammars located in the ANTLR C# examples directory - examples\csharp are also supplied with a NAnt build file. Once the ANTLR C# library has been built, you can test it by running

nant
from a command shell in any of the example directories.

Specifying Code Generation

You can instruct ANTLR to generate your Lexers, Parsers and TreeParsers using the C# code generator by adding the following entry to the global options section at the beginning of your grammar file.

{
    language  =  "CSharp";
}
After that things are pretty much the same as in the default java code generation mode. See the examples in examples/csharp for some illustrations.
  • TIP: If you are new to NAnt, ANTLR or the .NET platform, you might want to build your ANTLR projects with something similar to the NANT build files used for the C# examples. The build file for java example in particular also shows one way to automatically copy and reference both the antlr.runtime.dll and antlr.astframe.dll assemblies during your build.

C#-Specific ANTLR Options

  • header - specify additional using directives

    You can instruct the ANTLR C# code generator to include additional using directives in your generated Lexer/Parser/TreeParser by listing the directives within the header section which must be the first section at the beginning of your ANTLR grammar file. Please note that using directives are the only source code elements that can currently be safely included in the header section for C# code generation.

    header
    {
       using SymbolTable =  kunle.parser.SymbolTable;
       using kunle.compiler;
    }
    

  • namespace - specify an enclosing C# Namespace

    You can instruct the ANTLR C# code generator to place your Lexer/Parser/TreeParser in a specific C# namespace by adding a namespace option to either the global options section at the beginning of your ANTLR grammar file or, to the grammar options section for individual Lexers/Parsers/TreeParsers.

    {
       namespace  =  "kunle.smalltalk.parser";
    }
    

A Template C# ANTLR Grammar File

header 
{
    // gets inserted in the C# source file before any
    // generated namespace declarations
    // hence -- can only be using directives
}

options {
    language  = "CSharp";
    namespace = "something";          // encapsulate code in this namespace
    classHeaderPrefix = "protected"; // use to specify access level for generated class
}

{
   // global code stuff that will be included in the source file just before the 'MyParser' class below
   ...
}
class MyParser extends Parser;
options {
   exportVocab=My;
}
{
   // additional methods and members for the generated 'MyParser' class
   ...
}

... generated RULES go here ...

{
   // global code stuff that will be included in the source file just before the 'MyLexer' class below
   ...
}
class MyLexer extends Lexer;
options {
   exportVocab=My;
}
{
   // additional methods and members for the generated 'MyParser' class
   ...
}

... generated RULES go here ...

{
   // global code stuff that will be included in the source file just before the 'MyTreeParser' class below
   ...
}
class MyTreeParser extends TreeParser;
options {
   exportVocab=My;
}
{
   // additional methods and members for the generated 'MyParser' class
   ...
}

... generated RULES go here ...

nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/doc/err.html000066400000000000000000000347341161462365500222340ustar00rootroot00000000000000 Error Handling and Recovery

Error Handling and Recovery

All syntactic and semantic errors cause parser exceptions to be thrown. In particular, the methods used to match tokens in the parser base class (match et al) throw MismatchedTokenException. If the lookahead predicts no alternative of a production in either the parser or lexer, then a NoViableAltException is thrown. The methods in the lexer base class used to match characters (match et al) throw analogous exceptions.

ANTLR will generate default error-handling code, or you may specify your own exception handlers. Either case results (where supported by the language) in the creation of a try/catch block. Such try{} blocks surround the generated code for the grammar element of interest (rule, alternate, token reference, or rule reference). If no exception handlers (default or otherwise) are specified, then the exception will propagate all the way out of the parser to the calling program.

ANTLR's default exception handling is good to get something working, but you will have more control over error-reporting and resynchronization if you write your own exception handlers.

Note that the '@' exception specification of PCCTS 1.33 does not apply to ANTLR.

ANTLR Exception Hierarchy

ANTLR-generated parsers throw exceptions to signal recognition errors or other stream problems.  All exceptions derive from ANTLRException.   The following diagram shows the hierarchy:

ANTLRException.gif (14504 bytes)

Exception Description
ANTLRException Root of the exception hierarchy.  You can directly subclass this if you want to define your own exceptions unless they live more properly under one of the specific exceptions below.
CharStreamException Something bad that happens on the character input stream.  Most of the time it will be an IO problem, but you could define an exception for input coming from a dialog box or whatever.
CharStreamIOException The character input stream had an IO exception (e.g., CharBuffer.fill() can throw this).  If nextToken() sees this, it will convert it to a TokenStreamIOException.
RecognitionException A generic recognition problem with the input.  Use this as your "catch all" exception in your main() or other method that invokes a parser, lexer, or treeparser.  All parser rules throw this exception.
MismatchedCharException Thrown by CharScanner.match() when it is looking for a character, but finds a different one on the input stream.
MismatchedTokenException Thrown by Parser.match() when it is looking for a token, but finds a different one on the input stream.
NoViableAltException The parser finds an unexpected token; that is, it finds a token that does not begin any alternative in the current decision.
NoViableAltForCharException The lexer finds an unexpected character; that is, it finds a character that does not begin any alternative in the current decision.
SemanticException Used to indicate syntactically valid, but nonsensical or otherwise bogus input was found on the input stream.  This exception is thrown automatically by failed, validating semantic predicates such as:
a : A {false}? B ;

ANTLR generates:

match(A);
if (!(false)) throw new
  SemanticException("false");
match(B);

You can throw this exception yourself during the parse if one of your actions determines that the input is wacked.

TokenStreamException Indicates that something went wrong while generating a stream of tokens.
TokenStreamIOException Wraps an IOException in a TokenStreamException
TokenStreamRecognitionException Wraps a RecognitionException in a TokenStreamException so you can pass it along on a stream.
TokenStreamRetryException Signals aborted recognition of current token. Try to get one again. Used by TokenStreamSelector.retry() to force nextToken() of stream to re-enter and retry.  See the examples/java/includeFile directory.

This a great way to handle nested include files and so on or to try out multiple grammars to see which appears to fit the data.  You can have something listen on a socket for multiple input types without knowing which type will show up when.

The typical main or parser invoker has try-catch around the invocation:

    try {
       ...
    }
    catch(TokenStreamException e) {
      System.err.println("problem with stream: "+e);
    }
    catch(RecognitionException re) {
      System.err.println("bad input: "+re);
    }

Lexer rules throw RecognitionException, CharStreamException, and TokenStreamException.

Parser rules throw RecognitionException and TokenStreamException.

Modifying Default Error Messages With Paraphrases

The name or definition of a token in your lexer is rarely meaningful to the user of your recognizer or translator.  For example, instead of seeing

T.java:1:9: expecting ID, found ';'

you can have the parser generate:

T.java:1:9: expecting an identifier, found ';'

ANTLR provides an easy way to specify a string to use in place of the token name.  In the definition for ID, use the paraphrase option:

ID
options {
  paraphrase = "an identifier";
}
  : ('a'..'z'|'A'..'Z'|'_')
    ('a'..'z'|'A'..'Z'|'_'|'0'..'9')*
  ;

Note that this paraphrase goes into the token types text file (ANTLR's persistence file).  In other words, a grammar that uses this vocabulary will also use the paraphrase.

Parser Exception Handling

ANTLR generates recursive-descent recognizers. Since recursive-descent recognizers operate by recursively calling the rule-matching methods, this results in a call stack that is populated by the contexts of the recursive-descent methods. Parser exception handling for grammar rules is a lot like exception handling in a language like C++ or Java. Namely, when an exception is thrown, the normal thread of execution is stopped, and functions on the call stack are exited sequentially until one is encountered that wants to catch the exception. When an exception is caught, execution resumes at that point.

In ANTLR, parser exceptions are thrown when (a) there is a syntax error, (b) there is a failed validating semantic predicate, or (c) you throw a parser exception from an action.

In all cases, the recursive-descent functions on the call stack are exited until an exception handler is encountered for that exception type or one of its base classes (in non-object-oriented languages, the hierarchy of execption types is not implemented by a class hierarchy). Exception handlers arise in one of two ways. First, if you do nothing, ANTLR will generate a default exception handler for every parser rule. The default exception handler will report an error, sync to the follow set of the rule, and return from that rule. Second, you may specify your own exception handlers in a variety of ways, as described later.

If you specify an exception handler for a rule, then the default exception handler is not generated for that rule. In addition, you may control the generation of default exception handlers with a per-grammar or per-rule option.

Specifying Parser Exception-Handlers

You may attach exception handlers to a rule, an alternative, or a labeled element. The general form for specifying an exception handler is:


exception [label]
catch [exceptionType exceptionVariable]
  { action }
catch ...
catch ...

where the label is only used for attaching exceptions to labeled elements. The exceptionType is the exception (or class of exceptions) to catch, and the exceptionVariable is the variable name of the caught exception, so that the action can process the exception if desired. Here is an example that catches an exception for the rule, for an alternate and for a labeled element:


rule:   a:A B C
    |   D E
        exception // for alternate
          catch [RecognitionException ex] {
            reportError(ex.toString());
        }
    ;
    exception // for rule
    catch [RecognitionException ex] {
       reportError(ex.toString());
    }
    exception[a] // for a:A
    catch [RecognitionException ex] {
       reportError(ex.toString());
    }
  

Note that exceptions attached to alternates and labeled elements do not cause the rule to exit. Matching and control flow continues as if the error had not occurred. Because of this, you must be careful not to use any variables that would have been set by a successful match when an exception is caught.

Default Exception Handling in the Lexer

Normally you want the lexer to keep trying to get a valid token upon lexical error.   That way, the parser doesn't have to deal with lexical errors and ask for another token.  Sometimes you want exceptions to pop out of the lexer--usually when you want to abort the entire parsing process upon syntax error.  To get ANTLR to generate lexers that pass on RecognitionException's to the parser as TokenStreamException's, use the defaultErrorHandler=false grammar option.  Note that IO exceptions are passed back as TokenStreamIOException's regardless of this option.

Here is an example that uses a bogus semantic exception (which is a subclass of RecognitionException) to demonstrate blasting out of the lexer:

class P extends Parser;
{
public static void main(String[] args) {
        L lexer = new L(System.in);
        P parser = new P(lexer);
        try {
                parser.start();
        }
        catch (Exception e) {
                System.err.println(e);
        }
}
}

start : "int" ID (COMMA ID)* SEMI ;

class L extends Lexer;
options {
        defaultErrorHandler=false;
}

{int x=1;}

ID  : ('a'..'z')+ ;

SEMI: ';'
      {if ( expr )
       throw new
          SemanticException("test",
                            getFilename(),
                            getLine());} ;

COMMA:',' ;

WS  : (' '|'\n'{newline();})+
      {$setType(Token.SKIP);}
    ;

When you type in, say, "int b;" you get the following as output:

antlr.TokenStreamRecognitionException: test
Version: $Id: //depot/code/org.antlr/release/antlr-2.7.5/doc/err.html#1 $
nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/doc/glossary.html000066400000000000000000000557701161462365500233120ustar00rootroot00000000000000 ANTLR-centric Language Glossary

ANTLR-centric Language Glossary

Terence Parr

This glossary defines some of the more important terms used in the ANTLR documentation. I have tried to be very informal and provide references to other pages that are useful. For another great source of information about formal computer languages, see Wikipedia.

Ambiguous

A language is ambiguous if the same sentence or phrase can be interpreted in more than a single way. For example, the following sentence by Groucho Marx is easily interpreted in two ways: "I once shot an elephant in my pajamas. How he got in my pajamas I'll never know!" In the computer world, a typical language ambiguity is the if-then-else ambiguity where the else-clause may be attached to either the most recent if-then or an older one. Reference manuals for computer languages resolve this ambiguity by stating that else-clauses always match up with the most recent if-then.

A grammar is ambiguous if the same input sequence can be derived in multiple ways. Ambiguous languages always yield ambiguous grammars unless you can find a way to encode semantics (actions or predicates etc...) that resolve the ambiguity. Most language tools like ANTLR resolve the if-then-else ambiguity by simply choosing to match greedily (i.e., as soon as possible). This matches the else with the most recent if-then. See nondeterministic.

ANTLR

ANother Tool for Language Recognition, a predicated-LL(k) parser generator that handles lexers, parsers, and tree parsers. ANTLR has been available since 1990 and led to a resurgence of recursive-descent parsing after decades dominated by LR and other DFA-based strategies.

AST

Abstract Syntax Tree. ASTs are used as internal representations of an input stream, normally constructed during a parsing phase. Because AST are two-dimensional trees they can encode the structure (as determined by the parser) of the input as well as the input symbols.

A homogeneous AST is in one in which the physical objects are all of the same type; e.g., CommonAST in ANTLR. A heterogeneous tree may have multiple types such as PlusNode, MultNode etc...

An AST is not a parse tree, which encodes the sequence of rules used to match input symbols. See What's the difference between a parse tree and an abstract syntax tree (AST)? Why doesn't ANTLR generate trees with nodes for grammar rules like JJTree does?.

An AST for input 3+4 might be represented as

   +
  / \
 3   4
or more typically (ala ANTLR) in child-sibling form:
+
|
3--4
Operators are usually subtree roots and operands are usually leaves.

Bit set

Bit sets are an extremely efficient representation for dense integer sets. You can easily encode sets of strings also by mapping unique strings to unique integers. ANTLR uses bitsets for lookahead prediction in parsers and lexers. Simple bit set implementations do not work so well for sparse sets, particularly when the maximum integer stored in the set is large.

ANTLR's bit set represents membership with a bit for each possible integer value. For a maximum value of n, a bit set needs n/64 long words or n/8 bytes. For ASCII bit sets with a maximum value of 127, you only need 16 bytes or 2 long words. UNICODE has a max value of \uFFFF or 65535, requiring 8k bytes, and these sets are typically sparse. Fortunately most lexers only need a few of these space inefficient (but speedy) bitsets and so it's not really a problem.

Child-sibling Tree

A particularly efficient data structure for representing trees. See AST.

Context-free grammar

A grammar where recognition of a particular construct does not depend on whether it is in a particular syntactic context. A context-free grammar has a set of rules like
stat : IF expr THEN stat
     | ...
     ;
where there is no restriction on when the IF alternative may be applied--if you are in rule stat, you may apply the alternative.

Context-sensitive

A grammar where recognition of a particular construct may depend on a syntactic context. You never see these grammars in practice because they are impractical (Note, an Earley parser is O(n^3) worst-case for context-free grammars). A context-free rule looks like:
Α → γ
but a context-sensitive rule may have context on the left-side:
αΑβ → αγβ
meaning that rule Α may only be applied (converted to γ) in between α and β.

In an ANTLR sense, you can recognize context-sensitive constructs with a semantic predicate. The action evaluates to true or false indicating the validity of applying the alternative.

See Context-sensitive grammar.

DFA

Deterministic Finite Automata. A state machine used typically to formally describe lexical analyzers. lex builds a DFA to recognize tokens whereas ANTLR builds a recursive descent lexer similar to what you would build by hand. See Finite state machine and ANTLR's lexer documentation.

FIRST

The set of symbols that may be matched on the left-edge of a rule. For example, the FIRST(decl) is set {ID, INT} for the following:
decl : ID ID SEMICOLON
     | INT ID SEMICOLON
     ;
The situation gets more complicated when you have optional constructs. The FIRST(a) below is {A,B,C}
a : (A)? B
  | C
  ;
because the A is optional and the B may be seen on the left-edge.

Naturally k>1 lookahead symbols makes this even more complicated. FIRST_k must track sets of k-sequences not just individual symbols.

FOLLOW

The set of input symbols that may follow any reference to the specified rule. For example, FOLLOW(decl) is {RPAREN, SEMICOLON):
methodHead : ID LPAREN decl RPAREN ;
var : decl SEMICOLON ;
decl : TYPENAME ID ;
because RPAREN and SEMICOLON both follow references to rule decl. FIRST and FOLLOW computations are used to analyze grammars and generate parsing decisions.

This grammar analysis all gets very complicated when k>1.

Grammar

A finite means of formally describing the structure of a possibly infinite language. Parser generators build parsers that recognize sentences in the language described by a grammar. Most parser generators allow you to add actions to be executed during the parse.

Hoisting

Semantic predicates describe the semantic context in which a rule or alternative applies. The predicate is hoisted into a prediction expression. Hoisting typically refers to pulling a predicate out of its enclosing rule and into the prediction expression of another rule. For example,
decl     : typename ID SEMICOLON
         | ID ID SEMICOLON
         ;
typename : {isType(LT(1))}? ID
         ;
The predicate is not needed in typename as there is no decision, however, rule decl needs it to distinguish between its two alternatives. The first alternative would look like:
if ( LA(1)==ID && isType(LT(1)) ) {
  typename();
  match(ID);
  match(SEMICOLON);
}
PCCTS 1.33 did, but ANTLR currently does not hoist predicates into other rules.

Inheritance, grammar

The ability of ANTLR to define a new grammar as it differs from an existing grammar. See the ANTLR documentation.

LA(n)

The nth lookahead character, token type, or AST node type depending on the grammar type (lexer, parser, or tree parser respectively).

Left-prefix, left factor

A common sequence of symbols on the left-edge of a set of alternatives such as:
a : A B X
  | A B Y
  ;
The left-prefix is A B, which you can remove by left-factoring:
a : A B (X|Y)
  ;
Left-factoring is done to reduce lookahead requirements.

Literal

Generally a literal refers to a fixed string such as begin that you wish to match. When you reference a literal in an ANTLR grammar via "begin", ANTLR assigns it a token type like any other token. If you have defined a lexer, ANTLR provides information about the literal (type and text) to the lexer so it may detect occurrences of the literal.

Linear approximate lookahead

An approximation to full lookahead (that can be applied to both LL and LR parsers) for k>1 that reduces the complexity of storing and testing lookahead from O(n^k) to O(nk); exponential to linear reduction. When linear approximate lookahead is insufficient (results in a nondeterministic parser), you can use the approximate lookahead to attenuate the cost of building the full decision.

Here is a simple example illustrating the difference between full and approximate lookahead:

a : (A B | C D)
  | A D
  ;
This rule is LL(2) but not linear approximate LL(2). The real FIRST_2(a) is {AB,CD} for alternative 1 and {AD} for alternative 2. No intersection, so no problem. Linear approximate lookahead collapses all symbols at depth i yielding k sets instead of a possibly n^k k-sequences. The approximation (compressed) sets are {AB,AD,CD,CB} and {AD}. Note the introduction of the spurious k-sequences AD and CB. Unfortunately, this compression introduces a conflict upon AD between the alternatives. PCCTS did full LL(k) and ANTLR does linear approximate only as I found that linear approximate lookahead works for the vast majority of parsing decisions and is extremely fast. I find one or two problem spots in a large grammar usually with ANTLR, which forces me to reorganize my grammar in a slightly unnatural manner. Unfortunately, your brain does full LL(k) and ANTLR does a slightly weaker linear approximate lookahead--a source of many (invalid) bug reports ;)

This compression was the subject of my doctoral dissertation (PDF 477k) at Purdue.

LL(k)

Formally, LL(k) represents a class of parsers and grammars that parse symbols from left-to-right (beginning to end of input stream) using a leftmost derivation and using k symbols of lookahead. A leftmost derivation is one in which derivations (parses) proceed by attempting to replace rule references from left-to-right within a production. Given the following rule
stat : IF expr THEN stat
     | ...
     ;
an LL parser would match the IF then attempt to parse expr rather than a rightmost derivation, which would attempt to parse stat first.

LL(k) is synonymous with a "top-down" parser because the parser begins at the start symbol and works its way down the derivation/parse tree (tree here means the stack of method activations for recursive descent or symbol stack for a table-driven parser). A recursive-descent parser is particular implementation of an LL parser that uses functions or method calls to implement the parser rather than a table.

ANTLR generates predicate-LL(k) parsers that support syntactic and sematic predicates allowing you to specify many context-free and context-sensitive grammars (with a bit of work).

LT(n)

In a parser, this is the nth lookahead Token object.

Language

A possibly infinite set of valid sentences. The vocabulary symbols may be characters, tokens, and tree nodes in an ANTLR context.

Lexer

A recognizer that breaks up a stream of characters into vocabulary symbols for a parser. The parser pulls vocabulary symbols from the lexer via a queue.

Lookahead

When parsing a stream of input symbols, a parser has matched (and no longer needs to consider) a portion of the stream to the left of its read pointer. The next k symbols to the right of the read pointer are considered the fixed lookahead. This information is used to direct the parser to the next state. In an LL(k) parser this means to predict which path to take from the current state using the next k symbols of lookahead.

ANTLR supports syntactic predicates, a manually-specified form of backtracking that effectively gives you infinite lookahead. For example, consider the following rule that distinguishes between sets (comma-separated lists of words) and parallel assignments (one list assigned to another):

stat:   ( list "=" )=> list "=" list
    |   list
    ;
If a list followed by an assignment operator is found on the input stream, the first production is predicted. If not, the second alternative production is attempted.

nextToken

A lexer method automatically generated by ANTLR that figures out which of the lexer rules to apply. For example, if you have two rules ID and INT in your lexer, ANTLR will generate a lexer with methods for ID and INT as well as a nextToken method that figures out which rule method to attempt given k input characters.

NFA

Nondeterministic Finite Automata. See Finite state machine.

Nondeterministic

A parser is nondeterministic if there is at least one decision point where the parser cannot resolve which path to take. Nondeterminisms arise because of parsing strategy weaknesses.
  • If your strategy works only for unambiguous grammars, then ambiguous grammars will yield nondeterministic parsers; this is true of the basic LL, LR strategies. Even unambiguous grammars can yield nondeterministic parsers though. Here is a nondeterministic LL(1) grammar:
    decl : ID ID SEMICOLON
         | ID SEMICOLON
         ;
    
    Rule decl is, however, LL(2) because the second lookahead symbol (either ID or SEMICOLON) uniquely determines which alternative to predict. You could also left-factor the rule to reduce the lookahead requirements.

  • If you are willing to pay a performance hit or simply need to handle ambiguous grammars, you can use an Earley parser or a Tomita parser (LR-based) that match all possible interpretations of the input, thus, avoiding the idea of nondeterminism altogether. This does present problems when trying to execute actions, however, because multiple parses are, in effect, occurring in parallel.

Note that a parser may have multiple decision points that are nondeterministic.

Parser

A recognizer that applies a grammatical structure to a stream of vocabulary symbols called tokens.

Predicate, semantic

A semantic predicate is a boolean expression used to alter the parse based upon semantic information. This information is usually a function of the constructs/input that have already been matched, but can even be a flag that turns on and off subsets of the language (as you might do for a grammar handling both K&R and ANSI C). One of the most common semantic predicates uses a symbol table to help distinguish between syntactically, but semantically different productions. In FORTRAN, array references and function calls look the same, but may be distinguished by checking what the type of the identifier is.
expr : {isVar(LT(1))}? ID LPAREN args RPAREN  // array ref
     | {isFunction(LT(1))}? ID LPAREN args RPAREN // func call
     ;

Predicate, syntactic

A selective form of backtracking used to recognize language constructs that cannot be distinguished without seeing all or most of the construct. For example, in C++ some declarations look exactly like expressions. You have to check to see if it is a declaration. If it parses like a declaration, assume it is a declaration--reparse it with "feeling" (execute your actions). If not, it must be an expression or an error:
stat : (declaration) => declaration
     | expression
     ;

Production

An alternative in a grammar rule.

Protected

A protected lexer rule does not represent a complete token--it is a helper rule referenced by another lexer rule. This overloading of the access-visibility Java term occurs because if the rule is not visible, it cannot be "seen" by the parser (yes, this nomenclature sucks).

Recursive-descent

See LL(k).

Regular

A regular language is one that can be described by a regular grammar or regular expression or accepted by a DFA-based lexer such as those generated by lex. Regular languages are normally used to describe tokens.

In practice you can pick out a regular grammar by noticing that references to other rules are not allowed except at the end of a production. The following grammar is regular because the reference to B occurs at the right-edge of rule A.

A : ('a')+ B ;
B : 'b' ;
Another way to look at it is, "what can I recognize without a stack (such as a method return address stack)?".

Regular grammars cannot describe context-free languages, hence, LL or LR based grammars are used to describe programming languages. ANTLR is not restricted to regular languages for tokens because it generates recursive-descent lexers. This makes it handy to recognize HTML tags and so on all in the lexer.

Rule

A rule describes a partial sentence in a language such as a statement or expression in a programming language. Rules may have one or more alternative productions.

Scanner

See Lexer.

Semantics

See What do "syntax" and "semantics" mean and how are they different?.

Subrule

Essentially a rule that has been expanded inline. Subrules are enclosed in parenthesis and may have suffixes like star, plus, and question mark that indicate zero-or-more, one-or-more, or optional. The following rule has 3 subrules:
a : (A|B)+ (C)* (D)?
  ;

Syntax

See What do "syntax" and "semantics" mean and how are they different?.

Token

A vocabulary symbol for a language. This term typically refers to the vocabulary symbols of a parser. A token may represent a constant symbol such as a keyword like begin or a "class" of input symbols like ID or INTEGER_LITERAL.

Token stream

See Token Streams in the ANTLR documentation.

Tree

See AST and What's the difference between a parse tree and an abstract syntax tree (AST)? Why doesn't ANTLR generate trees with nodes for grammar rules like JJTree does?.

Tree parser

A recognizer that applies a grammatical structure to a two-dimensional input tree. Grammatical rules are like an "executable comment" that describe the tree structure. These parsers are useful during translation to (1) annotate trees with, for example, symbol table information, (2) perform tree rewrites, and (3) generate output.

Vocabulary

The set of symbols used to construct sentences in a language. These symbols are usually called tokens or token types. For lexers, the vocabulary is a set of characters.

Wow

See ANTLR.
nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/doc/hidden.stream.gif000066400000000000000000000071231161462365500237620ustar00rootroot00000000000000GIF89ay ,y@H*\ȰÇ#JHŋ3j(GlCCDِG!l6\IE2}Yp'̓B$=9Sȥ7? #Q"jb՞#~AթOur(Q Q4 mږ 2ژakTcl+ߖ3y>Ô#K+5+\:MJڽx+k:y; HsO>wk><߁KNԳGٛ-vs>8bon>2t+C6?xwLx(F&`w 62ANhU!f #h$XFn)8Q(#f,UQ6\X#AvdYF9܌G4VH}ۍ9!#yJep[reW]@XUJdKGd3TNbgw'6 bQ堨55$VߣF*)ut٤fh rڣT`2X*RKZ-}q6*]o~WkfYVݢW%a`i覑jEr֧[Vym}kOE{hg^R(螻ɾƆmgW&k{F::s4'bw{ iݦi};6gw+Oj14kzʨ#@doDmtF'͡L7=:;-uQ8MW=`X֝"\\w'$q`a_or+,&o"%Фpd4}Znu{vK[9Rhb_n)5.z騟:U3,{܁>ūn{~m',|h׻~fÃ2~ʿQ?oXꋽz_,R,8m7F1f~ʙC^^ýVI~J#$Iz@ˉhr^iy_ @~90aH`9$_qa!M8]j3!&D` Hi-D -!xY/rp;#G$8Zk 6=8$Etil7 U[LPN;I%e2MuhZIJ9 TڢIIb-$I+_ZT+s[@bӗ;0cfC|4'Rش6nnsf3Irs::v3p*^8iɓ$$O1'@ЂRMhIw: D'P 5<Q]G&j2!H7RlD]IqYL4x i@0d! OwZg"UeJU:)F! RRMHHJՈx Rݪ,3.STh&2?R eo-en bYz f7[,g9CRBot%(KI[IL%%nwst'3Тfy@Uuk,hnӡ^AWx ,FPhr6%Vncjd:Yt;hE}Wdc2Y)5x1`!ߺW+Fw-ۓBMxI,,Rdb)#{Y2/\/`G2voXY RՎ;rKquN9̆gq(d}PFYUE#ڸY uLuŠ13dLe2YFn|%\{k3/IJ!>I]1#ղ&ORg7񯑮GhGK8m~G&nzVbY1j1ssU ɝ\57&JBVK^ H_}ǒ3Eg]Xߝm|7&qX ۘ7j>rSnT ;pv|qp}Vu6n>WwsApȕ]sq[zY@ >|SR}N{Kߛ%o jC6SKC$Q) E"Z`WT3`,cMle訰c%}]x;ڃ듈NˋAck,d[F|5E^򦥼s&[Wy}S"۟GXzʦUf[О}SmgY[ ?|{zЏONM_}mO~O6 vvAoAK''PpFd⅀8 M RNcxT9xR;ķn;3FG5 ;1pASzɑ'e~x2;hLlBRVt.x?FduV'0SH*''ov|wC)a|s8)H25*8A4'%M%d36t`KfS#f,y#78bCH[E-eV&cH7<9HU%W$35òhAH4uk‚gwxoOAx9K&%(J^Ňц[U]V[kDVLXj%XjkYhr"Zx8vVtHRK[(Q&Xws2 bI',ؘQX,hȎ%Xe؎38xOƏBxY81dHJ ِ )!Q21h{ zzّaau9w(Z'(y(&RxY76I78Y8:ٓ93:dq{F9;s{wG7sGuTNUXYK;nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/doc/index.html000066400000000000000000000477061161462365500225560ustar00rootroot00000000000000 ANTLR Reference Manual

ANTLR
Reference Manual

Credits

Project Lead and Supreme Dictator
Terence Parr
University of San Francisco

Support from
jGuru.com
Your View of the Java Universe

Help with initial coding
John Lilly, Empathy Software

C++ code generator by
Peter Wells and Ric Klaren

C# code generation by
Micheal Jordan, Kunle Odutola and Anthony Oguntimehin.

Python's universe has been extended by
Wolfgang Häfelinger and Marq Kole

Infrastructure support from Perforce:
The world's best source code control system

Substantial intellectual effort donated by
Loring Craymer
Monty Zukowski
Jim Coker
Scott Stanchfield
John Mitchell
Chapman Flack (UNICODE, streams)

Source changes for Eclipse and NetBeans by
Marco van Meegen and Brian Smith

 

ANTLR Version 2.7.5
December 22, 2004

What's ANTLR

ANTLR, ANother Tool for Language Recognition, (formerly PCCTS) is a language tool that provides a framework for constructing recognizers, compilers, and translators from grammatical descriptions containing Java, C++, or C# actions [You can use PCCTS 1.xx to generate C-based parsers].

Computer language translation has become a common task. While compilers and tools for traditional computer languages (such as C or Java) are still being built, their number is dwarfed by the thousands of mini-languages for which recognizers and translators are being developed. Programmers construct translators for database formats, graphical data files (e.g., PostScript, AutoCAD), text processing files (e.g., HTML, SGML).  ANTLR is designed to handle all of your translation tasks.

Terence Parr has been working on ANTLR since 1989 and, together with his colleagues, has made a number of fundamental contributions to parsing theory and language tool construction, leading to the resurgence of LL(k)-based recognition tools.

Here is a chronological history and credit list for ANTLR/PCCTS.

See ANTLR software rights.

Check out Getting started for a list of tutorials and get your questions answered at the ANTLR FAQ at jguru.com

See also http://www.ANTLR.org and glossary.

If you are looking for the previous main version (PCCTS 1.33) of ANTLR rather than the new Java-based version, see Getting started with PCCTS.


Download ANTLR.

ANTLR 2.7.5 release notes

ANTLR Meta-Language

Lexical Analysis with ANTLR

ANTLR Tree Parsers

Token Streams

Token Vocabularies

Error Handling and Recovery

Java Runtime Model

C++ Runtime model

C# Runtime model

Python Runtime model

ANTLR Tree Construction

Grammar Inheritance

Options

nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/doc/inheritance.html000066400000000000000000000165741161462365500237370ustar00rootroot00000000000000 ANTLR Specification: Grammar Inheritance

Grammar Inheritance

Object-oriented programming languages such as C++ and Java allow you to define a new object as it differs from an existing object, which provides a number of benefits. "Programming by difference" saves development/testing time and future changes to the base or superclass are automatically propagated to the derived or subclass.

Introduction and motivation

Allowing the ANTLR programmer to define a new grammar as it differs from an existing grammar provides significant benefits. Development time goes down because the programmer only has to specify the rules that are different or that need to be added. Further, when the base grammar changes, all derived grammars will automatically reflect the change. Grammar inheritance is also an interesting way to change the behavior of an existing grammar. A rule or set of rules can be respecified with the same structure, but with different actions.

The most obvious use of grammar inheritance involves describing multiple dialects of the same language. Previous solutions would require multiple grammar versions or a single grammar that recognized all dialects at once (using semantics to constrain the input to a single dialect). With grammar inheritance, one could write a base grammar for the common components and then have a derived grammar for each dialect. Code sharing would occur at the grammar and output parser class level.

Consider a simple subset of English:

class PrimarySchoolEnglish;

sentence
    :   subject predicate
    ;
subject
    :   NOUN
    ;
predicate
    :   VERB
    ;

This grammar recognizes sentences like: Dilbert speaks.

To extend this grammar to include sentences manageable by most American college students, we might add direct objects to the definition of a sentence. Rather than copying and modifying the PrimarySchoolEnglish grammar, we can simply extend it:


class AmericanCollegeEnglish extends
        PrimarySchoolEnglish;

sentence
    :   subject predicate object
    ;
object
    :   PREPOSITION ARTICLE NOUN
    ;

This grammar describes sentences such as Dilbert speaks to a dog. While this looks trivial to implement (just add the appropriate extends clause in Java to the output parser class), it involves grammar analysis to preserve grammatical correctness. For example, to generate correct code, ANTLR needs to pull in the base grammar and modify it according to the overridden rules. To see this, consider the following grammar for a simple language:

class Simple;

stat:   expr ASSIGN expr
    |   SEMICOLON
    ;

expr:   ID
    ;

Clearly, the ID token is the lookahead set that predicts the recognition of the first alternative of stat. Now, examine a derived dialect of Simple:

class Derived extends Simple;

expr:   ID
    |   INT
    ;  

In this case, { ID, INT } predicts the first alternative of stat. Unfortunately, a derived grammar affects the recognition of rules inherited from the base grammar! ANTLR must not only override expr in Derived, but it must override stat.

Determining which rules in the base grammar are affected is not easy, so our implementation simply makes a copy of the base grammar and generates a whole new parser with the appropriate modifications. From the programmer's perspective, code/grammar sharing would have occurred, however, from an implementation perspective a copy of the base grammar would be made.

Functionality

Grammar Derived inherits from Grammar Base all of the rules, options, and actions of Base including formal/actual rule parameters and rule actions. Derived may override any option or rule and specify new options, rules, and member action. The subgrammar does not inherit actions outside of classes or file options. Consider rule Base defined as:

class Base extends Parser;
options {
  k = 2;
}
{
  int count = 0;
}
a : A B {an-action}
  | A C
  ;
c : C
  ;

A new grammar may be derived as follows:

class Derived extends Base;
options {
  k = 3;        // need more lookahead; override
  buildAST=true;// add an option
}
{
  int size = 0; // override; no 'count' def here
}
a : A B {an-action}
  | A C {an-extra-action}
  | Z           // add an alt to rule a
  ;
b : a
  | A B D       // requires LL(3)
  ;

ANTLR will actually interpret the subgrammar as if you had typed:

class Derived extends Parser;
options {
        k=3;
        buildAST=true;
}
{
  int size = 0; // override Base action
}
a : A B {an-action}
  | A C {an-extra-action}
  | Z           // add an alt to rule a
  ;

b : a
  | A B D       // requires LL(3)
  ;

// inherited from grammar Base
c : C
  ;

Rules may be overridden to change their signatures such as their parameters or return types:

class Base extends Parser;
a[int x] returns [int y]
  : A
  ;

class Derived extends Base;
a[float z]
  : A
  ;

ANTLR will generate a warning, however:

warning: rule Derived.a has different signature than Base.a

Because of this ability, the subgrammars do not actually inherit, in the Java-sense, from the supergrammar.  Different signatures on the generated methods would prevent the parser from compiling.

Where Are Those Supergrammars?

The set of potential "supergrammars" available to some grammar P includes any other grammar in the same file as P and any listed on the ANTLR command line with

-glib f1.g;f2.g

where the files must include path names if they are located in another directory.

How is supergrammar P found? The grammars defined in the supergrammar list are read in and an inheritance hierarchy is constructed; any repeated grammar definition in this is ignored.  The grammars in the normally specified grammar file are also included in the hierarchy.  Incomplete hierarchies result in an error message from ANTLR.  Grammars in the same file as P are given precedence over those obtained from other files.

The type of grammar (Lexer,Parser,TreeParser) is determined by the type of the highest grammar in the inheritance chain.

Error Messages

ANTLR generates a file called expandedT.g, given a grammar input file (not the -glib files) called T.g.  All error messages are relative to this as you really want to see the whole grammar when dealing with ambiguities etc...  In the future, we may have a better solution.

Version: $Id: //depot/code/org.antlr/release/antlr-2.7.5/doc/inheritance.html#1 $ nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/doc/j-guru-blue.jpg000066400000000000000000000176261161462365500234170ustar00rootroot00000000000000JFIFHH Photoshop 3.08BIMHH8BIM x8BIM8BIM 8BIM' 8BIMH/fflff/ff2Z5-8BIMp8BIM@@8BIM 8BIM DFPB@(JFIFHH&File written by Adobe Photoshop 5.2Adobed            PF"?   3!1AQa"q2B#$Rb34rC%Scs5&DTdE£t6UeuF'Vfv7GWgw5!1AQaq"2B#R3$brCScs4%&5DTdEU6teuFVfv'7GWgw ?TI%"67VW, n(ή?Y.~[qXwmYn6|~gO!!sG,cdÈBG`I$6U;+[N`0\?9zVfGVg{Ϻ ;]zn1z>v C_ݹ.;}m*Wߐ+=@?غ ef--8heˈcMU6|cS4bTs9]M"buc}muV>sJl|zqm4424yǕ=H#RI$VP껄c<༓Lqu-I&n~bO]:NCi.Nyo"q]N̏k}GP=klgJMoJuo{sO2V|"XvXױyU{3:E6Ɲw"]}*^Աq;j ?Iv-3􎙑6m/ߦ~[]_-_⎼nnn-iwk}5[}]̦ה.8IC1WOq%>Mm'X׷oIf=MഒI/E5o=ޮRz.ۿqsw*3dXmamq;A+kX[7?|eSq.k'9lIO=Fnf7+aKҨO+|ֱ5[j$8BIM XICC_PROFILE HLinomntrRGB XYZ  1acspMSFTIEC sRGB-HP cprtP3desclwtptbkptrXYZgXYZ,bXYZ@dmndTpdmddvuedLview$lumimeas $tech0 rTRC< gTRC< bTRC< textCopyright (c) 1998 Hewlett-Packard CompanydescsRGB IEC61966-2.1sRGB IEC61966-2.1XYZ QXYZ XYZ o8XYZ bXYZ $descIEC http://www.iec.chIEC http://www.iec.chdesc.IEC 61966-2.1 Default RGB colour space - sRGB.IEC 61966-2.1 Default RGB colour space - sRGBdesc,Reference Viewing Condition in IEC61966-2.1,Reference Viewing Condition in IEC61966-2.1view_. 
\XYZ L VPWmeassig CRT curv #(-27;@EJOTY^chmrw| %+28>ELRY`gnu| &/8AKT]gqz !-8COZfr~ -;HUcq~ +:IXgw'7HYj{+=Oat 2FZn  % : O d y  ' = T j " 9 Q i  * C \ u & @ Z t .Id %A^z &Ca~1Om&Ed#Cc'Ij4Vx&IlAe@e Ek*Qw;c*R{Gp@j>i  A l !!H!u!!!"'"U"""# #8#f###$$M$|$$% %8%h%%%&'&W&&&''I'z''( (?(q(())8)k))**5*h**++6+i++,,9,n,,- -A-v--..L.../$/Z///050l0011J1112*2c223 3F3334+4e4455M555676r667$7`7788P8899B999:6:t::;-;k;;<' >`>>?!?a??@#@d@@A)AjAAB0BrBBC:C}CDDGDDEEUEEF"FgFFG5G{GHHKHHIIcIIJ7J}JK KSKKL*LrLMMJMMN%NnNOOIOOP'PqPQQPQQR1R|RSS_SSTBTTU(UuUVV\VVWDWWX/X}XYYiYZZVZZ[E[[\5\\]']x]^^l^__a_``W``aOaabIbbcCccd@dde=eef=ffg=ggh?hhiCiijHjjkOkklWlmm`mnnknooxop+ppq:qqrKrss]sttptu(uuv>vvwVwxxnxy*yyzFz{{c{|!||}A}~~b~#G k͂0WGrׇ;iΉ3dʋ0cʍ1fΏ6n֑?zM _ɖ4 uL$h՛BdҞ@iءG&vVǥ8nRĩ7u\ЭD-u`ֲK³8%yhYѹJº;.! zpg_XQKFAǿ=ȼ:ɹ8ʷ6˶5̵5͵6ζ7ϸ9к<Ѿ?DINU\dlvۀ܊ݖޢ)߯6DScs 2F[p(@Xr4Pm8Ww)Km&File written by Adobe Photoshop 5.2Adobed         ""   PF   s!1AQa"q2B#R3b$r%C4Scs5D'6Tdt& EFVU(eufv7GWgw8HXhx)9IYiy*:JZjzm!1AQa"q2#BRbr3$4CS%cs5DT &6E'dtU7()󄔤euFVfvGWgw8HXhx9IYiy*:JZjz ?ثU+if`R A n2"Omg2c|_5W%}s)+Cp0TU@W>)3Xq>(k1C cثV){{յ#On;[IUq^ldvz|r@\(3S 1F 2؛i 3<,ѷ1,=Xc桩(Tتvм1*N, "1Hno^/7P[u^iYVGrIĂSdN "`M #w/Tr\;_/nS1/M未Yll*ܦ.$lM쟣+s*lثAA4Y +53#N.N>c}п?*B{'VVhr㔜MN+NQ(JE2 VMz8ۃ^?e4捇^ 1zfypy&]S _SV5ҍwEX7?sjۛ|*9NT),ت~PX/;Kk/`\<7'VhY}^Ӯef0Ky}H} c>]Ln[5EB1WДyUu켻ura"v9_'"EOɊUl6Fos +H7z=XLU-oOJ}DoRTYXZSXefpp{||{ّ`YSLF@93-p&` P@0 g h k khmlnpio s"u"vj!m%r,y-{-z,r88@BCCDZ!,gs@ H*\ȰÇ CFAIS ?gc9cڰÈ"BX "K2s f?jM8Νy󑤖,B!.Fd͘9vHRرb!a& 4jiS-ƍ| }y֖yeS͑5hҀb"<ȐF%Hvf5Nf*j#I*9,d5[Wm Y/f58 }z5QA,-IF(,p h@[<Ѓ (b6`Æ8$`b b872Ό4,@iH&9S3OZC ,M$4Ad2)V e5e Ȝ An>И)P5^ BӰ)H@4 e$ 93=ez9(:7QF}jxN$'XtFyTIouA>IXQzEaNp„+EF  +@ϸ.TFWJ VWD;8ARekDo%VAG,qլc%%2ٕ쐃zH,c!LCŸ*2-0ϝjC7G@=cs$D t* F=!hA 60NlUM0{h^4mL-/052RXB r` $0@8ŏ^K$3I9 BP.B؈-#.2n2%i43%c;3i`*3 42nSJtJ% jGj.xz'~d5BY4,j203n?ăȜ ĀbG0.a-";d7t0iaBt(D!X= Cqn-sH,l+ ZH3|$0|8NQ28i ne E#r#:!Er#B"j#E)X  "^vQ_ Fvdeh*Z  /{! 
8DBxLvPIJ SmD3s "RX]<_"^b\ EB?p2yH4yE:1 /p73)r"h"ߠ ih$}C9&t eTH:Y%gc 7@'bT67 س990jB}IG-aO:GӐ`-0  Аr 6/ M}Ԧ5h @0Ј$N< OP1* ;j i{PԦs@"1J(Dd<9M"6Z0!݁o`6lBp6ֶ6EX\7Ma#D94vYzEF,5f1Z9FSrc !(,fFwn$[&7!po0b{[w ND(H3 i8cHJ MpC$'bߓqO  /o A%-Qq:_=`H1{ Qq4bnHA i|-6 "o 5L)g,yS.B2/DSVK%i P` 9bPDzA %.ћy2 g)b{Q-bF0d!!ϛJz {WR$IJN⼐ZClkr]eaV'aDBޚ"$%L1@,hRYt~AAm-Zcխok\(׹um?k.UF6)qf<1 |48 = ~#ٖO҆+(t9RX*["@bk@ȑ_ˉPboARBbކM8:ZT 0+EB+@ڎIuqtDeR~mx ,*? 㫤yx&7eAIczd`㩞Z!cJ ju(9yaCbQ&qO")ף0R ;PO 9A%0 !ƣ *VeU&U*ä+# 8$C>zc>V =4I z;nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/doc/lexer.html000066400000000000000000002253471161462365500225650ustar00rootroot00000000000000 Lexical Analysis with ANTLR

Lexical Analysis with ANTLR

A lexer (often called a scanner) breaks up an input stream of characters into vocabulary symbols for a parser, which applies a grammatical structure to that symbol stream. Because ANTLR employs the same recognition mechanism for lexing, parsing, and tree parsing, ANTLR-generated lexers are much stronger than DFA-based lexers such as those generated by DLG (from PCCTS 1.33) and lex.

The increase in lexing power comes at the cost of some inconvenience in lexer specification and indeed requires a serious shift your thoughts about lexical analysis. See a comparison of LL(k) and DFA-based lexical analysis.

ANTLR generates predicated-LL(k) lexers, which means that you can have semantic and syntactic predicates and use k>1 lookahead. The other advantages are:

  • You can actually read and debug the output as its very similar to what you would build by hand.
  • The syntax for specifying lexical structure is the same for lexers, parsers, and tree parsers.
  • You can have actions executed during the recognition of a single token.
  • You can recognize complicated tokens such as HTML tags or "executable" comments like the javadoc @-tags inside /** ... */ comments. The lexer has a stack, unlike a DFA, so you can match nested structures such as nested comments.

The overall structure of a lexer is:

class MyLexer extends Lexer;
options {
  some options
}
{
  lexer class members
}
lexical rules

Lexical Rules

Rules defined within a lexer grammar must have a name beginning with an uppercase letter. These rules implicitly match characters on the input stream instead of tokens on the token stream. Referenced grammar elements include token references (implicit lexer rule references), characters, and strings. Lexer rules are processed in the exact same manner as parser rules and, hence, may specify arguments and return values; further, lexer rules can also have local variables and use recursion. The following rule defines a rule called ID that is available as a token type in the parser.

ID : ( 'a'..'z' )+
   ;

This rule would become part of the resulting lexer and would appear as a method called mID() that looks sort of like this:

    public final void mID(...)
        throws RecognitionException,
               CharStreamException, TokenStreamException
    {
        ...
        _loop3:
        do {
            if (((LA(1) >= 'a' && LA(1) <= 'z'))) {
                matchRange('a','z');
            }
        } while (...);
        ...
    }

It is a good idea to become familiar with ANTLR's output--the generated lexers are human-readable and make a lot of concepts more transparent.

Skipping characters

To have the characters matched by a rule ignored, set the token type to Token.SKIP. For example,

WS : ( ' ' | '\t' | '\n' { newline(); } | '\r' )+
     { $setType(Token.SKIP); }
   ;
Skipped tokens force the lexer to reset and try for another token. Skipped tokens are never sent back to the parser.

Distinguishing between lexer rules

As with most lexer generators like lex, you simply list a set of lexical rules that match tokens. The tool then automatically generates code to map the next input character(s) to a rule likely to match. Because ANTLR generates recursive-descent lexers just like it does for parsers and tree parsers, ANTLR automatically generates a method for a fictitious rule called nextToken that predicts which of your lexer rules will match upon seeing the character lookahead. You can think of this method as just a big "switch" that routes recognition flow to the appropriate rule (the code may be much more complicated than a simple switch-statement, however). Method nextToken is the only method of TokenStream (in Java):

public interface TokenStream {
    public Token nextToken() throws TokenStreamException;
}
A parser feeds off a lookahead buffer and the buffer pulls from any TokenStream. Consider the following two ANTLR lexer rules:
INT : ('0'..'9')+;
WS : ' ' | '\t' | '\r' | '\n';

You will see something like the following method in lexer generated by ANTLR:

public Token nextToken() throws TokenStreamException {
    ...
    for (;;) {
        Token _token = null;
        int _ttype = Token.INVALID_TYPE;
        resetText();
        ...
        switch (LA(1)) {
          case '0': case '1': case '2': case '3':
          case '4': case '5': case '6': case '7':
          case '8': case '9':
            mINT(); break;
          case '\t': case '\n': case '\r': case ' ':
            mWS(); break;
          default: // error
        }
        ...
    }
}

What happens when the same character predicts more than a single lexical rule? ANTLR generates an nondeterminism warning between the offending rules, indicating you need to make sure your rules do not have common left-prefixes. ANTLR does not follow the common lexer rule of "first definition wins" (the alternatives within a rule, however, still follow this rule). Instead, sufficient power is given to handle the two most common cases of ambiguity, namely "keywords vs. identifiers", and "common prefixes"; and for especially nasty cases you can use syntactic or semantic predicates.

What if you want to break up the definition of a complicated rule into multiple rules? Surely you don't want every rule to result in a complete Token object in this case. Some rules are only around to help other rules construct tokens. To distinguish these "helper" rules from rules that result in tokens, use the protected modifier. This overloading of the access-visibility Java term occurs because if the rule is not visible, it cannot be "seen" by the parser (yes, this nomenclature sucks). See also What is a "protected" lexer rule.

Another, more practical, way to look at this is to note that only non-protected rules get called by nextToken and, hence, only non-protected rules can generate tokens that get shoved down the TokenStream pipe to the parser.

Return values

All rules return a token object (conceptually) automatically, which contains the text matched for the rule and its token type at least.  To specify a user-defined return value, define a return value and set it in an action:

protected
INT returns [int v]
    :   ('0'..'9')+ { v=Integer.valueOf($getText); }
    ;

Note that only protected rules can have a return type since regular lexer rules generally are invoked by nextToken() and the parser cannot access the return value, leading to confusion.

Predicated-LL(k) Lexing

Lexer rules allow your parser to match context-free structures on the input character stream as opposed to the much weaker regular structures (using a DFA--deterministic finite automaton). For example, consider that matching nested curly braces with a DFA must be done using a counter whereas nested curlies are trivially matched with a context-free grammar:

ACTION
    :   '{' ( ACTION | ~'}' )* '}'
    ;    

The recursion from rule ACTION to ACTION, of course, is the dead giveaway that this is not an ordinary lexer rule.

Because the same algorithms are used to analyze lexer and parser rules, lexer rules may use more than a single symbol of lookahead, can use semantic predicates, and can specify syntactic predicates to look arbitrarily ahead, thus, providing recognition capabilities beyond the LL(k) languages into the context-sensitive. Here is a simple example that requires k>1 lookahead:

ESCAPE_CHAR
    :   '\\' 't' // two char of lookahead needed,
    |   '\\' 'n' // due to common left-prefix
    ;    

To illustrate the use of syntactic predicates for lexer rules, consider the problem of distinguishing between floating point numbers and ranges in Pascal. Input 3..4 must be broken up into 3 tokens: INT, RANGE, followed by INT. Input 3.4, on the other hand, must be sent to the parser as a REAL. The trouble is that the series of digits before the first '.' can be arbitrarily long. The scanner then must consume the first '.' to see if the next character is a '.', which would imply that it must back up and consider the first series of digits an integer. Using a non-backtracking lexer makes this task very difficult; without bracktracking, your lexer has to be able to respond with more than a single token at one time. However, a syntactic predicate can be used to specify what arbitrary lookahead is necessary:

class Pascal extends Parser;

prog:   INT
        (   RANGE INT
            { System.out.println("INT .. INT"); }
        |   EOF
            { System.out.println("plain old INT"); }
        )
    |   REAL { System.out.println("token REAL"); }
    ;

class LexPascal extends Lexer;

WS  :   (' '
    |   '\t'
    |   '\n'
    |   '\r')+
        { $setType(Token.SKIP); }
    ;

protected
INT :   ('0'..'9')+
    ;

protected
REAL:   INT '.' INT
    ;

RANGE
    :   ".."
    ;

RANGE_OR_INT
    :   ( INT ".." ) => INT  { $setType(INT); }
    |   ( INT '.' )  => REAL { $setType(REAL); }
    |   INT                  { $setType(INT); }
    ;    

ANTLR lexer rules are even able to handle FORTRAN assignments and other difficult lexical constructs. Consider the following DO loop:

DO 100 I = 1,10

If the comma were replaced with a period, the loop would become an assignment to a weird variable called "DO100I":

DO 100 I = 1.10

The following rules correctly differentiate the two cases:

DO_OR_VAR
    :   (DO_HEADER)=> "DO" { $setType(DO); }
    |   VARIABLE { $setType(VARIABLE); }
    ;

protected
DO_HEADER
options { ignore=WS; }
    :   "DO" INT VARIABLE '=' EXPR ','
    ;

protected INT : ('0'..'9')+;

protected WS : ' ';

protected
VARIABLE
    :   'A'..'Z'
        ('A'..'Z' | ' ' | '0'..'9')*
        { /* strip space from end */ }
    ;

// just an int or float
protected EXPR
    :   INT ( '.' (INT)? )?
    ;

The previous examples discuss differentiating lexical rules via lots of lookahead (fixed k or arbitrary). There are other situations where you have to turn on and off certain lexical rules (making certain tokens valid and invalid) depending on prior context or semantic information. One of the best examples is matching a token only if it starts on the left edge of a line (i.e., column 1). Without being able to test the state of the lexer's column counter, you cannot do a decent job. Here is a simple DEFINE rule that is only matched if the semantic predicate is true.

DEFINE
    :   {getColumn()==1}? "#define" ID
    ;

Semantic predicates on the left-edge of single-alternative lexical rules get hoisted into the nextToken prediction mechanism. Adding the predicate to a rule makes it so that it is not a candidate for recognition until the predicate evaluates to true. In this case, the method for DEFINE would never be entered, even if the lookahead predicted #define, if the column > 1.

Another useful example involves context-sensitive recognition such as when you want to match a token only if your lexer is in a particular context (e.g., the lexer previously matched some trigger sequence). If you are matching tokens that separate rows of data such as "----", you probably only want to match this if the "begin table" sequence has been found.

BEGIN_TABLE
    :   '[' {this.inTable=true;} // enter table context
    ;

ROW_SEP
    :   {this.inTable}? "----"
    ;

END_TABLE
    :   ']' {this.inTable=false;} // exit table context
    ;
This predicate hoisting ability is another way to simulate lexical states from DFA-based lexer generators like lex, though predicates are much more powerful. (You could even turn on certain rules according to the phase of the moon). ;)

Keywords and literals

Many languages have a general "identifier" lexical rule, and keywords that are special cases of the identifier pattern. A typical identifier token is defined as:

ID : LETTER (LETTER | DIGIT)*;

This is often in conflict with keywords. ANTLR solves this problem by letting you put fixed keywords into a literals table. The literals table (which is usually implemented as a hash table in the lexer) is checked after each token is matched, so that the literals effectively override the more general identifier pattern. Literals are created in one of two ways. First, any double-quoted string used in a parser is automatically entered into the literals table of the associated lexer. Second, literals may be specified in the lexer grammar by means of the literal option. In addition, the testLiterals option gives you fine-grained control over the generation of literal-testing code.

Common prefixes

Fixed-length common prefixes in lexer rules are best handled by increasing the lookahead depth of the lexer. For example, some operators from Java:

class MyLexer extends Lexer;
options {
  k=4;
}
GT : ">";
GE : ">=";
RSHIFT : ">>";
RSHIFT_ASSIGN : ">>=";
UNSIGNED_RSHIFT : ">>>";
UNSIGNED_RSHIFT_ASSIGN : ">>>=";

Token definition files

Token definitions can be transferred from one grammar to another by way of token definition files. This is accomplished using the importVocab and exportVocab options.

Character classes

Use the ~ operator to invert a character or set of characters.  For example, to match any character other than newline, the following rule references ~'\n'.

SL_COMMENT: "//" (~'\n')* '\n';

The ~ operator also inverts a character set:

NOT_WS: ~(' ' | '\t' | '\n' | '\r');

The range operator can be used to create sequential character sets:

DIGIT : '0'..'9' ;

Token Attributes

See the next section.

Lexical lookahead and the end-of-token symbol

A unique situation occurs when analyzing lexical grammars, one which is similar to the end-of-file condition when analyzing regular grammars.  Consider how you would compute lookahead sets for the ('b' | ) subrule in following rule B:

class L extends Lexer;

A	:	B 'b'
	;

protected  // only called from another lex rule
B	:	'x' ('b' | )
	;

The lookahead for the first alternative of the subrule is clearly 'b'.  The second alternative is empty and the lookahead set is the set of all characters that can follow references to the subrule, which is the follow set for rule B.  In this case, the 'b' character follows the reference to B and is therefore the lookahead set for the empty alt indirectly.  Because 'b' begins both alternatives, the parsing decision for the subrule is nondeterministic, or ambiguous as we sometimes say.  ANTLR will justly generate a warning for this subrule (unless you use the warnWhenFollowAmbig option).

Now, consider what would make sense for the lookahead if rule A did not exist and rule B was not protected (it was a complete token rather than a "subtoken"):

B	:	'x' ('b' | )
	;

In this case, the empty alternative finds only the end of the rule as the lookahead with no other rules referencing it.  In the worst case, any character could follow this rule (i.e., start the next token or error sequence).  So, should not the lookahead for the empty alternative be the entire character vocabulary?   And should not this result in a nondeterminism warning as it must conflict with the 'b' alternative?  Conceptually, yes to both questions.  From a practical standpoint, however, you are clearly saying "heh, match a 'b' on the end of token B if you find one."  I argue that no warning should be generated and ANTLR's policy of matching elements as soon as possible makes sense here as well.

Another reason not to represent the lookahead as the entire vocabulary is that a vocabulary of '\u0000'..'\uFFFF' is really big (one set is 2^16 / 32 long words of memory!).  Any alternative with '<end-of-token>' in its lookahead set will be pushed to the ELSE or DEFAULT clause by the code generator so that huge bitsets can be avoided.

The summary is that lookahead purely derived from hitting the end of a lexical rule (unreferenced by other rules) cannot be the cause of a nondeterminism.  The following table summarizes a bunch of cases that will help you figure out when ANTLR will complain and when it will not.

X	:	'q' ('a')? ('a')?
        ;
The first subrule is nondeterministic as 'a' from the second subrule (and end-of-token) are in the lookahead for the exit branch of (...)?
X	:	'q' ('a')? ('c')?
        ;
No nondeterminism.
Y	:    'y' X 'b'
	;

protected
X	:    'b'
	|
	;
Nondeterminism in rule X.
X	:	'x' ('a'|'c'|'d')+
	|	'z' ('a')+
	;
No nondeterminism as exit branch of loops see lookahead computed purely from end-of-token.
Y	:	'y' ('a')+ ('a')?
	;
Nondeterminism between 'a' of (...)+ and exit branch as the exit can see the 'a' of the optional subrule.  This would be a problem even if ('a')? were simply 'a'.  A (...)* loop would report the same problem.
X	:	'y' ('a' 'b')+ 'a' 'c'
	;
At k=1, this is a nondeterminism for the (...)+ since 'a' predicts staying in and exiting the loop.  At k=2, no nondeterminism.
Q	:	'q' ('a' | )?
	;
Here, there is an empty alternative inside an optional subrule.  A nondeterminism is reported as two paths predict end-of-token.

You might be wondering why the first subrule below is ambiguous:

('a')? ('a')?

The answer is that the NFA to DFA conversion would result in a DFA with the 'a' transitions merged into a single state transition!  This is ok for a DFA where you cannot have actions anywhere except after a complete match.  Remember that ANTLR lets you do the following:

('a' {do-this})? ('a' {do-that})?

One other thing is important to know.  Recall that alternatives in lexical rules are reordered according to their lookahead requirements, from highest to lowest.

A	:	'a'
	|	'a' 'b'
	;

At k=2, ANTLR can see 'a' followed by '<end-of-token>' for the first alternative and 'a' followed by 'b' in the second.  The lookahead at depth 2 for the first alternative being '<end-of-token>' suppresses the warning that depth two can match any character for the first alternative.  To behave naturally and to generate good code when no warning is generated, ANTLR reorders the alternatives so that the code generated is similar to:

A() {
	if ( LA(1)=='a' && LA(2)=='b' ) { // alt 2
		match('a'); match('b');
	}
	else if ( LA(1)=='a' ) { // alt 1
		match('a')
	}
	else {error;}
}

Note the lack of lookahead test for depth 2 for alternative 1.  When an empty alternative is present, ANTLR moves it to the end.  For example,

A	:	'a'
	|
	|	'a' 'b'
	;

results in code like this:

A() {
	if ( LA(1)=='a' && LA(2)=='b' ) { // alt 2
		match('a'); match('b');
	}
	else if ( LA(1)=='a' ) { // alt 1
		match('a')
	}
	else {
	}
}

Note that there is no way for a lexing error to occur here (which makes sense because the rule is optional--though this rule only makes sense when protected).

Semantic predicates get moved along with their associated alternatives when the alternatives are sorted by lookahead depth.  It would be weird if the addition of a {true}? predicate (which implicitly exists for each alternative) changed what the lexer recognized!  The following rule is reordered so that alternative 2 is tested first.

B	:	{true}? 'a'
	|	'a' 'b'
	;

Syntactic predicates are not reordered.  Mentioning the predicate after the rule it conflicts with results in an ambiguity such as is in this rule:

F	:	'c'
	|	('c')=> 'c'
	;

Other alternatives are, however, reordered with respect to the syntactic predicates even when a switch is generated for the LL(1) components and the syntactic predicates are pushed into the default case.  The following rule illustrates the point.

F	:	'b'
	|	{/* empty-path */}
	|	('c')=> 'c'
	|	'c'
	|	'd'
	|	'e'
	;

Rule F's decision is generated as follows:

        switch ( la_1) {
        case 'b':
        {
            match('b');
            break;
        }
        case 'd':
        {
            match('d');
            break;
        }
        case 'e':
        {
            match('e');
            break;
        }
        default:
            boolean synPredMatched15 = false;
            if (((la_1=='c'))) {
                int _m15 = mark();
                synPredMatched15 = true;
                guessing++;
                try {
                    match('c');
                }
                catch (RecognitionException pe) {
                    synPredMatched15 = false;
                }
                rewind(_m15);
                guessing--;
            }
            if ( synPredMatched15 ) {
                match('c');
            }
            else if ((la_1=='c')) {
                match('c');
            }
            else {
                if ( guessing==0 ) {
                    /* empty-path */
                }
            }
        }

Notice how the empty path got moved after the test for the 'c' alternative.

Scanning Binary Files

Character literals are not limited to printable ASCII characters.  To demonstrate the concept, imagine that you want to parse a binary file that contains strings and short integers.  To distinguish between them, marker bytes are used according to the following format:

format description
'\0' highbyte lowbyte Short integer
'\1' string of non-'\2' chars '\2' String

Sample input (274 followed by "a test") might look like the following in hex (output from UNIX od -h command):

0000000000    00 01 12 01 61 20 74 65 73 74 02 

or as viewed as characters:

0000000000    \0 001 022 001 a      t  e  s  t 002

The parser is trivially just a (...)+ around the two types of input tokens:

class DataParser extends Parser;

file:   (   sh:SHORT
            {System.out.println(sh.getText());}
        |   st:STRING
            {System.out.println("\""+
               st.getText()+"\"");}
        )+
    ;

All of the interesting stuff happens in the lexer.  First, define the class and set the vocabulary to be all 8 bit binary values:

class DataLexer extends Lexer;
options {
    charVocabulary = '\u0000'..'\u00FF';
}

Then, define the two tokens according to the specifications, with markers around the string and a single marker byte in front of the short:

SHORT
    :   // match the marker followed by any 2 bytes
        '\0' high:. lo:.
        {
        // pack the bytes into a two-byte short
        int v = (((int)high)<<8) + lo;
        // make a string out of the value
        $setText(""+v);
        }
    ;

STRING
    :   '\1'!   // begin string (discard)
        ( ~'\2' )*
        '\2'!   // end string (discard)
    ;

To invoke the parser, use something like the following:

import java.io.*;

class Main {
    public static void main(String[] args) {
        try {
            // use DataInputStream to grab bytes
            DataLexer lexer =
              new DataLexer(
                new DataInputStream(System.in)
              );
            DataParser parser =
                new DataParser(lexer);
            parser.file();
        } catch(Exception e) {
            System.err.println("exception: "+e);
        }
    }
}

Scanning Unicode Characters

ANTLR (as of 2.7.1) allows you to recognize input composed of Unicode characters; that is, you are not restricted to 8 bit ASCII characters.  I would like to emphasize that ANTLR allows, but does not yet support Unicode as there is more work to be done.  For example, end-of-file is currently incorrectly specified:

CharScanner.EOF_CHAR=(char)-1;

This must be an integer -1 not char, which is actually narrowed to 0xFFFF via the cast.   I have to go through the entire code base looking for these problems.  Plus, we should really have a special syntax to mean "java identifier character" and some standard encodings for non-Western character sets etc... I expect 2.7.3 to add nice predefined character blocks like LETTER.

The following is a very simple example of how to match a series of space-separated identifiers.

class L extends Lexer;

options {
    // Allow any char but \uFFFF (16 bit -1)
    charVocabulary='\u0000'..'\uFFFE';
}

{
    private static boolean done = false;

    public void uponEOF()
        throws TokenStreamException, CharStreamException
    {
        done=true;
    }
    
    public static void main(String[] args) throws Exception {
        L lexer = new L(System.in);
        while ( !done ) {
            Token t = lexer.nextToken();
            System.out.println("Token: "+t);
        }
    }
}

ID    :    ID_START_LETTER ( ID_LETTER )*
    ;

WS    :    (' '|'\n') {$setType(Token.SKIP);}
    ;

protected
ID_START_LETTER
    :    '$'
    |    '_'
    |    'a'..'z'
    |    '\u0080'..'\ufffe'
    ;

protected
ID_LETTER
    :    ID_START_LETTER
    |    '0'..'9'
    ;

A final note on Unicode.  The ~x "not" operator includes everything in your specified vocabulary (up to 16 bit character space) except x.   For example,

~('$'|'a'..'z')

results in every unicode character except '$' and lowercase latin-1 letters, assuming your charVocabulary is 0..FFFF.

Manipulating Token Text and Objects

Once you have specified what to match in a lexical rule, you may ask "what can I discover about what will be matched for each rule element?"  ANTLR allows you to label the various elements and, at parse-time, access the text matched for the element.   You can even specify the token object to return from the rule and, hence, from the lexer to the parser.  This section describes the text and token object handling characteristics of ANTLR.

Manipulating the Text of a Lexical Rule

There are times when you want to look at the text matched for the current rule, alter it, or set the text of a rule to a new string.  The most common case is when you want to simply discard the text associated with a few of the elements that are matched for a rule such as quotes.

ANTLR provides the '!' operator that lets you indicate certain elements should not contribute to the text for a token being recognized. The '!' operator is used just like when building trees in the parser. For example, if you are matching the HTML tags and you do not want the '<' and '>' characters returned as part of the token text, you could manually remove them from the token's text before they are returned, but a better way is to suffix the unwanted characters with '!'. For example, the <br> tag might be recognized as follows:

BR  :  '<'! "br" '>'! ;	// discard < and >

Suffixing a lexical rule reference with '!' forces the text matched by the invoked rule to be discarded (it will not appear in the text for the invoking rule).  For example, if you do not care about the mantissa of a floating point number, you can suffix the rule that matches it with a '!':

FLOAT : INT ('.'! INT!)? ; // keep only first INT

As a shorthand notation, you may suffix an alternative or rule with '!' to indicate the alternative or rule should not pass any text back to the invoking rule or parser (if nonprotected):

// ! on rule: nothing is auto added to text of rule.
rule! : ... ;

// ! on alt: nothing is auto added to text for alt
rule : ... |! ...;
Item suffixed with '!' Effect
char or string literal Do not add text for this atom to current rule's text.
rule reference Do not add text for matched while recognizing this rule to current rule's text.
alternative Nothing that is matched by alternative is added to current rule's text; the enclosing rule contributes nothing to any invoking rule's text.  For nonprotected rules, the text for the token returned to parser is blank.
rule definition Nothing that is matched by any alternative is added to current rule's text; the rule contributes nothing to any invoking rule's text.  For nonprotected rules, the text for the token returned to parser is blank.

While the '!' implies that the text is not added to the text for the current rule, you can label an element to access the text (via the token if the element is a rule reference).

In terms of implementation, the characters are always added to the current text buffer, but are carved out when necessary (as this will be the exception rather than the rule, making the normal case efficient).

The '!' operator is great for discarding certain characters or groups of characters, but what about the case where you want to insert characters or totally reset the text for a rule or token?  ANTLR provides a series of special methods to do this (we prefix the methods with '$' because Java does not have a macro facility and ANTLR must recognize the special methods in your actions).  The following table summarizes.

Method Description/Translation
$append(x) Append x to the text of the surrounding rule.  Translation: text.append(x)
$setText(x) Set the text of the surrounding rule to x.  Translation: text.setLength(_begin); text.append(x)
$getText Return a String of the text for the surrounding rule.  Translation;
new String(text.getBuffer(),
_begin,text.length()-_begin)
$setToken(x) Set the token object that this rule is to return.  See the section on Token Object Creation. Translation: _token = x
$setType(x) Set the token type of the surrounding rule.  Translation: _ttype = x
setText(x) Set the text for the entire token being recognized regardless of what rule the action is in. No translation.
getText() Get the text for the entire token being recognized regardless of what rule the action is in. No translation.

One of the great things about an ANTLR generated lexer is that the text of a token can be modified incrementally as the token is recognized (an impossible task for a DFA-based lexer):

STRING: '"' ( ESCAPE | ~('"'|'\\') )* '"' ;

protected
ESCAPE
    :    '\\'
         ( 'n' { $setText("\n"); }
         | 'r' { $setText("\r"); }
         | 't' { $setText("\t"); }
         | '"' { $setText("\""); }
         )
    ;

Token Object Creation

Because lexical rules can call other rules just like in the parser, you sometimes want to know what text was matched for that portion of the token being matched. To support this, ANTLR allows you to label lexical rules and obtain a Token object representing the text, token type, line number, etc... matched for that rule reference.   This ability corresponds to be able to access the text matched for a lexical state in a DFA-based lexer.  For example, here is a simple rule that prints out the text matched for a rule reference, INT.

INDEX	:	'[' i:INT ']'
		{System.out.println(i.getText());}
	;
INT	:	('0'..'9')+ ;

If you moved the labeled reference and action to a parser, it would do the same thing (match an integer and print it out).

All lexical rules conceptually return a Token object, but in practice this would be inefficient. ANTLR generates methods so that a token object is created only if any invoking reference is labeled (indicating they want the token object).  Imagine another rule that calls INT without a label.

FLOAT	:	INT ('.' INT)? ;

In this case, no token object is created for either reference to INT.  You will notice a boolean argument to every lexical rule that tells it whether or not a token object should be created and returned (via a member variable).  All nonprotected rules (those that are "exposed" to the parser) must always generate tokens, which are passed back to the parser.

Heterogeneous Token Object Streams

While token creation is normally handled automatically, you can also manually specify the token object to be returned from a lexical rule. The advantage is that you can pass heterogeneous token objects back to the parser, which is extremely useful for parsing languages with complicated tokens such as HTML (the <img> and <table> tokens, for example, can have lots of attributes).  Here is a rule for the <img> tag that returns a token object of type ImageToken:

IMAGE
{
  Attributes attrs;
}
  :  "<img " attrs=ATTRIBUTES '>'
     {
     ImageToken t = new ImageToken(IMAGE,$getText);
     t.setAttributes(attrs);
     $setToken(t);
     }
  ;
ATTRIBUTES returns [Attributes a]
  :  ...
  ;

The $setToken function specifies that its argument is to be returned when the rule exits.  The parser will receive this specific object instead of a CommonToken or whatever else you may have specified with the Lexer.setTokenObjectClass method.   The action in rule IMAGE references a token type, IMAGE, and a lexical rule references, ATTRIBUTES, which matches all of the attributes of an image tag and returns them in a data structure called Attributes.

What would it mean for rule IMAGE to be protected (i.e., referenced only from other lexical rules rather than from nextToken)?   Any invoking labeled rule reference would receive the object (not the parser) and could examine it, or manipulate it, or pass it on to the invoker of that rule.  For example, if IMAGE were called from TAGS rather than being nonprotected, rule TAGS would have to pass the token object back to the parser for it.

TAGS : IMG:IMAGE
       {$setToken(img);} // pass to parser
     | PARAGRAPH // probably has no special token
     | ...
     ;

Setting the token object for a nonprotected rule invoked without a label has no effect other than to waste time creating an object that will not be used.

We use a CharScanner member _returnToken to do the return in order to not conflict with return values used by the grammar developer. For example,

PTAG: "<p>" {$setToken(new ParagraphToken($$));} ; 

which would be translated to something like:

protected final void mPTAG()
  throws RecognitionException, CharStreamException,
         TokenStreamException {
    Token _token = null;
    match("<p>");
    _returnToken =
      new ParagraphToken(text-of-current-rule);
}

Filtering Input Streams

You often want to perform an action upon seeing a pattern or two in a complicated input stream, such as pulling out links in an HTML file.  One solution is to take the HTML grammar and just put actions where you want.  Using a complete grammar is overkill and you may not have a complete grammar to start with.

ANTLR provides a mechanism similar to AWK that lets you say "here are the patterns I'm interested in--ignore everything else."  Naturally, AWK is limited to regular expressions whereas ANTLR accepts context-free grammars (Uber-AWK?).  For example, consider pulling out the <p> and <br> tags from an arbitrary HTML file.  Using the filter option, this is easy:

class T extends Lexer;
options {
    k=2;
    filter=true;
}

P : "<p>" ;
BR: "<br>" ;

In this "mode", there is no possibility of a syntax error.  Either the pattern is matched exactly or it is filtered out.

This works very well for many cases, but is not sophisticated enough to handle the situation where you want "almost matches" to be reported as errors.   Consider the addition of the <table...> tag to the previous grammar:

class T extends Lexer; 
options { 
    k=2; 
    filter = true; 
} 

P : "<p>" ; 
BR: "<br>" ; 
TABLE : "<table" (WS)? (ATTRIBUTE)* (WS)? '>' ; 
WS : ' ' | '\t' | '\n' ; 
ATTRIBUTE : ... ;

Now, consider input "<table 8 = width ;>" (a bogus table definition). As is, the lexer would simply scarf past this input without "noticing" the invalid table. What if you want to indicate that a bad table definition was found as opposed to ignoring it?  Call method

setCommitToPath(boolean commit)

in your TABLE rule to indicate that you want the lexer to commit to recognizing the table tag:

TABLE
    :   "<table" (WS)?
        {setCommitToPath(true);}
        (ATTRIBUTE)* (WS)? '>'
    ;

Input "<table 8 = width ;>" would result in a syntax error.  Note the placement after the whitespace recognition; you do not want <tabletop> reported as a bad table (you want to ignore it).

One further complication in filtering: What if the "skip language" (the stuff in between valid tokens or tokens of interest) cannot be correctly handled by simply consuming a character and trying again for a valid token?  You may want to ignore comments or strings or whatever.  In that case, you can specify a rule that scarfs anything between tokens of interest by using option filter=RULE.   For example, the grammar below filters for <p> and <br> tags as before, but also prints out any other tag (<...>) encountered.

class T extends Lexer;
options {
    k=2;
    filter=IGNORE;
    charVocabulary = '\3'..'\177';
}

P : "<p>" ;
BR: "<br>" ;

protected
IGNORE
    :   '<' (~'>')* '>'
        {System.out.println("bad tag:"+$getText);}
    |   ( "\r\n" | '\r' | '\n' ) {newline();}
    |   .
    ;

Notice that the filter rule must track newlines in the general case where the lexer might emit error messages so that the line number is not stuck at 0.

The filter rule is invoked either when the lookahead (in nextToken) predicts none of the nonprotected lexical rules or when one of those rules fails.  In the latter case, the input is rolled back before attempting the filter rule.  Option filter=true is like having a filter rule such as:

IGNORE : . ;

Actions in regular lexical rules are executed even if the rule fails and the filter rule is called.  To do otherwise would require every valid token to be matched twice (once to match and once to do the actions like a syntactic predicate)! Plus, there are few actions in lexer rules (usually they are at the end at which point an error cannot occur).

Is the filter rule called when commit-to-path is true and an error is found in a lexer rule? No, an error is reported as with filter=true.

What happens if there is a syntax error in the filter rule?  Well, you can either put an exception handler on the filter rule or accept the default behavior, which is to consume a character and begin looking for another valid token.

In summary, the filter option allows you to:

  1. Filter like awk (only perfect matches reported--no such thing as syntax error)
  2. Filter like awk + catch poorly-formed matches (that is, "almost matches" like <table 8=3;> result in an error)
  3. Filter but specify the skip language

ANTLR Masquerading as SED

To make ANTLR generate lexers that behave like the UNIX utility sed (copy standard in to standard out except as specified by the replace patterns), use a filter rule that does the input to output copying:

class T extends Lexer;
options {
  k=2;
  filter=IGNORE;
  charVocabulary = '\3'..'\177';
}
P  : "<p>" {System.out.print("<P>");};
BR : "<br>" {System.out.print("<BR>");};
protected
IGNORE
  :  ( "\r\n" | '\r' | '\n' )
     {newline(); System.out.println("");}
  |  c:. {System.out.print(c);}
  ;

This example dumps anything other than <p> and <br> tags to standard out and pushes lowercase <p> and <br> to uppercase. Works great.

Nongreedy Subrules

Quick:  What does the following match?

BLOCK : '{' (.)* '}';

Your first reaction is that it matches any set of characters inside of curly quotes.   In reality, it matches '{' followed by every single character left on the input stream!  Why?  Well, because ANTLR loops are greedy--they consume as much input as they can match.  Since the wildcard matches any character, it consumes the '}' and beyond.  This is a pain for matching strings, comments and so on.

Why can't we switch it around so that it consumes only until it sees something on the input stream that matches what follows the loop, such as the '}'?   That is, why can't we make loops nongreedy?  The answer is we can, but sometimes you want greedy and sometimes you want nongreedy (PERL has both kinds of closure loops now too).  Unfortunately, parsers usually want greedy and lexers usually want nongreedy loops.  Rather than make the same syntax behave differently in the various situations, Terence decided to leave the semantics of loops as they are (greedy) and make a subrule option to make loops nongreedy.

Greedy Subrules

I have yet to see a case when building a parser grammar where I did not want a subrule to match as much input as possible.  For example, the solution to the classic if-then-else clause ambiguity is to match the "else" as soon as possible:

stat : "if" expr "then" stat ("else" stat)?
     | ...
     ;

This ambiguity (which statement should the "else" be attached to) results in a parser nondeterminism.  ANTLR warns you about the (...)? subrule as follows:

warning: line 3: nondeterminism upon
        k==1:"else"
        between alts 1 and 2 of block

If, on the other hand, you make it clear to ANTLR that you want the subrule to match greedily (i.e., assume the default behavior), ANTLR will not generate the warning.   Use the greedy subrule option to tell ANTLR what you want:

stat : "if" expr "then" stat
       ( options {greedy=true;} : "else" stat)?
     | ID
     ;

You are not altering the behavior really, since ANTLR was going to choose to match the "else" anyway, but you have avoided a warning message.

There is no such thing as a nongreedy (...)? subrule because telling an optional subrule not to match anything is the same as not specifying the subrule in the first place.  If you make the subrule nongreedy, you will see:

warning in greedy.g: line(4),
        Being nongreedy only makes sense
        for (...)+ and (...)*
warning: line 4: nondeterminism upon
        k==1:"else"
        between alts 1 and 2 of block

Greedy subrules are very useful in the lexer also.  If you want to grab any whitespace on the end of a token definition, you can try (WS)? for some whitespace rule WS:

ID : ('a'..'z')+ (WS)? ;

However, if you want to match ID in a loop in another rule that could also match whitespace, you will run into a nondeterminism warning.  Here is a contrived loop that conflicts with the (WS)? in ID:

LOOP : (  ID
       |  WS
       )+
     ;

The whitespace on the end of the ID could be matched in ID or in LOOP now.  ANTLR chooses to match the WS immediately, in ID.  To shut off the warning, simply tell ANTLR that you mean for it to be greedy, its default behavior:

ID : ('a'..'z')+ (options {greedy=true;}:WS)? ;

Nongreedy Lexer Subrules

ANTLR's default behavior of matching as much as possible in loops and optional subrules is sometimes not what you want in lexer grammars.  Most loops that match "a bunch of characters" in between markers, like curly braces or quotes, should be nongreedy loops.  For example, to match a nonnested block of characters between curly braces, you want to say:

CURLY_BLOCK_SCARF
    :   '{' (.)* '}'
    ;

Unfortunately, this does not work--it will consume everything after the '{' until the end of the input.  The wildcard matches anything including '}' and so the loop merrily consumes past the ending curly brace.

To force ANTLR to break out of the loop when it sees a lookahead sequence consistent with what follows the loop, use the greedy subrule option:

CURLY_BLOCK_SCARF
    :   '{'
        (
            options {
                greedy=false;
            }
        :   .
        )*
        '}'
    ;

To properly take care of newlines inside the block, you should really use the following version that "traps" newlines and bumps up the line counter:

CURLY_BLOCK_SCARF
    :   '{'
        (
            options {
                greedy=false;
            }
        :   '\r' ('\n')? {newline();}
        |   '\n'         {newline();}
        |   .
        )*
        '}'
    ;

Limitations of Nongreedy Subrules

What happens when what follows a nongreedy subrule is not as simple as a single "marker" character like a right curly brace (i.e., what about when you need k>1 to break out of a loop)?  ANTLR will either "do the right thing" or warn you that it might not.

First, consider the matching C comments:

CMT : "/*" (.)* "*/" ;

As with the curly brace matching, this rule will not stop at the end marker because the wildcard matches the "*/" end marker as well.  You must tell ANTLR to make the loop nongreedy:

CMT : "/*" (options {greedy=false;} :.)* "*/" ;

You will not get an error and ANTLR will generate an exit branch

do {
    // nongreedy exit test
    if ((LA(1)=='*')) break _loop3;
    ...

Ooops.  k=1, which is not enough lookahead.  ANTLR did not generate a warning because it assumes you are providing enough lookahead for all nongreedy subrules.   ANTLR cannot determine how much lookahead to use or how much is enough because, by definition, the decision is ambiguous--it simply generates a decision using the maximum lookahead.

You must provide enough lookahead to let ANTLR see the full end marker:

class L extends Lexer;
options {
        k=2;
}

CMT : "/*" (options {greedy=false;} :.)* "*/" ;

Now, ANTLR will generate an exit branch using k=2.

do {
    // nongreedy exit test
    if ((LA(1)=='*') && (LA(2)=='/'))
        break _loop3;
    ...

If you increase k to 3, ANTLR will generate an exit branch using k=3 instead of 2, even though 2 is sufficient.  We know that k=2 is ok, but ANTLR is faced with a nondeterminism as it will use as much information as it has to yield a deterministic parser.

There is one more issue that you should be aware of.  Because ANTLR generates linear approximate decisions instead of full LL(k) decisions, complicated "end markers" can confuse ANTLR.  Fortunately, ANTLR knows when it is confused and will let you know.

Consider a simple contrived example where a loop matches either ab or cd:

R : (   options {greedy=false;}
    :   ("ab"|"cd")
    )+
    ("ad"|"cb")
  ;

Following the loop, the grammar can match ad or cb.  These exact sequences are not a problem for a full LL(k) decision, but due to the extreme compression of the linear approximate decision, ANTLR will generate an inaccurate exit branch.  In other words, the loop will exit, for example, on ab even though that sequence cannot be matched following the loop.   The exit condition is as follows:

// nongreedy exit test
if ( _cnt10>=1 && (LA(1)=='a'||LA(1)=='c') &&
     (LA(2)=='b'||LA(2)=='d')) break _loop10;

where the _cnt10 term ensures the loop goes around at least once (but has nothing to do with the nongreedy exit branch condition really).   Note that ANTLR has compressed all characters that can possibly be matched at a lookahead depth into a single set, thus, destroying the sequence information.  The decision matches the cross product of the sets, including the spurious lookahead sequences such as ab.

Fortunately, ANTLR knows when a decision falls between its approximate decision and a full LL(k) decision--it warns you as follows:

warning in greedy.g: line(3),
    nongreedy block may exit incorrectly due
    to limitations of linear approximate lookahead
    (first k-1 sets in lookahead not singleton).

The parenthetical remark gives you a hint that some k>1 lookahead sequences are correctly predictable even with the linear approximate lookahead compression.  The idea is that if all sets for depths 1..(k-1) are singleton sets (exactly one lookahead sequence for first k-1 characters) then linear approximate lookahead compression does not weaken your parser.  So, the following variant does not yield a warning since the exit branch is linear approximate as well as full LL(k):

R : (   options {greedy=false;}
    :   .
    )+
    ("ad"|"ae")
  ;

The exit branch decision now tests lookahead as follows:

   (LA(1)=='a') && (LA(2)=='d'||LA(2)=='e')

which accurately predicts when to exit.

Lexical States

With DFA-based lexer generators such as lex, you often need to match pieces of your input with separate sets of rules called lexical states. In ANTLR, you can simply define another rule and call it like any other to switch "states". Better yet, this "state" rule can be reused by other parts of your lexer grammar because the method return stack tells the lexer which rule to return to. DFAs have no stacks unlike recursive-descent parsers and, hence, can only switch back to one hard-coded rule.

Consider an example where you would normally see a lexical state--that of matching escape characters within a string. You would attach an action to the double quote character that switched state to a STRING_STATE state. This subordinate state would then define rules for matching the various escapes and finally define a rule for double quote whose action would switch you back to the normal lexical state. To demonstrate the solution with ANTLR, let's start with just a simple string definition:

/** match anything between double-quotes */
STRING : '"' (~'"')* '"' ;
To allow escape characters like \t, you need to add an alternative to the (...)* loop. (You could do that with a DFA-based lexer as well, but you could not have any actions associated with the escape character alternatives to do a replacement etc...). For convenience, collect all escape sequences in another rule called ESC:
STRING : '"' (ESC | ~('\\'|'"'))* '"' ;

protected
ESC    : '\\' ('t' {...} | '"' {...} )* ;
The protected is a (poorly named) indicator that the rule, ESC, is not a token to be returned to the parser. It just means that the nextToken method does not attempt to route recognition flow directly to that rule--ESC must be called from another lexer rule.

This works for simple escapes, but does not include escapes like \20. To fix it, just add a reference to another rule INT that you probably have already defined.

STRING : '"' (ESC | ~('\\'|'"'))* '"' ;

protected
ESC    : '\\' ('t' {...} | '"' {...} | INT {...})* ;

INT    : ('0'..'9')+ ;
Notice that INT is a real token that you want the parser to see so the rule is not protected. A rule may invoke any other rule, protected or not.

Lexical states with DFA-based lexers merely allow you to recognize complicated tokens more easily--the parser has no idea the contortions the lexer goes through. There are some situations where you might want multiple, completely-separate lexers to feed your parser. One such situation is where you have an embedded language such as javadoc comments. ANTLR has the ability to switch between multiple lexers using a token stream multiplexor.  Please see the discussion in streams.

The End Of File Condition

A method is available for reacting to the end of file condition as if it were an event; e.g., you might want to pop the lexer state at the end of an include file.   This method, CharScanner.uponEOF(), is called from nextToken() right before the scanner returns an EOF_TYPE token object to parser:

public void uponEOF()
    throws TokenStreamException, CharStreamException;

This event is not generated during a syntactic predicate evaluation (i.e., when the parser is guessing) nor in the middle of the recognition of a lexical rule (that would be an IO exception).  This event is generated only after the complete evaluation of the last token and upon the next request from the parser for a token.

You can throw exceptions from this method like "Heh, premature eof" or a retry stream exception.  See the includeFile/P.g for an example usage.

Case sensitivity

You may use option caseSensitive=false in the lexer to indicate that you do not want case to be significant when matching characters against the input stream. For example, you want element 'd' to match either upper or lowercase D, however, you do not want to change the case of the input stream. We have implemented this feature by having the lexer's LA() lookahead method return lowercase versions of the characters. Method consume() still adds the original characters to the string buffer associated with a token. We make the following notes:

  • The lowercasing is done by a method toLower() in the lexer. This can be overridden to get more specific case processing.   using option caseSensitive calls method CharScanner.setCaseSensitive(...), which you can also call before (or during I suppose) the parse.
  • ANTLR issues a warning when caseSensitive=false and uppercase ASCII characters are used in character or string literals.

Case sensitivity for literals is handled separately. That is, set lexer option caseSensitiveLiterals to false when you want the literals testing to be case-insensitive. Implementing this required changes to the literals table. Instead of adding a String, it adds an ANTLRHashString that implements a case-insensitive or case-sensitive hashing as desired.

Note: ANTLR checks the characters of a lexer string to make sure they are lowercase, but does not process escapes correctly--put that one on the "to do" list.

Ignoring whitespace in the lexer

One of the great things about ANTLR is that it generates full predicated-LL(k) lexers rather than the weaker (albeit sometimes easier-to-specify) DFA-based lexers of DLG. With such power, you are tempted (and encouraged) to do real parsing in the lexer. A great example of this is HTML parsing, which begs for a two-level parse: the lexer parses all the attributes and so on within a tag, but the parser does overall document structure and ordering of the tags etc... The problem with parsing within a lexer is that you encounter the usual "ignore whitespace" issue as you do with regular parsing.

For example, consider matching the <table> tag of HTML, which has many attributes that can be specified within the tag. A first attempt might yield:

OTABLE   :	"<table" (ATTR)* '>'
         ;

Unfortunately, input "<table border=1>" does not parse because of the blank character after the table identifier. The solution is not to simply have the lexer ignore whitespace as it is read in because the lookahead computations must see the whitespace characters that will be found in the input stream. Further, defining whitespace as a rudimentary set of things to ignore does not handle all cases, particularly difficult ones, such as comments inside tags like

<table <!--wow...a comment--> border=1>

The correct solution is to specify a rule that is called after each lexical element (character, string literal, or lexical rule reference). We provide the lexer rule option ignore to let you specify the rule to use as whitespace. The solution to our HTML whitespace problem is therefore:

TABLE	
options { ignore=WS; }
       :	"<table" (ATTR)* '>'
       ;
// can be protected or non-protected rule
WS     :	' ' | '\n' | COMMENT | ...
       ;

We think this is cool and we hope it encourages you to do more and more interesting things in the lexer!

Oh, almost forgot. There is a bug in that an extra whitespace reference is inserted after the end of a lexer alternative if the last element is an action. The effect is to include any whitespace following that token in that token's text.

Tracking Line Information

Each lexer object has a line member that can be incremented by calling newline() or by simply changing its value (e.g., when processing #line directives in C).

SL_COMMENT : "//" (~'\n')* '\n' {newline();} ;

Do not forget to split out '\n' recognition when using the not operator to read until a stopping character such as:

BLOCK: '('
           ( '\n' { newline(); }
           | ~( '\n' | ')' )
           )*
       ')'
     ;

Another way to track line information is to override the consume() method:

Tracking Column Information

ANTLR (2.7.1 and beyond), tracks character column information so that each token knows what column it starts in; columns start at 1 just like line numbers.  The CharScanner.consume() method asks method tab() to update the column number if it sees a tab, else it just increments the column number:

    ...
    if ( c=='\t' ) {
	tab();
    }
    else {
	inputState.column++;
    }

By default, tab() is defined as follows:

/**
advance the current column number by an appropriate
amount. If you do not override this to specify how
much to jump for a tab, then tabs are counted as
 one char. This method is called from consume().
*/
public void tab() {
  // update inputState.column as function of
  // inputState.column and tab stops.
  // For example, if tab stops are columns 1
  // and 5 etc... and column is 3, then add 2
  // to column.
  inputState.column++;
}

Upon new line, the lexer needs to reset the column number to 1.  Here is the default implementation of CharScanner.newline():

    public void newline() {
	inputState.line++;
	inputState.column = 1;
    }

Do not forget to call newline() in your lexer rule that matches '\n' lest the column number not be reset to 1 at the start of a line.

The shared input state object for a lexer is actually the critter that tracks the column number (as well as the starting column of the current token):

public class LexerSharedInputState {
    protected int column=1;
    protected int line=1;
    protected int tokenStartColumn = 1;
    protected int tokenStartLine = 1;
    ...
}

If you want to handle tabs in your lexer, just implement a method like the following to override the standard behavior.

/** set tabs to 4, just round column up to next tab + 1
12345678901234567890
    x   x   x   x
 */
public void tab() {
	int t = 4;
	int c = getColumn();
	int nc = (((c-1)/t)+1)*t+1;
	setColumn( nc );
}

See the examples/java/columns directory for the complete example.

Using Explicit Lookahead

On rare occasions, you may find it useful to explicitly test the lexer lookahead in say a semantic predicate to help direct the parse. For example, /*...*/ comments have a two character stopping symbol. The following example demonstrates how to use the second symbol of lookahead to distinguish between a single '/' and a "*/":

ML_COMMENT
    :    "/*"
         (  { LA(2)!='/' }? '*'
         | '\n' { newline(); }
         | ~('*'|'\n')
         )*
         "*/"
    ;

The same effect might be possible via a syntactic predicate, but would be much slower than a semantic predicate.  A DFA-based lexer handles this with no problem because they use a bunch of (what amount to) gotos whereas we're stuck with structured elements like while-loops.

A Surprising Use of A Lexer: Parsing

The following set of rules match arithmetical expressions in a lexer not a parser (whitespace between elements is not allowed in this example but can easily be handled by specifying rule option ignore for each rule):

EXPR
{ int val; }
    :   val=ADDEXPR
        { System.out.println(val); }
    ;

protected
ADDEXPR returns [int val]
{ int tmp; }
    :   val=MULTEXPR
        ( '+' tmp=MULTEXPR { val += tmp; }
        | '-' tmp=MULTEXPR { val -= tmp; }
        )*
    ;

protected
MULTEXPR returns [int val]
{ int tmp; }
    :   val=ATOM
        (   '*' tmp=ATOM { val *= tmp; }
        |   '/' tmp=ATOM { val /= tmp; }
        )*
    ;

protected 
ATOM returns [int val]
    :   val=INT
    |   '(' val=ADDEXPR ')'
    ;

protected
INT returns [int val]
    :   ('0'..'9')+
        {val=Integer.valueOf($getText);}
    ;

But...We've Always Used Automata For Lexical Analysis!

Lexical analyzers were all built by hand in the early days of compilers until DFAs took over as the scanner implementation of choice. DFAs have several advantages over hand-built scanners:

  • DFAs can easily be built from terse regular expressions.
  • DFAs do automatic left-factoring of common (possibly infinite) left-prefixes. In a hand-built scanner, you have to find and factor out all common prefixes. For example, consider writing a lexer to match integers and floats. The regular expressions are straightforward:
    integer : "[0-9]+" ;
    real    : "[0-9]+{.[0-9]*}|.[0-9]+" ;    

    Building a scanner for this would require factoring out the common [0-9]+. For example, a scanner might look like:

    Token nextToken() {
      if ( Character.isDigit(c) ) {
        match an integer
        if ( c=='.' ) {
          match another integer
          return new Token(REAL);
        }
        else {
          return new Token(INT);
        }
      }
      else if ( c=='.' ) {
        match a float starting with .
        return new Token(REAL);
      }
      else ...
    }  

Conversely, hand-built scanners have the following advantages over DFA implementations:

  • Hand-built scanners are not limited to the regular class of languages. They may use semantic information and method calls during recognition whereas a DFA has no stack and is typically not semantically predicated.
  • Unicode (16 bit values) is handled for free whereas DFAs typically have fits about anything but 8 bit characters.
  • DFAs are tables of integers and are, consequently, very hard to debug and examine.
  • A tuned hand-built scanner can be faster than a DFA. For example, simulating the DFA to match [0-9]+ requires n DFA state transitions where n is the length of the integer in characters.

    Tom Pennello of Metaware back in 1986 ("Very Fast LR Parsing") generated LR-based parsers in machine code that used the program counter to do state transitions rather than simulating the PDA. He got a huge speed up in parse time. We can extrapolate from this experiment that avoiding a state machine simulator in favor of raw code results in a speed up.

So, what approach does ANTLR take? Neither! ANTLR allows you to specify lexical items with expressions, but generates a lexer for you that mimics what you would generate by hand. The only drawback is that you still have to do the left-factoring for some token definitions (but at least it is done with expressions and not code). This hybrid approach allows you to build lexers that are much stronger and faster than DFA-based lexers while avoiding much of the overhead of writing the lexer yourself.

In summary, specifying regular expressions is simpler and shorter than writing a hand-built lexer, but hand-built lexers are faster, stronger, able to handle unicode, and easy to debug. This analysis has led many programmers to write hand-built lexers even when DFA-generation tools such as lex and dlg are commonly-available. PCCTS 1.xx made a parallel argument concerning PDA-based LR parsers and recursive-descent LL-based parsers. As a final justification, we note that writing lexers is trivial compared to building parsers; also, once you build a lexer you will reuse it with small modifications in the future.

Version: $Id: //depot/code/org.antlr/release/antlr-2.7.5/doc/lexer.html#1 $
nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/doc/lexer.to.parser.tokens.gif000066400000000000000000000070011161462365500255650ustar00rootroot00000000000000GIF89a4Q,4Q@H*\ȰÇ#JHŋ3jȱǏ CD d ,`@Ҥ˗0cʜI͛%[⬈rO[IѣHw 84)B)W68iž*V-8uEl vXd;M3Vaݢk5ڠ#ֽvX{b `Y`/T,֯NA}Vԁ-9ϿTPg/?60׬[_EЕIsc}7U ,2У/ d埪_̗9嶄r^gK> ){qK?z׆% ҸmsVf}ge=V 6~5]IjRjz`mxI6׆_"ZvzxHzAȚ.i,ډH5)(QޔXUeey喆եd9PY:bWDv ]wsIvy矀Zi(*Xo 裐F*FImf馜vjXjj~fa F@xqhv٪u $7c" &Y27Zh~'{V^K6v+ӧfyUCαe}7ITnꬷd{h]n عbNoYK7г>?~/.ma=N}lih $̀mM/]Ӆk͢z}jPnjVj6uW+Ď1 g;N҆BmPʞ,ZsLCƏVapI "[p3(N/Pe3&b6Ϝ2gSDLٯ+RSju -g#\Z#(I6l-ع!GΒQ&' TE52£Bj5m]ʳ'69:O% 8I%t{?|4G&m:NWQyӧ5a@UQ jK0)MMJ`W75uGRjڒ$(noW[ZMn̑y(V#Wv=T0tK=կ| e|iN[+ոMMe5k h)vMm|Y{OEhgK[ 6ͭnw pMqMnc;$*ЭsAZsWzL-_窨LML ^UzKߜ]q-&Y S<ٵt7_U/#+Pal Tz&"Pgd[!cJ- <\A Jtlg_ul(+]k361GOmlsGZq7S/PE1ث~q~XUX2Md6p[:a-Wib+g_vluFoG RZT`e Ln#>+<>G1 HtE臞X K ʼnω[$ANRmQTDX˩f:T@ tnVm=ϑMwT壾{ZvDc ]] B./iX}/6y?ix_O3~-}}7sه}GibfT~!4~#7%S.A-A-{4{ByaHZqx~\C?(f:*'-QVr5,MPcVGR[M(Th'ItSVxQw'QLA68A^TUx'2kRVCN4Srn&wȆ\dYՇub3Dž0(2+h.Dv0hYSvf(UAr@x/}'x-8DQUfkVrEVoY8o;btG8ȋ8%!+-b}I!:((`9HTU#{X֍N%^ e!Ŏ;K͘\ESYh)U:ȊS(Ay;Q 6H.Ɍ+ّBU8$Y;nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/doc/logo.gif000066400000000000000000000102311161462365500221670ustar00rootroot00000000000000GIF89aorTD½aDLRE#ۨMJMzc3DL߹% dH¨F<0$Ȍrx­3$dK7oOa̫|lG(fdcdaļղsMԪG?G6.'^÷œi֊i\9)()mkmvA$$EEC6r_,)7zd\$ltywN̲پﱱiA⤈dxgPz̹Yiǹz_9yPH7 |_g!iYO:*vk^ᱷw(Ȧd9 p\WVWwvwXH5I\\\LhK*x$8V9d\dԄw626i,˯")68wS*lDֲD?2@Ɗ++\D@ÆE5&AJ ؽۗ k~J5d/L/4lp0 a2!˧)u9ppB.4(@x, T\6 VX!a(Pi| 6WȇrxC Ab(!(d A?蠋!Ai Ai DBHnJ(7|S.'!b Q)2 d@d%Atv4؟QnH%58!Tb4P<(`AǀX SBJ~l!MZ0Qd5J " _dF'XedG(7xq.14uO!`E S4.@p9ep |!l-_@0L= D!ܶ AD4D"O`QM0`6;̝$ V`A ACnO k/V C9̘ ^`$\0/HB 3+g S$tR`Qmm(ٕ $Oyw lG% [AJd I}D,I 4pAv49P#6L=  ,27 or,b@ 0C}tH M! 
A" '![+HO ԎNo %FxR*G>8<0~F!tZ(3$TC["r 6AE(·c8!_< !0D)ZQhB,ދH#57~AC"HPWp'X* B/d$ bQAn&`@ d|hMPḍB I(K"2h 5a!C=\@?AzX<&F+Dv|P`kP f؃Dl!@8ZG2@@8"(,8((f Q, 6 B!$h/@ @+a@ @Q N3 \jX9ȀCD7?h$6Cp+q`^uAa /@@6HA;(Lq(vⰛD @P@ qGy a!\1>AG9V 4acX -`pE\iB*̳WbuP +(V P2XXbr'~M^\iԸ ,pIn|' D #r@h Cd¹SEу0G#Oe8 @E&nCH&[](CkrfM"|Dh2.}&Pq@DoqY̭DCv3[B % pBd{# xx!=!KH!ĉe"{ )wӎCLHVjH9c! ŐibcCQ)APa@)4BE,<BZm`_!N8O!R(Ph9U= ~2ALj3%`9X@X`$fe!TBaʫ" =  $hSRu xA*e(d ,Al } 1  # X@:fB1BSPA:QgA. hA/8@p<1 d"7 .@=(B/qU?` PKQp V m \ST@p '\ q55Sh0$mCUo֐@ `Pua` ^P`HWפLC W/ I'k  @au .` l 2PАR` 4&Ѐ p$%RpC  00 Sp@V0JV@I_fW S`00 /@/` w?S~tL?P ` SVR"^/r%u)C e!/_H#!Xqp q#6#g7 UQ& L`X 5t`$+07==Q]^o Y !v^67o8У;#v7O2 `J C7d57w cqirpL@ a'$#֗0 ۩S$^:a:c٘ ANTLR Specification: Meta Language

ANTLR Meta-Language

ANTLR accepts three types of grammar specifications -- parsers, lexers, and tree-parsers (also called tree-walkers). Because ANTLR uses LL(k) analysis for all three grammar variants, the grammar specifications are similar, and the generated lexers and parsers behave similarly. The generated recognizers are human-readable and you can consult the output to clear up many of your questions about ANTLR's behavior.

Meta-Language Vocabulary

Whitespace. Spaces, tabs, and newlines are separators in that they can separate ANTLR vocabulary symbols such as identifiers, but are ignored beyond that. For example, "FirstName LastName" appears as a sequence of two token references to ANTLR not token reference, space, followed by token reference.

Comments. ANTLR accepts C-style block comments and C++-style line comments. Java-style documenting comments are allowed on grammar classes and rules, which are passed to the generated output if requested. For example,

/**This grammar recognizes simple expressions
 * @author Terence Parr
 */
class ExprParser;

/**Match a factor */
factor : ... ;

Characters. Character literals are specified just like in Java. They may contain octal-escape characters (e.g., '\377'), Unicode characters (e.g., '\uFF00'), and the usual special character escapes recognized by Java ('\b', '\r', '\t', '\n', '\f', '\'', '\\'). In lexer rules, single quotes represent a character to be matched on the input character stream. Single-quoted characters are not supported in parser rules.

End of file. The EOF token is automatically generated for use in parser rules:

rule : (statement)+ EOF;

You can test for EOF_CHAR in actions of lexer rules:

// make sure nothing but newline or
// EOF is past the #endif
ENDIF
{
  boolean eol=false;
}
     :   "#endif"
         ( ('\n' | '\r') {eol=true;} )?
         {
           if (!eol) {
             if (LA(1)==EOF_CHAR) {error("EOF");}
             else {error("Invalid chars");}
           }
         }
     ;

While you can test for end-of-file as a character, it is not really a character--it is a condition.  You should instead override CharScanner.uponEOF(), in your lexer grammar:

/** This method is called by YourLexer.nextToken()
 *  when the lexer has
 * hit EOF condition. EOF is NOT a character.
 * This method is not called if EOF is reached
 * during syntactic predicate evaluation or during
 * evaluation of normal lexical rules, which
 * presumably would be an IOException. This
 * traps the "normal" EOF * condition.
 *
 * uponEOF() is called after the complete evaluation
 * of the previous token and only if your parser asks
 * for another token beyond that last non-EOF token.
 *
 * You might want to throw token or char stream
 * exceptions like: "Heh, premature eof" or a retry
 * stream exception ("I found the end of this file,
 * go back to referencing file").
 */
public void uponEOF()
  throws TokenStreamException, CharStreamException
{
}

The end-of-file situation is a bit nutty (since version 2.7.1) because Terence used -1 as a char not an int (-1 is '\uFFFF'...oops).

Strings. String literals are sequences of characters enclosed in double quotes. The characters in the string may be represented using the same escapes (octal, Unicode, etc.) that are valid in character literals. Currently, ANTLR does not actually allow Unicode characters within string literals (you have to use the escape). This is because the antlr.g file sets the charVocabulary option to ascii.

In lexer rules, strings are interpreted as sequences of characters to be matched on the input character stream (e.g., "for" is equivalent to 'f' 'o' 'r').

In parser rules, strings represent tokens, and each unique string is assigned a token type. However, ANTLR does not create lexer rules to match the strings. Instead, ANTLR enters the strings into a literals table in the associated lexer. ANTLR will generate code to test the text of each token against the literals table, and change the token type when a match is encountered before handing the token off to the parser. You may also perform the test manually -- the automatic code-generation is controllable by a lexer option.

You may want to use the token type value of a string literal in your actions, for example in the synchronization part of an error-handler. For string literals that consist of alphabetic characters only, the string literal value will be a constant with a name like LITERAL_xxx, where xxx is the name of the token. For example, the literal "return" will have an associated value of LITERAL_return.   You may also assign a specific label to a literal using the tokens section.

Token references. Identifiers beginning with an uppercase letter are token references. The subsequent characters may be any letter, digit, or underscore. A token reference in a parser rule results in matching the specified token. A token reference in a lexer rule results in a call to the lexer rule for matching the characters of the token. In other words, token references in the lexer are treated as rule references.

Token definitions. Token definitions in a lexer have the same syntax as parser rule definitions, but refer to tokens, not parser rules. For example,

class MyParser extends Parser;
idList : ( ID )+;   // parser rule definition

class MyLexer extends Lexer;
ID : ( 'a'..'z' )+ ;   // token definition    

Rule references. Identifiers beginning with a lowercase letter are references to ANTLR parser rules. The subsequent characters may be any letter, digit, or underscore. Lexical rules may not reference parser rules.

Actions. Character sequences enclosed in (possibly nested) curly braces are semantic actions. Curly braces within string and character literals are not action delimiters.

Arguments Actions. Character sequences in (possibly nested) square brackets are rule argument actions. Square braces within string and character literals are not action delimiters. The arguments within [] are specified using the syntax of the generated language, and should be separated by commas.

codeBlock
[int scope, String name] // input arguments
returns [int x]          // return values
: ... ;

// pass 2 args, get return
testcblock
{int y;}
	:	y=cblock[1,"John"]
	;

Many people would prefer that we use normal parentheses for arguments, but parentheses are best used as grammatical grouping symbols for EBNF.

Symbols. The following table summarizes punctuation and keywords in ANTLR.

Symbol Description
(...) subrule
(...)* closure subrule zero-or-more
(...)+ positive closure subrule one-or-more
(...)? optional zero-or-one
{...} semantic action
[...] rule arguments
{...}? semantic predicate
(...)=> syntactic predicate
| alternative operator
.. range operator
~ not operator
. wildcard
= assignment operator
: label operator, rule start
; rule end
<...> element option
class grammar class
extends specifies grammar base class
returns specifies return type of rule
options options section
tokens tokens section
header header section
tokens token definition section

Header Section

A header section contains source code that must be placed before any ANTLR-generated code in the output parser. This is mainly useful for C++ output due to its requirement that elements be declared before being referenced. In Java, this can be used to specify a package for the resulting parser, and any imported classes. A header section looks like:

header {
  source code in the language generated by ANTLR;
}  

The header section is the first section in a grammar file. Depending on the selected target language more types of header sections might be possible. See the respective addendums.

Parser Class Definitions

All parser rules must be associated with a parser class. A grammar (.g) file may contain only one parser class definitions (along with lexers and tree-parsers). A parser class specification precedes the options and rule definitions of the parser. A parser specification in a grammar file often looks like:

{ optional class code preamble }
class YourParserClass extends Parser;
options
tokens
{ optional action for instance vars/methods }
parser rules...    

When generating code in an object-oriented language, parser classes result in classes in the output, and rules become member methods of the class. In C, classes would result in structs, and some name-mangling would be used to make the resulting rule functions globally unique.

The optional class preamble is some arbitrary text enclosed in {}. The preamble, if it exists, will be output to the generated class file immediately before the definition of the class.

Enclosing curly braces are not used to delimit the class because it is hard to associate the trailing right curly brace at the bottom of a file with the left curly brace at the top of the file. Instead, a parser class is assumed to continue until the next class statement.

You may specify a parser superclass that is used as the superclass of the generated parser. The superclass must be fully-qualified and in double-quotes; it must itself be a subclass of antlr.LLkParser. For example,

class TinyCParser extends Parser("antlr.debug.ParseTreeDebugParser");

Lexical Analyzer Class Definitions

A parser class results in parser objects that know how to apply the associated grammatical structure to an input stream of tokens. To perform lexical analysis, you need to specify a lexer class that describes how to break up the input character stream into a stream of tokens. The syntax is similar to that of a parser class:


{ optional class code preamble }
class YourLexerClass extends Lexer;
options
tokens
{ optional action for instance vars/methods }
lexer rules...

Lexical rules contained within a lexer class become member methods in the generated class. Each grammar (.g) file may contain only one lexer class. The parser and lexer classes may appear in any order.

The optional class preamble is some arbitrary text enclosed in {}. The preamble, if it exists, will be output to the generated class file immediately before the definition of the class.

You may specify a lexer superclass that is used as the superclass for the generated lexer. The superclass must be fully-qualified and in double-quotes; it must itself be a subclass of antlr.CharScanner.

Tree-parser Class Definitions

A tree-parser is like a parser, except that it processes a two-dimensional tree of AST nodes instead of a one-dimensional stream of tokens. Tree parsers are specified identically to parsers, except that the rule definitions may contain a special form to indicate descent into the tree. Again only one tree parser may be specified per grammar (.g) file.

{ optional class code preamble }
class YourTreeParserClass extends TreeParser;
options
tokens
{ optional action for instance vars/methods }
tree parser rules...

You may specify a tree parser superclass that is used as the superclass for the generated tree parser. The superclass must be fully-qualified and in double-quotes; it must itself be a subclass of antlr.TreeParser.

Options Section

Rather than have the programmer specify a bunch of command-line arguments to the parser generator, an options section within the grammar itself serves this purpose. This solution is preferable because it associates the required options with the grammar rather than ANTLR invocation. The section is preceded by the options keyword and contains a series of option/value assignments. An options section may be specified on both a per-file, per-grammar, per-rule, and per-subrule basis.

You may also specify an option on an element, such as a token reference.

Tokens Section

If you need to define an "imaginary" token, one that has no corresponding real input symbol, use the tokens section to define them.  Imaginary tokens are used often for tree nodes that mark or group a subtree resulting from real input.  For example, you may decide to have an EXPR node be the root of every expression subtree and DECL for declaration subtrees for easy reference during tree walking.  Because there is no corresponding input symbol for EXPR, you cannot reference it in the grammar to implicitly define it.  Use the following to define those imaginary tokens.

tokens {
    EXPR;
    DECL;
}

The formal syntax is:

tokenSpec : "tokens" LCURLY
            (tokenItem SEMI)+
            RCURLY
          ;

tokenItem : TOKEN ASSIGN STRING (tokensSpecOptions)?
          | TOKEN  (tokensSpecOptions)?
          | STRING (tokensSpecOptions)?
          ;
tokensSpecOptions
          : "<"
              id ASSIGN optionValue
              ( SEMI id ASSIGN optionValue )*
            ">"
          ;

You can also define literals in this section and, most importantly, assign to them a valid label as in the following example.

tokens {
    KEYWORD_VOID="void";
    EXPR;
    DECL;
    INT="int";
}

Strings defined in this way are treated just as if you had referenced them in the parser.

If a grammar imports a vocabulary containing a token, say T, then you may attach a literal to that token type simply by adding T="a literal" to the tokens section of the grammar.  Similarly, if the imported vocabulary defines a literal, say "_int32", without a label, you may attach a label via INT32="_int32" in the tokens section.

You may define options on the tokens defined in the tokens section.  The only option available so far is AST=class-type-to-instantiate.

// Define a bunch of specific AST nodes to build.
// Can override at actual reference of tokens in
// grammar.
tokens {
    PLUS<AST=PLUSNode>;
    STAR<AST=MULTNode>;
}

Grammar Inheritance

Object-oriented programming languages such as C++ and Java allow you to define a new object as it differs from an existing object, which provides a number of benefits. "Programming by difference" saves development/testing time and future changes to the base or superclass are automatically propagated to the derived or subclass. ANTLR supports grammar inheritance as a mechanism for creating a new grammar class based on a base class. Both the grammatical structure and the actions associated with the grammar may be altered independently.

Rule Definitions

Because ANTLR considers lexical analysis to be parsing on a character stream, both lexer and parser rules may be discussed simultaneously. When speaking generically about rules, we will use the term atom to mean an element from the input stream (be they characters or tokens).

The structure of an input stream of atoms is specified by a set of mutually-referential rules. Each rule has a name, optionally a set of arguments, optionally a "throws" clause, optionally an init-action, optionally a return value, and an alternative or alternatives. Each alternative contains a series of elements that specify what to match and where.

The basic form of an ANTLR rule is:

rulename
    :   alternative_1
    |   alternative_2
   ...
    |   alternative_n
    ;    

If parameters are required for the rule, use the following form:

rulename[formal parameters] : ... ;

If you want to return a value from the rule, use the returns keyword:

rulename returns [type id] : ... ;    

where type is a type specifier of the generated language, and id is a valid identifier of the generated language. In Java, a single type identifier would suffice most of the time, but returning an array of strings, for example, would require brackets:

ids returns [String[] s]: ( ID {...} )* ;    

Also, when generating C++, the return type could be complex such as:

ids returns [char *[] s]: ... ;    

The id of the returns statement is passed to the output code. An action may assign directly to this id to set the return value. Do not use a return instruction in an action.

To specify that your parser (or tree parser rule) can throw a non-ANTLR specific exception, use the exceptions clause.  For example, here is a simple parser specification with a rule that throws MyException:

class P extends Parser;

a throws MyException
  : A
  ;

ANTLR generates the following for rule a:

    public final void a()
        throws RecognitionException,
               TokenStreamException,
               MyException
    {
        try {
            match(A);
        }
        catch (RecognitionException ex) {
            reportError(ex);
            consume();
            consumeUntil(_tokenSet_0);
        }
    }

Lexer rules may not specify exceptions.

Init-actions are specified before the colon. Init-actions differ from normal actions because they are always executed regardless of guess mode. In addition, they are suitable for local variable definitions.

rule
{
    init-action
}
    :   ...
    ;    

Lexer rules. Rules defined within a lexer grammar must have a name beginning with an uppercase letter. These rules implicitly match characters on the input stream instead of tokens on the token stream. Referenced grammar elements include token references (implicit lexer rule references), characters, and strings. Lexer rules are processed in the exact same manner as parser rules and, hence, may specify arguments and return values; further, lexer rules can also have local variables and use recursion. See more about lexical analysis with ANTLR.

Parser rules. Parser rules apply structure to a stream of tokens whereas lexer rules apply structure to a stream of characters. Parser rules, therefore, must not reference character literals. Double-quoted strings in parser rules are considered token references and force ANTLR to squirrel away the string literal into a table that can be checked by actions in the associated lexer.

All parser rules must begin with lowercase letters.

Tree-parser rules. In a tree-parser, an additional special syntax is allowed to specify the match of a two-dimensional structure. Whereas a parser rule may look like:

rule : A B C;    

which means "match A B and C sequentially", a tree-parser rule may also use the syntax:

rule : #(A B C);  

which means "match a node of type A, and then descend into its list of children and match B and C". This notation can be nested arbitrarily, using #(...) anywhere an EBNF construct could be used, for example:

rule : #(A B #(C D (E)*) );      

Atomic Production elements

Character literal. A character literal can only be referred to within a lexer rule. The single character is matched on the character input stream. There is no need to escape regular expression meta symbols because regular expressions are not used to match lexical atoms. For example, '{' need not have an escape as you are specifying the literal character to match. Meta symbols are used outside of characters and string literals to specify lexical structure.

All characters that you reference are implicitly added to the overall character vocabulary (see option charVocabulary). The vocabulary comes into play when you reference the wildcard character, '.', or ~c ("every character but c").

You do not have to treat Unicode character literals specially. Just reference them as you would any other character literal. For example, here is a rule called LETTER that matches characters considered Unicode letters:


protected
LETTER
    :   '\u0024' |
        '\u0041'..'\u005a' |
        '\u005f' |
        '\u0061'..'\u007a' |
        '\u00c0'..'\u00d6' |
        '\u00d8'..'\u00f6' |
        '\u00f8'..'\u00ff' |
        '\u0100'..'\u1fff' |
        '\u3040'..'\u318f' |
        '\u3300'..'\u337f' |
        '\u3400'..'\u3d2d' |
        '\u4e00'..'\u9fff' |
        '\uf900'..'\ufaff'
    ;
You can reference this rule from another rule:

ID  :   (LETTER)+
    ;
ANTLR will generate code that tests the input characters against a bit set created in the lexer object.

String literal. Referring to a string literal within a parser rule defines a token type for the string literal, and causes the string literal to be placed in a hash table of the associated lexer. The associated lexer will have an automated check against every matched token to see if it matches a literal. If so, the token type for that token is set to the token type for that literal definition imported from the parser. You may turn off the automatic checking and do it yourself in a convenient rule like ID. References to string literals within the parser may be suffixed with an element option; see token references below.

Referring to a string within a lexer rule matches the indicated sequence of characters and is a shorthand notation. For example, consider the following lexer rule definition:

BEGIN : "begin" ;

This rule can be rewritten in a functionally equivalent manner:

BEGIN : 'b' 'e' 'g' 'i' 'n' ;    

There is no need to escape regular expression meta symbols because regular expressions are not used to match characters in the lexer.

Token reference. Referencing a token in a parser rule implies that you want to recognize a token with the specified token type. This does not actually call the associated lexer rule--the lexical analysis phase delivers a stream of tokens to the parser.

A token reference within a lexer rule implies a method call to that rule, and carries the same analysis semantics as a rule reference within a parser. In this situation, you may specify rule arguments and return values. See the next section on rule references.

You may also specify an option on a token reference.  Currently, you can only specify the AST node type to create from the token.  For example, the following rule instructs ANTLR to build INTNode objects from the INT reference:

i : INT<AST=INTNode> ;

The syntax of an element option is

<option=value; option=value; ...>

Wildcard. The "." wildcard within a parser rule matches any single token; within a lexer rule it matches any single character. For example, this matches any single token between the B and C:

r : A B . C;

Simple Production elements

Rule reference. Referencing a rule implies a method call to that rule at that point in the parse. You may pass parameters and obtain return values. For example, formal and actual parameters are specified within square brackets:

funcdef
    :   type ID "(" args ")" block[1]
    ;
block[int scope]
    :   "begin" ... {/*use arg scope*/} "end"
    ;

Return values that are stored into variables use a simple assignment notation:

set
{ Vector ids=null; }  // init-action
    :  "(" ids=idList ")"
    ;
idList returns [Vector strs]
{ strs = new Vector(); }   // init-action
    :  id:ID
       { strs.appendElement(id.getText()); }
       (
          "," id2:ID
          { strs.appendElement(id2.getText()); }
       )*
    ;    

Semantic action. Actions are blocks of source code (expressed in the target language) enclosed in curly braces. The code is executed after the preceding production element has been recognized and before the recognition of the following element. Actions are typically used to generate output, construct trees, or modify a symbol table. An action's position dictates when it is recognized relative to the surrounding grammar elements.

If the action is the first element of a production, it is executed before any other element in that production, but only if that production is predicted by the lookahead.

The first action of an EBNF subrule may be followed by ':'. Doing so designates the action as an init-action and associates it with the subrule as a whole, instead of any production. It is executed immediately upon entering the subrule -- before lookahead prediction for the alternates of the subrule -- and is executed even while guessing (testing syntactic predicates). For example:

(   {init-action}:
    {action of 1st production} production_1
|   {action of 2nd production} production_2
)?    

The init-action would be executed regardless of what (if anything) matched in the optional subrule.

The init-actions are placed within the loops generated for subrules (...)+ and (...)*.

Production Element Operators

Element complement. The "~" not unary operator must be applied to an atomic element such as a token identifier. For some token atom T, ~T matches any token other than T except end-of-file. Within lexer rules, ~'a' matches any character other than character 'a'. The sequence ~. ("not anything") is meaningless and not allowed.

The vocabulary space is very important for this operator. In parsers, the complete list of token types is known to ANTLR and, hence, ANTLR simply clones that set and clears the indicated element. For characters, you must specify the character vocabulary if you want to use the complement operator. Note that for large vocabularies like Unicode character blocks, complementing a character means creating a set with 2^16 elements in the worst case (about 8k). The character vocabulary is the union of characters specified in the charVocabulary option and any characters referenced in the lexer rules. Here is a sample use of the character vocabulary option:

class L extends Lexer;
options { charVocabulary = '\3'..'\377'; } // LATIN

DIGIT : '0'..'9';
SL_COMMENT : "//" (~'\n')* '\n'; 

Set complement. the not operator can also be used to construct a token set or character set by complementing another set. This is most useful when you want to match tokens or characters until a certain delimiter set is encountered. Rather than invent a special syntax for such sets, ANTLR allows the placement of ~ in front of a subrule containing only simple elements and no actions. In this specific case, ANTLR will not generate a subrule, and will instead create a set-match. The simple elements may be token references, token ranges, character literals, or character ranges. For example:

class P extends Parser;
r : T1 (~(T1|T2|T3))* (T1|T2|T3);

class L extends Lexer;
SL_COMMENT : "//" (~('\n'|'\r'))* ('\n'|'\r');

STRING : '"' (ESC | ~('\\'|'"'))* '"';
protected ESC : '\\' ('n' | 'r');

Range operator. The range binary operator implies a range of atoms may be matched. The expression 'c1'..'c2' in a lexer matches characters inclusively in that range. The expression T..U in a parser matches any token whose token type is inclusively in that range, which is of dubious value unless the token types are generated externally.

AST root operator. When generating abstract syntax trees (ASTs), token references suffixed with the "^" root operator force AST nodes to be created and added as the root of the current tree. This symbol is only effective when the buildAST option is set. More information about ASTs is also available.

AST exclude operator. When generating abstract syntax trees, token references suffixed with the "!" exclude operator are not included in the AST constructed for that rule. Rule references can also be suffixed with the exclude operator, which implies that, while the tree for the referenced rule is constructed, it is not linked into the tree for the referencing rule. This symbol is only effective when the buildAST option is set. More information about ASTs is also available.

Token Classes

By using a range operator, a not operator, or a subrule with purely atomic elements, you implicitly define an "anonymous" token or character class--a set that is very efficient in time and space. For example, you can define a lexer rule such as:

OPS : (PLUS | MINUS | MULT | DIV) ;

or

WS  : (' '|'\n'|'\t') ;

These describe sets of tokens and characters respectively that are easily optimized to simple, single, bit-sets rather than series of token and character comparisons.

Predicates

Semantic predicate. Semantic predicates are conditions that must be met at parse-time before parsing can continue past them. The functionality of semantic predicates is explained in more detail later. The syntax of a semantic predicate is a semantic action suffixed by a question operator:

{ expression }?

The expression must not have side-effects and must evaluate to true or false (boolean in Java or bool in C++). Since semantic predicates can be executed while guessing, they should not rely upon the results of actions or rule parameters.

Syntactic predicate. Syntactic predicates specify the lookahead language needed to predict an alternative. Syntactic predicates are explained in more detail later. The syntax of a syntactic predicate is a subrule with a => operator suffix:


( lookahead-language ) => production

Where the lookahead-language can be any valid ANTLR construct including references to other rules. Actions are not executed, however, during the evaluation of a syntactic predicate.

Element Labels

Any atomic or rule reference production element can be labeled with an identifier (case not significant). In the case of a labeled atomic element, the identifier is used within a semantic action to access the associated Token object or character. For example,

assign
    :   v:ID "=" expr ";"
        { System.out.println(
            "assign to "+v.getText()); }
    ;

No "$" operator is needed to reference the label from within an action as was the case with PCCTS 1.xx.

Inside actions a token reference can be accessed as label to access the Token object, or as #label to access the AST generated for the token. The AST node constructed for a rule reference may be accessed from within actions as #label.

Labels on token references can also be used in association with parser exception handlers to specify what happens when that token cannot be matched.

Labels on rule references are used for parser exception handling so that any exceptions generated while executing the labeled rule can be caught.

EBNF Rule Elements

ANTLR supports extended BNF notation according to the following four subrule syntax / syntax diagrams:

( P1 | P2 | ... | Pn )
( P1 | P2 | ... | Pn )?
( P1 | P2 | ... | Pn )*
( P1 | P2 | ... | Pn )+

Interpretation Of Semantic Actions

Semantic actions are copied to the appropriate position in the output parser verbatim with the exception of AST action translation.

None of the $-variable notation from PCCTS 1.xx carries forward into ANTLR.

Semantic Predicates

A semantic predicate specifies a condition that must be met (at run-time) before parsing may proceed. We differentiate between two types of semantic predicates: (i) validating predicates that throw exceptions if their conditions are not met while parsing a production (like assertions) and (ii) disambiguating predicates that are hoisted into the prediction expression for the associated production.

Semantic predicates are syntactically semantic actions suffixed with a question mark operator:

{ semantic-predicate-expression }?

The expression may use any symbol provided by the programmer or generated by ANTLR that is visible at the point in the output the expression appears.

The position of a predicate within a production determines which type of predicate it is. For example, consider the following validating predicate (which appears at any non-left-edge position) that ensures an identifier is semantically a type name:

decl: "var" ID ":" t:ID
      { isTypeName(t.getText()) }?
    ;    

Validating predicates generate parser exceptions when they fail. The thrown exception is of type SemanticException. You can catch this and other parser exceptions in an exception handler.

Disambiguating predicates are always the first element in a production because they cannot be hoisted over actions, token, or rule references. For example, the first production of the following rule has a disambiguating predicate that would be hoisted into the prediction expression for the first alternative:


stat:   // declaration "type varName;"
        {isTypeName(LT(1))}? ID ID ";"
    |   ID "=" expr ";"            // assignment
    ;

If we restrict this grammar to LL(1), it is syntactically nondeterministic because of the common left-prefix: ID. However, the semantic predicate correctly provides additional information that disambiguates the parsing decision. The parsing logic would be:

if ( LA(1)==ID && isTypeName(LT(1)) ) {
    match production one
}
else if ( LA(1)==ID ) {
    match production two
}
else error    

Formally, in PCCTS 1.xx, semantic predicates represented the semantic context of a production. As such, the semantic AND syntactic context (lookahead) could be hoisted into other rules. In ANTLR, predicates are not hoisted outside of their enclosing rule. Consequently, rules such as:

type : {isType(t)}? ID ;

are meaningless. On the other hand, this "semantic context" feature caused considerable confusion to many PCCTS 1.xx folks.

Syntactic Predicates

There are occasionally parsing decisions that cannot be rendered deterministic with finite lookahead. For example:

a   :   ( A )+ B
    |   ( A )+ C
    ;

The common left-prefix renders these two productions nondeterministic in the LL(k) sense for any value of k. Clearly, these two productions can be left-factored into:

a   :   ( A )+ (B|C)
    ;

without changing the recognized language. However, when actions are embedded in grammars, left-factoring is not always possible. Further, left-factoring and other grammatical manipulations do not result in natural (readable) grammars.

The solution is simply to use arbitrary lookahead in the few cases where finite LL(k) for k>1 is insufficient. ANTLR allows you to specify a lookahead language with possibly infinite strings using the following syntax:

( prediction block ) => production

For example, consider the following rule that distinguishes between sets (comma-separated lists of words) and parallel assignments (one list assigned to another):

stat:   ( list "=" )=> list "=" list
    |   list
    ;

If a list followed by an assignment operator is found on the input stream, the first production is predicted. If not, the second alternative production is attempted.

Syntactic predicates are a form of selective backtracking and, therefore, actions are turned off while evaluating a syntactic predicate so that actions do not have to be undone.

Syntactic predicates are implemented using exceptions in the target language if they exist. When generating C code, longjmp would have to be used.

We could have chosen to simply use arbitrary lookahead for any non-LL(k) decision found in a grammar. However, making the arbitrary lookahead explicit in the grammar is useful because you don't have to guess what the parser will be doing. Most importantly, there are language constructs that are ambiguous for which there exists no deterministic grammar! For example, the infamous if-then-else construct has no LL(k) grammar for any k. The following grammar is ambiguous and, hence, nondeterministic:


stat:   "if" expr "then" stat ( "else" stat )?
    |   ...
    ;

Given a choice between two productions in a nondeterministic decision, we simply choose the first one. This works out well in most situations. Forcing this decision to use arbitrary lookahead would simply slow the parse down.

Fixed depth lookahead and syntactic predicates

ANTLR cannot be sure what lookahead can follow a syntactic predicate (the only logical possibility is whatever follows the alternative predicted by the predicate, but erroneous input and so on complicates this), hence, ANTLR assumes anything can follow.  This situation is similar to the computation of lexical lookahead when it hits the end of the token rule definition.

Consider a predicate with a (...)* whose implicit exit branch forces a computation attempt on what follows the loop, which is the end of the syntactic predicate in this case.

class parse extends Parser;
a	:	(A (P)*) => A (P)*
	|	A
	;

The lookahead is artificially set to "any token" for the exit branch.   Normally, the P and the "any token" would conflict, but ANTLR knows that what you mean is to match a bunch of P tokens if they are present--no warning is generated.

If more than one path can lead to the end of the predicate in any one decision, ANTLR will generate a warning.  The following rule results in two warnings.

class parse extends Parser;
a	:	(A (P|)*) => A (P)*
	|	A
	;

The empty alternative can indirectly be the start of the loop and, hence, conflicts with the P.  Further, ANTLR detects the problem that two paths reach end of predicate.  The resulting parser will compile but never terminate the (P|)* loop.

The situation is complicated by k>1 lookahead.  When the nth lookahead depth reaches the end of the predicate, it records the fact and then code generation ignores the lookahead for that depth.

class parse extends Parser;
options {
	k=2;
}
a	:	(A (P B|P )*) => A (P)*
	|	A
	;

ANTLR generates a decision of the following form inside the (..)* of the predicate:

if ((LA(1)==P) && (LA(2)==B)) {
    match(P);
    match(B);
}
else if ((LA(1)==P) && (true)) {
    match(P);
}
else {
    break _loop4;
}

This computation works in all grammar types.

ANTLR Meta-Language Grammar

See antlr/antlr.g for the grammar that describes ANTLR input grammar syntax in ANTLR meta-language itself.

Version: $Id: //depot/code/org.antlr/release/antlr-2.7.5/doc/metalang.html#1 $ nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/doc/optional.gif000066400000000000000000000037421161462365500230650ustar00rootroot00000000000000GIF89a,@H*\ȰÇ#Jhň,X`Ǐ ;:D@KL0(G>T$K5mVL3B:?QQH*]%̡7J**DVgfz+ןBD#Q8דFߎE5,ڢ0ҝ('ϲL Nt%ƽ|+.Wq6 9m冔/' ֲX3I4l juiΨ)l"u xp`3چ6U'jzg1 ɕIyWs^_w|ɧ}`m}"tf4y6?Wm8XeXcuݥc"BndF"L6Pgm }ҕcVQ9v!R#aGd)C|G_NQߖy)㔘yHhyՠd(_QFZHp8j.ΕZH{⹢:)-)ckb*蛖:'gtRʠFמ'L:k=r.llB,R[-b۪r[UnΪi΅:q%+حM[\ziH0>.q'^ep pm\s.I(lS}>50lrƊ~,"# nZ飯,4l; 3󢸍9)zEmJ&lKuR]-&v\Kc=٦K6*=I+ͮi%ȚN- oG.yr+3QNS옲њ^ʪ~-{ȶɰ>leov+镵V9i cb[pO]޷ 5>߽{7|𻾷UkDך|i9ԯ|JH%/} ĿvBQH6P(D iGT!Xg)P|'졓~e+ ANTLR Options

Options

File, Grammar, and Rule Options

Rather than have the programmer specify a bunch of command-line arguments to the parser generator, an options section within the grammar itself serves this purpose. This solution is preferable because it associates the required options with the grammar rather than ANTLR invocation. The section is preceded by the options keyword and contains a series of option/value assignments surrounded by curly braces such as:


options {
   k = 2;
   tokenVocabulary = IDL;
   defaultErrorHandler = false;
}

The options section for an entire (.g) file, if specified, immediately follows the (optional) file header:

header { package X; }
options {language="FOO";}

The options section for a grammar, if specified, must immediately follow the ';' of the class specifier:

class MyParser extends Parser;
options { k=2; }

The options section for a rule, if specified, must immediately follow the rule name:

myrule[args] returns [retval]
   options { defaultErrorHandler=false; }
   :   // body of rule...
   ;    

The option names are not keywords in ANTLR, but rather are entries in a symbol table examined by ANTLR. The scope of option names is limited to the options section; identifiers within your grammar may overlap with these symbols.

The only ANTLR options not specified in the options section are things that do not vary with the grammar, but rather with the invocation of ANTLR itself. The best example is debugging information. Typically, the programmer will want a makefile to change an ANTLR flag indicating a debug or release build.

Options supported in ANTLR

Key for the type column: F=file, G=grammar, R=rule, L=lexer, S=subrule, C=C++ only.

Symbol Type Description
language F Set the generated language
k G Set the lookahead depth
importVocab G Initial grammar vocabulary
exportVocab G Vocabulary exported from grammar
testLiterals LG,LR Generate literal-testing code
defaultErrorHandler G,R Control default exception-handling
greedy S False implies you want subrule loop, (..)* and (..)+, to exit when it sees lookahead consistent with what follows the loop.
codeGenMakeSwitchThreshold G Control code generation
codeGenBitsetTestThreshold G Control code generation
buildAST G Set automatic AST construction in Parser (transform mode in Tree-Parser)
analyzerDebug G Spit out lots of debugging information while performing grammar analysis.
codeGenDebug G Spit out lots of debugging information while doing code generation.
ASTLabelType G Specify the type of all user-defined labels, overrides default of AST.
charVocabulary LG Set the lexer character vocabulary
interactive G Both the lexer and the parser have an interactive option, which defaults to "false". See the parser speed section above.
caseSensitive LG Case is ignored when comparing against character and string literals in the lexer. The case of the input stream is maintained when stored in the token objects.
ignore LR Specify a lexer rule to use as whitespace between lexical rule atomic elements (chars, strings, and rule references). The grammar analysis and, hence, the lookahead sets are aware of the whitespace references. This is a lexer rule option.
paraphrase LR An easy way to specify a string to use in place of the token name during error processing.
caseSensitiveLiterals LG Case is ignored when comparing tokens against the literals table.
classHeaderPrefix G Replace the usual class prefix ("public" in Java) for the enclosing class definition.
classHeaderSuffix G Append a string to the enclosing class definition. In Java, this amounts to a comma-separated list of interfaces that your lexer, parser, or tree walker must implement.
mangleLiteralPrefix F Sets the prefix for the token type definitions of literals rather than using the default of "TOKEN_".
warnWhenFollowAmbig S Warnings will be printed when the lookahead set of what follows a subrule containing an empty alternative conflicts with a subrule alternative or when the implicit exit branch of a closure loop conflicts with an alternative.  The default is true.
generateAmbigWarnings S When true, no ambiguity/nondeterminism warning is generated for the decision associated with the subrule.  Use this very carefully--you may change the subrule and miss an ambiguity because of the option.  Make very sure that the ambiguity you mask is handled properly by ANTLR.  ANTLR-generated parsers resolve ambiguous decisions by consuming input as soon as possible (or by choosing the alternative listed first).

See the Java and HTML grammars for proper use of this option.  A comment should be supplied for each use indicating why it is ok to shut off the warning.

filter LG When true, the lexer ignores any input not exactly matching one of the nonprotected lexer rules.  When set to a rule name, the filter option using the rule to parse input characters between valid tokens or those tokens of interest.
namespace FGC When set, all the C++ code generated is wrapped in the namespace mentioned here.
namespaceStd FGC When set, the ANTLR_USE_NAMESPACE(std) macros in the generated C++ code are replaced by this value. This is a cosmetic option that only makes the code more readable. It does not replace this macro in the support C++ files. Note: use this option directly after setting the language to C++.
namespaceAntlr FGC When set, the ANTLR_USE_NAMESPACE(antlr) macros in the generated C++ code are replaced by this value. This is a cosmetic option that only makes the code more readable. It does not replace this macro in the support C++ files. Note: use this option directly after setting the language to C++.
genHashLines FGC Boolean toggle, when set to 'true' #line <linenumber> "filename" lines are inserted in the generated code so compiler errors/warnings refer the .g files.
noConstructors FGLC Boolean toggle, when set to 'true' the default constructors for the generated lexer/parser/treewalker are omitted. The user then has the option to specify them himself (with extra initializers etc.)

language: Setting the generated language

ANTLR supports multiple, installable code generators. Any code-generator conforming to the ANTLR specification may be invoked via the language option. The default language is "Java", but "Cpp" and "CSharp" are also supported.   The language option is specified at the file-level, for example:

header { package zparse; }
options { language="Java"; }
... classes follow ...    

k: Setting the lookahead depth

You may set the lookahead depth for any grammar (parser, lexer, or tree-walker), by using the k= option:

class MyLexer extends Lexer;
options { k=3; }
...

Setting the lookahead depth changes the maximum number of tokens that will be examined to select alternative productions, and test for exit conditions of the EBNF constructs (...)?, (...)+, and (...)*. The lookahead analysis is linear approximate (as opposed to full LL(k) ). This is a bit involved to explain in detail, but consider this example with k=2:

r :  ( A B | B A )
  |  A A
  ;

Full LL(k) analysis would resolve the ambiguity and produce a lookahead test for the first alternate like:

if ( (LA(1)==A && LA(2)==B) || (LA(1)==B && LA(2)==A) )

However, linear approximate analysis would logically OR the lookahead sets at each depth, resulting in a test like:

if ( (LA(1)==A || LA(1)==B) && (LA(2)==A || LA(2)==B) )

Which is ambiguous with the second alternate for {A,A}. Because of this, setting the lookahead depth very high tends to yield diminishing returns in most cases, because the lookahead sets at large depths will include almost everything.

importVocab: Initial Grammar Vocabulary

[See the documentation on vocabularies for more information]

To specify an initial vocabulary (tokens, literals, and token types), use the importVocab grammar option.

class MyParser extends Parser;
options {
   importVocab=V;
}

ANTLR will look for VTokenTypes.txt in the current directory and preload the token manager for MyParser with the enclosed information.

This option is useful, for example, if you create an external lexer and want to connect it to an ANTLR parser. Conversely, you may create an external parser and wish to use the token set with an ANTLR lexer. Finally, you may find it more convenient to place your grammars in separate files, especially if you have multiple tree-walkers that do not add any literals to the token set.

The vocabulary file has an identifier on the first line that names the token vocabulary that is followed by lines of the form ID=value or "literal"=value. For example:

ANTLR // vocabulary name
"header"=3
ACTION=4
COLON=5
SEMI=6
...

A file of this form is automatically generated by ANTLR for each grammar.

Note: you must take care to run ANTLR on the vocabulary-generating grammar files before you run ANTLR on the vocabulary-consuming grammar files.

exportVocab: Naming Export Vocabulary

[See the documentation on vocabularies for more information]

The vocabulary of a grammar is the union of the set of tokens provided by an importVocab option and the set of tokens and literals defined in the grammar.  ANTLR exports a vocabulary for each grammar whose default name is the same as the grammar.   So, the following grammar yields a vocabulary called P:

class P extends Parser;
a : A;

ANTLR generates files PTokenTypes.txt and PTokenTypes.java.

You can specify the name of the exported vocabulary with the exportVocab option.   The following grammar generates a vocabulary called V not P.

class P extends Parser;
options {
  exportVocab=V;
}
a : A;

All grammars in the same file with the same vocabulary name contribute to the same vocabulary (and resulting files).  If the grammars were in separate files, on the other hand, they would all overwrite the same file.  For example, the following parser and lexer grammars both may contribute literals and tokens to the MyTokens vocabulary.

class MyParser extends Parser;
options {
  exportVocab=MyTokens;
}
...

class MyLexer extends Lexer;
options {
  exportVocab=MyTokens;
}
... 

testLiterals: Generate literal-testing code

By default, ANTLR will generate code in all lexers to test each token against the literals table (the table generated for literal strings), and change the token type if it matches the table. However, you may suppress this code generation in the lexer by using a grammar option:

class L extends Lexer;
options { testLiterals=false; }
...

If you turn this option off for a lexer, you may re-enable it for specific rules. This is useful, for example, if all literals are keywords, which are special cases of ID:

ID
options { testLiterals=true; }
   : LETTER (LETTER | DIGIT)*
   ;

If you want to test only a portion of a token's text for a match in the literals table, explicitly test the substring within an action using method:

    public int testLiteralsTable(String text, int ttype) {...}

For example, you might want to test the literals table for just the tag word in an HTML word.

defaultErrorHandler: Controlling default exception-handling

By default, ANTLR will generate default exception handling code for a parser or tree-parser rule. The generated code will catch any parser exceptions, synchronize to the follow set of the rule, and return. This is a simple and often useful error-handling scheme, but it is not very sophisticated. Eventually, you will want to install your own exception handlers. ANTLR will automatically turn off generation of default exception handling for any rule where an exception handler is specified. You may also explicitly control generation of default exception handling on a per-grammar or per-rule basis. For example, this will turn off default error-handling for the entire grammar, but turn it back on for rule "r":

class P extends Parser;
options {defaultErrorHandler=false;}

r
options {defaultErrorHandler=true;}
: A B C;

For more information on exception handling in the lexer, go here.

codeGenMakeSwitchThreshold: controlling code generation

ANTLR will optimize lookahead tests by generating a switch statement instead of a series of if/else tests for rules containing a sufficiently large number of alternates whose lookahead is strictly LL(1). The option codeGenMakeSwitchThreshold controls this test. You may want to change this to control optimization of the parser. You may also want to disable it entirely for debugging purposes, by setting it to a large number:

class P extends Parser;
options { codeGenMakeSwitchThreshold=999; }
...

codeGenBitsetTestThreshold: controlling code generation

ANTLR will optimize lookahead tests by generating a bitset test instead of an if statement, for very complex lookahead sets. The option codeGenBitsetTestThreshold controls this test. You may want to change this to control optimization of the parser:

class P extends Parser;
// make bitset if test involves five or more terms
options { codeGenBitsetTestThreshold=5; }
...

You may also want to disable it entirely for debugging purposes, by setting it to a large number:

class P extends Parser;
options { codeGenBitsetTestThreshold=999; }
...      

buildAST: Automatic AST construction

In a Parser, you can tell ANTLR to generate code to construct ASTs corresponding to the structure of the recognized syntax. The option, if set to true, will cause ANTLR to generate AST-building code. With this option set, you can then use all of the AST-building syntax and support methods.

In a Tree-Parser, this option turns on "transform mode", which means an output AST will be generated that is a transformation of the input AST. In a tree-walker, the default action of buildAST is to generate a copy of the portion of the input AST that is walked. Tree-transformation is almost identical to building an AST in a Parser, except that the input is an AST, not a stream of tokens.

ASTLabelType: Setting label type

When you must define your own AST node type, your actions within the grammar will require lots of downcasting from AST (the default type of any user-defined label) to your tree node type; e.g.,

decl : d:ID {MyAST t=(MyAST)#d;}
     ;

This makes your code a pain to type in and hard to read.  To avoid this, use the grammar option ASTLabelType to have ANTLR automatically do casts and define labels of the appropriate type.

class ExprParser extends Parser;

options {
  buildAST=true;
  ASTLabelType = "MyAST";
}

expr : a:term ;

The type of #a within an action is MyAST not AST.

charVocabulary: Setting the lexer character vocabulary

ANTLR processes Unicode. Because of this, ANTLR cannot make any assumptions about the character set in use, else it would wind up generating huge lexers. Instead ANTLR assumes that the character literals, string literals, and character ranges used in the lexer constitute the entire character set of interest. For example, in this lexer:

class L extends Lexer;
A : 'a';
B : 'b';
DIGIT : '0' .. '9';

The implied character set is { 'a', 'b', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9' }. This can produce unexpected results if you assume that the normal ASCII character set is always used. For example, in:

class L extends Lexer;
A : 'a';
B : 'b';
DIGIT : '0' .. '9';
STRING: '"' (~'"')* '"';

The lexer rule STRING will only match strings containing 'a', 'b' and the digits, which is usually not what you want. To control the character set used by the lexer, use the charVocabulary option. This example will use a general eight-bit character set.

class L extends Lexer;
options { charVocabulary = '\3'..'\377'; }
...

This example uses the ASCII character set in conjunction with some values from the extended Unicode character set:


class L extends Lexer;
options {
	charVocabulary = '\3'..'\377' | '\u1000'..'\u1fff';
}
...

warnWhenFollowAmbig

[Warning: you should know what you are doing before you use this option.  I deliberately made it a pain to shut warnings off (rather than a single character operator) so you would not just start turning off all the warnings.  I thought for a long time before implementing this exact mechanism.  I recommend a comment in front of any use of this option that explains why it is ok to hush the warning.]

This subrule option is true by default and controls the generation of nondeterminism (ambiguity) warnings when comparing the FOLLOW lookahead sets for any subrule with an empty alternative and any closure subrule such as (..)+ and (...)*.  For example, the following simple rule has a nondeterministic subrule, which arises from a language ambiguity that you could attach an ELSE clause to the most recent IF or to an outer IF because the construct can nest.

stat	:	"if" expr "then" stat ("else" stat)?
	|	ID ASSIGN expr SEMI
	;

Because the language is ambiguous, the context-free grammar must be ambiguous and the resulting parser nondeterministic (in theory).  However, being the practical language folks that we are, we all know you can trivially solve this problem by having ANTLR resolve conflicts by consuming input as soon as possible; I have yet to see a case where this was the wrong thing to do, by the way.  This option, when set to false, merely informs ANTLR that it has made the correct assumption and can shut off an ambiguity related to this subrule and an empty alternative or exit path.  Here is a version of the rule that does not yield a warning message:

 

stat	:	"if" expr "then" stat
		(
		    // standard if-then-else ambig
		    options {
		        warnWhenFollowAmbig=false;
		    }
		:	"else" stat
		)?
	|	ID ASSIGN expr SEMI
	;

One important note: This option does not affect non-empty alternatives.  For example, you will still get a warning for the following subrule between alts 1 and 3 (upon lookahead A):

(
	options {
		warnWhenFollowAmbig=false;
	}
:	A
|	B
|	A
)

Further, this option is insensitive to lookahead.  Only completely empty alternatives count as candidate alternatives for hushing warnings.  So, at k=2, just because ANTLR can see past alternatives with single tokens, you still can get warnings.

Command Line Options

-o outputDir specify output directory where all output generated.
-glib supergrammarFile Specify a file with a supergrammar for the generated file.
-debug launch the ParseView debugger upon parser invocation.  Unless you have downloaded and unzipped the debugger over the top of the standard ANTLR distribution, the code emanating from ANTLR with this option will not compile (likewise for Swing).
-html generate a HTML file from your grammar without actions and so on.   This is only a prototype, but seems to do something useful.   It only works for parsers, not lexers or tree parsers.
-docbook generate a docbook SGML file from your grammar without actions and so on.   This is only a prototype, but seems to do something useful.   It only works for parsers, not lexers or tree parsers.
-diagnostic generate a text file from your grammar with a lot of debugging info.
-trace have all rules call traceIn/traceOut.
-traceParser have parser rules call traceIn/traceOut.
-traceLexer have lexer rules call traceIn/traceOut.
-traceTreeParser have tree walker rules call traceIn/traceOut.
-h|-help|--help help message.

Version: $Id: //depot/code/org.antlr/release/antlr-2.7.5/doc/options.html#1 $ nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/doc/posclosure.gif000066400000000000000000000037571161462365500234440ustar00rootroot00000000000000GIF89a,@H*\ȰÇ#JHŋ3"DDŽ? )dG&3Lrʖ_F9K|M+_ T)HuzPUn4*ѯEÊKVWSbu-˶nƝ[̜Pn ՁCyV-_zCXM~jE=jƞc<-hמ:5dA[M۵=~Wan{ֽϞj{ΙKNQgWY=u#sgn[4l-7ih l;KueqY'|pWi}Wr=WX-I(X>!uNI by2fn<7X{@2SD7#|-w$VUbWPJ@!zKf\vZ`)cjދj`u gAXx:W'E yyzXm1nDqB~^o"Fh _~&u}Q Vۢ4i[YcN8!^igsJ*g~Ҋk9I&qj9ilU9du'lrh Դ) iSj)m쬽2jdjY{k/l)|K _}%Y/G DmU [O*0¨rŀ2}NfVSY.<X@=>'L7PG-5 #V#5mfZ\5U} f{j;}}yM,3ƣ}{7[,g$m+A1yqLo%r盇mx#v-Y %skXH덑 #Wg8?y* t~ HJ҉b. ưtI&PA C* TCB]\ :urjpl%qiK".|q@\{6ۙnf9SSHГLѦ$ŐB;nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/doc/python-runtime.html000066400000000000000000000540671161462365500244470ustar00rootroot00000000000000 Notes for using the ANTLR Python Code Generator

Python Code Generator for ANTLR 2.7.5

With the release of ANTLR 2.7.5, you can now generate your Lexers, Parsers and TreeParsers in Python. This feature extends the benefits of ANTLR's predicated-LL(k) parsing technology to the Python language and platform.

To be able to build and use the Python language Lexers, Parsers and TreeParsers, you will need to have the ANTLR Python runtime library installed in your Python path. The Python runtime model is based on the existing runtime model for Java and is thus immediately familiar. The Python runtime and the Java runtime are very similar although there are a number of subtle (and not so subtle) differences. Some of these result from differences in the respective runtime environments.

ANTLR Python support was contributed (and is to be maintained) by Wolfgang Haefelinger and Marq Kole.

Building the ANTLR Python Runtime

The ANTLR Python runtime source and build files are completely integrated in the ANTLR build process.The ANTLR runtime support module for Python is located in the lib/python subdirectory of the ANTLR distribution. Installation of the Python runtime support is enabled automatically if Python can be found on your system by the configure script.

With Python support enabled the current distribution will look for the presence of a python executable of version 2.2 or higher. If it has found such a beast, it will generate and install the ANTLR Python runtime as part of the overall ANTLR building and installation process.

If the python distribution you are using is at an unusual location, perhaps because you are using a local installation instead of a system-wide one, you can provide the location of that python executable using the --with-python=<path> option for the configure script, for instance:

./configure --with-python=$HOME/bin/python2.3

Also, if the python executable is at a regular location, but has a name that differs from "python", you can specify the correct name through the --with-python=<path>, as shown above, or through environment variable $PYTHON

PYTHON=python2.3
export PYTHON
./configure

All the example grammars for the ANTLR Python runtime are built when ANTLR itself is built. They can be run in one go by running make test in the same directory where you ran the configure script in the ANTLR distribution. So after you've run configure you can do:

# Build ANTLR and all examples
make
# Run them
make test
# Install everything
make install

Note that make install will not add the ANTLR Python runtime (i.e. antlr.py) to your Python installation but rather install antlr.py in ${prefix}/lib. To be able to use antlr.py you would need to adjust Python's sys.path.

However, there a script is provided that let's you easily add antlr.py as module to your Python installation. After installation just run

${prefix}/sbin/pyantlr.sh install

Note that usually you need to be superuser in order to succeed. Also note that you can run this command later at any time again, for example, if you have a second Python installation etc. Just make sure that python is in your $PATH when running pyantlr.sh.

Note further that you can also do this to install ANTLR Python runtime immediatly after having called ./configure:

scripts/pyantlr.sh install

Specifying Code Generation

You can instruct ANTLR to generate your Lexers, Parsers and TreeParsers using the Python code generator by adding the following entry to the global options section at the beginning of your grammar file.

{
    language="Python";
}

After that things are pretty much the same as in the default java code generation mode. See the examples in examples/python for some illustrations.

One particular issue that is worth mentioning is the handling of comments in ANTLR Python. Java, C++, and C# all use the same lexical structures to define comments: // for single-line comments, and /* ... */ for block comments. Unfortunately, Python does not handle comments this way. It only knows about single-line comments, and these start off with a # symbol.

Normally, all comments outside of actions are actually comments in the ANTLR input language. These comments, and that is both block comments and single-line comments are translated into Python single-line comments.

Secondly, all comments inside actions should be comments in the target language, Python in this case. Unfortunately, if the actions contain ANTLR actions, such as $getText, the code generator seems to choke on Python comments as the # sign is also used in tree construction. The solution is to use Java/C++-style comments in all actions; these will be translated into Python comments by the ANTLR as it checks these actions for the presence of predefined action symbols such as $getText.

So, as a general issue: all comments in an ANTLR grammar for the Python target should be in Java/C++ style, not in Python style.

Python-Specific ANTLR Sections

  • header - specify additional import directives

    You can instruct the ANTLR Python code generator to import additional Python packages in your generated Lexer/Parser/TreeParser by adding code to the header section which must be the first section at the beginning of your ANTLR grammar file, apart from any other header sections.

    header {
       import os, sys
    }
    
  • header "__init__" - specify additional code in the __init__ method

    You can instruct the ANTLR Python code generator to include additional Python code in your generated Lexer/Parser/TreeParser by adding code to the init header section which must be the first section at the beginning of your ANTLR grammar file, apart from any other header sections. The code in the header is appended to the end of the __init__ method.

    header "__init__" {
       self.message = "This is the default message"
    }
    

    If your grammar file contains both a Lexer and a Parser (or any other multiple of definitions), the code in the __init__ header will be reproduced in the __init__ methods of all of these definitions without change. If you really want to update only one of the definitions, for instance, the __init__ method of the Lexer class you are creating, use

    header "<LexerGrammar>.__init__" {
       self.message = "This is the default message"
    }
    

    where <LexerGrammar> is the name of the Lexer grammar. The same construction also works with the Parsers and TreeParsers, of course.

    In the case both a generic init header and a grammar-specific header are present, the grammar-specific one will override the generic one.

  • header "__main__" - specify additional code after the class definition

    You can instruct the ANTLR Python code generator to add additional Python code at the end of your generated Lexer/Parser/TreeParser, so after the class definition itself by adding code to the __main__ header section which must be the first section at the beginning of your ANTLR grammar file, apart from any other header sections.

    header "__main__" {
        print "You cannot execute this file!"
    }
    

    If your grammar file contains both a Lexer and a Parser (or any other multiple of definitions), the code in the __main__ header will be reproduced at the end of all of the generated class definitions. If you really want to add code after only one of the definitions, for instance, after the Lexer class, use

    header "<LexerGrammar>.__main__" {
        print "You cannot execute this file!"
    }
    

    where <LexerGrammar> is the name of the Lexer grammar. The same construction also works with the Parsers and TreeParsers, of course.

    In the case both a generic init header and a grammar-specific header are present, the grammar-specific one will override the generic one. If no __main__ headers are present and the grammar is for a Lexer, automated test code for that lexer is automatically added at the end of the generated module. This can be prevented by providing an empty __main__ header. In the latter case it is good practise to provide a comment explaining why an empty header is present.

    header "<LexerGrammar>.__main__" {
        // Empty main header to prevent automatic test code from being added
        // to the generated lexer module.
    }
    

    This automated test code can be executed by running Python with the generated lexer file (<LexerGrammar>.py, where <LexerGrammar> is the name of the Lexer grammar) and providing some test input on stdin:

    python <LexerGrammar>.py < test.in
    

Python-Specific ANTLR Options

  • className - change the default name of the generated class

    options {
        className="Scanner";
    }
    

    If you are using the className option conjunction with the Python specific header options, there will be no collisions. The className option changes the class name, while the main headers require the use of the grammar name which will become the module name after code generation.

    header "ParrotSketch.__init__" {
        self.state = JohnCleese.select("dead", "pushing up daisies", \
                                       "no longer", "in Parrot Heaven")
        print "This parrot is", self.state
    }
    
    class ParrotSketch extends Lexer;
    
    options {
        className="Scanner";
    }
    

A Template Python ANTLR Grammar File

As the handling of modules — packages in Java speak — in Python differs from that in Java, the current approach in ANTLR to call both the file and the class they contain after the name of the grammar is kind of awkward. Instead, a different approach is chosen that better reflects the handling of modules in Python. The name of the generated Python file is still derived from the name of the grammar, but the name of the class is fixed to the particular kind of grammar. A lexer grammar will be used to generate a class Lexer; a parser grammar will be used to generate a class Parser; and a treeparser grammar will be used to generate a class Walker.

header {
    // gets inserted in the Python source file before any generated
    // declarations
    ...
}
header "__init__" {
    // gets inserted in the __init__ method of each of the generated Python
    // classes
    ...
}
header "MyParser.__init__" {
    // gets inserted in the __init__ method of the generated Python class
    // for the MyParser grammar
    ...
}
header "__main__" {
    // gets inserted at the end of each of the generated Python files in an
    // indented section preceeded by the conditional:
    // if __name__ == "__main__":
    ...
}
header "MyLexer.__main__" {
    // gets inserted at the end of the generated Python file for the MyLexer
    // grammar in an indented section preceeded by the conditional:
    // if __name__ == "__main__":
    // and preventing the insertion of automatic test code in the same place.
    ...
}
options {
    language  = "Python";
}
{
    // global code stuff that will be included in the 'MyParser.py' source
    // file just before the 'Parser' class below
    ...
}
class MyParser extends Parser;
options {
   exportVocab=My;
}
{
   // additional methods and members for the generated 'Parser' class
   ...
}
... generated RULES go here ...
{
   // global code stuff that will be included in the 'MyLexer' source file
   // just before the 'Lexer' class below
   ...
}
class MyLexer extends Lexer;
options {
   exportVocab=My;
}
{
   // additional methods and members for the generated 'Lexer' class
   ...
}
... generated RULES go here ...
{
   // global code stuff that will be included in the 'MyTreeParser' source
   // file just before the 'Walker' class below
   ...
}
class MyTreeParser extends TreeParser;
options {
   exportVocab=My;
}
{
   // additional methods and members for the generated 'Walker' class
   ...
}
... generated RULES go here ...

Version number in parentheses shows the tool version used to develop and test. It may work with older versions as well. Python 2.2 or better is required as some recent Python features (like super() for example) are being used.

More notes on using ANTLR Python

  • The API of the generated lexers, parsers, and treeparsers is supposed to be similar to the Java ones. However, calling a lexer is somewhat simplified:

    ### class "calcLexer extends Lexer" will generate python
    ### module "calcLexer" with class "Lexer". 
    import calcLexer
    ### read from stdin ..
    L = calcLexer.Lexer() 
    ### read from file "test.in" ..
    L = calcLexer.Lexer("test.in")
    ### open a file and read from it ..
    f = file("test.in", "r")
    L = calcLexer.Lexer(f)
    ### this works of course as well
    import sys
    L = calcLexer.Lexer(sys.stdin)
    ### use a shared input state
    L1 = calcLexer.Lexer(...)
    state = L1.inputState
    L2 = calcLexer.Lexer(state)
    
  • The loop for the lexer to retrieve token by token can be written as:

    lexer = calcLexer.Lexer()          ### create a lexer for calculator
    for token in lexer:
        ## do something with token
        print token
    
    or even:
    for token in calcLexer.Lexer():    ### create a lexer for calculator
        ## do something with token
        print token
    

    As an iterator is available for all TokenStreams, you can apply the same technique with a TokenStreamSelector.

  • However, writing this particular lexer loop is rarely necessary as it is generated by default in each generated lexer. Just run:

    python calcLexer.py < calc.in
    
    to test the generated lexer.
  • Symbolic token number, table of literals bitsets and bitset data functions are generated on file (module) scope instead of class scope. For example:

    import calcLexer      # import calc lexer module
      
    calcLexer.EOF_TYPE    # prints 1
    calcLexer.literals    # { ';': 11, 'end': 12, 'begin': 10 }
    
  • Comments in action should be in Java/C++ formats, ie. // and /* ... */ are valid comments. However, make sure that you put a comment before or after a statement, but not within. For example, this will not work:

    x = /* one */ 1
    

    The reason is that Python only supports single-line comments. Such a Python comment skips everything till end-of-line. Therefore in the translation of the comment a newline will be introduced on reaching */. The code above would result in the following Python code in the generated file:

    x = # one
    1
    

    which is probably not what you want.

  • The Lexer actions $newline, $nl and $skip have been introduced as language independent shortcuts for calling self.newline() ($newline, $nl) and _ttype = SKIP ($skip).
  • In Python arguments to function and method calls do not have a declared type. Also, functions and methods do not have to declare a return type. If you want to pass a value to a rule in your grammar, you can do so simply by providing the name of a variable.

    ident [symtable]
        :   ( 'a'..'z' | '0'..'9' )+
        ;
    

    Similarly, if you want a rule to pass a return value, you do not have to provide a type either. It is possible to provide a default value.

    sign returns [isPos = False]
        :    '-' { /* default value is OK */ }
        |    '+' { isPos = True }
        ;
    
  • The __init__ method of the generated Lexer, Parser, or TreeParser has the following heading:

    def __init__(self, *args, **kwargs):
        ...
    

    So if you need to pass special arguments to your generated class, you can use the **kwargs to check for a particular keyword argument, irrespective of any non-keyword arguments that you did provide. So if you have a TokenStreamSelector that you want to access locally, you can pass it to the Lexer in the following call:

    MySpecialLexer.Lexer(sys.stdin, selector=TokenStreamSelector())
    

    while in the __init__ header of this particular grammar you can specify the handling of the selector keyword argument in the following way:

    header "MyParser.__init__" {
        self.selector = None
        if kwargs.has_key("selector"):
            self.selector = kwargs["selector"]
            assert(isinstance(self.selector, TokenStreamSelector))
    
    }
    
  • Because of limitations in the lexer of the ANTLR compiler generator itself, you cannot use single quoted strings of more than one character in your Python code.
    So if you use a Python string like 'wink, wink, nudge, nudge' in one of your actions, ANTLR will give a parse error when you try to compile this grammar. Instead you should use double quotes: "wink, wink, nudge, nudge".

  • Unicode is supported but it's easy to run into errors if your terminal(output device) is not able to handle unicode chars.
    Here are some rules when using Unicode input:

    1. You need to wrap your input stream by a stream reader which translates bytes into unicode chars. This requires usually knowledge about your input's encoding. Assume for example that your input is 'latin1', you would do this:
      ### replace  stdin  with  a  wrapper that spits out
      ### unicode chars.       
      sys.stdin = codecs.lookup('latin1')[-2](sys.stdin)
      
      Here reading from stdin gets wrapped.
    2. When printing tokens etc containing Unicode chars it appears to be best to translate explicitly to a unicode string before printing. Consider:
      for token in unicode_l.Lexer() :
          print unicode(token)   ## explicit cast
      
      This explicit cast appears to be a bug in Python found during development (discussion still in progress).
nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/doc/runtime.html000066400000000000000000001222501161462365500231160ustar00rootroot00000000000000 ANTLR Specification: Run-time

Java Runtime Model


Programmer's Interface

In this section, we describe what ANTLR generates after reading your grammar file and how to use that output to parse input. The classes from which your lexer, token, and parser classes are derived are provided as well.

What ANTLR generates

ANTLR generates the following types of files, where MyParser, MyLexer, and MyTreeParser are names of grammar classes specified in the grammar file. You may have an arbitrary number of parsers, lexers, and tree-parsers per grammar file; a separate class file will be generated for each. In addition, token type files will be generated containing the token vocabularies used in the parsers and lexers. One or more token vocabularies may be defined in a grammar file, and shared between different grammars. For example, given the grammar file:

class MyParser extends Parser;
options {
  exportVocab=My;
}
... rules ...

class MyLexer extends Lexer;
options {
  exportVocab=My;
}
... rules ...

class MyTreeParser extends TreeParser;
options {
  exportVocab=My;
}
... rules ...

The following files will be generated:

  • MyParser.java. The parser with member methods for the parser rules.
  • MyLexer.java. The lexer with the member methods for the lexical rules.
  • MyTreeParser.java. The tree-parser with the member methods for the tree-parser rules.
  • MyTokenTypes.java. An interface containing all of the token types defined by your parsers and lexers using the exported vocabulary named My.
  • MyTokenTypes.txt. A text file containing all of the token types, literals, and paraphrases defined by parsers and lexers contributing vocabulary My.

The programmer uses the classes by referring to them:

  1. Create a lexical analyzer. The constructor with no arguments implies that you want to read from standard input.
  2. Create a parser and attach it to the lexer (or other TokenStream).
  3. Call one of the methods in the parser to begin parsing.

If your parser generates an AST, then get the AST value, create a tree-parser, and invoke one of the tree-parser rules using the AST.

MyLexer lex = new MyLexer();
MyParser p =
  new MyParser(lex,user-defined-args-if-any);
p.start-rule();
// and, if you are tree parsing the result...
MyTreeParser tp = new MyTreeParser();
tp.start-rule(p.getAST());

You can also specify the name of the token and/or AST objects that you want the lexer/parser to create. Java's support of dynamic programming makes this quite painless:

MyLexer lex = new MyLexer();
lex.setTokenObjectClass("mypackage.MyToken");
  // defaults to "antlr.CommonToken"
...
parser.setASTNodeClass("mypackage.MyASTNode");
  // defaults to "antlr.CommonAST"

Make sure you give a fully-qualified class name.

The lexer and parser can cause IOExceptions as well as RecognitionExceptions, which you must catch:

  CalcLexer lexer =
    new CalcLexer(new DataInputStream(System.in));
  CalcParser parser = new CalcParser(lexer);
  // Parse the input expression
  try {
    parser.expr();
  }
  catch (IOException io) {
    System.err.println("IOException");
  }
  catch(RecognitionException e) {
    System.err.println("exception: "+e);
  }

Multiple Lexers/Parsers With Shared Input State

Occasionally, you will want two parsers or two lexers to share input state; that is, you will want them to pull input from the same source token stream or character stream.   The section on multiple lexer "states" describes such a situation.

ANTLR factors the input variables such as line number, guessing state, input stream, etc... into a separate object so that another lexer or parser could share that state.  The LexerSharedInputState and ParserSharedInputState embody this factoring.   Method getInputState() can be used on either CharScanner or Parser objects.  Here is how to construct two lexers sharing the same input stream:

// create Java lexer
JavaLexer mainLexer = new JavaLexer(input);
// create javadoc lexer; attach to shared
// input state of java lexer
JavaDocLexer doclexer =
  new JavaDocLexer(mainLexer.getInputState());

Parsers with shared input state can be created similarly:

JavaDocParser jdocparser =
  new JavaDocParser(getInputState());
jdocparser.content(); // go parse the comment

Sharing state is easy, but what happens upon exception during the execution of the "subparser"?  What about syntactic predicate execution?  It turns out that invoking a subparser with the same input state is exactly the same as calling another rule in the same parser as far as error handling and syntactic predicate guessing are concerned.  If the parser is guessing before the call to the subparser, the subparser must continue guessing, right?  Exceptions thrown inside the subparser must exit the subparser and return to the enclosing error handler or syntactic predicate handler.

Parser Implementation

Parser Class

ANTLR generates a parser class (an extension of LLkParser) that contains a method for every rule in your grammar. The general format looks like:

public class MyParser extends LLkParser
    implements MyLexerTokenTypes
{
  protected MyParser(TokenBuffer tokenBuf, int k) {
    super(tokenBuf,k);
    tokenNames = _tokenNames;
  }
  public MyParser(TokenBuffer tokenBuf) {  
    this(tokenBuf,1);
  }
  protected MyParser(TokenStream lexer, int k) {
    super(lexer,k);
    tokenNames = _tokenNames;       
  }
  public MyParser(TokenStream lexer) {  
    this(lexer,1);
  }
  public MyParser(ParserSharedInputState state) {
    super(state,1);
    tokenNames = _tokenNames;
  }
  ...
  // add your own constructors here...
  rule-definitions
}
  

Parser Methods

ANTLR generates recursive-descent parsers, therefore, every rule in the grammar will result in a method that applies the specified grammatical structure to the input token stream. The general form of a parser method looks like:

public void rule()
  throws RecognitionException,
         TokenStreamException
{
  init-action-if-present
  if ( lookahead-predicts-production-1 ) {
     code-to-match-production-1
  }
  else if ( lookahead-predicts-production-2 ) {
     code-to-match-production-2
  }
  ...
  else if ( lookahead-predicts-production-n ) {
     code-to-match-production-n
  }
  else {
    // syntax error
    throw new NoViableAltException(LT(1));
  }
}
  This code results from a rule of the form:  
rule:   production-1
    |   production-2
   ...
    |   production-n
    ;
  

If you have specified arguments and a return type for the rule, the method header changes to:

/* generated from:
 *    rule(user-defined-args)
 *      returns return-type : ... ;
 */
public return-type rule(user-defined-args)
  throws RecognitionException,
         TokenStreamException
{
  ...
}
  

Token types are integers and we make heavy use of bit sets and range comparisons to avoid excessively-long test expressions.

EBNF Subrules

Subrules are like unlabeled rules, consequently, the code generated for an EBNF subrule mirrors that generated for a rule. The only difference is induced by the EBNF subrule operators that imply optionality or looping.

(...)? optional subrule. The only difference between the code generated for an optional subrule and a rule is that there is no default else-clause to throw an exception--the recognition continues on having ignored the optional subrule.

{
  init-action-if-present
  if ( lookahead-predicts-production-1 ) {
     code-to-match-production-1
  }
  else if ( lookahead-predicts-production-2 ) {
     code-to-match-production-2
  }
  ...
  else if ( lookahead-predicts-production-n ) {
     code-to-match-production-n
  }
}
  

Not testing the optional paths of optional blocks has the potential to delay the detection of syntax errors.

(...)* closure subrule. A closure subrule is like an optional looping subrule, therefore, we wrap the code for a simple subrule in a "forever" loop that exits whenever the lookahead is not consistent with any of the alternative productions.

{
  init-action-if-present
loop:
  do {
    if ( lookahead-predicts-production-1 ) {
       code-to-match-production-1
    }
    else if ( lookahead-predicts-production-2 ) {
       code-to-match-production-2
    }
    ...
    else if ( lookahead-predicts-production-n ) {
       code-to-match-production-n
    }
    else {
      break loop;
    }
  }
  while (true);
}
  

While there is no need to explicitly test the lookahead for consistency with the exit path, the grammar analysis phase computes the lookahead of what follows the block. The lookahead of what follows must be disjoint from the lookahead of each alternative otherwise the loop will not know when to terminate. For example, consider the following subrule that is nondeterministic upon token A.

( A | B )* A
  

Upon A, should the loop continue or exit? One must also ask if the loop should even begin. Because you cannot answer these questions with only one symbol of lookahead, the decision is non-LL(1).

Not testing the exit paths of closure loops has the potential to delay the detection of syntax errors.

As a special case, a closure subrule with one alternative production results in:

{
  init-action-if-present
loop:
  while ( lookahead-predicts-production-1 ) {
       code-to-match-production-1
  }
}
   

This special case results in smaller, faster, and more readable code.

(...)+ positive closure subrule. A positive closure subrule is a loop around a series of production prediction tests like a closure subrule. However, we must guarantee that at least one iteration of the loop is done before proceeding to the construct beyond the subrule.

{
  int _cnt = 0;
  init-action-if-present
loop:
  do {
    if ( lookahead-predicts-production-1 ) {
       code-to-match-production-1
    }
    else if ( lookahead-predicts-production-2 ) {
       code-to-match-production-2
    }
    ...
    else if ( lookahead-predicts-production-n ) {
       code-to-match-production-n
    }
    else if ( _cnt>1 ) {
      // lookahead predicted nothing and we've
      // done an iteration
      break loop;
    }
    else {
      throw new NoViableAltException(LT(1));
    }
    _cnt++;  // track times through the loop
  }
  while (true);
}
  

While there is no need to explicitly test the lookahead for consistency with the exit path, the grammar analysis phase computes the lookahead of what follows the block. The lookahead of what follows must be disjoint from the lookahead of each alternative otherwise the loop will not know when to terminate. For example, consider the following subrule that is nondeterministic upon token A.

( A | B )+ A
  

Upon A, should the loop continue or exit? Because you cannot answer this with only one symbol of lookahead, the decision is non-LL(1).

Not testing the exit paths of closure loops has the potential to delay the detection of syntax errors.

You might ask why we do not have a while loop that tests to see if the lookahead is consistent with any of the alternatives (rather than having a series of tests inside the loop with a break). It turns out that we can generate smaller code for a series of tests than one big one. Moreover, the individual tests must be done anyway to distinguish between alternatives so a while condition would be redundant.

As a special case, if there is only one alternative, the following is generated:

{
  init-action-if-present
  do {
    code-to-match-production-1
  }
  while ( lookahead-predicts-production-1 );
}
  

Optimization. When there are a large (where large is user-definable) number of strictly LL(1) prediction alternatives, then a switch-statement can be used rather than a sequence of if-statements. The non-LL(1) cases are handled by generating the usual if-statements in the default case. For example:

switch ( LA(1) ) {
  case KEY_WHILE :
  case KEY_IF :
  case KEY_DO :
    statement();
    break;
  case KEY_INT :
  case KEY_FLOAT :
    declaration();
    break;
  default :
    // do whatever else-clause is appropriate
}
  

This optimization relies on the compiler building a more direct jump (via jump table or hash table) to the ith production matching code. This is also more readable and faster than a series of bit set membership tests.

Production Prediction

LL(1) prediction. Any LL(1) prediction test is a simple set membership test. If the set is a singleton set (a set with only one element), then an integer token type == comparison is done. If the set degree is greater than one, a bit set is created and the single input token type is tested for membership against that set. For example, consider the following rule:

a : A | b ;
b : B | C | D | E | F;
  

The lookahead that predicts production one is {A} and the lookahead that predicts production two is {B,C,D,E,F}. The following code would be generated by ANTLR for rule a (slightly cleaned up for clarity):

public void a() {
  if ( LA(1)==A ) {
    match(A);
  }
  else if (token_set1.member(LA(1))) {
    b();
  }
}
  

The prediction for the first production can be done with a simple integer comparison, but the second alternative uses a bit set membership test for speed, which you probably didn't recognize as testing LA(1) member {B,C,D,E,F}. The complexity threshold above which bitset-tests are generated is user-definable.

We use arrays of long ints (64 bits) to hold bit sets. The ith element of a bitset is stored in the word number i/64 and the bit position within that word is i % 64. The divide and modulo operations are extremely expensive, but fortunately, a strength reduction can be done. Dividing by a power of two is the same as shifting right and modulo a power of two is the same as masking with that power minus one. All of these details are hidden inside the implementation of the BitSet class in the package antlr.collections.impl.

The various bit sets needed by ANTLR are created and initialized in the generated parser (or lexer) class.

Approximate LL(k) prediction. An extension of LL(1)...basically we do a series of up to k bit set tests rather than a single as we do in LL(1) prediction. Each decision will use a different amount of lookahead, with LL(1) being the dominant decision type.

Production Element Recognition

Token references. Token references are translated to:

match(token-type);
  

For example, a reference to token KEY_BEGIN results in:

match(KEY_BEGIN);
  

where KEY_BEGIN will be an integer constant defined in the MyParserTokenType interface generated by ANTLR.

String literal references. String literal references are references to automatically generated tokens to which ANTLR automatically assigns a token type (one for each unique string). String references are translated to:

match(T);
  

where T is the token type assigned by ANTLR to that token.

Character literal references. Referencing a character literal implies that the current rule is a lexical rule. Single characters, 't', are translated to:

match('t');
  

which can be manually inlined with:

if ( c=='t' ) consume();
else throw new MismatchedCharException(
               "mismatched char: '"+(char)c+"'");
   

if the method call proves slow (at the cost of space).

Wildcard references. In lexical rules, the wildcard is translated to:

consume();
  

which simply gets the next character of input without doing a test.

References to the wildcard in a parser rule results in the same thing except that the consume call will be with respect to the parser.

Not operator. When operating on a token, ~T is translated to:

matchNot(T);
 

When operating on a character literal, ~'t' is translated to:

matchNot('t');
  

Range operator. In parser rules, the range operator (T1..T2) is translated to:

matchRange(T1,T2);
   

In a lexical rule, the range operator for characters c1..c2 is translated to:

matchRange(c1,c2);
  

Labels. Element labels on atom references become Token references in parser rules and ints in lexical rules. For example, the parser rule:

a : id:ID {System.out.println("id is "+id);} ;
  would be translated to:  
public void a() {
  Token id = null;
  id = LT(1);
  match(ID);
  System.out.println("id is "+id);
}
  For lexical rules such as:  
ID : w:. {System.out.println("w is "+(char)w);};
  the following code would result:  
public void ID() {
  int w = 0;
  w = c;
  consume(); // match wildcard (anything)
  System.out.println("w is "+(char)w);
}
  

Labels on rule references result in AST references, when generating trees, of the form label_ast.

Rule references. Rule references become method calls. Arguments to rules become arguments to the invoked methods. Return values are assigned like Java assignments. Consider rule reference i=list[1] to rule:

list[int scope] returns int
    :   { return scope+3; }
    ;
  The rule reference would be translated to:  
i = list(1);
  

Semantic actions. Actions are translated verbatim to the output parser or lexer except for the translations required for AST generation and the following:

  • $FOLLOW(r): FOLLOW set name for rule r
  • $FIRST(r): FIRST set name for rule r

Omitting the rule argument implies you mean the current rule. The result type is a BitSet, which you can test via $FIRST(a).member(LBRACK) etc...

Here is a sample rule:

a : A {System.out.println($FIRST(a));} B
  exception
    catch [RecognitionException e] {    
        if ( $FOLLOW.member(SEMICOLON) ) {
        consumeUntil(SEMICOLON);
    }
    else {
        consume();
    }
    }
  ;
Results in
public final void a() throws RecognitionException, TokenStreamException {  
    try {
        match(A);
        System.out.println(_tokenSet_0);
        match(B);
    }
    catch (RecognitionException e) {
        if ( _tokenSet_1.member(SEMICOLON) ) {
            consumeUntil(SEMICOLON);
        }
        else {
            consume();
        }
    }
}

To add members to a lexer or parser class definition, add the class member definitions enclosed in {} immediately following the class specification, for example:

class MyParser;
{
   protected int i;
   public MyParser(TokenStream lexer,
        int aUsefulArgument) {
      i = aUsefulArgument;
   }
}
... rules ...

ANTLR collects everything inside the {...} and inserts it in the class definition before the rule-method definitions. When generating C++, this may have to be extended to allow actions after the rules due to the wacky ordering restrictions of C++.

Standard Classes

ANTLR constructs parser classes that are subclasses of the antlr.LLkParser class, which is a subclass of the antlr.Parser class. We summarize the more important members of these classes here. See Parser.java and LLkParser.java for details of the implementation.

public abstract class Parser {
   protected ParserSharedInputState inputState;
   protected ASTFactory ASTFactory;
   public abstract int LA(int i);
   public abstract Token LT(int i);
   public abstract void consume();
   public void consumeUntil(BitSet set) { ... }
   public void consumeUntil(int tokenType) { ... }
   public void match(int t)
      throws MismatchedTokenException { ... }
   public void matchNot(int t)
      throws MismatchedTokenException { ... }
   ...
}

public class LLkParser extends Parser {
   public LLkParser(TokenBuffer tokenBuf, int k_)
     { ... }
   public LLkParser(TokenStream lexer, int k_)
     { ... }
   public int LA(int i) { return input.LA(i); }
   public Token LT(int i) { return input.LT(i); }
   public void consume() { input.consume(); }
   ...
}

Lexer Implementation

Lexer Form

The lexers produced by ANTLR are a lot like the parsers produced by ANTLR. The only major differences are that (a) scanners use characters instead of tokens, and (b) ANTLR generates a special nextToken rule for each scanner which is a production containing each public lexer rule as an alternate. The name of the lexical grammar class provided by the programmer results in a subclass of CharScanner, for example

public class MyLexer extends antlr.CharScanner
  implements LTokenTypes, TokenStream
{
  public MyLexer(InputStream in) {
          this(new ByteBuffer(in));
  }
  public MyLexer(Reader in) {
          this(new CharBuffer(in));
  }
  public MyLexer(InputBuffer ib) {
          this(new LexerSharedInputState(ib));
  }
  public MyLexer(LexerSharedInputState state) {
          super(state);
          caseSensitiveLiterals = true;
          setCaseSensitive(true);
          literals = new Hashtable();
  }

  public Token nextToken() throws TokenStreamException {
     scanning logic
    ...
  }
  recursive and other non-inlined lexical methods
  ...
}
  

When an ANTLR-generated parser needs another token from its lexer, it calls a method called nextToken. The general form of the nextToken method is:

public Token nextToken()
  throws TokenStreamException {
  int tt;
  for (;;) {
     try {
        resetText();
        switch ( c ) {
        case for each char predicting lexical rule
           call lexical rule gets token type -> tt
        default :
           throw new NoViableAltForCharException(
               "bad char: '"+(char)c+"'");
        }
        if ( tt!=Token.SKIP ) {
           return makeToken(tt);
        }
     }
     catch (RecognitionException ex) {
        reportError(ex.toString());
     }
  }
}
  

For example, the lexical rules:

lexclass Lex;

WS   : ('\t' | '\r' | ' ') {_ttype=Token.SKIP;} ;
PLUS : '+';
MINUS: '-';
INT  : ( '0'..'9' )+ ;
ID   : ( 'a'..'z' )+ ;
UID  : ( 'A'..'Z' )+ ;
  would result in something like:  
public class Lex extends CharScanner
  implements TTokenTypes {
...
public Token nextToken()
    throws TokenStreamException {
    int _tt = Token.EOF_TYPE;
    for (;;) {
    try {
       resetText();
       switch ( _c ) {
       case '\t': case '\r': case ' ': 
           _tt=mWS();
           break;
       case '+': 
           _tt=mPLUS();
           break;
       case '-': 
           _tt=mMINUS();
           break;
       case '0': case '1': case '2': case '3': 
       case '4': case '5': case '6': case '7': 
       case '8': case '9': 
           _tt=mINT();
           break;
       case 'a': case 'b': case 'c': case 'd': 
       case 'e': case 'f': case 'g': case 'h': 
       case 'i': case 'j': case 'k': case 'l': 
       case 'm': case 'n': case 'o': case 'p': 
       case 'q': case 'r': case 's': case 't': 
       case 'u': case 'v': case 'w': case 'x': 
       case 'y': case 'z': 
           _tt=mID();
           break;
       case 'A': case 'B': case 'C': case 'D': 
       case 'E': case 'F': case 'G': case 'H': 
       case 'I': case 'J': case 'K': case 'L': 
       case 'M': case 'N': case 'O': case 'P': 
       case 'Q': case 'R': case 'S': case 'T': 
       case 'U': case 'V': case 'W': case 'X': 
       case 'Y': case 'Z': 
           _tt=mUID();
           break;
       case EOF_CHAR :
           _tt = Token.EOF_TYPE;
           break;
       default :
          throw new NoViableAltForCharException(
               "invalid char "+_c);
       }
       if ( _tt!=Token.SKIP ) {
           return makeToken(_tt);
       }
    }  // try
	catch (RecognitionException ex) {
	  reportError(ex.toString());
	}
	}  // for
}

public int mWS()
    throws RecognitionException,
           CharStreamException,
           TokenStreamException {
    int _ttype = WS;
    switch ( _c) {
    case '\t': 
        match('\t');
        break;
    case '\r': 
        match('\r');
        break;
    case ' ': 
        match(' ');
        break;
    default :
    {
        throw new NoViableAltForCharException(
               "no viable alternative for char: "+(char)_c);
    }
    }
     _ttype = Token.SKIP;
    return _ttype;
}

public int mPLUS()
    throws RecognitionException,
           CharStreamException,
           TokenStreamException {
    int _ttype = PLUS;
    match('+');
    return _ttype;
}

public int mMINUS()
    throws RecognitionException,
           CharStreamException,
           TokenStreamException {

    int _ttype = MINUS;
    match('-');
    return _ttype;
}

public int mINT()
    throws RecognitionException,
           CharStreamException,
           TokenStreamException {

    int _ttype = INT;
    {
    int _cnt=0;
    _loop:
    do {
        if ( _c>='0' && _c<='9')
          { matchRange('0','9'); }
        else
        if ( _cnt>=1 ) break _loop;
        else {
           throw new ScannerException(
              "no viable alternative for char: "+
                (char)_c);
        }
        _cnt++;
    } while (true);
    }
    return _ttype;
}

public int mID()
    throws RecognitionException,
           CharStreamException,
           TokenStreamException {
    int _ttype = ID;
    {
    int _cnt=0;
    _loop:
    do {
        if ( _c>='a' && _c<='z')
        { matchRange('a','z'); }
        else
        if ( _cnt>=1 ) break _loop;
        else {
            throw new NoViableAltForCharException(
               "no viable alternative for char: "+
                 (char)_c);
        }
        _cnt++;
        } while (true);
    }
    return _ttype;
}

public int mUID()
    throws RecognitionException,
           CharStreamException,
           TokenStreamException {

    int _ttype = UID;
    {
    int _cnt=0;
    _loop:
    do {
        if ( _c>='A' && _c<='Z')
        { matchRange('A','Z'); }
        else
        if ( _cnt>=1 ) break _loop;
        else {
            throw new NoViableAltForCharException(
               "no viable alternative for char: "+
                 (char)_c);
        }
        _cnt++;
    } while (true);
    }
    return _ttype;
}

}
  

ANTLR-generated lexers assume that you will be reading streams of characters. If this is not the case, you must create your own lexer.

Creating Your Own Lexer

To create your own lexer, your Java class that will doing the lexing must implement interface TokenStream, which simply states that you must be able to return a stream of tokens via nextToken:

/**This interface allows any object to
 * pretend it is a stream of tokens.
 * @author Terence Parr, MageLang Institute
 */
public interface TokenStream {
  public Token nextToken();
}
  

ANTLR will not generate a lexer if you do not specify a lexical class.

Launching a parser with a non-ANTLR-generated lexer is the same as launching a parser with an ANTLR-generated lexer:

HandBuiltLexer lex = new HandBuiltLexer(...);
MyParser p = new MyParser(lex);
p.start-rule();

The parser does not care what kind of object you use for scanning as long as it can answer nextToken.

If you build your own lexer, and the token values are also generated by that lexer, then you should inform the ANTLR-generated parsers about the token type values generated by that lexer. Use the importVocab in the parsers that use the externally-generated token set, and create a token definition file following the requirements of the importVocab option.

Lexical Rules

Lexical rules are essentially the same as parser rules except that lexical rules apply a structure to a series of characters rather than a series of tokens. As with parser rules, each lexical rule results in a method in the output lexer class.

Alternative blocks. Consider a simple series of alternatives within a block:

FORMAT : 'x' | 'f' | 'd';
  

The lexer would contain the following method:

public int mFORMAT() {
  if ( c=='x' ) {
    match('x');
  }
  else if ( c=='f' ) {
    match('f');
  }
  else if ( c=='d' ) {
    match('d');
  }
  else {
    throw new NoViableAltForCharException(
        "no viable alternative: '"+(char)c+"'");
  }
  return FORMAT;
}
  

The only real differences between lexical methods and grammar methods are that lookahead prediction expressions do character comparisons rather than LA(i) comparisons, match matches characters instead of tokens, a return is added to the bottom of the rule, and lexical methods throw CharStreamException objects in addition to TokenStreamException and RecognitionException objects.

Optimization: Non-Recursive lexical rules. Rules that do not directly or indirectly call themselves can be inlined into the lexer entry method: nextToken. For example, the common identifier rule would be placed directly into the nextToken method. That is, rule:

ID  :   ( 'a'..'z' | 'A'..'Z' )+
    ;
 

would not result in a method in your lexer class. This rule would become part of the resulting lexer as it would be probably inlined by ANTLR:

public Token nextToken() {
  switch ( c ) {
  cases for operators and such here
  case '0': // chars that predict ID token
  case '1':
  case '2':
  case '3':
  case '4':
  case '5':
  case '6':
  case '7':
  case '8':
  case '9':
    while ( c>='0' && c<='9' ) {
      matchRange('0','9');
    }
    return makeToken(ID);
  default :
    check harder stuff here like rules
      beginning with a..z
}
  

If not inlined, the method for scanning identifiers would look like:

public int mID() {
  while ( c>='0' && c<='9' ) {
    matchRange('0','9');
  }
  return ID;
}
  

where token names are converted to method names by prefixing them with the letter m. The nextToken method would become:

public Token nextToken() {
  switch ( c ) {
  cases for operators and such here
  case '0': // chars that predict ID token
  case '1':
  case '2':
  case '3':
  case '4':
  case '5':
  case '6':
  case '7':
  case '8':
  case '9':
    return makeToken(mID());
  default :
    check harder stuff here like rules
      beginning with a..z
}
  

Note that this type of range loop is so common that it should probably be optimized to:

while ( c>='0' && c<='9' ) {
  consume();
}
  

Optimization: Recursive lexical rules. Lexical rules that are directly or indirectly recursive are not inlined. For example, consider the following rule that matches nested actions:

ACTION
    :   '{' ( ACTION | ~'}' )* '}'
    ;
  

ACTION would be result in (assuming a character vocabulary of 'a'..'z', '{', '}'):

public int mACTION()
    throws RecognitionException,
           CharStreamException,
           TokenStreamException {

    int _ttype = ACTION;
    match('{');
    {
    _loop:
    do {
        switch ( _c) {
        case '{':
            mACTION();
            break;
        case 'a': case 'b': case 'c': case 'd':
        case 'e': case 'f': case 'g': case 'h':
        case 'i': case 'j': case 'k': case 'l':
        case 'm': case 'n': case 'o': case 'p':
        case 'q': case 'r': case 's': case 't':
        case 'u': case 'v': case 'w': case 'x':
        case 'y': case 'z':
            matchNot('}');
            break;
        default :
            break _loop;
        }
    } while (true);
    }
    match('}');
    return _ttype;
}
       

Token Objects

The basic token knows only about a token type:

public class Token {
  // constants
  public static final int MIN_USER_TYPE = 3;
  public static final int INVALID_TYPE = 0;
  public static final int EOF_TYPE = 1;
  public static final int SKIP = -1;
  
  // each Token has at least a token type
  int type=INVALID_TYPE;
  
  // the illegal token object
  public static Token badToken =
    new Token(INVALID_TYPE, "");
  
  public Token() {;}
  public Token(int t) { type = t; }
  public Token(int t, String txt) {
    type = t; setText(txt);
  }

  public void setType(int t) { type = t; }
  public void setLine(int l) {;}
  public void setColumn(int c) {;}
  public void setText(String t) {;}
  
  public int getType() { return type; }
  public int getLine() { return 0; }
  public int getColumn() { return 0; }
  public String getText() {...}
}

The raw Token class is not very useful.  ANTLR supplies a "common" token class that it uses by default, which contains the line number and text associated with the token:

public class CommonToken extends Token {
  // most tokens will want line, text information
  int line;
  String text = null;
 
  public CommonToken() {}
  public CommonToken(String s)  { text = s; }
  public CommonToken(int t, String txt) {
    type = t;
    setText(txt);
  }

  public void setLine(int l)    { line = l; }
  public int  getLine()         { return line; }
  public void setText(String s) { text = s; }
  public String getText()       { return text; }
}

ANTLR will generate an interface that defines the types of tokens in a token vocabulary. Parser and lexers that share this token vocabulary are generated such that they implement the resulting token types interface:

public interface MyLexerTokenTypes {
  public static final int ID = 2;
  public static final int BEGIN = 3;
  ...
}

ANTLR defines a token object for use with the TokenStreamHiddenTokenFilter object called CommonHiddenStreamToken:

public class CommonHiddenStreamToken
  extends CommonToken {
  protected CommonHiddenStreamToken hiddenBefore;
  protected CommonHiddenStreamToken hiddenAfter;

  public CommonHiddenStreamToken
    getHiddenAfter() {...}
  public CommonHiddenStreamToken
    getHiddenBefore() {...}
}

Hidden tokens are weaved amongst the normal tokens.  Note that, for garbage collection reasons, hidden tokens never point back to normal tokens (preventing a linked list of the entire token stream).

Token Lookahead Buffer

The parser must always have fast access to k symbols of lookahead. In a world without syntactic predicates, a simple buffer of k Token references would suffice. However, given that even LL(1) ANTLR parsers must be able to backtrack, an arbitrarily-large buffer of Token references must be maintained. LT(i) looks into the token buffer.

Fortunately, the parser itself does not implement the token-buffering and lookahead algorithm. That is handled by the TokenBuffer object. We begin the discussion of lookahead by providing an LL(k) parser framework:

public class LLkParser extends Parser {
   TokenBuffer input;
   public int LA(int i) {
      return input.LA(i);
   }
   public Token LT(int i) {
      return input.LT(i);
   }
   public void consume() {
      input.consume();
   }
}
       

All lookahead-related calls are simply forwarded to the TokenBuffer object. In the future, some simple caching may be performed in the parser itself to avoid the extra indirection, or ANTLR may generate the call to input.LT(i) directly.

The TokenBuffer object caches the token stream emitted by the scanner. It supplies LT() and LA() methods for accessing the kth lookahead token or token type, as well as methods for consuming tokens, guessing, and backtracking.

public class TokenBuffer {
   ...
   /** Mark another token for
    *  deferred consumption */
   public final void consume() {...}

   /** Get a lookahead token */
   public final Token LT(int i) { ... }

   /** Get a lookahead token value */
   public final int LA(int i) { ... }

   /**Return an integer marker that can be used to
    * rewind the buffer to its current state. */
   public final int mark() { ... }

   /**Rewind the token buffer to a marker.*/
   public final void rewind(int mark) { ... }
}

To begin backtracking, a mark is issued, which makes the TokenBuffer record the current position so that it can rewind the token stream. A subsequent rewind directive will reset the internal state to the point before the last mark.

Consider the following rule that employs backtracking:

stat:   (list EQUAL) => list EQUAL list
    |   list
    ;
list:   LPAREN (ID)* RPAREN
    ;
 

Something like the following code would be generated:

public void stat()
  throws RecognitionException,
         TokenStreamException
{
  boolean synPredFailed;
  if ( LA(1)==LPAREN ) { // check lookahead
    int marker = tokenBuffer.mark();
    try {
      list();
      match(EQUAL);
      synPredFailed = false;
    }
    catch (RecognitionException e) {
      tokenBuffer.rewind(marker);
      synPredFailed = true;
    }
  }
  if ( LA(1)==LPAREN && !synPredFailed ) {
    // test prediction of alt 1
    list();
    match(EQUAL);
    list();
  }
  else if ( LA(1)==LPAREN ) {
    list();
  }
}
      

The token lookahead buffer uses a circular token buffer to perform quick indexed access to the lookahead tokens. The circular buffer is expanded as necessary to calculate LT(i) for arbitrary i. TokenBuffer.consume() does not actually read more tokens. Instead, it defers the read by counting how many tokens have been consumed, and then adjusts the token buffer and/or reads new tokens when LA() or LT() is called.

Version: $Id: //depot/code/org.antlr/release/antlr-2.7.5/doc/runtime.html#1 $ nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/doc/sor.html000066400000000000000000000347431161462365500222470ustar00rootroot00000000000000 ANTLR Tree Parsers

ANTLR Tree Parsers

Or, The Entity Formerly Known As SORCERER

ANTLR 2.xx helps you build intermediate form trees (ASTs) by augmenting a grammar with tree operators, rewrite rules, and actions. ANTLR also allows you to specify the grammatical structure of ASTs, thus, supporting the manipulation or simple walking of trees to produce translations.

Formerly, a separate tool, SORCERER, was used to generate tree parsers, but ANTLR has taken over this role. ANTLR now builds recognizers for streams of characters, tokens, or tree nodes.

What's a tree parser?

Parsing is the application of grammatical structure to a stream of input symbols. ANTLR takes this further than most tools and considers a tree to be a stream of nodes, albeit in two dimensions. In fact, the only real difference in ANTLR's code generation for token stream parsing versus tree parsing lies in the testing of lookahead, rule-method definition headers, and the introduction of a two-dimensional tree structure code-generation template.

What kinds of trees can be parsed?

ANTLR tree parsers can walk any tree that implements the AST interface, which imposes a child-sibling like structure to whatever tree data-structure you may have. The important navigation methods are:

  • getFirstChild: Return a reference to the first child of the sibling list.
  • getNextSibling: Return a reference to the next child in the list of siblings.

Each AST node is considered to have a list of children, some text, and a "token type". Trees are self-similar in that a tree node is also a tree. An AST is defined completely as:


/** Minimal AST node interface used by ANTLR AST generation
 * and tree-walker.
 */
public interface AST {
    /** Add a (rightmost) child to this node */
    public void addChild(AST c);
    public boolean equals(AST t);
    public boolean equalsList(AST t);
    public boolean equalsListPartial(AST t);
    public boolean equalsTree(AST t);
    public boolean equalsTreePartial(AST t);
    public ASTEnumeration findAll(AST tree);
    public ASTEnumeration findAllPartial(AST subtree);
    /** Get the first child of this node; null if no children */
    public AST getFirstChild();
    /** Get the next sibling in line after this one */
    public AST getNextSibling();
    /** Get the token text for this node */
    public String getText();
    /** Get the token type for this node */
    public int getType();
    /** Get number of children of this node; if leaf, returns 0 */
    public int getNumberOfChildren();
    public void initialize(int t, String txt);
    public void initialize(AST t);
    public void initialize(Token t);
    /** Set the first child of a node. */
    public void setFirstChild(AST c);
    /** Set the next sibling after this one. */
    public void setNextSibling(AST n);
    /** Set the token text for this node */
    public void setText(String text);
    /** Set the token type for this node */
    public void setType(int ttype);
    public String toString();
    public String toStringList();
    public String toStringTree();
}

Tree grammar rules

As with the SORCERER tool of PCCTS 1.33 and the ANTLR token grammars, tree grammars are collections of EBNF rules embedded with actions, semantic predicates, and syntactic predicates.


rule:	alternative1
    |	alternative2
   ...
    |	alternativen
    ;

Each alternative production is composed of a list of elements where an element can be one of the items in a regular ANTLR grammar with the addition of the tree pattern element, which has the form:


#( root-token child1 child2 ... childn )
    

			

For example, the following tree pattern matches a simple PLUS-rooted tree with two INT children:


#( PLUS INT INT )

The root of a tree pattern must be a token reference, but the children elements can even be subrules. For example, a common structure is an if-then-else tree where the else-clause statement subtree is optional:


#( IF expr stat (stat)? )
    

An important thing to remember when specifying tree patterns and tree grammars in general is that matches are sufficient rather than exact. As long as the tree satisfies the pattern, a match is reported, regardless of how much is left unparsed. For example, #( A B ) will report a match for any larger tree with the same structure such as #( A #(B C) D).

Syntactic predicates

ANTLR tree parsers use only a single symbol of lookahead, which is normally not a problem as intermediate forms are explicitly designed to be easy to walk. However, there is occasionally the need to distinguish between similar tree structures. Syntactic predicates can be used to overcome the limitations of limited fixed lookahead. For example, distinguishing between the unary and binary minus operator is best done by using operator nodes of differing token types, but given the same root node, a syntactic predicate can be used to distinguish between these structures:


expr:   ( #(MINUS expr expr) )=> #( MINUS expr expr )
    |   #( MINUS expr )
   ...
    ;

The order of evaluation is very important as the second alternative is a "subset" of the first alternative.

Semantic predicates

Semantic predicates at the start of an alternative are simply incorporated into the alternative prediction expressions as with a regular grammar. Semantic predicates in the middle of productions throw exceptions when they evaluate to false just like a regular grammar.

An Example Tree Walker

Consider how you would build a simple calculator. One approach would be to build a parser that computed expression values as it recognized the input. For the purposes of illustration, we will build a parser that constructs a tree intermediate representation of the input expression and a tree parser that walks the intermediate representation, computing the result.

Our recognizer, CalcParser, is defined via the following grammar.


class CalcParser extends Parser;
options {
    buildAST = true;   // uses CommonAST by default
}

expr:   mexpr (PLUS^ mexpr)* SEMI!
    ;

mexpr
    :   atom (STAR^ atom)*
    ;

atom:   INT
    ;

The PLUS and STAR tokens are considered operators and, hence, subtree roots; they are annotated with the '^' character. The SEMI token reference is suffixed with the '!' character to indicate it should not be included in the tree.

The scanner for this calculator is defined as follows:


class CalcLexer extends Lexer;

WS	:	(' '
	|	'\t'
	|	'\n'
	|	'\r')
		{ _ttype = Token.SKIP; }
	;

LPAREN:	'('
	;

RPAREN:	')'
	;

STAR:	'*'
	;

PLUS:	'+'
	;

SEMI:	';'
	;

INT	:	('0'..'9')+
	;
    

The trees generated by this recognizer are simple expression trees. For example, input "3*4+5" results in a tree of the form #( + ( * 3 4 ) 5 ). In order to build a tree walker for trees of this form, you have to describe its structure recursively to ANTLR:


class CalcTreeWalker extends TreeParser;

expr	:	#(PLUS expr expr)
	|	#(STAR expr expr)
	|	INT
	;

Once the structure has been specified, you are free to embed actions to compute the appropriate result. An easy way to accomplish this is to have the expr rule return an integer result and then have each alternative compute the subresult for each subtree. The following tree grammar and actions produces the desired effect:


class CalcTreeWalker extends TreeParser;

expr returns [int r]
{
	int a,b;
	r=0;
}
	:	#(PLUS a=expr b=expr) {r = a+b;}
	|	#(STAR a=expr b=expr) {r = a*b;}
	|	i:INT		      {r = Integer.parseInt(i.getText());}
	;
    

Notice that no precedence specification is necessary when computing the result of an expression--the structure of the tree encodes this information. That is why intermediate trees are much more than copies of the input in tree form. The input symbols are indeed stored as nodes in the tree, but the structure of the input is encoded as the relationship of those nodes.

The code needed to launch the parser and tree walker is:


import java.io.*;
import antlr.CommonAST;
import antlr.collections.AST;

class Calc {
    public static void main(String[] args) {
        try {
            CalcLexer lexer =
                new CalcLexer(new DataInputStream(System.in));
            CalcParser parser = new CalcParser(lexer);
            // Parse the input expression
            parser.expr();
            CommonAST t = (CommonAST)parser.getAST();
            // Print the resulting tree out in LISP notation
            System.out.println(t.toStringList());
            CalcTreeWalker walker = new CalcTreeWalker();
            // Traverse the tree created by the parser
            int r = walker.expr(t);
            System.out.println("value is "+r);
        } catch(Exception e) {
            System.err.println("exception: "+e);
        }
    }
}
    

Transformations

While tree parsers are useful to examine trees or generate output from a tree, they must be augmented to handle tree transformations. ANTLR tree parsers support the buildAST option just like regular parsers; this is analogous to the transform mode of SORCERER. Without programmer intervention, the tree parser will automatically copy the input tree to a result tree. Each rule has an implicit (automatically defined) result tree; the result tree of the start symbol can be obtained from the tree parser via the getAST method. The various alternatives and grammar elements may be annotated with "!" to indicate that they should not be automatically linked into the output tree. Portions of, or entire, subtrees may be rewritten.

Actions embedded within the rules can set the result tree based upon tests and tree constructions. See the section on grammar action translations.

An Example Tree Transformation

Revisiting the simple Calc example from above, we can perform a few tree transformations instead of computing the expression value. The action in the following tree grammar optimizes away the addition identity operation (addition to zero).


class CalcTreeWalker extends TreeParser;
options{
    buildAST = true;	// "transform" mode
}

expr:!  #(PLUS left:expr right:expr)
        // '!' turns off auto transform
        {
            // x+0 = x
            if ( #right.getType()==INT &&
                 Integer.parseInt(#right.getText())==0 )
            {
                #expr = #left;
            }
            // 0+x = x
            else if ( #left.getType()==INT &&
                      Integer.parseInt(#left.getText())==0 )
            {
                #expr = #right;
            }
            // x+y
            else {
                #expr = #(PLUS, left, right);
            }
        }
    |   #(STAR expr expr)  // use auto transformation
    |   i:INT
    ;
    

The code to launch the parser and tree transformer is:


import java.io.*;
import antlr.CommonAST;
import antlr.collections.AST;

class Calc {
    public static void main(String[] args) {
        try {
            CalcLexer lexer =
                new CalcLexer(new DataInputStream(System.in));
            CalcParser parser = new CalcParser(lexer);
            // Parse the input expression
            parser.expr();
            CommonAST t = (CommonAST)parser.getAST();
            // Print the resulting tree out in LISP notation
            System.out.println(t.toStringList());

            CalcTreeWalker walker = new CalcTreeWalker();
            // Traverse the tree created by the parser
            walker.expr(t);
            // Get the result tree from the walker
            t = (CommonAST)walker.getAST();
            System.out.println(t.toStringList());
        } catch(Exception e) {
            System.err.println("exception: "+e);
        }
    }
}

Examining/Debugging ASTs

Often when developing a tree parser, you will get parse errors.  Unfortunately, your trees are usually very large, making it difficult to determine where your AST structure error is.  To help the situation (I found it VERY useful when building the Java tree parser), I created an ASTFrame class (a JFrame) that you can use to view your ASTs in a Swing tree view.   It does not copy the tree, but uses a TreeModel.  Run antlr.debug.misc.ASTFrame as an application to test it out or see the new Java grammar Main.java.   I am not sure it will live in the same package as I'm not sure how debugging etc... will shake out with future ANTLR versions.  Here is a simple example usage:

public static void main(String args[]) {
  // Create the tree nodes
  ASTFactory factory = new ASTFactory();
  CommonAST r = (CommonAST)factory.create(0, "ROOT");
  r.addChild((CommonAST)factory.create(0, "C1"));
  r.addChild((CommonAST)factory.create(0, "C2"));
  r.addChild((CommonAST)factory.create(0, "C3"));

  ASTFrame frame = new ASTFrame("AST JTree Example", r);
  frame.setVisible(true);
}
Version: $Id: //depot/code/org.antlr/release/antlr-2.7.5/doc/sor.html#1 $
nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/doc/stream.perspectives.gif000066400000000000000000000051671161462365500252510ustar00rootroot00000000000000GIF89a2,2@H*\ȰÇ#JHŋ3jȱcBCr)ɓ(S$%A c&p)͚3_s"M@ 'ѠF*]Tc҈?o✪3'էPj5kWXKٳhӪ]˶۷p㒝KnѰNw޾ LL#w1K!;.(ykT^Ƽ91΁}J'̡Vq딯 Vـq͗wHv/> xU h54M})Cd*Mq&0Ifr~yhFEB.̣E)AzF꒤6;d cKY(%VPc@E[*+v iTէCTV;=C|+MWs kɊHrsZS^W$RU-O~mTSRʌw ldKѣNv&#L٣DK]zW4vBm1UZ!l8l[NB)3얷q=*.E3ss;N:uvj/ϴ{NZUy0g/G"zm9N [Fup"K*W9.sf_J2ӢX11\/][aЎ5&G'YU$ka#Rt-ҙLqc"F1AaeUru1.kLbQr0Bf%Dc.9!o7L\N2WBL6r_9zɛ` /Ňm 4=%BE`>q[i:Gɥ3tP_ ;nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/doc/stream.selector.gif000066400000000000000000000135301161462365500243460ustar00rootroot00000000000000GIF89a,@H*\ȰÇ#JHŋ3jȱǏ CI2"Qx˗0cʜI͛.9S'O  mѣH%TʦP `*Oz+YY^`؏\~ʶ[K;}{p*-hW+߻׬Uk~2T ӎM 8Avd44ڕuF}xYC,g˯3#,aj]mNdC5GS^I7m񖥈y5֡iFgd׆i՘ltyamXniny'} ~qٛCe\t\RYwc6zj]ʁflq*~ h,*+rg뭸}韵jTQ'&lF+Rkf[v+R覫ƫnk/ޫDץeΕdh9*Xzd(kZQoz"}EJꥁXfVF˸asZ4'e^q곡 RDž1 ̓ތ"穖|>MX(;ӿḨFL={Dd=vXrShq{uNۃm2vOi,۷R/՛*[R7U}]`"9謷.:[e6Q,Uc{} /ichiQM/?[݊7+& [aXF_̓hb\֙!xqw3[(Oaۆ=  ɓ Բ 2J3<48QoSzcIza:_Q,` L@ WnzJN}i6 dEJ|"-UfuEtڢEFIYBż'=}+A?*`D7̠fm)N!sDA1[_ȿNZJݟצÌEՖwRluq kI%%",HC Վ:E*Oɹ߃9VS=Wֺ-acKtAl<ު [D*̏ ;iDMVPUL nTU^ sς:2PT"[qTjɻNљbjwu[>4fG6diT~UZތz5,ï#BRl`8PEC 3+Qͦrm rd3v'ܭyTn-ۂB0B9ê)2yܲXY.dRsW͜tdi[{;9j&F ho쭏ʳ:RELt>E~W$ T =(QMkRMazٗ&9խYȆQDc'U7 d$v]]lKRk:WYNpFԑQ ,kLK n̆]0qs۷o~\JߵaxOl Ɉg"T/#sYjԔ-5E{7۰_W?gdoRk°6?1rަ{8LPKޫ6cM(Gy-Av1Ǯ,{mW>wɈsf۴Nٜ*_'q `R[Vuzv} =>OWv%dd d %See5.꩕mT(M$*lۓ< $ְKW;wz;l8mٱ<-";E-u {ģ1&Gz z2.c3ɱo)D0늠*0+25bgKU}zW8w٧׭CyUD0i="(Ҷ9٦RH0Xʫwt+ʣL z0cbJK$jxxaJv& GRHteVJ^GcUIyDlFUi+zwnhS&IS PhߪI8S1r32>!HG'7tF8?iȼA;5;6i%s ȫVS}YAF V(1eqWkVڴK}); ";Õ5R2g-UxkMZ*狟ڢ&"+׼4,8"ewǶL|ÚwY{oڷ2ihfpnLvZ·;?ŎgR oXm7 '&i/e fXtvT`z ثjȈ|ȃ .xȐuëɑ;S¨ǧˊ,\hO>,,˴w|ƅ˺̵< <̃K ṳ̈̀ |\i||Fͱ\̗lҼ,]KFM:e@z\JnkkuIVm)l#4y (2fhD>U"pO}Cx#uq=q=QɃ-%N=:sH4 }Eǚ4c)DMxsw݅ټ֝MXSKH5u[܎p;ueۮͺԱݠghWٸ%T]BѹT;d?\ڮ׳-k߷-"S}m)!۵M^ٞ}ai܆4>Zu;X'hP%:Qخ\;b|}dTa&E #@4^;H8JZh[*6F]6cs3<*HS]z?hdngeB#VB|ր.=#TNƐ@I2 \KJwnIJӍ.bq՜'~`Mj5,a.^ܓ'3_mc D*c)g2ܮnLlL4xhjh,w;fȌũyG>EqJA (^rr^7;~Ji>;̃{;+_ 
_96&92=3^6} :o4/rH?66;JWX;nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/doc/stream.splitter.gif000066400000000000000000000126271161462365500244020ustar00rootroot00000000000000GIF89a","@H*\ȰÇ#JHŋ3jȱǏ +"I FT, iL8sɳϟ@/,˓ XhiR:]<4իX>DPUW]U&Jb,۠)Ҵ8MrU׻x/PڄQSQ_=ǐVu2ELϾ%R`9 L4OQ[sbcFyTGsfm۳ Aѫ+/wUC.دY7k_yDE{ [O~}kv'Z&~ wUgEZg܅AtrQM!YyBy4b~m6bk*^H'BH\R+HU^)~"tX^DcKM`[ S?f|M6$Ii.h;ߒf]yfiboG|nNY''YgQ%xaB[7اM{fjcgmVeo 7&6b\Hy&ih5fة6.J(kaء‰뇛:D=n,U)u۲Fib m=v+,b{퓰Ƣg~뮹nRڧ)jN ~l[ El_WWSYh|Y(gfWrb5^q $~M xެ32o;h+LV"-]+ua4Js95!-0Rkundjvh(={tWc\_ˤs;mU6M܍ mx̋7yJ&i8Eʺ_-k 7x7g% i"UjG:zݻHj'O"ccώ|<]?Y8{Ŵ_jڟ{܃/ll/is+vg ?(BuXQ} =~\vA刯V O} !qA PB)^#,J J̴5%kz\5Q(L9$uT<QF>i"=m߃i}40!G5.Q>B ] E7ȽvkO&Ғ܍t<idbJ/ND8o,8r@(~1^ &%ms}ӐԪDr35$UK\WB1o>Zyֶp$2%׭Mk\&c#^@`2Y<=1YH3ӥ]c% xTe96w.,>=/@M[uE,@XHb1ls;Q6*oĄba&ֲExxEq:I^W %4 BtDRi> UsRX$-8Cm#H7NT$hnyyitI9QgF0SX1-x~qxZ҇XQք`8&\HS8aCX5h]HJ𦄗h^8QH!8vHx'x%VCHA"/3yŋvWQe3Xp@YU#Fv%\(%fp27Čɘv֌gHP)aW3gTyNY'dvY>bY}'{2z`IxbR.~~,iS-\{6r+!/vƨm~ >tv+uWd-yt5vfw8hY+j؆%&M2"P姗wn7n׉Bdٝ ]}EYg~57Wn৒d(ٞqri"w}YahVZbshiƜ"d{Ƞt(/&|[N/}ءLz~ wD$D#>g6tYB' #kg/HqS.UKImF#M)ёq/$mGWgD;Xfjymil!u_I_ؘJ:x=woqyzO*uIHowRɟ} Dp<7Dy~0dfwo_8sg}bLmfsSr #wwzh{B)rdo3Vi: CNzsErueXIfũ؅tpIwb9YsTzihZu"*@pesjrZZB!Evwh4Xojhت|E7\E20m\ljFDdXQv tjTl;TbRHW' RqAo9}ZJG" zmRwhI\4Ǻy:Ihi]98[=?뵜(>3j mGa!9:wbi]B`k[J)JtQko' buqiaTc ƪgfF 4Vqj1 j{z몱2{r- $(xJʜx>G_KdKkE*BZ"۷|۹(<]~<Ըt{"뙮yk[˖Gpޫvz=B)OY sڛ=V@˾ (jO{~$Wm8>^[kϘ-Q[,Wǃl?.@3ǻA- ؒCm_ Ceb`/'0I.R-c`U@) *3cK7{&.+UD+\ƽ1tfqt#Rz<rV= 4|U.:+fSlS[ǸzA)oiM{CSye,eʰ;x!*"S0jl˷˴<(;IḵvG̩mÂ̧b5% 7ͅ< zL.AD1L<ȥK -Kٻ΢ou H샾lϯ"ǀufVL:P, Uӫ:# -{[GذX!}Un;YضU"?(g)H$$* 2 Hj9lg7M-]-|3Q9|1q33,{R =UVfNaՔXOm^ͣxbDgֻ6w H;3rVx3=V;- ^>hZvM)X(Uheϡz< +'Ӏ]٣`ɠ,^!X}k]כBwm)<~9ɀrMD;Lܫ_g:z{UMhLr|9-ꇾ3\t˗ œ؞QZHӑ}Tzߟ2 "Mn񻢽ͭ]ڐ|=- $ nad(.;nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/doc/streams.html000066400000000000000000000745661161462365500231310ustar00rootroot00000000000000 ANTLR Specification: Token Streams

Token Streams

Traditionally, a lexer and parser are tightly coupled objects; that is, one does not imagine anything sitting between the parser and the lexer, modifying the stream of tokens.   However, language recognition and translation can benefit greatly from treating the connection between lexer and parser as a token stream.  This idea is analogous to Java I/O streams, where you can pipeline lots of stream objects to produce highly-processed data streams.

Introduction

ANTLR identifies a stream of Token objects as any object that satisfies the TokenStream interface (prior to 2.6, this interface was called Tokenizer); i.e., any object that implements the following method.

Token nextToken();

Graphically, a normal stream of tokens from a lexer (producer) to a parser (consumer) might look like the following at some point during the parse.

lexer.to.parser.tokens.gif (3585 bytes)

The most common token stream is a lexer, but once you imagine a physical stream between the lexer and parser, you start imagining interesting things that you can do.  For example, you can:

  • filter a stream of tokens to strip out unwanted tokens
  • insert imaginary tokens to help the parser recognize certain nasty structures
  • split a single stream into multiple streams, sending certain tokens of interest down the various streams
  • multiplex multiple token streams onto one stream, thus, "simulating" the lexer states of tools like PCCTS, lex, and so on.

The beauty of the token stream concept is that parsers and lexers are not affected--they are merely consumers and producers of streams.  Stream objects are filters that produce, process, combine, or separate token streams for use by consumers.   Existing lexers and parsers may be combined in new and interesting ways without modification.

This document formalizes the notion of a token stream and describes in detail some very useful stream filters.

Pass-Through Token Stream

A token stream is any object satisfying the following interface.

public interface TokenStream {
  public Token nextToken()
    throws java.io.IOException;
}

For example, a "no-op" or pass-through filter stream looks like:

import antlr.*;
import java.io.IOException;

class TokenStreamPassThrough
    implements TokenStream {
  protected TokenStream input;

  /** Stream to read tokens from */
  public TokenStreamPassThrough(TokenStream in) {
    input = in;
  }

  /** This makes us a stream */
  public Token nextToken() throws IOException {
    return input.nextToken(); // "short circuit"
  }
}

You would use this simple stream by having it pull tokens from the lexer and then have the parser pull tokens from it as in the following main() program.

public static void main(String[] args) {
  MyLexer lexer =
    new MyLexer(new DataInputStream(System.in));
  TokenStreamPassThrough filter =
    new TokenStreamPassThrough(lexer);
  MyParser parser = new MyParser(filter);
  parser.startRule();
}

Token Stream Filtering

Most of the time, you want the lexer to discard whitespace and comments, however, what if you also want to reuse the lexer in situations where the parser must see the comments?  You can design a single lexer to cover many situations by having the lexer emit comments and whitespace along with the normal tokens.  Then, when you want to discard whitespace, put a filter between the lexer and the parser to kill whitespace tokens.

ANTLR provides TokenStreamBasicFilter for such situations.  You can instruct it to discard any token type or types without having to modify the lexer.  Here is an example usage of TokenStreamBasicFilter that filters out comments and whitespace.

public static void main(String[] args) {
  MyLexer lexer =
    new MyLexer(new DataInputStream(System.in));
  TokenStreamPassThrough filter =
    new TokenStreamPassThrough(lexer);
  filter.discard(MyParser.WS);
  filter.discard(MyParser.COMMENT);
  MyParser parser = new MyParser(filter);
  parser.startRule();
}

Note that it is more efficient to have the lexer immediately discard lexical structures you do not want because you do not have to construct a Token object.  On the other hand, filtering the stream leads to more flexible lexers.

Token Stream Splitting

Sometimes you want a translator to ignore but not discard portions of the input during the recognition phase.   For example, you want to ignore comments vis-a-vis parsing, but you need the comments for translation.   The solution is to send the comments to the parser on a hidden token stream--one that the parser is not "listening" to.  During recognition, actions can then examine the hidden stream or streams, collecting the comments and so on.  Stream-splitting filters are like prisms that split white light into rainbows.

The following diagram illustrates a situation in which a single stream of tokens is split into three.

stream.splitter.gif (5527 bytes)

You would have the parser pull tokens from the topmost stream.

There are many possible capabilities and implementations of a stream splitter.   For example, you could have a "Y-splitter" that actually duplicated a stream of tokens like a cable-TV Y-connector.  If the filter were thread-safe and buffered, you could have multiple parsers pulling tokens from the filter at the same time.

This section describes a stream filter supplied with ANTLR called TokenStreamHiddenTokenFilter that behaves like a coin sorter, sending pennies to one bin, dimes to another, etc...  This filter splits the input stream into two streams, a main stream with the majority of the tokens and a hidden stream that is buffered so that you can ask it questions later about its contents.   Because of the implementation, however, you cannot attach a parser to the hidden stream.  The filter actually weaves the hidden tokens among the main tokens as you will see below.

Example

Consider the following simple grammar that reads in integer variable declarations.

decls: (decl)+
     ;
decl : begin:INT ID end:SEMI
     ; 

Now assume input:

int n; // list length
/** doc */
int f;

Imagine that whitespace is ignored by the lexer and that you have instructed the filter to split comments onto the hidden stream.  Now if the parser is pulling tokens from the main stream, it will see only "INT ID SEMI INT ID SEMI" even though the comments are hanging around on the hidden stream.  So the parser effectively ignores the comments, but your actions can query the filter for tokens on the hidden stream.

The first time through rule decl, the begin token reference has no hidden tokens before or after, but

filter.getHiddenAfter(end)

returns a reference to token

// list length

which in turn provides access to

/** doc */

The second time through decl

filter.getHiddenBefore(begin)

refers to the

/** doc */

comment.

Filter Implementation

The following diagram illustrates how the Token objects are physically weaved together to simulate two different streams.

hidden.stream.gif (3667 bytes)

 

As the tokens are consumed, the TokenStreamHiddenTokenFilter object hooks the hidden tokens to the main tokens via linked list.  There is only one physical TokenStream of tokens emanating from this filter and the interweaved pointers maintain sequence information.

Because of the extra pointers required to link the tokens together, you must use a special token object called CommonHiddenStreamToken (the normal object is called CommonToken).   Recall that you can instruct a lexer to build tokens of a particular class with

lexer.setTokenObjectClass("classname");

Technically, this exact filter functionality could be implemented without requiring a special token object, but this filter implementation is extremely efficient and it is easy to tell the lexer what kind of tokens to create.  Further, this implementation makes it very easy to automatically have tree nodes built that preserve the hidden stream information.

This filter affects the lazy-consume of ANTLR.  After recognizing every main stream token, the TokenStreamHiddenTokenFilter must grab the next Token to see if it is a hidden token. Consequently, the use of this filter is not very workable for interactive (e.g., command-line) applications.

How To Use This Filter

To use TokenStreamHiddenTokenFilter, all you have to do is:

  • Create the lexer and tell it to create token objects augmented with links to hidden tokens.
MyLexer lexer = new MyLexer(some-input-stream);
lexer.setTokenObjectClass(
  "antlr.CommonHiddenStreamToken"
);
  • Create a TokenStreamHiddenTokenFilter object that pulls tokens from the lexer.
TokenStreamHiddenTokenFilter filter =
  new TokenStreamHiddenTokenFilter(lexer);
  • Tell the TokenStreamHiddenTokenFilter which tokens to hide, and which to discard.  For example,
filter.discard(MyParser.WS);
filter.hide(MyParser.SL_COMMENT);
  • Create a parser that pulls tokens from the TokenStreamHiddenTokenFilter rather than the lexer.
MyParser parser = new MyParser(filter);
try {
  parser.startRule(); // parse as usual
}
catch (Exception e) {
  System.err.println(e.getMessage());
}

See the ANTLR fieldguide entry on preserving whitespace for a complete example.

Tree Construction

Ultimately, hidden stream tokens are needed during the translation phase, which normally means while tree walking.  How do we pass the hidden stream info to the translator without mucking up the tree grammar?  Easy: use AST nodes that save the hidden stream tokens.  ANTLR defines CommonASTWithHiddenTokens for you that hooks the hidden stream tokens onto the tree nodes automatically; methods are available to access the hidden tokens associated with a tree node.  All you have to do is tell the parser to create nodes of this node type rather than the default CommonAST.

parser.setASTNodeClass("antlr.CommonASTWithHiddenTokens");

Tree nodes are created as functions of Token objects.  The initialize() method of the tree node is called with a Token object when the ASTFactory creates the tree node.  Tree nodes created from tokens with hidden tokens before or after will have the same hidden tokens.  You do not have to use this node definition, but it works for many translation tasks:

package antlr;

/** A CommonAST whose initialization copies
 *  hidden token information from the Token
 *  used to create a node.
 */
public class CommonASTWithHiddenTokens
  extends CommonAST {
  // references to hidden tokens
  protected Token hiddenBefore, hiddenAfter;

  public CommonHiddenStreamToken getHiddenAfter() {
    return hiddenAfter;
  }
  public CommonHiddenStreamToken getHiddenBefore() {
    return hiddenBefore;
  }
  public void initialize(Token tok) {
    CommonHiddenStreamToken t =
      (CommonHiddenStreamToken)tok;
    super.initialize(t);
    hiddenBefore = t.getHiddenBefore();
    hiddenAfter  = t.getHiddenAfter();
  }
}

Notice that this node definition assumes that you are using CommonHiddenStreamToken objects.  A runtime class cast exception occurs if you do not have the lexer create CommonHiddenStreamToken objects.

Garbage Collection Issues

By partitioning up the input stream and preventing hidden stream tokens from referring to main stream tokens, GC is allowed to work on the Token stream. In the integer declaration example above, when there are no more references to the first SEMI token and the second INT token, the comment tokens are candidates for garbage collection.  If all tokens were linked together, a single reference to any token would prevent GC of any tokens.  This is not the case in ANTLR's implementation.

Notes

This filter works great for preserving whitespace and comments during translation, but is not always the best solution for handling comments in situations where the output is very dissimilar to the input.  For example, there may be 3 comments interspersed within an input statement that you want to combine at the head of the output statement during translation.  Rather than having to ask each parsed token for the comments surrounding it, it would be better to have a real, physically-separate stream that buffered the comments and a means of associating groups of parsed tokens with groups of comment stream tokens.  You probably want to support questions like "give me all of the tokens on the comment stream that originally appeared between this beginning parsed token and this ending parsed token."

This filter implements the exact same functionality as JavaCC's special tokens.  Sriram Sankar (father of JavaCC) had a great idea with the special tokens and, at the 1997 Dr. T's Traveling Parsing Revival and Beer Tasting Festival, the revival attendees extended the idea to the more general token stream concept.  Now, the JavaCC special token functionality is just another ANTLR stream filter with the bonus that you do not have to modify the lexer to specify which tokens are special.

Token Stream Multiplexing (aka "Lexer states")

Now, consider the opposite problem where you want to combine multiple streams rather than splitting a single stream.  When your input contains sections or slices that are radically diverse such as Java and JavaDoc comments, you will find that it is hard to make a single lexer recognize all slices of the input.  This is primarily because merging the token definitions of the various slices results in an ambiguous lexical language or allows invalid tokens.  For example, "final" may be a keyword in one section, but an identifier in another.  Also, "@author" is a valid javadoc tag within a comment, but is invalid in the surrounding Java code.

Most people solve this problem by having the lexer sit in one of multiple states (for example, "reading Java stuff" vs "reading JavaDoc stuff").  The lexer starts out in Java mode and then, upon "/**", switches to JavaDoc mode; "*/" forces the lexer to switch back to Java mode.

Multiple Lexers

Having a single lexer with multiple states works, but having multiple lexers that are multiplexed onto the same token stream solves the same problem better because the separate lexers are easier to reuse (no cutting and pasting into a new lexer--just tell the stream multiplexor to switch to it).  For example, the JavaDoc lexer could be reused for any language problem that had JavaDoc comments.

ANTLR provides a predefined token stream called TokenStreamSelector that lets you switch between multiple lexers.  Actions in the various lexers control how the selector switches input streams.  Consider the following Java fragment.

/** Test.
 *  @author Terence
 */
int n;

Given two lexers, JavaLexer and JavaDocLexer, the sequence of actions by the two lexers might look like this:

JavaLexer: match JAVADOC_OPEN, switch to JavaDocLexer
JavaDocLexer: match AUTHOR
JavaDocLexer: match ID
JavaDocLexer: match JAVADOC_CLOSE, switch back to JavaLexer
JavaLexer: match INT
JavaLexer: match ID
JavaLexer: match SEMI

In the Java lexer grammar, you will need a rule to perform the switch to the JavaDoc lexer (recording on the stack of streams the "return lexer"):

JAVADOC_OPEN
    :    "/**" {selector.push("doclexer");}
    ;

Similarly, you will need a rule in the JavaDoc lexer to switch back:

JAVADOC_CLOSE
    :    "*/" {selector.pop();}
    ;

The selector has a stack of streams so the JavaDoc lexer does not need to know who invoked it.

Graphically, the selector combines the two lexer streams into a single stream presented to the parser.

stream.selector.gif (5976 bytes)

The selector can maintain a list of streams for you so that you can switch to another input stream by name, or you can tell it to switch to an actual stream object.

public class TokenStreamSelector implements TokenStream {
  public TokenStreamSelector() {...}
  public void addInputStream(TokenStream stream,
    String key) {...}
  public void pop() {...}
  public void push(TokenStream stream) {...}
  public void push(String sname) {...}
  /** Set the stream without pushing old stream */
  public void select(TokenStream stream) {...}
  public void select(String sname)
    throws IllegalArgumentException {...}
}

Using the selector is easy:

  • Create a selector.
TokenStreamSelector selector =
  new TokenStreamSelector();
  • Name the streams (don't have to name--you can use stream object references instead to avoid the hashtable lookup on each switch).
selector.addInputStream(mainLexer, "main");
selector.addInputStream(doclexer, "doclexer");
  • Select which lexer reads from the char stream first.
// start with main java lexer
selector.select("main");
  • Attach your parser to the selector instead of one of the lexers.
JavaParser parser = new JavaParser(selector);

Lexers Sharing Same Character Stream

Before moving on to how the parser uses the selector, note that the two lexers have to read characters from the same input stream.  Prior to ANTLR 2.6.0, each lexer had its own line number variable, input char stream variable and so on.  In order to share the same input state, ANTLR 2.6.0 factors the portion of a lexer dealing with the character input into an object, LexerSharedInputState, that can be shared among n lexers (single-threaded).  To get multiple lexers to share state, you create the first lexer, ask for its input state object, and then use that when constructing any further lexers that need to share that input state:

// create Java lexer
JavaLexer mainLexer = new JavaLexer(input);
// create javadoc lexer; attach to shared
// input state of java lexer
JavaDocLexer doclexer =
  new JavaDocLexer(mainLexer.getInputState());

Parsing Multiplexed Token Streams

Just as a single lexer may have trouble producing a single stream of tokens from diverse input slices or sections, a single parser may have trouble handling the multiplexed token stream.  Again, a token that is a keyword in one lexer's vocabulary may be an identifier in another lexer's vocabulary.  Factoring the parser into separate subparsers for each input section makes sense to handle the separate vocabularies as well as for promoting grammar reuse.

The following parser grammar uses the main lexer token vocabulary (specified with the importVocab option) and upon JAVADOC_OPEN it creates and invokes a JavaDoc parser to handle the subsequent stream of tokens from within the comment.

class JavaParser extends Parser;
options {
    importVocab=Java;
}

input
    :   ( (javadoc)? INT ID SEMI )+
    ;

javadoc
    :   JAVADOC_OPEN
        {
        // create a parser to handle the javadoc comment
        JavaDocParser jdocparser =
          new JavaDocParser(getInputState());
        jdocparser.content(); // go parse the comment
        }
        JAVADOC_CLOSE
    ;

You will note that ANTLR parsers from 2.6.0 also share token input stream state.   When creating the "subparser", JavaParser tells it to pull tokens from the same input state object.

The JavaDoc parser matches a bunch of tags:

class JavaDocParser extends Parser;
options {
    importVocab=JavaDoc;
}

content
    :   (   PARAM // includes ID as part of PARAM
        |   EXCEPTION
        |   AUTHOR
        )*
    ;

When the subparser rule content finishes, control is naturally returned to the invoking method, javadoc, in the Java parser.

The Effect of Lookahead Upon Multiplexed Token Streams

What would happen if the parser needed to look two tokens ahead at the start of the JavaDoc comment?  In other words, from the perspective of the main parser, what is the token following JAVADOC_OPEN?   Token JAVADOC_CLOSE, naturally!  The main parser treats any JavaDoc comment, no matter how complicated, as a single entity; it does not see into the token stream of the comment nor should it--the subparser handles that stream.

What is the token following the content rule in the subparser?  "End of file".  The analysis of the subparser cannot determine what random method will call it from your code.  This is not an issue because there is normally a single token that signifies the termination of the subparser.  Even if EOF gets pulled into the analysis somehow, EOF will not be present on the token stream.

Multiple Lexers Versus Calling Another Lexer Rule

Multiple lexer states are also often used to handle very complicated single   tokens such as strings with embedded escape characters where input "\t" should not be allowed outside of a string.  Typically, upon the initial quote, the lexer switches to a "string state" and then switches back to the "normal state" after having matched the guts of the string.

So-called "modal" programming, where your code does something different depending on a mode, is often a bad practice.  In the situation of complex tokens, it is better to explicitly specify the complicated token with more rules.  Here is the golden rule of when to and when not to use multiplexed token streams:

Complicated single tokens should be matched by calling another (protected) lexer rule whereas streams of tokens from diverse slices or sections should be handled by different lexers multiplexed onto the same stream that feeds the parser.

For example, the definition of a string in a lexer should simply call another rule to handle the nastiness of escape characters:

STRING_LITERAL
    :    '"' (ESC|~('"'|'\\'))* '"'
    ;

protected // not a token; only invoked by another rule.
ESC
    :    '\\'
        (    'n'
        |    'r'
        |    't'
        |    'b'
        |    'f'
        |    '"'
        |    '\''
        |    '\\'
        |    ('u')+
             HEX_DIGIT HEX_DIGIT HEX_DIGIT HEX_DIGIT 
        ...
       )
    ;

TokenStreamRewriteEngine Easy Syntax-Directed Translation

There are many common situations where you want to tweak or augment a program or data file. ANTLR 2.7.3 introduced (Java/C# versions only) a very simple but powerful TokenStream targeted at the class of problems where:
  1. the output language and the input language are similar
  2. the relative order of language elements does not change
See the
Syntax Directed TokenStream Rewriting article on the antlr website.

The Future

The ANTLR 2.6 release provides the basic structure for using token streams--future versions will be more sophisticated once we have experience using them.

The current "hidden token" stream filter clearly solves the "ignore but preserve whitespace" problem really well, but it does not handle comments too well in most situations.  For example, in real translation problems you want to collect comments at various single tree nodes (like DECL or METHOD) for interpretation rather than leaving them strewn throughout the tree.  You really need a stream splitter that buffers up the comments on a separate stream so you can say "give me all comments   consumed during the recognition of this rule" or "give me all comments found between these two real tokens." That is almost certainly something you need for translation of comments.

Token streams will lead to fascinating possibilities.  Most folks are not used to thinking about token streams so it is hard to imagine what else they could be good for.  Let your mind go wild.  What about embedded languages where you see slices (aspects) of the input such as Java and SQL (each portion of the input could be sliced off and put through on a different stream).  What about parsing Java .class files with and without debugging information?  If you have a parser for .class files without debug info and you want to handle .class files with debug info, leave the parser alone and augment the lexer to see the new debug structures.  Have a filter split the debug tokens off onto a different stream and the same parser will work for both types of .class files.

Later, I would like to add "perspectives", which are really just another way to look at filters.  Imagine a raw stream of tokens emanating from a lexer--the root perspective.  I can build up a tree of perspectives very easily from there.  For example, given a Java program with embedded SQL, you might want multiple perspectives on the input stream for parsing or translation reasons:

stream.perspectives.gif (2679 bytes)

You could attach a parser to the SQL stream or the Java stream minus comments, with actions querying the comment stream.

In the future, I would also like to add the ability of a parser to generate a stream of tokens (or text) as output just like it can build trees now.  In this manner, multipass parsing becomes a very natural and simple problem because parsers become stream producers also.  The output of one parser can be the input to another.

Version: $Id: //depot/code/org.antlr/release/antlr-2.7.5/doc/streams.html#1 $ nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/doc/subrule.gif000066400000000000000000000035371161462365500227230ustar00rootroot00000000000000GIF89a,@H*\ȰÇ#Jxň,X`Ǐ ;:D@@K0(\r&H6!I0cw(ϗ?*]ʴK+1zUEʕb֮4?~؂gYr`ѝg]V-Zv}Εj7PH |TϨ:Un㭏e<92Y +c9/^.5w]i \6צ7-(m\~`f:mbcfEi4;*yskX#p7XUqg*zջ"۾*[a;Ny~}Ryp߁Gx䑷X{BX`Qxa_l gnSM^ ^%4[+bRlhMm`j^{'l38huR0Yz/ ޻u'n1ϹE7QS4ߔC>ƒy3$W~1zh=ayBc93k[ p7KzG*ӝ{ݖ#A}"x]=brFi?;r[8gOp'>?߰ P\<Ϣ,D@-j 78AVl221 Q{:/:f&3,_y ANTLR Tree Construction

ANTLR Tree Construction

ANTLR helps you build intermediate form trees, or abstract syntax trees (ASTs), by providing grammar annotations that indicate what tokens are to be treated as subtree roots, which are to be leaves, and which are to be ignored with respect to tree construction.  As with PCCTS 1.33, you may manipulate trees using tree grammar actions.

It is often the case that programmers either have existing tree definitions or need a special physical structure, thus, preventing ANTLR from specifically defining the implementation of AST nodes. ANTLR specifies only an interface describing minimum behavior. Your tree implementation must implement this interface so ANTLR knows how to work with your trees. Further, you must tell the parser the name of your tree nodes or provide a tree "factory" so that ANTLR knows how to create nodes with the correct type (rather than hardcoding in a new AST() expression everywhere).   ANTLR can construct and walk any tree that satisfies the AST interface.  A number of common tree definitions are provided. Unfortunately, ANTLR cannot parse XML DOM trees since our method names conflict (e.g., getFirstChild()); ANTLR was here first <wink>. Argh!

Notation

In this and other documents, tree structures are represented by a LISP-like notation, for example:

#(A B C)

is a tree with A at the root, and children B and C. This notation can be nested to describe trees of arbitrary structure, for example:

#(A B #(C D E))

is a tree with A at the root, B as a first child, and an entire subtree as the second child. The subtree, in turn, has C at the root and D,E as children.

Controlling AST construction

AST construction in an ANTLR Parser, or AST transformation in a Tree-Parser, is turned on and off by the buildAST option.

From an AST construction and walking point of view, ANTLR considers all tree nodes to look the same (i.e., they appear to be homogeneous).  Through a tree factory or by specification, however, you can instruct ANTLR to create nodes of different types.   See the section below on heterogeneous trees.

Grammar annotations for building ASTs

Leaf nodes

ANTLR assumes that any nonsuffixed token reference or token-range is a leaf node in the resulting tree for the enclosing rule. If no suffixes at all are specified in a grammar, then a Parser will construct a linked-list of the tokens (a degenerate AST), and a Tree-Parser will copy the input AST.

Root nodes

Any token suffixed with the "^" operator is considered a root token. A tree node is constructed for that token and is made the root of whatever portion of the tree has been built.

a : A B^ C^ ;

results in tree #(C #(B A)).

First A is matched and made a lonely child, followed by B which is made the parent of the current tree, A. Finally, C is matched and made the parent of the current tree, making it the parent of the B node. Note that the same rule without any operators results in the flat tree A B C.

Turning off standard tree construction

Suffix a token reference with "!" to prevent incorporation of the node for that token into the resulting tree (the AST node for the token is still constructed and may be referenced in actions, it is just not added to the result tree automatically). Suffix a rule reference "!" to indicate that the tree constructed by the invoked rule should not be linked into the tree constructed for the current rule.

Suffix a rule definition with "!" to indicate that tree construction for the rule is to be turned off. Rules and tokens referenced within that rule still create ASTs, but they are not linked into a result tree. The following rule does no automatic tree construction. Actions must be used to set the return AST value, for example:

begin!
    :   INT PLUS i:INT
        { #begin = #(PLUS INT i); }
    ;

For finer granularity, prefix alternatives with "!" to shut off tree construction for that alternative only. This granularity is useful, for example, if you have a large number of alternatives and you only want one to have manual tree construction:

stat:
        ID EQUALS^ expr   // auto construction
    ... some alternatives ...
    |!  RETURN expr
        {#stat = #([IMAGINARY_TOKEN_TYPE] expr);}
    ... more alternatives ...
    ; 

Tree node construction

With automatic tree construction off (but with buildAST on), you must construct your own tree nodes and combine them into tree structures within embedded actions. There are several ways to create a tree node in an action:

  1. use new T(arg) where T is your tree node type and arg is either a single token type, a token type and token text, or a Token.
  2. use ASTFactory.create(arg) where T is your tree node type and arg is either a single token type, a token type and token text, or a Token. Using the factory is more general than creating a new node directly, as it defers the node-type decision to the factory, and can be easily changed for the entire grammar.
  3. use the shorthand notation #[TYPE] or #[TYPE,"text"] or #[TYPE,"text",ASTClassNameToConstruct]. The shorthand notation results in a call to ASTFactory.create() with any specified arguments.
  4. use the shorthand notation #id, where id is either a token matched in the rule, a label, or a rule-reference.

To construct a tree structure from a set of nodes, you can set the first-child and next-sibling references yourself or call the factory make method or use #(...) notation described below.

AST Action Translation

In parsers and tree parsers with buildAST set to true, ANTLR will translate portions of user actions in order to make it easier to build ASTs within actions. In particular, the following constructs starting with '#' will be translated:

#label
The AST associated with a labeled token-reference or rule-reference may be accessed as #label. The translation is to a variable containing the AST node built from that token, or the AST returned from the rule.
#rule
When rule is the name of the enclosing rule, ANTLR will translate this into the variable containing the result AST for the rule. This allows you to set the return AST for a rule or examine it from within an action. This can be used when AST generation is on or suppressed for the rule or alternate. For example:
r! : a:A	{ #r = #a; }
Setting the return tree is very useful in combination with normal tree construction because you can have ANTLR do all the work of building a tree and then add an imaginary root node such as:
 
decl : ( TYPE ID )+
       { #decl = #([DECL,"decl"], #decl); }
     ;
ANTLR allows you to assign to #rule anywhere within an alternative of the rule. ANTLR ensures that references of and assignments to #rule within an action force the parser's internal AST construction variables into a stable state. After you assign to #rule, the state of the parser's automatic AST construction variables will be set as if ANTLR had generated the tree rooted at #rule. For example, any children nodes added after the action will be added to the children of #rule.
#label_in
In a tree parser, the input AST associated with a labeled token reference or rule reference may be accessed as #label_in. The translation is to a variable containing the input-tree AST node from which the rule or token was extracted. Input variables are seldom used. You almost always want to use #label instead of #label_in.
 
#id
ANTLR supports the translation of unlabeled token references as a shorthand notation, as long as the token is unique within the scope of a single alternative. In these cases, the use of an unlabeled token reference is identical to using a label. For example, this:

r! : A { #r = #A; }

is equivalent to:


r! : a:A { #r = #a; }
#id_in is given similar treatment to #label_in.
 
#[TOKEN_TYPE] or #[TOKEN_TYPE,"text"] or #[TYPE,"text",ASTClassNameToConstruct]
AST node constructor shorthand. The translation is a call to the ASTFactory.create() method.  For example, #[T] is translated to:
ASTFactory.create(T)
#(root, c1, ..., cn)
AST tree construction shorthand. ANTLR looks for the comma character to separate the tree arguments. Commas within method call tree elements are handled properly; i.e., an element of "foo(#a,34)" is ok and will not conflict with the comma separator between the other tree elements in the tree. This tree construct is translated to a "make tree" call. The "make-tree" call is complex due to the need to simulate variable arguments in languages like Java, but the result will be something like:
ASTFactory.make(root, c1, ...,
cn);

In addition to the translation of the #(...) as a whole, the root and each child c1..cn will be translated. Within the context of a #(...) construct, you may use:

  • id or label as a shorthand for #id or #label.
  • [...] as a shorthand for #[...].
  • (...) as a shorthand for #(...).

The target code generator performs this translation with the help of a special lexer that parses the actions and asks the code-generator to create appropriate substitutions for each translated item. This lexer might impose some restrictions on label names (think of C/C++ preprocessor directives)

Invoking parsers that build trees

Assuming that you have defined a lexer L and a parser P in your grammar, you can invoke them sequentially on the system input stream as follows.

L lexer = new L(System.in);
P parser = new P(lexer);
parser.setASTNodeType("MyAST");
parser.startRule();   

If you have set buildAST=true in your parser grammar, then it will build an AST, which can be accessed via parser.getAST(). If you have defined a tree parser called T, you can invoke it with:

T walker = new T();
walker.startRule(parser.getAST()); // walk tree  

If, in addition, you have set buildAST=true in your tree-parser to turn on transform mode, then you can access the resulting AST of the tree-walker:

AST results = walker.getAST();
DumpASTVisitor visitor = new DumpASTVisitor();
visitor.visit(results);

Where DumpASTVisitor is a predefined ASTVisitor implementation that simply prints the tree to the standard output.

You can also use get a LISP-like print out of a tree via

String s = parser.getAST().toStringList();

AST Factories

ANTLR uses a factory pattern to create and connect AST nodes. This is done primarily to separate out the tree construction facility from the parser, but it also gives you a hook in between the parser and the tree node construction.  Subclass ASTFactory to alter the create methods.

If you are only interested in specifying the AST node type at runtime, use the

setASTNodeType(String className)

method on the parser or factory.  By default, trees are constructed of nodes of type antlr.CommonAST. (You must use the fully-qualified class name).

You can also specify a different class name for each token type to generate heterogeneous trees:

/** Specify an "override" for the Java AST object created for a
 *  specific token.  It is provided as a convenience so
 *  you can specify node types dynamically.  ANTLR sets
 *  the token type mapping automatically from the tokens{...}
 *  section, but you can change that mapping with this method.
 *  ANTLR does its best to statically determine the node
 *  type for generating parsers, but it cannot deal with
 *  dynamic values like #[LT(1)].  In this case, it relies
 *  on the mapping.  Beware differences in the tokens{...}
 *  section and what you set via this method.  Make sure
 *  they are the same.
 *
 *  Set className to null to remove the mapping.
 *
 *  @since 2.7.2
 */
public void setTokenTypeASTNodeType(int tokenType, String className)
	throws IllegalArgumentException;

The ASTFactory has some generically useful methods:

/** Copy a single node with same Java AST object type.
 *  Ignore the tokenType->Class mapping since you know
 *  the type of the node, t.getClass(), and doing a dup.
 *
 *  clone() is not used because we want all AST creation
 *  to go thru the factory so creation can be
 *  tracked.  Returns null if t is null.
 */
public AST dup(AST t);
/** Duplicate tree including siblings
 * of root.
 */
public AST dupList(AST t);
/**Duplicate a tree, assuming this is a
 * root node of a tree--duplicate that node
 * and what's below; ignore siblings of root
 * node.
 */
public AST dupTree(AST t);

Heterogeneous ASTs

Each node in an AST must encode information about the kind of node it is; for example, is it an ADD operator or a leaf node such as an INT?  There are two ways to encode this: with a token type or with a Java (or C++ etc...) class type.  In other words, do you have a single class type with numerous token types or no token types and numerous classes?  For lack of better terms, I (Terence) have been calling ASTs with a single class type homogeneous trees and ASTs with many class types heterogeneous trees.

The only reason to have a different class type for the various kinds of nodes is for the case where you want to execute a bunch of hand-coded tree walks or your nodes store radically different kinds of data.  The example I use below demonstrates an expression tree where each node overrides value() so that root.value() is the result of evaluating the input expression.   From the perspective of building trees and walking them with a generated tree parser, it is best to consider every node as an identical AST node.  Hence, the schism that exists between the hetero- and homogeneous AST camps.

ANTLR supports both kinds of tree nodes--at the same time!  If you do nothing but turn on the "buildAST=true" option, you get a homogeneous tree.  Later, if you want to use physically separate class types for some of the nodes, just specify that in the grammar that builds the tree.  Then you can have the best of both worlds--the trees are built automatically, but you can apply different methods to and store different data in the various nodes.  Note that the structure of the tree is unaffected; just the type of the nodes changes.

ANTLR applies a "scoping" sort of algorithm for determining the class type of a particular AST node that it needs to create.  The default type is CommonAST unless, prior to parser invocation, you override that with a call to:

  myParser.setASTNodeType("com.acme.MyAST");

where you must use a fully qualified class name.

In the grammar, you can override the default class type by setting the type for nodes created from a particular input token.  Use the element option <AST=typename> in the tokens section:

tokens {
    PLUS<AST=PLUSNode>;
    ...
}

You may further override the class type by annotating a particular token reference in your parser grammar:

anInt : INT<AST=INTNode> ;

This reference override is super useful for tokens such as ID that you might want converted to a TYPENAME node in one context and a VARREF in another context.

ANTLR uses the AST factory to create all AST nodes even if it knows the specific type.   In other words, ANTLR generates code similar to the following:

ANode tmp1_AST = (ANode)astFactory.create(LT(1),"ANode");
from
a : A<AST=ANode> ;
.

An Expression Tree Example

This example includes a parser that constructs expression ASTs, the usual lexer, and some AST node class definitions.

Let's start by describing the AST structure and node types.   Expressions have plus and multiply operators and integers.  The operators will be subtree roots (nonleaf nodes) and integers will be leaf nodes.  For example, input 3+4*5+21 yields a tree with structure:

(  + (  +  3 (  *  4  5 ) )  21 )

or:

  +
  |
  +--21
  |
  3--*
     |
     4--5

All AST nodes are subclasses of CalcAST, which are BaseAST's that also answer method value().   Method value() evaluates the tree starting at that node.  Naturally, for integer nodes, value() will simply return the value stored within that node.  Here is CalcAST:

public abstract class CalcAST
    extends antlr.BaseAST
{
    public abstract int value();
}

The AST operator nodes must combine the results of computing the value of their two subtrees.  They must perform a depth-first walk of the tree below them.  For fun and to make the operations more obvious, the operator nodes define left() and right() instead, making them appear even more different than the normal child-sibling tree representation.  Consequently, these expression trees can be treated as both homogeneous child-sibling trees and heterogeneous expression trees.

public abstract class BinaryOperatorAST extends
    CalcAST
{
    /** Make me look like a heterogeneous tree */
    public CalcAST left() {
        return (CalcAST)getFirstChild();
    }

    public CalcAST right() {
        CalcAST t = left();
        if ( t==null ) return null;
        return (CalcAST)t.getNextSibling();
    }
}

The simplest node in the tree looks like:

import antlr.BaseAST;
import antlr.Token;
import antlr.collections.AST;
import java.io.*;

/** A simple node to represent an INT */
public class INTNode extends CalcAST {
    int v=0;

    public INTNode(Token tok) {
        v = Integer.parseInt(tok.getText());
    }

    /** Compute value of subtree; this is
     *  heterogeneous part :)
     */
    public int value() {
        return v;
    }

    public String toString() {
        return " "+v;
    }

    // satisfy abstract methods from BaseAST
    public void initialize(int t, String txt) {
    }
    public void initialize(AST t) {
    }
    public void initialize(Token tok) {
    }
}

The operators derive from BinaryOperatorAST and define value() in terms of left() and right().  For example, here is PLUSNode:

import antlr.BaseAST;
import antlr.Token;
import antlr.collections.AST;
import java.io.*;

/** A simple node to represent PLUS operation */
public class PLUSNode extends BinaryOperatorAST {
    public PLUSNode(Token tok) {
    }

    /** Compute value of subtree;
     * this is heterogeneous part :)
     */
    public int value() {
        return left().value() + right().value();
    }

    public String toString() {
        return " +";
    }

    // satisfy abstract methods from BaseAST
    public void initialize(int t, String txt) {
    }
    public void initialize(AST t) {
    }
    public void initialize(Token tok) {
    }
}

The parser is pretty straightforward except that you have to add the options to tell ANTLR what node types you want to create for which token matched on the input stream.   The tokens section lists the operators with element option AST appended to their definitions.  This tells ANTLR to build PLUSNode objects for any PLUS tokens seen on the input stream, for example.  For demonstration purposes, INT is not included in the tokens section--the specific token references is suffixed with the element option to specify that nodes created from that INT should be of type INTNode (of course, the effect is the same as there is only that one reference to INT).

class CalcParser extends Parser;
options {
    buildAST = true; // uses CommonAST by default
}

// define a bunch of specific AST nodes to build.
// can override at actual reference of tokens in
// grammar below.
tokens {
    PLUS<AST=PLUSNode>;
    STAR<AST=MULTNode>;
}

expr:   mexpr (PLUS^ mexpr)* SEMI!
    ;

mexpr
    :   atom (STAR^ atom)*
    ;

// Demonstrate token reference option
atom:   INT<AST=INTNode>
    ;

Invoking the parser is done as usual.  Computing the value of the resulting AST is accomplished by simply calling method value() on the root.

import java.io.*;
import antlr.CommonAST;
import antlr.collections.AST;

class Main {
    public static void main(String[] args) {
        try {
            CalcLexer lexer =
                new CalcLexer(
                  new DataInputStream(System.in)
                );
            CalcParser parser =
                new CalcParser(lexer);
            // Parse the input expression
            parser.expr();
            CalcAST t = (CalcAST)parser.getAST();

            System.out.println(t.toStringTree());

            // Compute value and return
            int r = t.value();
            System.out.println("value is "+r);
        } catch(Exception e) {
            System.err.println("exception: "+e);
            e.printStackTrace();
        }
    }
}

For completeness, here is the lexer:

class CalcLexer extends Lexer;

WS  :   (' '
    |   '\t'
    |   '\n'
    |   '\r')
        { $setType(Token.SKIP); }
    ;

LPAREN: '(' ;

RPAREN: ')' ;

STAR:   '*' ;

PLUS:   '+' ;

SEMI:   ';' ;

protected
DIGIT
    :   '0'..'9' ;

INT :   (DIGIT)+ ;

Describing Heterogeneous Trees With Grammars

So what's the difference between this approach and default homogeneous tree construction?  The big difference is that you need a tree grammar to describe the expression tree and compute resulting values.  But, that's a good thing as it's "executable documentation" and negates the need to handcode the tree parser (the value() methods).  If you used homogeneous trees, here is all you would need beyond the parser/lexer to evaluate the expressions:  [This code comes from the examples/java/calc directory.]

class CalcTreeWalker extends TreeParser;

expr returns [float r]
{
    float a,b;
    r=0;
}
    :   #(PLUS a=expr b=expr)   {r = a+b;}
    |   #(STAR a=expr b=expr)   {r = a*b;}
    |   i:INT
        {r = (float)
         Integer.parseInt(i.getText());}
    ;

Because Terence wants you to use tree grammars even when constructing heterogeneous ASTs (to avoid handcoding methods that implement a depth-first-search), implement the following methods in your various heterogeneous AST node class definitions:

    /** Get the token text for this node */
    public String getText();
    /** Get the token type for this node */
    public int getType();

That is how you can use heterogeneous trees with a tree grammar.  Note that your token types must match the PLUS and STAR token types imported from your parser.  I.e., make sure PLUSNode.getType() returns CalcParserTokenTypes.PLUS.   The token types are generated by ANTLR in interface files that look like:

public interface CalcParserTokenTypes {
	...
        int PLUS = 4;
        int STAR = 5;
	...
}

AST (XML) Serialization

[Oliver Zeigermann olli@zeigermann.de provided the initial implementation of this serialization.  His XTAL XML translation code is worth checking out; particularly for reading XML-serialized ASTs back in.]

For a variety of reasons, you may want to store an AST or pass it to another program or computer.  Class antlr.BaseAST is Serializable using the Java code generator, which means you can write ASTs to the disk using the standard Java stuff.  You can also write the ASTs out in XML form using the following methods from BaseAST:

  • public void xmlSerialize(Writer out)
  • public void xmlSerializeNode(Writer out)
  • public void xmlSerializeRootOpen(Writer out)
  • public void xmlSerializeRootClose(Writer out)

All methods throw IOException.

You can override xmlSerializeNode and so on to change the way nodes are written out.  By default the serialization uses the class type name as the tag name and has attributes text and type to store the text and token type of the node.

The output from running the simple heterogeneous tree example, examples/java/heteroAST, yields:

 (  + (  +  3 (  *  4  5 ) )  21 )
<PLUS><PLUS><int>3</int><MULT>
<int>4</int><int>5</int>
</MULT></PLUS><int>21</int></PLUS>
value is 44

The LISP-form of the tree shows the structure and contents.  The various heterogeneous nodes override the open and close tags and change the way leaf nodes are serialized to use <int>value</int> instead of tag attributes of a single node.

Here is the code that generates the XML:

Writer w = new OutputStreamWriter(System.out);
t.xmlSerialize(w);
w.write("\n");
w.flush();

AST enumerations

The AST findAll and findAllPartial methods return enumerations of tree nodes that you can walk.  Interface

antlr.collections.ASTEnumeration

and

class antlr.Collections.impl.ASTEnumerator

implement this functionality.  Here is an example:

// Print out all instances of
// a-subtree-of-interest
// found within tree 't'.
ASTEnumeration enum;
enum = t.findAll(a-subtree-of-interest);
while ( enum.hasMoreNodes() ) {
  System.out.println(
    enum.nextNode().toStringList()
  );
}

A few examples


sum :term ( PLUS^ term)*
    ; 

The "^" suffix on the PLUS tells ANTLR to create an additional node and place it as the root of whatever subtree has been constructed up until that point for rule sum. The subtrees returned by the term references are collected as children of the addition nodes.  If the subrule is not matched, the associated nodes would not be added to the tree. The rule returns either the tree matched for the first term reference or a PLUS-rooted tree.

The grammar annotations should be viewed as operators, not static specifications. In the above example, each iteration of the (...)* will create a new PLUS root, with the previous tree on the left, and the tree from the new term on the right, thus preserving the usual associatively for "+".

Look at the following rule that turns off default tree construction.

decl!:
    modifiers type ID SEMI;
	{ #decl = #([DECL], ID, ([TYPE] type),
                    ([MOD] modifiers) ); }
    ;

In this example, a declaration is matched. The resulting AST has an "imaginary" DECL node at the root, with three children. The first child is the ID of the declaration. The second child is a subtree with an imaginary TYPE node at the root and the AST from the type rule as its child. The third child is a subtree with an imaginary MOD at the root and the results of the modifiers rule as its child.

Labeled subrules

[THIS WILL NOT BE IMPLEMENTED AS LABELED SUBRULES...We'll do something else eventually.]

In 2.00 ANTLR, each rule has exactly one tree associated with it. Subrules simply add elements to the tree for the enclosing rule, which is normally what you want. For example, expression trees are easily built via:


expr: ID ( PLUS^ ID )*
    ;
    

However, many times you want the elements of a subrule to produce a tree that is independent of the rule's tree. Recall that exponents must be computed before coefficients are multiplied in for exponent terms. The following grammar matches the correct syntax.


// match exponent terms such as "3*x^4"
eterm
    :   expr MULT ID EXPONENT expr
    ;
    

However, to produce the correct AST, you would normally split the ID EXPONENT expr portion into another rule like this:


eterm:
    expr MULT^ exp
    ;

exp:
	ID EXPONENT^ expr
    ;
    

In this manner, each operator would be the root of the appropriate subrule. For input 3*x^4, the tree would look like:


#(MULT 3 #(EXPONENT ID 4))
    

However, if you attempted to keep this grammar in the same rule:


eterm
    :   expr MULT^ (ID EXPONENT^ expr)
    ;
    

both "^" root operators would modify the same tree yielding


#(EXPONENT #(MULT 3 ID) 4)
    

This tree has the operators as roots, but they are associated with the wrong operands.

Using a labeled subrule allows the original rule to generate the correct tree.


eterm
    :   expr MULT^ e:(ID EXPONENT^ expr)
    ;
    

In this case, for the same input 3*x^4, the labeled subrule would build up its own subtree and make it the operand of the MULT tree of the eterm rule. The presence of the label alters the AST code generation for the elements within the subrule, making it operate more like a normal rule. Annotations of "^" make the node created for that token reference the root of the tree for the e subrule.

Labeled subrules have a result AST that can be accessed just like the result AST for a rule. For example, we could rewrite the above decl example using labeled subrules (note the use of ! at the start of the subrules to suppress automatic construction for the subrule):


decl!:
    m:(! modifiers { #m = #([MOD] modifiers); } )
    t:(! type { #t = #([TYPE] type); } )
    ID
    SEMI;
    { #decl = #( [DECL] ID t m ); }
    ;
    

What about subrules that are closure loops? The same rules apply to a closure subrule--there is a single tree for that loop that is built up according to the AST operators annotating the elements of that loop. For example, consider the following rule.


term:   T^ i:(OP^ expr)+
    ;
    

For input T OP A OP B OP C, the following tree structure would be created:


#(T #(OP #(OP #(OP A) B) C) )
    

which can be drawn graphically as


T
|
OP
|
OP--C
|
OP--B
|
A
    

The first important thing to note is that each iteration of the loop in the subrule operates on the same tree. The resulting tree, after all iterations of the loop, is associated with the subrule label. The result tree for the above labeled subrule is:


#(OP #(OP #(OP A) B) C)
    

The second thing to note is that, because T is matched first and there is a root operator after it in the rule, T would be at the bottom of the tree if it were not for the label on the subrule.

Loops will generally be used to build up lists of subtree. For example, if you want a list of polynomial assignments to produce a sibling list of ASSIGN subtrees, then the following rule you would normally split into two rules.


interp
    :   ( ID ASSIGN poly ";" )+
    ;
    

Normally, the following would be required


interp
    :   ( assign )+
    ;
assign
    :   ID ASSIGN^ poly ";"!
    ;
    

Labeling a subrule allows you to write the above example more easily as:


interp
    :   ( r:(ID ASSIGN^ poly ";") )+
    ;
    

Each recognition of a subrule results in a tree and if the subrule is nested in a loop, all trees are returned as a list of trees (i.e., the roots of the subtrees are siblings). If the labeled subrule is suffixed with a "!", then the tree(s) created by the subrule are not linked into the tree for the enclosing rule or subrule.

Labeled subrules within labeled subrules result in trees that are linked into the surrounding subrule's tree. For example, the following rule results in a tree of the form X #( A #(B C) D) Y.


a   :   X r:( A^ s:(B^ C) D) Y
    ;
    

Labeled subrules within nonlabeled subrules result in trees that are linked into the surrounding rule's tree. For example, the following rule results in a tree of the form #(A X #(B C) D Y).


a   :   X ( A^ s:(B^ C) D) Y
    ;    

Reference nodes

Not implemented. A node that does nothing but refer to another node in the tree. Nice for embedding the same tree in multiple lists.

Required AST functionality and form

The data structure representing your trees can have any form or type name as long as they implement the AST interface:

package antlr.collections;

/** Minimal AST node interface used by ANTLR
 *  AST generation and tree-walker.
 */
public interface AST {
    /** Get the token type for this node */
    public int getType();

    /** Set the token type for this node */
    public void setType(int ttype);

    /** Get the token text for this node */
    public String getText();

    /** Set the token text for this node */
    public void setText(String text);

    /** Get the first child of this node;
     *  null if no children */
    public AST getFirstChild();

    /** Set the first child of a node */
    public void setFirstChild(AST c);

    /** Get the next sibling in line after this
     * one
     */
    public AST getNextSibling();

    /** Set the next sibling after this one */
    public void setNextSibling(AST n);

    /** Add a (rightmost) child to this node */
    public void addChild(AST node);
    /** Are two nodes exactly equal? */
    public boolean equals(AST t);
    /** Are two lists of nodes/subtrees exactly
     *  equal in structure and content? */
    public boolean equalsList(AST t);
    /** Are two lists of nodes/subtrees
     *  partially equal? In other words, 'this'
     *  can be bigger than 't'
     */
    public boolean equalsListPartial(AST t);
    /** Are two nodes/subtrees exactly equal? */
    public boolean equalsTree(AST t);
    /** Are two nodes/subtrees exactly partially
     *  equal? In other words, 'this' can be
     *  bigger than 't'.
     */
    public boolean equalsTreePartial(AST t);
    /** Return an enumeration of all exact tree
     * matches for tree within 'this'.
     */
    public ASTEnumeration findAll(AST tree);
    /** Return an enumeration of all partial
     *  tree matches for tree within 'this'.
     */
    public ASTEnumeration findAllPartial(
        AST subtree);
    /** Init a node with token type and text */
    public void initialize(int t, String txt);
    /** Init a node using content from 't' */
    public void initialize(AST t);
    /** Init a node using content from 't' */
    public void initialize(Token t);
    /** Convert node to printable form */
    public String toString();
    /** Treat 'this' as list (i.e.,
     *  consider 'this'
     *  siblings) and convert to printable
     *  form
     */
    public String toStringList();
    /** Treat 'this' as tree root
     *  (i.e., don't consider
     *  'this' siblings) and convert
     *   to printable form */
    public String toStringTree();
}

This scheme does not preclude the use of heterogeneous trees versus homogeneous trees. However, you will need to write extra code to create heterogeneous trees (via a subclass of ASTFactory) or by specifying the node types at the token reference sites or in the tokens section, whereas the homogeneous trees are free.

Version: $Id: //depot/code/org.antlr/release/antlr-2.7.5/doc/trees.html#1 $
nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/doc/vocab.html000066400000000000000000000306411161462365500225270ustar00rootroot00000000000000 ANTLR Specification: Vocabularies

Token Vocabularies

Every grammar specifies language structure with rules (substructures) and vocabulary symbols.  These symbols are equated with integer "token types" for efficient comparison at run-time.  The files that define this mapping from symbol to token type are fundamental to the execution of ANTLR and ANTLR-generated parsers.   This document describes the files used and generated by ANTLR plus the options used to control the vocabularies.

Introduction

A parser grammar refers to tokens in its vocabulary by symbol that will correspond to Token objects, generated by the lexer or other token stream, at parse-time.    The parser compares a unique integer token type assigned to each symbol against the token type stored in the token objects.  If the parser is looking for token type 23, but finds that the first lookahead token's token type, LT(1).getType(), is not 23, then the parser throws MismatchedTokenException.

A grammar may have an import vocabulary and always has an export vocabulary, which can be referenced by other grammars.  Imported vocabularies are never modified and represent the "initial condition" of the vocabulary.  Do not confuse importVocabular

The following represent the most common questions:

How does ANTLR decide which vocabulary symbol gets what token type?

Each grammar has a token manager that manages a grammar's export vocabulary.  The token manager can be preloaded with symbol / token type pairs by using the grammar importVocab option.  The option forces ANTLR to look for a file with mappings that look like:

PLUS=44

Without the importVocab option, the grammar's token manager is empty (with one caveat you will see later).

Any token referenced in your grammar that does not have a predefined token type is assigned a type in the order  encountered.  For example, in the following grammar, tokens A and B will be 4 and 5, respectively:

class P extends Parser;
a : A B ;

Vocabulary file names are of the form: NameTokenTypes.txt.

Why do token types start at 4?

Because ANTLR needs some special token types during analysis.  User-defined token types must begin after 3.

What files associated with vocabulary does ANTLR generate?

ANTLR generates VTokenTypes.txt and VTokenTypes.java for vocabulary V where V is either the name of the grammar or specified in an exportVocab=V option. The text file is sort of a "freezedried" token manager and represents the persistent state needed by ANTLR to allow a grammar in a different file to see a grammar's vocabulary including string literals etc...  The Java file is an interface containing the token type constant definitions.  Generated parsers implement one of these interfaces to obtain the appropriate token type definitions.

How does ANTLR synchronize the symbol-type mappings between grammars in the same file and in different files?

The export vocabulary for one grammar must become the import vocabulary for another or the two grammars must share a common import vocabulary.

Imagine a parser P in p.g:

// yields PTokenTypes.txt
class P extends Parser;
// options {exportVocab=P;} ---> default!
decl : "int" ID ;

and a lexer L in l.g

class L extends Lexer;
options {
  importVocab=P; // reads PTokenTypes.txt
}
ID : ('a'..'z')+ ;

ANTLR generates LTokenTypes.txt and LTokenTypes.java even though L is primed with values from P's vocabulary.

Grammars in different files that must share the same token type space should use the importVocab option to preload the same vocabulary.

If these grammars are in the same file, ANTLR behaves in exactly same way.   However, you can get the two grammars to share the vocabulary (allowing them both to contribute to the same token space) by setting their export vocabularies to the same vocabulary name.  For example, with P and L in one file, you can do the following:

// yields PTokenTypes.txt
class P extends Parser;
// options {exportVocab=P;} ---> default!
decl : "int" ID ;
class L extends Lexer;
options {
  exportVocab=P; // shares vocab P
}
ID : ('a'..'z')+ ;

If you leave off the vocab options from L, it will choose to share the first export vocabulary in the file; in this case, it will share P's vocabulary.

// yields PTokenTypes.txt
class P extends Parser;
decl : "int" ID ;
// shares P's vocab
class L extends Lexer;
ID : ('a'..'z')+ ;

The token type mapping file looks like this

P    // exported token vocab name
LITERAL_int="int"=4
ID=5

Grammar Inheritance and Vocabularies

Grammars that extend supergrammars inherit rules, actions, and options but what vocabulary does the subgrammar use and what token vocabulary does it use?  ANTLR sees the subgrammar as if you had cut and paste all of the nonoverridden rules of the supergrammar into the subgrammar like an include.  Therefore, the set of tokens in the subgrammar is the union of the tokens defined in the supergrammar and in the supergrammar.  All grammars export a vocabulary file and so the subgrammar will export and use a different vocabulary than the supergrammar.  The subgrammar always imports the vocabulary of the supergrammar unless you override it with an importVocab option in the subgrammar.

A grammar Q that extends P primes its vocabulary with P's vocabulary as if Q had specified option importVocab=P.  For example, the following grammar has two token symbols.

class P extends Parser;
a : A Z ;

The subgrammar, Q, initially has the same vocabulary as the supergrammar, but may add additional symbols.

class Q extends P;
f : B ;

In this case, Q defines one more symbol, B, yielding a vocabulary for Q of {A,B,C}.

The vocabulary of a subgrammar is always a superset of the supergrammar's vocabulary.   Note that overriding rules does not affect the initial vocabulary.

If your subgrammar requires new lexical structures, unused by the supergrammar, you probably need to have the subparser use a sublexer.  Override the initial vocabulary with an importVocab option that specifies the vocabulary of the sublexer.  For example, assume parser P uses PL as a lexer.  Without an importVocab override, Q's vocabulary would use P's vocab and, consequently, PL's vocabulary.  If you would like Q to use token types from another lexer, say QL, do the following:

class Q extends P;
options {
  importVocab=QL;
}
f : B ;

Q's vocab will now be the same or a superset of QL's vocabulary.

Recognizer Generation Order

If all of your grammars are in one file, you do not have to worry about which grammar file ANTLR should process first, however, you still need to worry about the order in which ANTLR sees the grammars within the file.  If you try to import a vocabulary that will be exported by a grammar later in the file, ANTLR will complain that it cannot load the file.  The following grammar file will cause antlr to fail:

class P extends Parser;
options {
importVocab=L;
}

a : "int" ID;

class L extends Lexer;
ID : 'a';

ANTLR will complain that it cannot find LTokenTypes.txt because it has not seen grammar L yet in the grammar file.  On the other hand, if you happened to have LTokenTypes.txt lying around (from a previous run of ANTLR on the grammar file when P did not exist?), ANTLR will load it for P and then overwrite it again for L.  ANTLR must assume that you want to load a vocabulary generated from another file as it cannot know what grammars are approaching even in the same file.

In general, if you want grammar B to use token types from grammar A (regardless of grammar type), then you must run ANTLR on grammar A first.  So, for example, a tree grammar that uses the vocabulary of the parser grammar should be run after ANTLR has generated the parser.

When you want a parser and lexer, for example, to share the same vocabulary space, all you have to do is place them in the same file with their export vocabs pointing at the same place.  If they are in separate files, have the parser's import vocab set to the lexer's export vocab unless the parser is contributing lots of literals.  In this case, reverse the import/export relationship so the lexer uses the export vocabulary of the parser.

Tricky Vocabulary Stuff

What if your grammars are in separate files and you still want them to share all or part of a token space.  There are two solutions: (1) have the grammars import the same vocabulary or (2) have the grammars all inherit from the same base grammar that contains the common token space.

The first solution applies when you have two lexers and two parsers that must parse radically different portions of the input.  The example in examples/java/multiLexer of the ANTLR 2.6.0 distribution is such a situation.  The javadoc comments are parsed with a different lexer/parser than the regular Java portion of the input.  The "*/" terminating comment lexical structure is necessarily recognized by the javadoc lexer, but it is natural to have the Java parser enclose the launch of the javadoc parser with open/close token references:

javadoc
  : JAVADOC_OPEN
    {
    DemoJavaDocParser jdocparser =
      new DemoJavaDocParser(getInputState());
    jdocparser.content();
    }
    JAVADOC_CLOSE
  ;

The problem is: the javadoc lexer defines JAVADOC_CLOSE and hence defines its token type.  The vocabulary of the Java parser is based upon the Java lexer not the javadoc lexer, unfortunately.  To get the javadoc lexer and Java lexer to both see JAVADOC_CLOSE (and have the same token type), have both lexers import a vocabulary file that contains this token type definition.  Here are the heads of DemoJavaLexer and DemoJavaDocLexer:

class DemoJavaLexer extends Lexer;
options {
  importVocab = Common;
}
...
class DemoJavaDocLexer extends Lexer;
options {
  importVocab = Common;
}
...

CommonTokenTypes.txt contains:

Common // name of the vocab
JAVADOC_CLOSE=4

The second solution to vocabulary sharing applies when you have say one parser and three different lexers (e.g., for various flavors of C).  If you only want one parser for space efficiency, then the parser must see the vocabulary of all three lexers and prune out the unwanted structures grammatically (with semantic predicates probably).   Given CLexer, GCCLexer, and MSCLexer, make CLexer the supergrammar and have CLexer define the union of all tokens.  For example, if MSCLexer needs "_int32" then reserve a token type visible to all lexers in CLexer:

tokens {
  INT32;
}

In the MSCLexer then, you can actually attach a literal to it.

tokens {
  INT32="_int32"
}

In this manner, the lexers will all share the same token space allowing you to have a single parser recognize input for multiple C variants.

Version: $Id: //depot/code/org.antlr/release/antlr-2.7.5/doc/vocab.html#1 $
nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/examples/000077500000000000000000000000001161462365500216145ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/examples/Makefile.in000066400000000000000000000005251161462365500236630ustar00rootroot00000000000000############################################################################### # $Id:$ ############################################################################### ## do not change this value subdir=examples ## get configured variables @stdvars@ ## get configured (standard) rules @stdmake@ ## get configured dependencies @stddeps@ nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/examples/boo/000077500000000000000000000000001161462365500223735ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/examples/boo/calc/000077500000000000000000000000001161462365500232755ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/examples/boo/calc/calc.boo000066400000000000000000000006441161462365500247040ustar00rootroot00000000000000import antlr import System lexer = CalcLexer(CharBuffer(Console.In)) lexer.setFilename("") parser = CalcParser(lexer) parser.setFilename("") // Parse the input expression parser.expr() t as CommonAST = parser.getAST() // Print the resulting tree out in LISP notation print t.ToStringTree() walker = CalcTreeWalker() // Traverse the tree created by the parser r = walker.expr(t) print "value is ${r}" nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/examples/boo/calc/calc.g000066400000000000000000000013251161462365500243500ustar00rootroot00000000000000options { language = "Boo"; } class CalcParser extends Parser; options { buildAST = true; // uses CommonAST by default } expr : mexpr (PLUS^ mexpr)* SEMI! 
; mexpr : atom (STAR^ atom)* ; atom: INT ; class CalcLexer extends Lexer; WS : (' ' | '\t' | '\n' | '\r') { _ttype = Token.SKIP; } ; LPAREN: '(' ; RPAREN: ')' ; STAR: '*' ; PLUS: '+' ; SEMI: ';' ; protected DIGIT : '0'..'9' ; INT : (DIGIT)+ ; class CalcTreeWalker extends TreeParser; expr returns [single r] { a as single b as single r = 0 }: #(PLUS a=expr b=expr) { r = a+b; } | #(STAR a=expr b=expr) { r = a*b; } | i:INT { r = Convert.ToSingle(i.getText()); } ; nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/examples/boo/calc/default.build000066400000000000000000000014471161462365500257500ustar00rootroot00000000000000 nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/examples/cpp/000077500000000000000000000000001161462365500223765ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/examples/cpp/ASTsupport/000077500000000000000000000000001161462365500244625ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/examples/cpp/ASTsupport/Main.cpp000066400000000000000000000001411161462365500260460ustar00rootroot00000000000000#include #include "SupportTest.hpp" int main() { SupportTest::main(); return 0; } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/examples/cpp/ASTsupport/Makefile.in000066400000000000000000000050711161462365500265320ustar00rootroot00000000000000############################################################################### # $Id:$ ############################################################################### ##xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx @stdvars@ ##xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx ## do not change this value subdir=examples/cpp/ASTsupport srcdir = @abs_top_srcdir@/examples/cpp/ASTsupport objdir = @abs_this_builddir@/examples/cpp/ASTsupport this : test test : test1 all: ASTSupport test g_FILES = \ $(srcdir)/test.g \ $(eol) g_cxx_FILES = \ $(objdir)/SupportTest.cpp \ $(eol) g_hpp_FILES = \ $(objdir)/SupportTest.hpp \ 
$(objdir)/SupportTestTokenTypes.hpp \ $(eol) g_txt_FILES = \ $(objdir)/SupportTestTokenTypes.txt \ $(eol) g_gen_FILES = \ $(g_cxx_FILES) \ $(g_hpp_FILES) \ $(g_txt_FILES) \ $(eol) g_obj_FILES = \ $(objdir)/SupportTest@OBJEXT@ \ $(eol) ## Source files ASTSupport_cxx_FILES = \ $(g_cxx_FILES) \ $(srcdir)/Main.cpp \ $(eol) ## Object files ASTSupport_obj_FILES = \ $(g_obj_FILES) \ $(objdir)/Main@OBJEXT@ \ $(eol) ## Test commands .. test1_cmd = $(objdir)/ASTSupport ## Tests to be executed test1 : ASTSupport @ v="$(verbose)"; \ if test $$v -gt 0 ; then \ echo "$(test1_cmd)" ; \ else \ echo "exec test1 .."; \ fi @ $(test1_cmd) ## How to make binaries ASTSupport : $(ASTSupport_obj_FILES) @ANTLR_LIB@ @ @CXX_LINK_CMD@ $@ $(ASTSupport_obj_FILES) ## How to compile ANTLR grammar files $(g_gen_FILES) : $(g_FILES) @ANTLR_JAR@ @ANTLR_COMPILE_CMD@ $(g_FILES) ## GNU make - how to make object file $(objdir)/%@OBJEXT@ : $(srcdir)/%.cpp @ @CXX_COMPILE_CMD@ $< ## GNU make - how to make object file $(objdir)/%@OBJEXT@ : $(objdir)/%.cpp @ @CXX_COMPILE_CMD@ $< ## GNU make - how to make object file %@OBJEXT@ : $(srcdir)/%.cpp @ @CXX_COMPILE_CMD@ $< ## GNU make - how to make object file %@OBJEXT@ : $(objdir)/%.cpp @ @CXX_COMPILE_CMD@ $< ## Housecleaning clean: rm -rf *.o *.obj *.exe ASTSupport $(g_gen_FILES) distclean: clean # Dependencies $(objdir)/SupportTest@OBJEXT@ : \ $(objdir)/SupportTest.hpp \ $(objdir)/SupportTestTokenTypes.hpp $(objdir)/Main@OBJEXT@ : \ $(objdir)/SupportTest.hpp \ $(objdir)/SupportTestTokenTypes.hpp ## If cxx.sh script changed .. $(ASTSupport_obj_FILES) : @abs_this_builddir@/scripts/cxx.sh ## If link.sh script changed .. 
$(ASTSupport_obj_FILES) : @abs_this_builddir@/scripts/link.sh ##xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx @stddeps@ ##xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/examples/cpp/ASTsupport/test.g000066400000000000000000000161641161462365500256210ustar00rootroot00000000000000header { #include #include } options { language=Cpp; } { // into cpp file... ANTLR_USING_NAMESPACE(std); ANTLR_USING_NAMESPACE(antlr); void SupportTest::main(void) { try { bool r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r12,r13,r14,r15,r16,r17; // define "astFactory" so translation of #(...) works // do some dirty tricks to get an initialized astFactory. ASTFactory ast_factory; ParserSharedInputState is(new ParserInputState(0)); SupportTest dummy(is); dummy.initializeASTFactory(ast_factory); dummy.setASTFactory(&ast_factory); // dirty trick to get to our member variable and get the right // codegen... ASTFactory *astFactory = dummy.getASTFactory(); RefAST t = #([ASSIGN,"="], [ID,"a"], [INT,"1"]); // build "a=1" tree cout << "t is " << t->toStringList() << endl; RefAST u = #([ASSIGN,"="], [ID,"b"]); // build "b=?" tree cout << "u is " << u->toStringList() << endl; RefAST v = #([ASSIGN,"="], [INT,"4"]); // build "4=?" 
tree cout << "v is " << v->toStringList() << endl; RefAST w = #[ASSIGN,"="]; // build "=" tree cout << "w is " << w->toStringList() << endl; cout << endl; cout << "t.equalsTree(t) is " << (r1=t->equalsTree(t)) << endl; cout << "t.equalsTree(u) is " << (r2=t->equalsTree(u)) << endl; cout << "t.equalsTree(v) is " << (r3=t->equalsTree(v)) << endl; cout << "t.equalsTree(w) is " << (r4=t->equalsTree(w)) << endl; cout << "t.equalsTree(null) is " << (r5=t->equalsTree(nullAST)) << endl; cout << endl; cout << "t.equalsTreePartial(t) is " << (r6=t->equalsTreePartial(t)) << endl; cout << "t.equalsTreePartial(u) is " << (r7=t->equalsTreePartial(u)) << endl; cout << "t.equalsTreePartial(v) is " << (r8=t->equalsTreePartial(v)) << endl; cout << "t.equalsTreePartial(w) is " << (r9=t->equalsTreePartial(w)) << endl; cout << "t.equalsTreePartial(null) is " << (r10=t->equalsTreePartial(nullAST)) << endl; cout << endl; /* (A (B C (A B)) (A B) (F (A B)) (A (A B)) ) J Visually: A---------------------J | B-----A-----F----A | | | | C--A B A A | | | B B B */ RefAST a = #(nullAST, ([A,"A"], ([B,"B"], [C,"C"], ([A,"A"],[B,"B"])), ([A,"A"],[B,"B"]), ([F,"F"], #([A,"A"], [B,"B"])), ([A,"A"], #([A,"A"], [B,"B"]))), [J,"J"]); cout << "a is "<toStringList()<<"\n" << endl; cout << " A---------------------J" << endl; cout << " |" << endl; cout << " B-----A-----F----A" << endl; cout << " | | | |" << endl; cout << " C--A B A A" << endl; cout << " | | |" << endl; cout << " B B B\n" << endl; RefAST x = a->getFirstChild()->getNextSibling(); cout << "x is second sibling of upperleftmost A: "<toStringList() << endl; RefAST y = a->getFirstChild()->getNextSibling()->getFirstChild(); cout << "y is child B of x: "<toStringList() << endl; cout << "x.equalsTree(#(A B)) is "<<(r11=x->equalsTree(#([A,"A"],[B,"B"]))) << endl; cout << "x.equalsList(#(A B)) is "<<(r12=x->equalsList(#([A,"A"],[B,"B"]))) << endl; cout << "x.equalsListPartial(#(A B)) is "<<(r13=x->equalsListPartial(#([A,"A"],[B,"B"]))) << endl; cout << 
"a.equalsTree(#(A B)) is "<<(r14=a->equalsTree(#([A,"A"],[B,"B"]))) << endl; cout << "a.equalsTreePartial(#(A B)) is "<<(r15=a->equalsTreePartial(#([A,"A"],[B,"B"]))) << endl; cout << "y.equalsList(#[B]) is "<<(r16=y->equalsList(#[B,"B"])) << endl; cout << "y.equalsListPartial(#[B]) is "<<(r17=y->equalsList(#[B,"B"])) << endl; vector _enum; cout << "\na.findAllPartial(#(A B)):" << endl; _enum = a->findAllPartial(#([A,"A"],[B,"B"])); {for (vector::const_iterator i=_enum.begin();i!=_enum.end();i++) { cout << (*i)->toStringList() << endl; } } cout << "\na.findAllPartial(#[A])):" << endl; _enum = a->findAllPartial(#[A,"A"]); {for (vector::const_iterator i=_enum.begin();i!=_enum.end();i++) { cout << (*i)->toStringList() << endl; } } cout << "\na.findAll(#(A B)):" << endl; _enum = a->findAll(#([A,"A"],[B,"B"])); {for (vector::const_iterator i=_enum.begin();i!=_enum.end();i++) { cout << (*i)->toStringList() << endl; } } // check results cout << "\nTest results:" << endl; if ( r1==true && r2==false && r3==false && r4==false && r5==false && r11==true && r14==false) { cout << "equalsTree is ok" << endl; } else { cout << "equalsTree is bad" << endl; } if ( r6==true && r7==false && r8==false && r9==true && r10==true ) { cout << "equalsTreePartial is ok" << endl; } else { cout << "equalsTreePartial is bad" << endl; } if ( r12==false && r16==true ) { cout << "equalsList is ok" << endl; } else { cout << "equalslist is bad" << endl; } if ( r13==true && r17==true ) { cout << "equalsListPartial is ok" << endl; } else { cout << "equalslistPartial is bad" << endl; } } catch( ... ) { cout << "Exception caught"; } } } class SupportTest extends Parser; options { buildAST = true; genHashLines = false; } { /** Test the equals, equalsSubtree, and findAll methods plus AST enumeration. 
* The output should be: t is ( = a 1 ) u is ( = b ) v is ( = 4 ) w is = t.equalsTree(t) is true t.equalsTree(u) is false t.equalsTree(v) is false t.equalsTree(w) is false t.equalsTree(null) is false t.equalsTreePartial(t) is true t.equalsTreePartial(u) is false t.equalsTreePartial(v) is false t.equalsTreePartial(w) is true t.equalsTreePartial(null) is true a is ( A ( B C ( A B ) ) ( A B ) ( F ( A B ) ) ( A ( A B ) ) ) J A---------------------J | B-----A-----F----A | | | | C--A B A A | | | B B B x is second sibling of upperleftmost A: ( A B ) ( F ( A B ) ) ( A ( A B ) ) y is child B of x: B x.equalsTree(#(A B)) is true x.equalsList(#(A B)) is false x.equalsListPartial(#(A B)) is true a.equalsTree(#(A B)) is false a.equalsTreePartial(#(A B)) is true y.equalsList(#[B]) is true y.equalsListPartial(#[B]) is true a.findAllPartial(#(A B)): ( A ( B C ( A B ) ) ( A B ) ( F ( A B ) ) ( A ( A B ) ) ) J ( A B ) ( A B ) ( F ( A B ) ) ( A ( A B ) ) ( A B ) ( A B ) a.findAllPartial(#[A])): ( A ( B C ( A B ) ) ( A B ) ( F ( A B ) ) ( A ( A B ) ) ) J ( A B ) ( A B ) ( F ( A B ) ) ( A ( A B ) ) ( A B ) ( A ( A B ) ) ( A B ) a.findAll(#(A B)): ( A B ) ( A B ) ( F ( A B ) ) ( A ( A B ) ) ( A B ) ( A B ) Test results: equalsTree is ok equalsTreePartial is ok equalsList is ok equalsListPartial is ok */ public: static void main(); } defTokenTypes : ID INT ASSIGN PLUS A B C D E F G H I J K ; /* rule[AST t] : BLAH; another { #another = on here. // should translate } : rule[#another=foo] rule[#another] A // should get errors on those rule refs ; */ nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/examples/cpp/HTML/000077500000000000000000000000001161462365500231425ustar00rootroot00000000000000nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/examples/cpp/HTML/Main.cpp000066400000000000000000000011641161462365500245340ustar00rootroot00000000000000/* Simple class for testing antlr-generated HTML parser/lexer. 
Alexander Hinds, Magelang Institute ahinds@magelang.com */ #include #include "HTMLLexer.hpp" #include "HTMLParser.hpp" #include "antlr/TokenBuffer.hpp" int main( int, char** ) { ANTLR_USING_NAMESPACE(std) ANTLR_USING_NAMESPACE(antlr) try { HTMLLexer lexer(cin); TokenBuffer buffer(lexer); HTMLParser parser(buffer); parser.document(); } catch( ANTLRException& e ) { cerr << "exception: " << e.getMessage() << endl; return -1; } catch( exception& e ) { cerr << "exception: " << e.what() << endl; return -1; } return 0; } nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/examples/cpp/HTML/Makefile.in000066400000000000000000000051331161462365500252110ustar00rootroot00000000000000############################################################################### # $Id:$ ############################################################################### ##xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx @stdvars@ ##xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx ## do not change this value subdir=examples/cpp/HTML srcdir = @abs_top_srcdir@/examples/cpp/HTML objdir = @abs_this_builddir@/examples/cpp/HTML this : test test : test1 all: HTML test g_FILES = \ $(srcdir)/html.g \ $(eol) g_cxx_FILES = \ $(objdir)/HTMLLexer.cpp \ $(objdir)/HTMLParser.cpp \ $(eol) g_hpp_FILES = \ $(objdir)/HTMLLexer.hpp \ $(objdir)/HTMLParser.hpp \ $(objdir)/HTMLTokenTypes.hpp \ $(eol) g_txt_FILES = \ $(objdir)/HTMLTokenTypes.txt \ $(eol) g_gen_FILES = \ $(g_cxx_FILES) \ $(g_hpp_FILES) \ $(g_txt_FILES) \ $(eol) g_obj_FILES = \ $(objdir)/HTMLParser@OBJEXT@ \ $(objdir)/HTMLLexer@OBJEXT@ \ $(eol) ## Source files HTML_cxx_FILES = \ $(g_cxx_FILES) \ $(srcdir)/Main.cpp \ $(eol) ## Object files HTML_obj_FILES = \ $(g_obj_FILES) \ $(objdir)/Main@OBJEXT@ \ $(eol) ## Test commands .. 
test1_cmd = $(objdir)/HTML < $(srcdir)/test.html ## Tests to be executed test1 : HTML @ v="$(verbose)"; \ if test $$v -gt 0 ; then \ echo "$(test1_cmd)" ; \ else \ echo "exec test1 .."; \ fi @ $(test1_cmd) ## How to make binaries HTML : $(HTML_obj_FILES) @ANTLR_LIB@ @ @CXX_LINK_CMD@ $@ $(HTML_obj_FILES) ## How to compile ANTLR grammar files $(g_gen_FILES) : $(g_FILES) @ANTLR_JAR@ @ANTLR_COMPILE_CMD@ $(g_FILES) ## GNU make - how to make object file $(objdir)/%@OBJEXT@ : $(srcdir)/%.cpp @ @CXX_COMPILE_CMD@ $< ## GNU make - how to make object file $(objdir)/%@OBJEXT@ : $(objdir)/%.cpp @ @CXX_COMPILE_CMD@ $< ## GNU make - how to make object file %@OBJEXT@ : $(srcdir)/%.cpp @ @CXX_COMPILE_CMD@ $< ## GNU make - how to make object file %@OBJEXT@ : $(objdir)/%.cpp @ @CXX_COMPILE_CMD@ $< ## Housecleaning clean: rm -rf *.o *.obj *.exe HTML $(g_gen_FILES) distclean: clean # Dependencies $(objdir)/HTMLLexer.o: $(objdir)/HTMLLexer.cpp $(objdir)/HTMLLexer.hpp $(objdir)/HTMLTokenTypes.hpp $(objdir)/HTMLParser.o: $(objdir)/HTMLParser.cpp $(objdir)/HTMLParser.hpp $(objdir)/HTMLTokenTypes.hpp $(objdir)/Main.o: $(srcdir)/Main.cpp $(objdir)/HTMLLexer.hpp ## If cxx.sh script changed .. $(HTML_obj_FILES) : @abs_this_builddir@/scripts/cxx.sh ## If link.sh script changed .. $(HTML_obj_FILES) : @abs_this_builddir@/scripts/link.sh ##xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx @stddeps@ ##xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx nant-0.9.5~git20110729.r1.202a430/lib/antlr-2.7.5/examples/cpp/HTML/html.g000066400000000000000000000350231161462365500242610ustar00rootroot00000000000000header { #include } /* Based on the HTML 3.2 spec. by the W3 (http://www.w3.org) Alexander Hinds & Terence Parr Magelang Institute, Ltd. Send comments to: parrt@jguru.com v1.1 Terence Parr (updated to 2.6.0) Fixed CCYTE->CCITE Fixed def of COMMENT_DATA so it scarfs stuff correctly. Also, fixed refs to (PCDATA)? 
-> (PCDATA)* because a comment between PCDATA returns 2 PCDATA--ya need the loop not optional. v1.0 Terence John Parr (version 2.5.0 of ANTLR required) Fixed how whitespace as handled, removing some ambiguities; some because of ANTLR lexical filtering in 2.5.0. Changed (PCDATA)* loops to (PCDATA)? general since PCDATA matches everything between valid tags (how could there be more than one between tags?) Made the DOCTYPE optional. Reduced lookahead from k=5 to k=1 on the parser and number of parser ambiguities to 2. Reduced lexer lookahead from 6 to 4; had to left factor a bunch of stuff. List items couldn't contain nested lists...fixed it. Fixed def of WORD so it can't be an INT. Removed '-' from WORD. Fixed HEXNUM so it will allow letters A..F. KNOWN ISSUES: 1. Does not handle "staggered" tags, eg:

2. Adhere's somewhat strictly to the html spec, so many pages won't parse without errors. 3. Doesn't convert &(a signifier) to it's proper single char representation 4. Checks only the syntax of element attributes, not the semantics, e.g. won't very that a base element's attribute is actually called "href" 5. Tags split across lines, for example, won't be properly recognized. TJP: I think I fixed this. 7. Lines not counted properly due to the def'n of PCDATA - see the alternate def'n for a possible fix. TJP: I think I fixed this. */ options { language="Cpp"; } class HTMLParser extends Parser; options { exportVocab=HTML; k = 1; } document : (PCDATA)? (DOCTYPE (PCDATA)?)? (OHTML (PCDATA)?)? (head)? (body)? (CHTML (PCDATA)?)? ; head: (OHEAD (PCDATA)?)? head_element (PCDATA | head_element)* (CHEAD (PCDATA)?)? ; head_element : title //bug need at least a title, rest optional | script | style | ISINDEX | BASE | META | LINK ; title : OTITLE (PCDATA)? CTITLE ; script : OSCRIPT (~CSCRIPT)+ CSCRIPT ; style : OSTYLE (~CSTYLE)+ CSTYLE ; body: ( OBODY (PCDATA)* )? body_content_no_PCDATA ( body_content )+ ( CBODY (PCDATA)* )? ; body_content_no_PCDATA : body_tag | text_tag ; body_tag : heading | block | ADDRESS ; body_content : body_tag | text ; /*revised*/ heading : h1 | h2 | h3 | h4 | h5 | h6 ; block : paragraph | list | preformatted | div | center | blockquote | HR | table ; //bug - ?FORM v %form, ISINDEX here too? 
font: teletype | italic | bold | underline | strike | big | small | subscript | superscript ; phrase : emphasize | strong | definition | code | sample_output| keyboard_text | variable | citation ; special : anchor | IMG | applet | font_dfn | BFONT | map | BR ; text_tag : font | phrase | special | form ; text: PCDATA | text_tag ; /*end*/ /*BLOCK ELEMENTS*/ h1 : OH1 (block | text)* CH1 ; h2 : OH2 (block | text)* CH2 ; h3 : OH3 (block | text)* CH3 ; h4 : OH4 (block | text)* CH4 ; h5 : OH5 (block | text)* CH5 ; h6 : OH6 (block | text)* CH6 ; address : OADDRESS (PCDATA)* CADDRESS ; //NOTE: according to the standard, paragraphs can't contain block elements //like HR. Netscape may insert these elements into paragraphs. //We adhere strictly here. paragraph : OPARA ( /* Rule body_content may also be just plain text because HTML is so loose. When body puts body_content in a loop, ANTLR doesn't know whether you want it to match all the text as part of this paragraph (in the case where the

is missing) or if the body rule should scarf it. This is analogous to the dangling-else clause. I shut off the warning. */ options { generateAmbigWarnings=false; } : text )* (CPARA)? ; list: unordered_list | ordered_list | def_list ; unordered_list : OULIST (PCDATA)* (list_item)+ CULIST ; ordered_list : OOLIST (PCDATA)* (list_item)+ COLIST ; def_list : ODLIST (PCDATA)* (def_list_item)+ CDLIST ; list_item : OLITEM ( text | list )+ (CLITEM (PCDATA)*)? ; def_list_item : dt | dd ; dt : ODTERM (text)+ CDTERM (PCDATA)* ; dd : ODDEF (text | block)+ CDTERM (PCDATA)* ; dir : ODIR (list_item)+ CDIR ; menu: OMENU (list_item)+ CMENU ; preformatted : OPRE (text)+ CPRE ; div : ODIV (body_content)* CDIV //semi-revised ; center : OCENTER (body_content)* CCENTER //semi-revised ; blockquote : OBQUOTE (body_content)* CBQUOTE ; form: OFORM (form_field | body_content)* CFORM ; table : OTABLE (caption)? (PCDATA)* (tr)+ CTABLE ; caption : OCAP (text)* CCAP ; tr : O_TR (PCDATA)* (th_or_td)* (C_TR (PCDATA)*)? ; th_or_td : O_TH_OR_TD (body_content)* (C_TH_OR_TD (PCDATA)*)? 
; /*TEXT ELEMENTS*/ /*font style*/ teletype : OTTYPE ( text )+ CTTYPE ; italic : OITALIC ( text )+ CITALIC ; bold: OBOLD ( text )+ CBOLD ; underline : OUNDER ( text )+ CUNDER ; strike : OSTRIKE ( text )+ CSTRIKE ; big : OBIG ( text )+ CBIG ; small : OSMALL ( text )+ CSMALL ; subscript : OSUB ( text )+ CSUB ; superscript : OSUP ( text )+ CSUP ; /*phrase elements*/ emphasize : OEM ( text )+ CEM ; strong : OSTRONG ( text )+ CSTRONG ; definition : ODFN ( text )+ CDFN ; code : OCODE ( text )+ CCODE ; sample_output : OSAMP ( text )+ CSAMP ; keyboard_text : OKBD ( text )+ CKBD ; variable : OVAR ( text )+ CVAR ; citation : OCITE ( text )+ CCITE ; /* form fields (combined with body_content elsewhere so no PCDATA on end) */ form_field : INPUT | select | textarea ; select : OSELECT (PCDATA)* (select_option)+ CSELECT ; select_option : SELOPT (PCDATA)* ; textarea : OTAREA (PCDATA)* CTAREA ; /* special text level elements*/ anchor : OANCHOR (text)* CANCHOR ; applet : OAPPLET (APARAM)? (PCDATA)* CAPPLET ; //not w3-no blocks allowed; www.microsoft.com uses font_dfn : OFONT (text)* CFONT ; map : OMAP (AREA)+ CMAP ; class HTMLLexer extends Lexer; options { k = 4; exportVocab=HTML; charVocabulary = '\3'..'\377'; caseSensitive=false; filter=UNDEFINED_TOKEN; } /* STRUCTURAL tags */ DOCTYPE options { ignore=WS_; } : "' ; OHTML : "" ; CHTML : "" ; OHEAD : "" ; CHEAD : "" ; OBODY : "' ; CBODY : "" ; /* HEAD ELEMENTS */ OTITLE : "" ; CTITLE : "" ; OSCRIPT : "" ; ISINDEX : "' ; META : "' ; LINK : "' ; /* headings */ OH1 : "' ; CH1 : "" ; OH2 : "' ; CH2 : "" ; OH3 : "' ; CH3 : "" ; OH4 : "' ; CH4 : "" ; OH5 : "' ; CH5 : "" ; OH6 : "' ; CH6 : "" ; OADDRESS : "
" ; CADDRESS : "
" ; OPARA : "' ; CPARA : "

" //it's optional ; /*UNORDERED LIST*/ OULIST : "' ; CULIST : "" ; /*ORDERED LIST*/ OOLIST : "' ; COLIST : "" ; /*LIST ITEM*/ OLITEM : "' ; CLITEM : "" ; /*DEFINITION LIST*/ ODLIST : "' ; CDLIST : "" ; ODTERM : "
" ; CDTERM : "
" ; ODDEF : "
" ; CDDEF : "
" ; ODIR: "" ; CDIR_OR_CDIV : "' ; ODIV: "' ; OMENU : "" ; CMENU : "" ; OPRE: ("
" | "") ('\n')? 
	;

CPRE:	 "</pre>" | "" 
	;

OCENTER
	:	"
" ; CCENTER : "
" ; OBQUOTE : "
" ; CBQUOTE : "
" ; //this is block element and thus can't be nested inside of //other block elements, ex: paragraphs. //Netscape appears to generate bad HTML vis-a-vis the standard. HR : "' ; OTABLE : "' ; CTABLE : "" ; OCAP: "' ; CCAP: "" ; O_TR : "' ; C_TR: "" ; O_TH_OR_TD : ("' ; C_TH_OR_TD : "" | "" ; /* PCDATA-LEVEL ELEMENTS */ /* font style elemens*/ OTTYPE : "" ; CTTYPE : "" ; OITALIC : "" ; CITALIC : "" ; OBOLD : "" ; CBOLD : "" ; OUNDER : "" ; CUNDER : "" ; /** Left-factor and to reduce lookahead */ OSTRIKE_OR_OSTRONG : "' ; CST_LEFT_FACTORED : "' ; OSTYLE : "