source: GTP/trunk/App/Demos/Geom/OgreStuff/include/OgreCompiler2Pass.h @ 1812

Revision 1812, 32.6 KB checked in by gumbau, 18 years ago
/*
-----------------------------------------------------------------------------
This source file is part of OGRE
    (Object-oriented Graphics Rendering Engine)
For the latest info, see http://www.stevestreeting.com/ogre/

Copyright (c) 2000-2005 The OGRE Team
Also see acknowledgements in Readme.html

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; either version 2 of the License, or (at your option) any later
version.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc., 59 Temple
Place - Suite 330, Boston, MA 02111-1307, USA, or go to
http://www.gnu.org/copyleft/gpl.html.
-----------------------------------------------------------------------------
*/


#ifndef __Compiler2Pass_H__
#define __Compiler2Pass_H__

#include <vector>
// <map> is required for the std::map members and typedefs used below
#include <map>
#include "OgrePrerequisites.h"

namespace Ogre {

    /** Compiler2Pass is a generic 2 pass compiler/assembler
    @remarks
            provides a tokenizer in pass 1 and relies on the subclass to provide the virtual method for pass 2

            PASS 1 - tokenize source: this is a simple brute force lexical scanner/analyzer that also parses
                            the formed token for proper semantics and context in one pass.
                            It uses top-down (recursive descent) rules based on Backus-Naur Form (BNF) notation for semantic
                            checking.

                            During Pass 1, if a terminal token is identified as having an action then that action gets triggered
                            when the next terminal token is encountered that has an action.

            PASS 2 - generate application specific instructions, ie native instructions, based on the tokens in the instruction container.

    @par
            this class must be subclassed with the subclass providing some implementation details for Pass 2.  The subclass
            is responsible for setting up the token libraries along with defining the language syntax and handling
            token actions during the second pass.

    @par
        The subclass normally supplies a simplified BNF text description in its constructor prior to doing any parsing/tokenizing of source.
        The simplified BNF text description defines the language syntax and rule structure.
        The meta-symbols used in the BNF text description are:
    @par
        ::=  meaning "is defined as". "::=" starts the definition of a rule.  The left side of ::= must contain an <identifier>
    @par
        <>   angle brackets are used to surround syntax rule names. A syntax rule name is also called a non-terminal in that
             it does not generate a terminal token in the instruction container for pass 2 processing.
    @par
        |    meaning "or". If the item on the left of the | fails then the item on the right is tested.
             Example: <true_false> ::= 'true' | 'false';
             whitespace is used to imply an AND operation between left and right items.
             Example: <terrain_shadows> ::= 'terrain_shadows' <true_false>
             the 'terrain_shadows' terminal token must be found and the <true_false> rule must pass in order for the <terrain_shadows> rule
             to pass.
    @par
        []   an optional rule identifier is enclosed in meta symbols [ and ].
             Note that only one identifier or terminal token can take the [] modifier.
    @par
        {}   a repetitive identifier (zero or more times) is enclosed in meta symbols { and }.
             Note that only one identifier or terminal token can take the {} modifier.
    @par
        ''   terminal tokens are surrounded by single quotes.  A terminal token is always one or more characters.
             For example: 'Colour' defines a character sequence that must be matched in whole.  Note that matching is case
             sensitive.
    @par
        @    turn on single character scanning and don't skip white space.
             Mainly used for label processing that allows white space.
             Example: '@ ' prevents the white space between the quotes from being skipped
    @par
        -''  no terminal token is generated when a - precedes the first single quote but the text in between the quotes is still
             tested against the characters in the source being parsed.
    @par
        (?! ) negative lookahead (not test) inspired by Perl 5. Scans ahead for a non-terminal or terminal expression
             that should fail in order to make the rule production pass.
             Does not generate a token or advance the cursor.  If the lookahead result fails, ie the token is found,
             then the current rule fails and rollback occurs.  Mainly used to resolve multiple contexts of a token.
             An example of where the not test is used to resolve multiple contexts:

             <rule>       ::=  <identifier>  "::="  <expression>\n
             <expression> ::=  <and_term> { <or_term> }\n
             <or_term>    ::=  "|" <and_term>\n
             <and_term>   ::=  <term> { <term> }\n
             <term>       ::=  <identifier_right> | <terminal_symbol> | <repeat_expression> | <optional_expression>\n
             <identifier_right> ::= <identifier> (?!"::=")

             <identifier> appears on both sides of the ::= so (?!"::=") tests to make sure that ::= is not on the
             right, which would indicate that a new rule was being formed.

             Works on both terminals and non-terminals.
             Note: lookahead failure causes the whole rule to fail and rollback to occur

    @par
        <#name> # indicates that a numerical value is to be parsed to form a terminal token.  Name is optional and is just a descriptor
             to help with understanding what the value will be used for.
             Example: <Colour> ::= <#red> <#green> <#blue>

    @par
        ()   parentheses enclose a set of characters that can be used to generate a user identifier. For example:
             (0123456789) matches a single character found in that set.
             An example of a user identifier:

    @par
             <Label> ::= <Character> {<Character>}\n
             <Character> ::= (abcdefghijklmnopqrstuvwxyz)

             This will generate a rule that accepts one or more lowercase letters to make up the Label.  The user identifier
             stops collecting the characters into a string when a match cannot be found in the rule.

    @par
        (! ) if the first character in the set is a ! then any input character not found in the set will be
             accepted.
             An example:

             <Label> ::= <AnyCharacter_NoLineBreak> {<AnyCharacter_NoLineBreak>}\n
             <AnyCharacter_NoLineBreak> ::= (!\n\r)

             any character but \n or \r is accepted in the input.

    @par
        :    Insert the terminal token on the left before the next terminal token on the right if the next terminal token on the right parses.
             Useful for when terminal tokens don't have a definite text state but only a context state based on another terminal or character token.
             An example:

             <Last_Resort> ::= 'external_command' : <Special_Label>\n
             <Special_Label> ::= (!\n\r\t)

             In the example, <Last_Resort> gets processed when all other rules fail to parse.
             If <Special_Label> parses (reads in any character but \n\r\t) then the terminal token 'external_command'
             is inserted prior to the Special_Label for pass 2 processing.  'external_command' does not have an explicit text
             representation but based on the context of no other rules matching and <Special_Label> parsing, 'external_command' is
             considered parsed.
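    @par
        As an illustrative sketch only (the class, member, and rule names below are
        hypothetical, not part of any shipped grammar), a subclass could supply a
        complete miniature grammar accepting statements such as "fog true":
    @code
        const String MyCompiler::fogBNF =
            "<Script>     ::= <Statement> {<Statement>} \n"
            "<Statement>  ::= 'fog' <true_false> \n"
            "<true_false> ::= 'true' | 'false' \n";
    @endcode
        Here 'fog', 'true', and 'false' become terminal tokens in the instruction
        container for pass 2 processing, while <Script>, <Statement>, and <true_false>
        are non-terminals that only drive rule evaluation in pass 1.  A string like
        this is what getClientBNFGrammer is expected to return.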
    */
    class _OgreExport Compiler2Pass
    {

    protected:

        // BNF operation types
        enum OperationType {otUNKNOWN, otRULE, otAND, otOR, otOPTIONAL,
                            otREPEAT, otDATA, otNOT_TEST, otINSERT_TOKEN, otEND};

        /** structure used to build rule paths */
        struct TokenRule
        {
            OperationType operation;
            size_t tokenID;

            TokenRule(void) : operation(otUNKNOWN), tokenID(0) {}
            TokenRule(const OperationType ot, const size_t token)
                : operation(ot), tokenID(token) {}
        };

        typedef std::vector<TokenRule> TokenRuleContainer;
        typedef TokenRuleContainer::iterator TokenRuleIterator;

        static const size_t SystemTokenBase = 1000;
        enum SystemRuleToken {
            _no_token_ = SystemTokenBase,
            _character_,
            _value_,
            _no_space_skip_
        };

        enum BNF_ID {BNF_UNKOWN = 0,
            BNF_SYNTAX, BNF_RULE, BNF_IDENTIFIER, BNF_IDENTIFIER_RIGHT, BNF_IDENTIFIER_CHARACTERS, BNF_ID_BEGIN, BNF_ID_END,
            BNF_CONSTANT_BEGIN, BNF_SET_RULE, BNF_EXPRESSION,
            BNF_AND_TERM, BNF_OR_TERM, BNF_TERM, BNF_TERM_ID, BNF_CONSTANT, BNF_OR, BNF_TERMINAL_SYMBOL, BNF_TERMINAL_START,
            BNF_REPEAT_EXPRESSION, BNF_REPEAT_BEGIN, BNF_REPEAT_END, BNF_SET, BNF_SET_BEGIN, BNF_SET_END,
            BNF_NOT_TEST, BNF_NOT_TEST_BEGIN, BNF_CONDITIONAL_TOKEN_INSERT, BNF_OPTIONAL_EXPRESSION,
            BNF_NOT_EXPRESSION, BNF_NOT_CHK,
            BNF_OPTIONAL_BEGIN, BNF_OPTIONAL_END, BNF_NO_TOKEN_START, BNF_SINGLEQUOTE, BNF_SINGLE_QUOTE_EXC, BNF_SET_END_EXC,
            BNF_ANY_CHARACTER, BNF_SPECIAL_CHARACTERS1,
            BNF_SPECIAL_CHARACTERS2, BNF_WHITE_SPACE_CHK,

            BNF_LETTER, BNF_LETTER_DIGIT, BNF_DIGIT, BNF_WHITE_SPACE,
            BNF_ALPHA_SET, BNF_NUMBER_SET, BNF_SPECIAL_CHARACTER_SET1,
            BNF_SPECIAL_CHARACTER_SET2, BNF_SPECIAL_CHARACTER_SET3, BNF_NOT_CHARS
        };


        /** structure used to build the lexeme token type library */
        struct LexemeTokenDef
        {
            size_t ID;              /// Token ID which is the index into the Lexeme Token Definition Container
            bool hasAction;         /// has an action associated with it. only applicable to terminal tokens
            bool isNonTerminal;     /// if true then token is non-terminal
            size_t ruleID;          /// index into Rule database for non-terminal token rulepath and lexeme
            bool isCaseSensitive;   /// if true use case sensitivity when comparing lexeme to source
            String lexeme;          /// text representation of token or valid characters for label parsing

            LexemeTokenDef(void) : ID(0), hasAction(false), isNonTerminal(false), ruleID(0), isCaseSensitive(false) {}
            LexemeTokenDef( const size_t ID, const String& lexeme, const bool hasAction = false, const bool caseSensitive = false )
                : ID(ID)
                , hasAction(hasAction)
                , isNonTerminal(false)
                , ruleID(0)
                , isCaseSensitive(caseSensitive)
                , lexeme(lexeme)
            {
            }

        };

        typedef std::vector<LexemeTokenDef> LexemeTokenDefContainer;
        typedef LexemeTokenDefContainer::iterator LexemeTokenDefIterator;

        /// map used to look up a client token ID based on a previously defined lexeme
        typedef std::map<std::string, size_t> LexemeTokenMap;
        typedef LexemeTokenMap::iterator TokenKeyIterator;


        /** structure for Token instructions that are constructed during first pass */
        struct TokenInst
        {
            size_t NTTRuleID;   /// Non-Terminal Token Rule ID that generated Token
            size_t tokenID;     /// expected Token ID. Could be UNKNOWN if valid token was not found.
            size_t line;        /// line number in source code where Token was found
            size_t pos;         /// Character position in source where Token was found
            bool found;         /// is true if expected token was found
        };

        typedef std::vector<TokenInst> TokenInstContainer;
        typedef TokenInstContainer::iterator TokenInstIterator;

        // token queue, definitions, rules
        struct TokenState
        {
            TokenInstContainer       tokenQue;
            LexemeTokenDefContainer  lexemeTokenDefinitions;
            TokenRuleContainer       rootRulePath;
            LexemeTokenMap           lexemeTokenMap;
        };

        TokenState* mClientTokenState;

        /// Active token queue, definitions, rules currently being used by the parser
        TokenState* mActiveTokenState;
        /// the location within the token instruction container where pass 2 is currently positioned
        mutable size_t mPass2TokenQuePosition;
        /** the queue position of the previous token that had an action.
            A token's action is fired on the next token having an action.
        */
        size_t mPreviousActionQuePosition;
        /** the queue position of the next token that has an action.
        */
        size_t mNextActionQuePosition;

        /// pointer to the source to be compiled
        const String* mSource;
        /// name of the source to be compiled
        String mSourceName;
        size_t mEndOfSource;

        size_t mCurrentLine;  /// current line number in source being tokenized
        size_t mCharPos;      /// position in current line of source being tokenized
        size_t mErrorCharPos; /// character position in source where last error occurred

        /// storage container for constants defined in source
        /// container uses the token index as a key associated with a float constant
        std::map<size_t, float> mConstants;
        /// storage container for string labels defined in source
        /// container uses the token index as a key associated with a label
        std::map<size_t, String> mLabels;
        /// flag indicates when a label is being parsed.
        /// It gets set false when a terminal token not of _character_ is encountered
        bool mLabelIsActive;
        /// the key of the active label being built during pass 1.
        /// a new key is calculated when mLabelIsActive switches from false to true
        size_t mActiveLabelKey;
        /// flag being true indicates that spaces are not to be skipped
        /// automatically gets set to false when mLabelIsActive goes to false
        bool mNoSpaceSkip;
        /// if flag is true then the next terminal token, if found, is not added to the token queue
        /// but still affects rule path flow
        bool mNoTerminalToken;
        /// TokenID to insert if the next rule finds a terminal token
        /// if zero then no token is inserted
        size_t mInsertTokenID;

        /// Active Contexts pattern used in pass 1 to determine which tokens are valid for a certain context
        uint mActiveContexts;

        /** perform pass 1 of compile process
            scans source for lexemes that can be tokenized and then
            performs general semantic and context verification on each lexeme before it is tokenized.
            A tokenized instruction list is built to be used by Pass 2.
            A rule path can trigger Pass 2 execution if enough tokens have been generated in Pass 1.
            Pass 1 will then pass control to pass 2 temporarily until the current tokens have been consumed.
        */
        bool doPass1();

        /** performs Pass 2 of compile process which is execution of the tokens
        @remark
            Pass 2 takes the token instructions generated in Pass 1 and
            builds the application specific instructions along with verifying
            semantic and context rules that could not be checked in Pass 1.
        @par
            Pass 2 execution consumes tokens and moves the Pass 2 token instruction position towards the end
            of the token container.  Token execution can insert new tokens into the token container.
        */
        bool doPass2();

        /** execute the action associated with the token pointed to by the Pass 2 token instruction position.
        @remarks
            It's up to the child class to implement how it will associate a token key with an action.
            Actions should be placed at positions within the BNF grammar (instruction queue) that indicate
            enough tokens exist for pass 2 processing to take place.
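        @par
            A minimal sketch of a subclass override (the token IDs and the
            setFogEnabled helper are hypothetical, purely for illustration):
        @code
        void MyCompiler::executeTokenAction(const size_t tokenID)
        {
            switch (tokenID)
            {
            case ID_FOG: // the 'fog' terminal token fired its action
                // consume the <true_false> parameter that follows it in the queue
                setFogEnabled(getNextTokenID() == ID_TRUE);
                break;
            default:
                break;
            }
        }
        @endcode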
        */
        virtual void executeTokenAction(const size_t tokenID) = 0;
        /** setup client token definitions.  Gets called when the BNF grammar is being setup.
        */
        virtual void setupTokenDefinitions(void) = 0;
        /** Gets the next token from the instruction queue.
        @remarks
            If an unknown token is found then an exception is raised but
            the instruction pointer is still moved past the unknown token.  The subclass should catch the exception,
            provide an error message, and attempt recovery.

        @param expectedTokenID if greater than 0 then an exception is raised if the tokenID does not match.
        */
        const TokenInst& getNextToken(const size_t expectedTokenID = 0) const
        {
            skipToken();
            return getCurrentToken(expectedTokenID);
        }
        /** Gets the current token from the instruction queue.
        @remarks
            If an unknown token is found then an exception is raised.
            The subclass should catch the exception, provide an error message, and attempt recovery.

        @param expectedTokenID if greater than 0 then an exception is raised if the tokenID does not match.
        */
        const TokenInst& getCurrentToken(const size_t expectedTokenID = 0) const;
        /** If a next token instruction exists then test if its token ID matches.
        @remarks
            This method is useful for peeking ahead during pass 2 to see if a certain
            token exists.
        @param expectedTokenID is the ID of the token to match.
        */
        bool testNextTokenID(const size_t expectedTokenID) const;

        /** If a current token instruction exists then test if its token ID matches.
        @param expectedTokenID is the ID of the token to match.
        */
        bool testCurrentTokenID(const size_t expectedTokenID) const
        {
            return mActiveTokenState->tokenQue[mPass2TokenQuePosition].tokenID == expectedTokenID;
        }
        /** skip to the next token in the pass2 queue.
        */
        void skipToken(void) const;
        /** go back to the previous token in the pass2 queue.
        */
        void replaceToken(void);
        /** Gets the next token's associated floating point value in the instruction queue that was parsed from the
            text source.  If an unknown token is found or no associated value was found then an exception is raised but
            the instruction pointer is still moved past the unknown token.  The subclass should catch the exception,
            provide an error message, and attempt recovery.
        */
        float getNextTokenValue(void) const
        {
            skipToken();
            return getCurrentTokenValue();
        }
        /** Gets the current token's associated floating point value in the instruction queue that was parsed from the
            text source.
        @remarks
            If an unknown token is found or no associated value was found then an exception is raised.
            The subclass should catch the exception, provide an error message, and attempt recovery.
        */
        float getCurrentTokenValue(void) const;
        /** Gets the next token's associated text label in the instruction queue that was parsed from the
            text source.
        @remarks
            If an unknown token is found or no associated label was found then an exception is raised but
            the instruction pointer is still moved past the unknown token.  The subclass should catch the exception,
            provide an error message, and attempt recovery.
        */
        const String& getNextTokenLabel(void) const
        {
            skipToken();
            return getCurrentTokenLabel();
        }
        /** Gets the current token's associated text label in the instruction queue that was parsed from the
            text source.  If an unknown token is found or no associated label was found then an exception is raised.
            The subclass should catch the exception, provide an error message, and attempt recovery.
        */
        const String& getCurrentTokenLabel(void) const;
        /** Get the next token's ID value.
        */
        size_t getNextTokenID(void) const { return getNextToken().tokenID; }
        /** Get the current token's ID value.
        */
        size_t getCurrentTokenID(void) const { return getCurrentToken().tokenID; }
        /** Get the next token's lexeme string.  Handy when you don't want the ID but want the string
            representation.
        */
        const String& getNextTokenLexeme(void) const
        {
            skipToken();
            return getCurrentTokenLexeme();
        }
        /** Get the current token's lexeme string.  Handy when you don't want the ID but want the string
            representation.
        */
        const String& getCurrentTokenLexeme(void) const;
        /** Gets the number of tokens waiting in the instruction queue that need to be processed by a token action in pass 2.
        */
        size_t getPass2TokenQueCount(void) const;
        /** Get the number of tokens not processed by the action token.
            Client Actions should use this method to retrieve the number of parameters (tokens)
            remaining to be processed in the action.
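        @par
            A sketch of the intended pattern inside a client action (the
            processParameter helper is hypothetical):
        @code
        // consume however many parameter tokens were queued for this action
        size_t remaining = getRemainingTokensForAction();
        while (remaining--)
        {
            processParameter(getNextTokenID()); // hypothetical helper
        }
        @endcode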
        */
        size_t getRemainingTokensForAction(void) const;
        /** Manually set the Pass2 Token queue position.
        @remarks
            This method will also set the position of the next token in the pass2 queue that
            has an action, ensuring that getRemainingTokensForAction works correctly.
            This method is useful for when the token queue must be reprocessed after
            pass1 and the position in the queue must be changed so that an action will be triggered.
        @param pos is the new position within the Pass2 queue
        @param activateAction if set true and the token at the new position has an action then the
            action is activated.
        */
        void setPass2TokenQuePosition(size_t pos, const bool activateAction = false);
        /** Get the current position in the Pass2 Token Queue.
        */
        size_t getPass2TokenQuePosition(void) const { return mPass2TokenQuePosition; }
        /** Set the position of the next token action in the Pass2 Token Queue.
        @remarks
            If the position is not within the queue or there is no action associated with
            the token at the position in the queue then NextActionQuePosition is not set.
        @param pos is the position in the Pass2 Token Queue where the next action is.
        @param search if true then the queue is searched from pos until an action is found.
            If the end of the queue is reached and no action has been found then NextActionQuePosition
            is set to the end of the queue and false is returned.
        */
        bool setNextActionQuePosition(size_t pos, const bool search = false);
        /** Add a lexeme token association.
        @remarks
            The backend compiler uses the associations between lexeme and token when
            building the rule base from the BNF script so all associations must be done
            prior to compiling a source.
        @param lexeme is the name of the token and is used when parsing the source to determine a match for a token.
        @param token is the ID associated with the lexeme
        @param hasAction must be set true if the client wants an action triggered when this token is generated
        @param caseSensitive should be set true if lexeme matching should use case sensitivity
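        @par
            An illustrative sketch of usage from within setupTokenDefinitions (the
            enum values are hypothetical):
        @code
        void MyCompiler::setupTokenDefinitions(void)
        {
            // 'fog' fires a token action; 'true'/'false' are plain parameters
            addLexemeToken("fog", ID_FOG, true);
            addLexemeToken("true", ID_TRUE);
            addLexemeToken("false", ID_FALSE);
        }
        @endcode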
        */
        void addLexemeToken(const String& lexeme, const size_t token, const bool hasAction = false, const bool caseSensitive = false);

        /** Sets up the parser rules for the client based on the BNF grammar text passed in.
        @remarks
            Raises an exception if the grammar did not compile successfully.  This method gets called
            when a call to compile occurs and no compiled BNF grammar exists; without it the compiler
            has no rules to work with.  The grammar only needs to be set once during the lifetime of
            the compiler unless the grammar changes.
        @note
            BNF grammar rules are cached once the BNF grammar source is compiled.
            The client should never have to call this method directly.
        */
        void setClientBNFGrammer(void);



        /// find the eol character
        void findEOL();

        /** check to see if the text at the present position in the source is a numerical constant
        @param fvalue is a reference that will receive the float value that is in the source
        @param charsize reference to receive the number of characters that make up the value in the source
        @return
            true if characters form a valid float representation
            false if a number value could not be extracted
        */
        bool isFloatValue(float& fvalue, size_t& charsize) const;

        /** Check if source at current position is supposed to be a user defined character label.
            A new label is processed when the previous operation was not _character_, otherwise the processed
            character (if a match was found) is added to the current label.  This allows _character_ operations
            to be chained together to form a crude regular expression to build a label.
        @param rulepathIDX index into rule path database of token to validate.
        @return
            true if token was found for character label.
        */
        bool isCharacterLabel(const size_t rulepathIDX);
        /** check to see if the text is in the lexeme text library
        @param lexeme points to the beginning of text where a lexeme token might exist
        @param caseSensitive set to true if match should be case sensitive
        @return
            true if a matching token could be found in the token type library
            false if it could not be tokenized
        */
        bool isLexemeMatch(const String& lexeme, const bool caseSensitive) const;
        /// position to the next possible valid symbol
        bool positionToNextLexeme();
        /** process input source text using rulepath to determine allowed tokens
        @remarks
            the method is reentrant and recursive.
            If a non-terminal token is encountered in the current rule path then the method is
            called using the new rule path referenced by the non-terminal token.
            Tokens can have the following operation states which affect the flow path of the rule:
                RULE: defines a rule path for the non-terminal token
                AND: the token is required for the rule to pass
                OR: if the previous tokens failed then try these ones
                OPTIONAL: the token is optional and does not cause the rule to fail if the token is not found
                REPEAT: the token is required but there can be more than one in a sequence
                DATA: used by a previous token, ie for character sets
                NOT_TEST: performs negative lookahead, ie makes sure the next token is not of a certain type
                END: end of the rule path - the method returns the success of the rule

        @param rulepathIDX index into an array of Token Rules that define a rule path to be processed
        @return
            true if the rule passed - all required tokens found
            false if one or more tokens required to complete the rule were not found
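        @par
            As a sketch of how a rule is encoded (the token IDs are hypothetical),
            the rule <fog_state> ::= 'fog' <true_false> would occupy consecutive
            entries in the rule path array roughly like this:
        @code
        TokenRule(otRULE, ID_FOG_STATE),  // rule header for the non-terminal
        TokenRule(otAND,  ID_FOG),        // 'fog' terminal token is required
        TokenRule(otAND,  ID_TRUE_FALSE), // <true_false> non-terminal must pass
        TokenRule(otEND,  0)              // terminates this rule path
        @endcode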
        */
        bool processRulePath( size_t rulepathIDX);


        /** setup ActiveContexts - should be called by subclass to setup initial language contexts
        */
        void setActiveContexts(const uint contexts){ mActiveContexts = contexts; }

        /// comment specifiers are hard coded
        void skipComments();

        /// find end of line marker and move past it
        void skipEOL();

        /// skip all the white space which includes spaces and tabs
        void skipWhiteSpace();


        /** check if the current position in the source has the lexeme text equivalent to the TokenID
        @param rulepathIDX index into rule path database of token to validate
        @param activeRuleID index of non-terminal rule that generated the token
        @return
            true if token was found
            false if token lexeme text does not match the source text
            if token is non-terminal then processRulePath is called
        */
        bool ValidateToken(const size_t rulepathIDX, const size_t activeRuleID);

        /** scan through all the rules and initialize token definitions with indexes into the rules for non-terminal tokens.
            Gets called when the internal grammar is being verified or after the client grammar has been parsed.
        @param grammerName is the name of the grammar the token rules are for
        */
        void verifyTokenRuleLinks(const String& grammerName);
        /** Checks the last token instruction and if it has an action then it triggers the action of the previously
            found token having an action.
        */
        void checkTokenActionTrigger(void);
        /** Get the text representation of the rule path.  This is a good way to visually verify
            that the BNF grammar did compile correctly.
        @param ruleID is the index into the rule path.
        @param level is the number of levels a non-terminal will expand to. Defaults to 0 if not set, which
            will cause non-terminals to not expand.
        */
        String getBNFGrammerTextFromRulePath(size_t ruleID, const size_t level = 0);


    private:
        // used for interpreting BNF script
        // keep it as static so that only one structure is created
        // no matter how many times this class is instantiated.
        static TokenState mBNFTokenState;
        // maintain a map of compiled BNF grammar states, one per grammar name
        typedef std::map<String, TokenState> TokenStateContainer;
        static TokenStateContainer mClientTokenStates;
        /// if a previous token action was setup then activate it now
        void activatePreviousTokenAction(void);
        /// initialize token definitions and rule paths
        void initBNFCompiler(void);
        /// Convert the BNF grammar token queue created in pass 1 into a BNF rule path
        void buildClientBNFRulePaths(void);
        /// modify the last rule in the container. An end operation is added to the rule path.
        void modifyLastRule(const OperationType pendingRuleOp, const size_t tokenID);
        /** get the token ID for a lexeme in the client state. If the lexeme is not found then
            it is added to the map and definition container and a new tokenID created.
        @return the ID of the token.
        */
        size_t getClientLexemeTokenID(const String& lexeme, const bool isCaseSensitive = false);
        /// Extract a Non Terminal identifier from the token queue
        void extractNonTerminal(const OperationType pendingRuleOp);
        /// Extract a Terminal lexeme from the token queue and add it to the current rule expression
        void extractTerminal(const OperationType pendingRuleOp, const bool notoken = false);
        /// Extract a set from the token queue and add it to the current rule expression
        void extractSet(const OperationType pendingRuleOp);
        /// Extract a numeric constant from the token queue and add it to the current rule expression
        void extractNumericConstant(const OperationType pendingRuleOp);
        /// changes the previous terminal token rule into a conditional terminal token insert rule
        void setConditionalTokenInsert(void);
        /// get the lexeme text of a rule.
        String getLexemeText(size_t& ruleID, const size_t level = 0);

    public:

        /// constructor
        Compiler2Pass();
        virtual ~Compiler2Pass() {}

        /** compile the source - performs 2 passes.
            First pass is to tokenize, check semantics and context.
            The second pass is performed by using tokens to look up function implementors and executing
            them, which converts tokens to application specific instructions.
        @remark
            Pass 2 only gets executed if Pass 1 has built enough tokens to complete a rule path and found no errors
        @param source the source text to be compiled
        @param sourceName the name of the source to be compiled
        @return
            true if Pass 1 and Pass 2 are successful
            false if any errors occur in Pass 1 or Pass 2
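        @par
            Typical usage from client code might look like the following sketch (the
            subclass name and script variable are hypothetical):
        @code
        MyCompiler compiler;  // subclass of Compiler2Pass
        if (!compiler.compile(scriptText, "myscript.txt"))
        {
            // handle failure; errors were reported during the two passes
        }
        @endcode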
        */
        bool compile(const String& source, const String& sourceName);
        /** gets the BNF grammar source text.  Gets called when the BNF grammar has to be compiled for the first time.
        */
        virtual const String& getClientBNFGrammer(void) = 0;

        /** get the name of the BNF grammar.
        */
        virtual const String& getClientGrammerName(void) const = 0;

    };

}

#endif
