# Source code for robot.parsing.lexer.blocklexers

#  Copyright 2008-2015 Nokia Networks
#  Copyright 2016-     Robot Framework Foundation
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.

from .tokens import Token
from .statementlexers import (Lexer,
                              SettingSectionHeaderLexer, SettingLexer,
                              VariableSectionHeaderLexer, VariableLexer,
                              TestCaseSectionHeaderLexer,
                              KeywordSectionHeaderLexer,
                              CommentSectionHeaderLexer, CommentLexer,
                              ErrorSectionHeaderLexer,
                              TestOrKeywordSettingLexer,
                              KeywordCallLexer,
                              ForLoopHeaderLexer,
                              EndLexer)


class BlockLexer(Lexer):
    """Lexer that owns an ordered list of child lexers and routes each
    statement to one of them.

    Subclasses declare which child lexer types they support by overriding
    :meth:`lexer_classes`.
    """

    def __init__(self, ctx):
        """:type ctx: :class:`robot.parsing.lexer.context.FileContext`"""
        Lexer.__init__(self, ctx)
        self.lexers = []

    def accepts_more(self, statement):
        # Blocks are open-ended by default; subclasses override to end them.
        return True

    def input(self, statement):
        # Keep feeding the latest child while it accepts more input;
        # otherwise create a new child that can handle this statement.
        if self.lexers and self.lexers[-1].accepts_more(statement):
            active = self.lexers[-1]
        else:
            active = self.lexer_for(statement)
            self.lexers.append(active)
        active.input(statement)
        return active

    def lexer_for(self, statement):
        for lexer_class in self.lexer_classes():
            candidate = lexer_class(self.ctx)
            if candidate.handles(statement):
                return candidate
        raise TypeError("%s did not find lexer for statement %s."
                        % (type(self).__name__, statement))

    def lexer_classes(self):
        return ()

    def lex(self):
        for child in self.lexers:
            child.lex()

    def _lex_with_priority(self, priority):
        # Lex children that are instances of `priority` first, then the
        # rest, preserving relative order within each group.
        prioritized = [c for c in self.lexers if isinstance(c, priority)]
        others = [c for c in self.lexers if not isinstance(c, priority)]
        for child in prioritized + others:
            child.lex()
class FileLexer(BlockLexer):
    """Top-level lexer covering a whole data file, section by section."""

    def lex(self):
        # Setting sections are lexed before all other sections.
        self._lex_with_priority(SettingSectionLexer)

    def lexer_classes(self):
        return (SettingSectionLexer,
                VariableSectionLexer,
                TestCaseSectionLexer,
                KeywordSectionLexer,
                CommentSectionLexer,
                ErrorSectionLexer,
                ImplicitCommentSectionLexer)
class SectionLexer(BlockLexer):
    """Common base class for lexers handling one data file section."""

    def accepts_more(self, statement):
        # A first cell starting with '*' is a new section header and thus
        # ends the current section.
        first_cell = statement[0].value
        return not first_cell.startswith('*')
class SettingSectionLexer(SectionLexer):
    """Lexes a settings section."""

    def handles(self, statement):
        # The context decides whether this header starts a setting section.
        return self.ctx.setting_section(statement)

    def lexer_classes(self):
        return (SettingSectionHeaderLexer, SettingLexer)
class VariableSectionLexer(SectionLexer):
    """Lexes a variables section."""

    def handles(self, statement):
        # The context decides whether this header starts a variable section.
        return self.ctx.variable_section(statement)

    def lexer_classes(self):
        return (VariableSectionHeaderLexer, VariableLexer)
class TestCaseSectionLexer(SectionLexer):
    """Lexes a test case section."""

    def handles(self, statement):
        # The context decides whether this header starts a test case section.
        return self.ctx.test_case_section(statement)

    def lexer_classes(self):
        return (TestCaseSectionHeaderLexer, TestCaseLexer)
class KeywordSectionLexer(SettingSectionLexer):
    """Lexes a keywords section.

    Inherits SettingSectionLexer, only the recognized header and the child
    lexers differ.
    """

    def handles(self, statement):
        # The context decides whether this header starts a keyword section.
        return self.ctx.keyword_section(statement)

    def lexer_classes(self):
        return (KeywordSectionHeaderLexer, KeywordLexer)
class CommentSectionLexer(SectionLexer):
    """Lexes an explicit comments section."""

    def handles(self, statement):
        # The context decides whether this header starts a comment section.
        return self.ctx.comment_section(statement)

    def lexer_classes(self):
        return (CommentSectionHeaderLexer, CommentLexer)
class ImplicitCommentSectionLexer(SectionLexer):
    """Treats data before any recognized section header as comments."""

    def handles(self, statement):
        # Fallback lexer: accepts anything. Must be last in
        # FileLexer.lexer_classes().
        return True

    def lexer_classes(self):
        return (CommentLexer,)
class ErrorSectionLexer(SectionLexer):
    """Lexes a section whose '*'-prefixed header is not a recognized one."""

    def handles(self, statement):
        # Any statement whose first cell starts with '*' and that earlier
        # section lexers did not claim ends up here.
        return statement and statement[0].value.startswith('*')

    def lexer_classes(self):
        return (ErrorSectionHeaderLexer, CommentLexer)
class TestOrKeywordLexer(BlockLexer):
    """Common base class for lexing one test case or one user keyword."""

    name_type = NotImplemented    # token type of the name; set by subclasses
    _name_seen = False            # becomes instance attribute once name is consumed

    def accepts_more(self, statement):
        # Body rows start with an empty first cell; a non-empty one starts
        # the next test/keyword.
        return not statement[0].value

    def input(self, statement):
        self._handle_name_or_indentation(statement)
        if statement:    # statement may have been emptied above
            BlockLexer.input(self, statement)

    def _handle_name_or_indentation(self, statement):
        if not self._name_seen:
            # First statement starts with the test/keyword name.
            name_token = statement.pop(0)
            name_token.type = self.name_type
            self._name_seen = True
        else:
            # Drop leading empty (indentation) cells.
            while statement and not statement[0].value:
                statement.pop(0).type = None    # these tokens will be ignored

    def lexer_classes(self):
        return (TestOrKeywordSettingLexer, ForLoopLexer, KeywordCallLexer)
class TestCaseLexer(TestOrKeywordLexer):
    """Lexes one test case."""

    name_type = Token.TESTCASE_NAME

    def __init__(self, ctx):
        TestOrKeywordLexer.__init__(self, ctx.test_case_context())

    def lex(self):
        # Settings inside the test are lexed before other statements.
        self._lex_with_priority(TestOrKeywordSettingLexer)
class KeywordLexer(TestOrKeywordLexer):
    """Lexes one user keyword."""

    name_type = Token.KEYWORD_NAME

    def __init__(self, ctx):
        TestOrKeywordLexer.__init__(self, ctx.keyword_context())
class ForLoopLexer(BlockLexer):
    """Lexes a FOR loop block inside a test or keyword.

    Handles both loops terminated by an ``END`` statement and old style
    loops whose body rows are escaped with a backslash in the first cell.
    """

    def __init__(self, ctx):
        BlockLexer.__init__(self, ctx)
        self._old_style_for = False    # a '\'-escaped body row has been seen
        self._end_seen = False         # an END statement has been consumed

    def handles(self, statement):
        # Delegate recognition of the FOR header to the statement lexer.
        return ForLoopHeaderLexer(self.ctx).handles(statement)

    def accepts_more(self, statement):
        if statement[0].value == '\\':
            # Old style loop: mark the indent token here; input() pops it
            # after delegation if it is still the first token.
            statement[0].type = Token.OLD_FOR_INDENT
            self._old_style_for = True
            return True
        elif self._old_style_for:
            # Old style loops only continue into an (optional) END statement.
            return EndLexer(self.ctx).handles(statement)
        return not self._end_seen

    def input(self, statement):
        # Delegate first; the chosen child lexer tells us how to update state.
        lexer = BlockLexer.input(self, statement)
        if isinstance(lexer, EndLexer):
            self._end_seen = True
        elif statement[0].type == Token.OLD_FOR_INDENT:
            # Remove the indent marker set in accepts_more() above.
            statement.pop(0)

    def lexer_classes(self):
        return (ForLoopHeaderLexer, EndLexer, KeywordCallLexer)