# Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.utils import normalize_whitespace
from .tokens import Token
from .statementlexers import (Lexer,
SettingSectionHeaderLexer, SettingLexer,
VariableSectionHeaderLexer, VariableLexer,
TestCaseSectionHeaderLexer,
KeywordSectionHeaderLexer,
CommentSectionHeaderLexer, CommentLexer,
ErrorSectionHeaderLexer,
TestOrKeywordSettingLexer,
KeywordCallLexer,
IfHeaderLexer, ElseIfHeaderLexer, ElseHeaderLexer,
InlineIfHeaderLexer, EndLexer,
TryHeaderLexer, ExceptHeaderLexer, FinallyHeaderLexer,
ForHeaderLexer, WhileHeaderLexer,
ContinueLexer, BreakLexer, ReturnLexer)
class BlockLexer(Lexer):
    """Base class for lexers that lex a block of statements.

    A block lexer owns a list of sub-lexers (one per statement or nested
    block) and delegates the actual lexing to them.
    """

    def __init__(self, ctx):
        """:type ctx: :class:`robot.parsing.lexer.context.FileContext`"""
        super().__init__(ctx)
        # Sub-lexers in the order their statements were encountered.
        self.lexers = []

    def accepts_more(self, statement):
        """Return ``True`` while the block can consume further statements."""
        return True

    def lexer_for(self, statement):
        """Return a new sub-lexer instance that handles ``statement``.

        Raises ``TypeError`` if no lexer class returned by
        :meth:`lexer_classes` accepts the statement.
        """
        for cls in self.lexer_classes():
            lexer = cls(self.ctx)
            if lexer.handles(statement):
                return lexer
        raise TypeError("%s did not find lexer for statement %s."
                        % (type(self).__name__, statement))

    def lexer_classes(self):
        """Candidate sub-lexer classes, tried in order. Overridden by subclasses."""
        return ()

    def lex(self):
        for lexer in self.lexers:
            lexer.lex()

    def _lex_with_priority(self, priority):
        # Lex all sub-lexers of type ``priority`` first, then the rest.
        # Used when some statements (e.g. settings) must be processed
        # before others can be lexed correctly.
        for lexer in self.lexers:
            if isinstance(lexer, priority):
                lexer.lex()
        for lexer in self.lexers:
            if not isinstance(lexer, priority):
                lexer.lex()
class FileLexer(BlockLexer):
    """Top-level lexer for a whole file, composed of section lexers."""

    def lex(self):
        # Settings are lexed before other sections because they can
        # influence how the rest of the file is interpreted.
        self._lex_with_priority(priority=SettingSectionLexer)

    def lexer_classes(self):
        return (SettingSectionLexer, VariableSectionLexer,
                TestCaseSectionLexer, KeywordSectionLexer,
                CommentSectionLexer, ErrorSectionLexer,
                ImplicitCommentSectionLexer)
class SectionLexer(BlockLexer):
    """Base class for lexers handling one section of a file."""

    def accepts_more(self, statement):
        # A section continues until the next section header, which is
        # recognized by its first token starting with '*'.
        return not statement[0].value.startswith('*')
class SettingSectionLexer(SectionLexer):
    """Lexes the ``*** Settings ***`` section."""

    def handles(self, statement):
        return self.ctx.setting_section(statement)

    def lexer_classes(self):
        return (SettingSectionHeaderLexer, SettingLexer)
class VariableSectionLexer(SectionLexer):
    """Lexes the ``*** Variables ***`` section."""

    def handles(self, statement):
        return self.ctx.variable_section(statement)

    def lexer_classes(self):
        return (VariableSectionHeaderLexer, VariableLexer)
class TestCaseSectionLexer(SectionLexer):
    """Lexes the ``*** Test Cases ***`` section."""

    def handles(self, statement):
        return self.ctx.test_case_section(statement)

    def lexer_classes(self):
        return (TestCaseSectionHeaderLexer, TestCaseLexer)
class KeywordSectionLexer(SettingSectionLexer):
    """Lexes the ``*** Keywords ***`` section.

    Inherits from ``SettingSectionLexer`` (not ``SectionLexer`` directly),
    keeping that class's section-continuation behavior.
    """

    def handles(self, statement):
        return self.ctx.keyword_section(statement)

    def lexer_classes(self):
        return (KeywordSectionHeaderLexer, KeywordLexer)
class ErrorSectionLexer(SectionLexer):
    """Lexes a section whose header is not recognized by any other lexer."""

    def handles(self, statement):
        # Any statement starting with '*' that earlier section lexers did
        # not claim is treated as an erroneous section header.
        return statement and statement[0].value.startswith('*')

    def lexer_classes(self):
        return (ErrorSectionHeaderLexer, CommentLexer)
class TestOrKeywordLexer(BlockLexer):
    """Common base for lexing a single test case or user keyword."""

    # Token type to assign to the test/keyword name; set by subclasses.
    name_type = NotImplemented
    # Becomes True (per instance) once the name token has been consumed.
    _name_seen = False

    def accepts_more(self, statement):
        # The test/keyword body continues while statements start with an
        # empty (indentation) token; a non-empty first token starts the
        # next test/keyword.
        return not statement[0].value

    def _handle_name_or_indentation(self, statement):
        if not self._name_seen:
            # First statement: the leading token is the test/keyword name.
            token = statement.pop(0)
            token.type = self.name_type
            if statement:
                # The name shares the row with other tokens, so an
                # end-of-statement marker is needed after it.
                token._add_eos_after = True
            self._name_seen = True
        else:
            # Drop leading empty indentation tokens from body statements.
            while statement and not statement[0].value:
                statement.pop(0).type = None    # These tokens will be ignored

    def lexer_classes(self):
        return (TestOrKeywordSettingLexer, BreakLexer, ContinueLexer,
                ForLexer, InlineIfLexer, IfLexer, ReturnLexer, TryLexer,
                WhileLexer, KeywordCallLexer)
class TestCaseLexer(TestOrKeywordLexer):
    """Lexes a single test case."""

    name_type = Token.TESTCASE_NAME

    def __init__(self, ctx):
        """:type ctx: :class:`robot.parsing.lexer.context.TestCaseFileContext`"""
        super().__init__(ctx.test_case_context())

    def lex(self):
        # Settings (e.g. [Template]) must be lexed before the body.
        self._lex_with_priority(priority=TestOrKeywordSettingLexer)
class KeywordLexer(TestOrKeywordLexer):
    """Lexes a single user keyword."""

    name_type = Token.KEYWORD_NAME

    def __init__(self, ctx):
        super().__init__(ctx.keyword_context())
class NestedBlockLexer(BlockLexer):
    """Base class for lexers of nestable blocks (FOR, WHILE, IF, TRY).

    Tracks nesting depth so the block knows when its own ``END`` has been
    reached. NOTE(review): nothing in this view increments or decrements
    ``_block_level``; presumably a method outside this excerpt updates it.
    """

    def __init__(self, ctx):
        super().__init__(ctx)
        self._block_level = 0

    def accepts_more(self, statement):
        # The block is open until its nesting level drops back to zero.
        return self._block_level > 0
class ForLexer(NestedBlockLexer):
    """Lexes a ``FOR`` block, including nested control structures."""

    def handles(self, statement):
        # Delegate recognition to the header lexer for the opening statement.
        return ForHeaderLexer(self.ctx).handles(statement)

    def lexer_classes(self):
        return (ForHeaderLexer, InlineIfLexer, IfLexer, TryLexer, WhileLexer, EndLexer,
                ReturnLexer, ContinueLexer, BreakLexer, KeywordCallLexer)
class WhileLexer(NestedBlockLexer):
    """Lexes a ``WHILE`` block, including nested control structures."""

    def handles(self, statement):
        # Delegate recognition to the header lexer for the opening statement.
        return WhileHeaderLexer(self.ctx).handles(statement)

    def lexer_classes(self):
        return (WhileHeaderLexer, ForLexer, InlineIfLexer, IfLexer, TryLexer, EndLexer,
                ReturnLexer, ContinueLexer, BreakLexer, KeywordCallLexer)
class IfLexer(NestedBlockLexer):
    """Lexes a multi-line ``IF``/``ELSE IF``/``ELSE`` block."""

    def handles(self, statement):
        # Delegate recognition to the header lexer for the opening statement.
        return IfHeaderLexer(self.ctx).handles(statement)

    def lexer_classes(self):
        return (InlineIfLexer, IfHeaderLexer, ElseIfHeaderLexer, ElseHeaderLexer,
                ForLexer, TryLexer, WhileLexer, EndLexer, ReturnLexer, ContinueLexer,
                BreakLexer, KeywordCallLexer)
class InlineIfLexer(BlockLexer):
    """Lexes an inline ``IF`` — a single statement containing the whole
    IF/ELSE IF/ELSE structure and its branch bodies."""

    def handles(self, statement):
        # An inline IF needs at least a marker, a condition and a body,
        # so anything with two or fewer tokens cannot be one.
        if len(statement) <= 2:
            return False
        return InlineIfHeaderLexer(self.ctx).handles(statement)

    def accepts_more(self, statement):
        # The whole structure is on one statement; nothing more to consume.
        return False

    def lexer_classes(self):
        return (InlineIfHeaderLexer, ElseIfHeaderLexer, ElseHeaderLexer,
                ReturnLexer, ContinueLexer, BreakLexer, KeywordCallLexer)

    def _split(self, statement):
        """Split one inline IF statement into per-branch sub-statements.

        Yields lists of tokens: each header (IF/ELSE IF + condition, or
        ELSE) and each branch body become separate statements.
        End-of-statement markers are requested via the tokens'
        ``_add_eos_before``/``_add_eos_after`` attributes.
        """
        current = []
        expect_condition = False
        for token in statement:
            if expect_condition:
                # This token is the condition; it terminates the header.
                if token is not statement[-1]:
                    token._add_eos_after = True
                current.append(token)
                yield current
                current = []
                expect_condition = False
            elif token.value == 'IF':
                current.append(token)
                expect_condition = True
            elif normalize_whitespace(token.value) == 'ELSE IF':
                token._add_eos_before = True
                # Emit the previous branch body, then start the new header.
                yield current
                current = [token]
                expect_condition = True
            elif token.value == 'ELSE':
                token._add_eos_before = True
                if token is not statement[-1]:
                    token._add_eos_after = True
                # Emit the previous branch body and ELSE as its own statement.
                yield current
                current = []
                yield [token]
            else:
                current.append(token)
        yield current
class TryLexer(NestedBlockLexer):
    """Lexes a ``TRY``/``EXCEPT``/``ELSE``/``FINALLY`` block."""

    def handles(self, statement):
        # Delegate recognition to the header lexer for the opening statement.
        return TryHeaderLexer(self.ctx).handles(statement)

    def lexer_classes(self):
        return (TryHeaderLexer, ExceptHeaderLexer, ElseHeaderLexer, FinallyHeaderLexer,
                ForLexer, InlineIfLexer, IfLexer, WhileLexer, EndLexer, ReturnLexer,
                BreakLexer, ContinueLexer, KeywordCallLexer)