Source code for robot.parsing.lexer.statementlexers

#  Copyright 2008-2015 Nokia Networks
#  Copyright 2016-     Robot Framework Foundation
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.

from robot.utils import normalize_whitespace
from robot.variables import is_assign

from .tokens import Token


class Lexer:
    """Base class for lexers."""

    def __init__(self, ctx):
        self.ctx = ctx

    def handles(self, statement):
        return True

    def accepts_more(self, statement):
        raise NotImplementedError

    def input(self, statement):
        raise NotImplementedError

    def lex(self):
        raise NotImplementedError


class StatementLexer(Lexer):
    token_type = None

    def __init__(self, ctx):
        super().__init__(ctx)
        self.statement = None

    def accepts_more(self, statement):
        return False

    def input(self, statement):
        self.statement = statement

    def lex(self):
        raise NotImplementedError


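A concrete lexer receives its statement, a list of Token objects, via input() and assigns token types in lex(). The easiest way to observe the resulting types is the public robot.api.get_tokens entry point, available since Robot Framework 3.2; a minimal sketch:

from robot.api import get_tokens

data = '''\
*** Test Cases ***
Example
    Log    Hello
'''
# Prints the data tokens; separators and comments are omitted with
# data_only=True. Expected types include Token.TESTCASE_HEADER,
# Token.KEYWORD and Token.ARGUMENT.
for token in get_tokens(data, data_only=True):
    print(token.type, repr(token.value))

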
class SingleType(StatementLexer):

    def lex(self):
        for token in self.statement:
            token.type = self.token_type


class TypeAndArguments(StatementLexer):

    def lex(self):
        self.statement[0].type = self.token_type
        for token in self.statement[1:]:
            token.type = Token.ARGUMENT


class SectionHeaderLexer(SingleType):

    def handles(self, statement):
        return statement[0].value.startswith('*')


class SettingSectionHeaderLexer(SectionHeaderLexer):
    token_type = Token.SETTING_HEADER


class VariableSectionHeaderLexer(SectionHeaderLexer):
    token_type = Token.VARIABLE_HEADER


class TestCaseSectionHeaderLexer(SectionHeaderLexer):
    token_type = Token.TESTCASE_HEADER


class KeywordSectionHeaderLexer(SectionHeaderLexer):
    token_type = Token.KEYWORD_HEADER


class CommentSectionHeaderLexer(SectionHeaderLexer):
    token_type = Token.COMMENT_HEADER


class ErrorSectionHeaderLexer(SectionHeaderLexer):

    def lex(self):
        self.ctx.lex_invalid_section(self.statement)


class CommentLexer(SingleType):
    token_type = Token.COMMENT


class SettingLexer(StatementLexer):

    def lex(self):
        self.ctx.lex_setting(self.statement)


class TestOrKeywordSettingLexer(SettingLexer):

    def handles(self, statement):
        marker = statement[0].value
        return marker and marker[0] == '[' and marker[-1] == ']'


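handles() looks only at the bracketed marker such as '[Timeout]'. A direct sketch, assuming Token's constructor accepts value as a keyword argument as it does in recent releases; passing None as the context is purely illustrative, since handles() never touches it:

from robot.parsing.lexer.statementlexers import TestOrKeywordSettingLexer
from robot.parsing.lexer.tokens import Token

lexer = TestOrKeywordSettingLexer(None)    # ctx is not used by handles()
print(lexer.handles([Token(value='[Timeout]'), Token(value='1 minute')]))    # True
print(lexer.handles([Token(value='Timeout')]))                               # False

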
class VariableLexer(TypeAndArguments):
    token_type = Token.VARIABLE


class KeywordCallLexer(StatementLexer):

    def lex(self):
        if self.ctx.template_set:
            self._lex_as_template()
        else:
            self._lex_as_keyword_call()

    def _lex_as_template(self):
        # With a template active, the whole statement is arguments to it.
        for token in self.statement:
            token.type = Token.ARGUMENT

    def _lex_as_keyword_call(self):
        # Leading assignment targets get ASSIGN, the first other token is
        # the keyword, and everything after it is an argument.
        keyword_seen = False
        for token in self.statement:
            if keyword_seen:
                token.type = Token.ARGUMENT
            elif is_assign(token.value, allow_assign_mark=True):
                token.type = Token.ASSIGN
            else:
                token.type = Token.KEYWORD
                keyword_seen = True


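Without an active template, leading assignment targets become ASSIGN tokens, the first remaining token the KEYWORD, and the rest ARGUMENTs. A sketch of the outcome through the public API:

from robot.api import get_tokens

data = '''\
*** Test Cases ***
Example
    ${value} =    Get Value    arg1    arg2
'''
# Expected among the output: ASSIGN for '${value} =', KEYWORD for
# 'Get Value', ARGUMENT for 'arg1' and 'arg2'.
for token in get_tokens(data, data_only=True):
    print(token.type, repr(token.value))

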
class ForHeaderLexer(StatementLexer):
    separators = ('IN', 'IN RANGE', 'IN ENUMERATE', 'IN ZIP')

    def handles(self, statement):
        return statement[0].value == 'FOR'

    def lex(self):
        self.statement[0].type = Token.FOR
        separator_seen = False
        for token in self.statement[1:]:
            if separator_seen:
                token.type = Token.ARGUMENT
            elif normalize_whitespace(token.value) in self.separators:
                token.type = Token.FOR_SEPARATOR
                separator_seen = True
            else:
                token.type = Token.VARIABLE


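Tokens before the separator are loop variables, the separator itself (IN, IN RANGE, IN ENUMERATE or IN ZIP, with its whitespace normalized) becomes FOR_SEPARATOR, and the values after it become ARGUMENTs; for example:

from robot.api import get_tokens

data = '''\
*** Test Cases ***
Example
    FOR    ${item}    IN    first    second
        Log    ${item}
    END
'''
# Expected in the FOR header: FOR, VARIABLE for '${item}',
# FOR_SEPARATOR for 'IN', then ARGUMENTs for the values.
for token in get_tokens(data, data_only=True):
    print(token.type, repr(token.value))

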
class IfHeaderLexer(TypeAndArguments):
    token_type = Token.IF

    def handles(self, statement):
        return statement[0].value == 'IF' and len(statement) <= 2


class InlineIfHeaderLexer(StatementLexer):
    token_type = Token.INLINE_IF

    def handles(self, statement):
        for token in statement:
            if token.value == 'IF':
                return True
            if not is_assign(token.value, allow_assign_mark=True):
                return False
        return False

    def lex(self):
        if_seen = False
        for token in self.statement:
            if if_seen:
                token.type = Token.ARGUMENT
            elif token.value == 'IF':
                token.type = Token.INLINE_IF
                if_seen = True
            else:
                token.type = Token.ASSIGN


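An inline IF can be preceded by assignment targets, so handles() accepts the statement only if every token before 'IF' is a valid assign. A sketch (inline IF needs Robot Framework 5.0 or newer):

from robot.api import get_tokens

data = '''\
*** Test Cases ***
Example
    ${x} =    IF    True    Get X    ELSE    Get Y
'''
# Expected among the output: ASSIGN for '${x} =' and INLINE_IF for 'IF'.
for token in get_tokens(data, data_only=True):
    print(token.type, repr(token.value))

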
class ElseIfHeaderLexer(TypeAndArguments):
    token_type = Token.ELSE_IF

    def handles(self, statement):
        return normalize_whitespace(statement[0].value) == 'ELSE IF'


class ElseHeaderLexer(TypeAndArguments):
    token_type = Token.ELSE

    def handles(self, statement):
        return statement[0].value == 'ELSE'


class TryHeaderLexer(TypeAndArguments):
    token_type = Token.TRY

    def handles(self, statement):
        return statement[0].value == 'TRY'


class ExceptHeaderLexer(StatementLexer):
    token_type = Token.EXCEPT

    def handles(self, statement):
        return statement[0].value == 'EXCEPT'

    def lex(self):
        self.statement[0].type = Token.EXCEPT
        last_pattern = None
        as_seen = False
        for token in self.statement[1:]:
            if token.value == 'AS':
                token.type = Token.AS
                as_seen = True
            elif as_seen:
                token.type = Token.VARIABLE
            else:
                token.type = Token.ARGUMENT
                last_pattern = token
        # A trailing 'type=...' pattern configures the matching mode and
        # is thus re-typed as an option, not a normal pattern.
        if last_pattern and last_pattern.value.startswith('type='):
            last_pattern.type = Token.OPTION


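Patterns after EXCEPT are lexed as ARGUMENTs, 'AS' as AS with the following variable as VARIABLE, and a final pattern of the form 'type=...' is re-typed as OPTION. A sketch (TRY/EXCEPT needs Robot Framework 5.0 or newer):

from robot.api import get_tokens

data = '''\
*** Test Cases ***
Example
    TRY
        Fail    Oops!
    EXCEPT    Oops*    type=GLOB    AS    ${err}
        Log    ${err}
    END
'''
# Expected in the EXCEPT header: ARGUMENT for 'Oops*', OPTION for
# 'type=GLOB', AS, and VARIABLE for '${err}'.
for token in get_tokens(data, data_only=True):
    print(token.type, repr(token.value))

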
class FinallyHeaderLexer(TypeAndArguments):
    token_type = Token.FINALLY

    def handles(self, statement):
        return statement[0].value == 'FINALLY'


class WhileHeaderLexer(StatementLexer):
    token_type = Token.WHILE

    def handles(self, statement):
        return statement[0].value == 'WHILE'

    def lex(self):
        self.statement[0].type = Token.WHILE
        for token in self.statement[1:]:
            token.type = Token.ARGUMENT
        # A trailing 'limit=...' argument configures loop termination and
        # is re-typed as an option.
        if self.statement[-1].value.startswith('limit='):
            self.statement[-1].type = Token.OPTION


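WHILE header arguments are ARGUMENTs, except for a trailing 'limit=...' that is re-typed as OPTION. A sketch (WHILE needs Robot Framework 5.0 or newer):

from robot.api import get_tokens

data = '''\
*** Test Cases ***
Example
    WHILE    $x < 10    limit=100
        Log    looping
    END
'''
# Expected in the WHILE header: ARGUMENT for '$x < 10' and OPTION
# for 'limit=100'.
for token in get_tokens(data, data_only=True):
    print(token.type, repr(token.value))

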
class EndLexer(TypeAndArguments):
    token_type = Token.END

    def handles(self, statement):
        return statement[0].value == 'END'


class ReturnLexer(TypeAndArguments):
    token_type = Token.RETURN_STATEMENT

    def handles(self, statement):
        return statement[0].value == 'RETURN'


class ContinueLexer(TypeAndArguments):
    token_type = Token.CONTINUE

    def handles(self, statement):
        return statement[0].value == 'CONTINUE'


class BreakLexer(TypeAndArguments):
    token_type = Token.BREAK

    def handles(self, statement):
        return statement[0].value == 'BREAK'