robot.parsing.lexer package

Submodules

robot.parsing.lexer.blocklexers module

class robot.parsing.lexer.blocklexers.BlockLexer(ctx)[source]

Bases: robot.parsing.lexer.statementlexers.Lexer

accepts_more(statement)[source]
input(statement)[source]
lexer_for(statement)[source]
lexer_classes()[source]
lex()[source]
handles(statement)
class robot.parsing.lexer.blocklexers.FileLexer(ctx)[source]

Bases: robot.parsing.lexer.blocklexers.BlockLexer

lex()[source]
lexer_classes()[source]
accepts_more(statement)
handles(statement)
input(statement)
lexer_for(statement)
class robot.parsing.lexer.blocklexers.SectionLexer(ctx)[source]

Bases: robot.parsing.lexer.blocklexers.BlockLexer

accepts_more(statement)[source]
handles(statement)
input(statement)
lex()
lexer_classes()
lexer_for(statement)
class robot.parsing.lexer.blocklexers.SettingSectionLexer(ctx)[source]

Bases: robot.parsing.lexer.blocklexers.SectionLexer

handles(statement)[source]
lexer_classes()[source]
accepts_more(statement)
input(statement)
lex()
lexer_for(statement)
class robot.parsing.lexer.blocklexers.VariableSectionLexer(ctx)[source]

Bases: robot.parsing.lexer.blocklexers.SectionLexer

handles(statement)[source]
lexer_classes()[source]
accepts_more(statement)
input(statement)
lex()
lexer_for(statement)
class robot.parsing.lexer.blocklexers.TestCaseSectionLexer(ctx)[source]

Bases: robot.parsing.lexer.blocklexers.SectionLexer

handles(statement)[source]
lexer_classes()[source]
accepts_more(statement)
input(statement)
lex()
lexer_for(statement)
class robot.parsing.lexer.blocklexers.KeywordSectionLexer(ctx)[source]

Bases: robot.parsing.lexer.blocklexers.SettingSectionLexer

handles(statement)[source]
lexer_classes()[source]
accepts_more(statement)
input(statement)
lex()
lexer_for(statement)
class robot.parsing.lexer.blocklexers.CommentSectionLexer(ctx)[source]

Bases: robot.parsing.lexer.blocklexers.SectionLexer

handles(statement)[source]
lexer_classes()[source]
accepts_more(statement)
input(statement)
lex()
lexer_for(statement)
class robot.parsing.lexer.blocklexers.ImplicitCommentSectionLexer(ctx)[source]

Bases: robot.parsing.lexer.blocklexers.SectionLexer

handles(statement)[source]
lexer_classes()[source]
accepts_more(statement)
input(statement)
lex()
lexer_for(statement)
class robot.parsing.lexer.blocklexers.ErrorSectionLexer(ctx)[source]

Bases: robot.parsing.lexer.blocklexers.SectionLexer

handles(statement)[source]
lexer_classes()[source]
accepts_more(statement)
input(statement)
lex()
lexer_for(statement)
class robot.parsing.lexer.blocklexers.TestOrKeywordLexer(ctx)[source]

Bases: robot.parsing.lexer.blocklexers.BlockLexer

name_type = NotImplemented
accepts_more(statement)[source]
input(statement)[source]
lexer_classes()[source]
handles(statement)
lex()
lexer_for(statement)
class robot.parsing.lexer.blocklexers.TestCaseLexer(ctx)[source]

Bases: robot.parsing.lexer.blocklexers.TestOrKeywordLexer

name_type = 'TESTCASE NAME'
lex()[source]
accepts_more(statement)
handles(statement)
input(statement)
lexer_classes()
lexer_for(statement)
class robot.parsing.lexer.blocklexers.KeywordLexer(ctx)[source]

Bases: robot.parsing.lexer.blocklexers.TestOrKeywordLexer

name_type = 'KEYWORD NAME'
accepts_more(statement)
handles(statement)
input(statement)
lex()
lexer_classes()
lexer_for(statement)
class robot.parsing.lexer.blocklexers.NestedBlockLexer(ctx)[source]

Bases: robot.parsing.lexer.blocklexers.BlockLexer

accepts_more(statement)[source]
input(statement)[source]
handles(statement)
lex()
lexer_classes()
lexer_for(statement)
class robot.parsing.lexer.blocklexers.ForLexer(ctx)[source]

Bases: robot.parsing.lexer.blocklexers.NestedBlockLexer

handles(statement)[source]
lexer_classes()[source]
accepts_more(statement)
input(statement)
lex()
lexer_for(statement)
class robot.parsing.lexer.blocklexers.WhileLexer(ctx)[source]

Bases: robot.parsing.lexer.blocklexers.NestedBlockLexer

handles(statement)[source]
lexer_classes()[source]
accepts_more(statement)
input(statement)
lex()
lexer_for(statement)
class robot.parsing.lexer.blocklexers.IfLexer(ctx)[source]

Bases: robot.parsing.lexer.blocklexers.NestedBlockLexer

handles(statement)[source]
lexer_classes()[source]
accepts_more(statement)
input(statement)
lex()
lexer_for(statement)
class robot.parsing.lexer.blocklexers.InlineIfLexer(ctx)[source]

Bases: robot.parsing.lexer.blocklexers.BlockLexer

handles(statement)[source]
accepts_more(statement)[source]
lexer_classes()[source]
input(statement)[source]
lex()
lexer_for(statement)
class robot.parsing.lexer.blocklexers.TryLexer(ctx)[source]

Bases: robot.parsing.lexer.blocklexers.NestedBlockLexer

handles(statement)[source]
lexer_classes()[source]
accepts_more(statement)
input(statement)
lex()
lexer_for(statement)

robot.parsing.lexer.context module

class robot.parsing.lexer.context.LexingContext(settings=None)[source]

Bases: object

settings_class = None
lex_setting(statement)[source]
class robot.parsing.lexer.context.FileContext(settings=None)[source]

Bases: robot.parsing.lexer.context.LexingContext

sections_class = None
setting_section(statement)[source]
variable_section(statement)[source]
test_case_section(statement)[source]
keyword_section(statement)[source]
comment_section(statement)[source]
keyword_context()[source]
lex_invalid_section(statement)[source]
lex_setting(statement)
settings_class = None
class robot.parsing.lexer.context.TestCaseFileContext(settings=None)[source]

Bases: robot.parsing.lexer.context.FileContext

sections_class

alias of robot.parsing.lexer.sections.TestCaseFileSections

settings_class

alias of robot.parsing.lexer.settings.TestCaseFileSettings

test_case_context()[source]
comment_section(statement)
keyword_context()
keyword_section(statement)
lex_invalid_section(statement)
lex_setting(statement)
setting_section(statement)
test_case_section(statement)
variable_section(statement)
class robot.parsing.lexer.context.ResourceFileContext(settings=None)[source]

Bases: robot.parsing.lexer.context.FileContext

sections_class

alias of robot.parsing.lexer.sections.ResourceFileSections

settings_class

alias of robot.parsing.lexer.settings.ResourceFileSettings

comment_section(statement)
keyword_context()
keyword_section(statement)
lex_invalid_section(statement)
lex_setting(statement)
setting_section(statement)
test_case_section(statement)
variable_section(statement)
class robot.parsing.lexer.context.InitFileContext(settings=None)[source]

Bases: robot.parsing.lexer.context.FileContext

sections_class

alias of robot.parsing.lexer.sections.InitFileSections

settings_class

alias of robot.parsing.lexer.settings.InitFileSettings

comment_section(statement)
keyword_context()
keyword_section(statement)
lex_invalid_section(statement)
lex_setting(statement)
setting_section(statement)
test_case_section(statement)
variable_section(statement)
class robot.parsing.lexer.context.TestCaseContext(settings=None)[source]

Bases: robot.parsing.lexer.context.LexingContext

template_set
lex_setting(statement)
settings_class = None
class robot.parsing.lexer.context.KeywordContext(settings=None)[source]

Bases: robot.parsing.lexer.context.LexingContext

template_set
lex_setting(statement)
settings_class = None

robot.parsing.lexer.lexer module

robot.parsing.lexer.lexer.get_tokens(source, data_only=False, tokenize_variables=False)[source]

Parses the given source to tokens.

Parameters:
  • source – The source from which to read the data. Can be a path to a source file as a string or as a pathlib.Path object, an already opened file object, or Unicode text containing the data directly. Source files must be UTF-8 encoded.
  • data_only – When False (default), returns all tokens. When set to True, omits separators, comments, continuation markers, and other non-data tokens.
  • tokenize_variables – When True, possible variables in keyword arguments and elsewhere are tokenized. See the tokenize_variables() method for details.

Returns a generator that yields Token instances.

robot.parsing.lexer.lexer.get_resource_tokens(source, data_only=False, tokenize_variables=False)[source]

Parses the given source to resource file tokens.

Otherwise same as get_tokens() but the source is considered to be a resource file. This affects, for example, what settings are valid.

robot.parsing.lexer.lexer.get_init_tokens(source, data_only=False, tokenize_variables=False)[source]

Parses the given source to init file tokens.

Otherwise same as get_tokens() but the source is considered to be a suite initialization file. This affects, for example, what settings are valid.

class robot.parsing.lexer.lexer.Lexer(ctx, data_only=False, tokenize_variables=False)[source]

Bases: object

input(source)[source]
get_tokens()[source]

robot.parsing.lexer.sections module

class robot.parsing.lexer.sections.Sections[source]

Bases: object

setting_markers = ('Settings', 'Setting')
variable_markers = ('Variables', 'Variable')
test_case_markers = ('Test Cases', 'Test Case', 'Tasks', 'Task')
keyword_markers = ('Keywords', 'Keyword')
comment_markers = ('Comments', 'Comment')
setting(statement)[source]
variable(statement)[source]
test_case(statement)[source]
keyword(statement)[source]
comment(statement)[source]
lex_invalid(statement)[source]
class robot.parsing.lexer.sections.TestCaseFileSections[source]

Bases: robot.parsing.lexer.sections.Sections

test_case(statement)[source]
comment(statement)
comment_markers = ('Comments', 'Comment')
keyword(statement)
keyword_markers = ('Keywords', 'Keyword')
lex_invalid(statement)
setting(statement)
setting_markers = ('Settings', 'Setting')
test_case_markers = ('Test Cases', 'Test Case', 'Tasks', 'Task')
variable(statement)
variable_markers = ('Variables', 'Variable')
class robot.parsing.lexer.sections.ResourceFileSections[source]

Bases: robot.parsing.lexer.sections.Sections

comment(statement)
comment_markers = ('Comments', 'Comment')
keyword(statement)
keyword_markers = ('Keywords', 'Keyword')
lex_invalid(statement)
setting(statement)
setting_markers = ('Settings', 'Setting')
test_case(statement)
test_case_markers = ('Test Cases', 'Test Case', 'Tasks', 'Task')
variable(statement)
variable_markers = ('Variables', 'Variable')
class robot.parsing.lexer.sections.InitFileSections[source]

Bases: robot.parsing.lexer.sections.Sections

comment(statement)
comment_markers = ('Comments', 'Comment')
keyword(statement)
keyword_markers = ('Keywords', 'Keyword')
lex_invalid(statement)
setting(statement)
setting_markers = ('Settings', 'Setting')
test_case(statement)
test_case_markers = ('Test Cases', 'Test Case', 'Tasks', 'Task')
variable(statement)
variable_markers = ('Variables', 'Variable')

robot.parsing.lexer.settings module

class robot.parsing.lexer.settings.Settings[source]

Bases: object

names = ()
aliases = {}
multi_use = ('Metadata', 'Library', 'Resource', 'Variables')
single_value = ('Resource', 'Test Timeout', 'Test Template', 'Timeout', 'Template')
name_and_arguments = ('Metadata', 'Suite Setup', 'Suite Teardown', 'Test Setup', 'Test Teardown', 'Test Template', 'Setup', 'Teardown', 'Template', 'Resource', 'Variables')
name_arguments_and_with_name = ('Library',)
lex(statement)[source]
class robot.parsing.lexer.settings.TestCaseFileSettings[source]

Bases: robot.parsing.lexer.settings.Settings

names = ('Documentation', 'Metadata', 'Suite Setup', 'Suite Teardown', 'Test Setup', 'Test Teardown', 'Test Template', 'Test Timeout', 'Force Tags', 'Default Tags', 'Library', 'Resource', 'Variables')
aliases = {'Task Setup': 'Test Setup', 'Task Teardown': 'Test Teardown', 'Task Template': 'Test Template', 'Task Timeout': 'Test Timeout'}
lex(statement)
multi_use = ('Metadata', 'Library', 'Resource', 'Variables')
name_and_arguments = ('Metadata', 'Suite Setup', 'Suite Teardown', 'Test Setup', 'Test Teardown', 'Test Template', 'Setup', 'Teardown', 'Template', 'Resource', 'Variables')
name_arguments_and_with_name = ('Library',)
single_value = ('Resource', 'Test Timeout', 'Test Template', 'Timeout', 'Template')
class robot.parsing.lexer.settings.InitFileSettings[source]

Bases: robot.parsing.lexer.settings.Settings

names = ('Documentation', 'Metadata', 'Suite Setup', 'Suite Teardown', 'Test Setup', 'Test Teardown', 'Test Timeout', 'Force Tags', 'Library', 'Resource', 'Variables')
aliases = {}
lex(statement)
multi_use = ('Metadata', 'Library', 'Resource', 'Variables')
name_and_arguments = ('Metadata', 'Suite Setup', 'Suite Teardown', 'Test Setup', 'Test Teardown', 'Test Template', 'Setup', 'Teardown', 'Template', 'Resource', 'Variables')
name_arguments_and_with_name = ('Library',)
single_value = ('Resource', 'Test Timeout', 'Test Template', 'Timeout', 'Template')
class robot.parsing.lexer.settings.ResourceFileSettings[source]

Bases: robot.parsing.lexer.settings.Settings

names = ('Documentation', 'Library', 'Resource', 'Variables')
aliases = {}
lex(statement)
multi_use = ('Metadata', 'Library', 'Resource', 'Variables')
name_and_arguments = ('Metadata', 'Suite Setup', 'Suite Teardown', 'Test Setup', 'Test Teardown', 'Test Template', 'Setup', 'Teardown', 'Template', 'Resource', 'Variables')
name_arguments_and_with_name = ('Library',)
single_value = ('Resource', 'Test Timeout', 'Test Template', 'Timeout', 'Template')
class robot.parsing.lexer.settings.TestCaseSettings(parent)[source]

Bases: robot.parsing.lexer.settings.Settings

names = ('Documentation', 'Tags', 'Setup', 'Teardown', 'Template', 'Timeout')
template_set
aliases = {}
lex(statement)
multi_use = ('Metadata', 'Library', 'Resource', 'Variables')
name_and_arguments = ('Metadata', 'Suite Setup', 'Suite Teardown', 'Test Setup', 'Test Teardown', 'Test Template', 'Setup', 'Teardown', 'Template', 'Resource', 'Variables')
name_arguments_and_with_name = ('Library',)
single_value = ('Resource', 'Test Timeout', 'Test Template', 'Timeout', 'Template')
class robot.parsing.lexer.settings.KeywordSettings[source]

Bases: robot.parsing.lexer.settings.Settings

names = ('Documentation', 'Arguments', 'Teardown', 'Timeout', 'Tags', 'Return')
aliases = {}
lex(statement)
multi_use = ('Metadata', 'Library', 'Resource', 'Variables')
name_and_arguments = ('Metadata', 'Suite Setup', 'Suite Teardown', 'Test Setup', 'Test Teardown', 'Test Template', 'Setup', 'Teardown', 'Template', 'Resource', 'Variables')
name_arguments_and_with_name = ('Library',)
single_value = ('Resource', 'Test Timeout', 'Test Template', 'Timeout', 'Template')

robot.parsing.lexer.statementlexers module

class robot.parsing.lexer.statementlexers.Lexer(ctx)[source]

Bases: object

Base class for lexers.

handles(statement)[source]
accepts_more(statement)[source]
input(statement)[source]
lex()[source]
class robot.parsing.lexer.statementlexers.StatementLexer(ctx)[source]

Bases: robot.parsing.lexer.statementlexers.Lexer

token_type = None
accepts_more(statement)[source]
input(statement)[source]
lex()[source]
handles(statement)
class robot.parsing.lexer.statementlexers.SingleType(ctx)[source]

Bases: robot.parsing.lexer.statementlexers.StatementLexer

lex()[source]
accepts_more(statement)
handles(statement)
input(statement)
token_type = None
class robot.parsing.lexer.statementlexers.TypeAndArguments(ctx)[source]

Bases: robot.parsing.lexer.statementlexers.StatementLexer

lex()[source]
accepts_more(statement)
handles(statement)
input(statement)
token_type = None
class robot.parsing.lexer.statementlexers.SectionHeaderLexer(ctx)[source]

Bases: robot.parsing.lexer.statementlexers.SingleType

handles(statement)[source]
accepts_more(statement)
input(statement)
lex()
token_type = None
class robot.parsing.lexer.statementlexers.SettingSectionHeaderLexer(ctx)[source]

Bases: robot.parsing.lexer.statementlexers.SectionHeaderLexer

token_type = 'SETTING HEADER'
accepts_more(statement)
handles(statement)
input(statement)
lex()
class robot.parsing.lexer.statementlexers.VariableSectionHeaderLexer(ctx)[source]

Bases: robot.parsing.lexer.statementlexers.SectionHeaderLexer

token_type = 'VARIABLE HEADER'
accepts_more(statement)
handles(statement)
input(statement)
lex()
class robot.parsing.lexer.statementlexers.TestCaseSectionHeaderLexer(ctx)[source]

Bases: robot.parsing.lexer.statementlexers.SectionHeaderLexer

token_type = 'TESTCASE HEADER'
accepts_more(statement)
handles(statement)
input(statement)
lex()
class robot.parsing.lexer.statementlexers.KeywordSectionHeaderLexer(ctx)[source]

Bases: robot.parsing.lexer.statementlexers.SectionHeaderLexer

token_type = 'KEYWORD HEADER'
accepts_more(statement)
handles(statement)
input(statement)
lex()
class robot.parsing.lexer.statementlexers.CommentSectionHeaderLexer(ctx)[source]

Bases: robot.parsing.lexer.statementlexers.SectionHeaderLexer

token_type = 'COMMENT HEADER'
accepts_more(statement)
handles(statement)
input(statement)
lex()
class robot.parsing.lexer.statementlexers.ErrorSectionHeaderLexer(ctx)[source]

Bases: robot.parsing.lexer.statementlexers.SectionHeaderLexer

lex()[source]
accepts_more(statement)
handles(statement)
input(statement)
token_type = None
class robot.parsing.lexer.statementlexers.CommentLexer(ctx)[source]

Bases: robot.parsing.lexer.statementlexers.SingleType

token_type = 'COMMENT'
accepts_more(statement)
handles(statement)
input(statement)
lex()
class robot.parsing.lexer.statementlexers.SettingLexer(ctx)[source]

Bases: robot.parsing.lexer.statementlexers.StatementLexer

lex()[source]
accepts_more(statement)
handles(statement)
input(statement)
token_type = None
class robot.parsing.lexer.statementlexers.TestOrKeywordSettingLexer(ctx)[source]

Bases: robot.parsing.lexer.statementlexers.SettingLexer

handles(statement)[source]
accepts_more(statement)
input(statement)
lex()
token_type = None
class robot.parsing.lexer.statementlexers.VariableLexer(ctx)[source]

Bases: robot.parsing.lexer.statementlexers.TypeAndArguments

token_type = 'VARIABLE'
accepts_more(statement)
handles(statement)
input(statement)
lex()
class robot.parsing.lexer.statementlexers.KeywordCallLexer(ctx)[source]

Bases: robot.parsing.lexer.statementlexers.StatementLexer

lex()[source]
accepts_more(statement)
handles(statement)
input(statement)
token_type = None
class robot.parsing.lexer.statementlexers.ForHeaderLexer(ctx)[source]

Bases: robot.parsing.lexer.statementlexers.StatementLexer

separators = ('IN', 'IN RANGE', 'IN ENUMERATE', 'IN ZIP')
handles(statement)[source]
lex()[source]
accepts_more(statement)
input(statement)
token_type = None
class robot.parsing.lexer.statementlexers.IfHeaderLexer(ctx)[source]

Bases: robot.parsing.lexer.statementlexers.TypeAndArguments

token_type = 'IF'
handles(statement)[source]
accepts_more(statement)
input(statement)
lex()
class robot.parsing.lexer.statementlexers.InlineIfHeaderLexer(ctx)[source]

Bases: robot.parsing.lexer.statementlexers.StatementLexer

token_type = 'INLINE IF'
handles(statement)[source]
lex()[source]
accepts_more(statement)
input(statement)
class robot.parsing.lexer.statementlexers.ElseIfHeaderLexer(ctx)[source]

Bases: robot.parsing.lexer.statementlexers.TypeAndArguments

token_type = 'ELSE IF'
handles(statement)[source]
accepts_more(statement)
input(statement)
lex()
class robot.parsing.lexer.statementlexers.ElseHeaderLexer(ctx)[source]

Bases: robot.parsing.lexer.statementlexers.TypeAndArguments

token_type = 'ELSE'
handles(statement)[source]
accepts_more(statement)
input(statement)
lex()
class robot.parsing.lexer.statementlexers.TryHeaderLexer(ctx)[source]

Bases: robot.parsing.lexer.statementlexers.TypeAndArguments

token_type = 'TRY'
handles(statement)[source]
accepts_more(statement)
input(statement)
lex()
class robot.parsing.lexer.statementlexers.ExceptHeaderLexer(ctx)[source]

Bases: robot.parsing.lexer.statementlexers.StatementLexer

token_type = 'EXCEPT'
handles(statement)[source]
lex()[source]
accepts_more(statement)
input(statement)
class robot.parsing.lexer.statementlexers.FinallyHeaderLexer(ctx)[source]

Bases: robot.parsing.lexer.statementlexers.TypeAndArguments

token_type = 'FINALLY'
handles(statement)[source]
accepts_more(statement)
input(statement)
lex()
class robot.parsing.lexer.statementlexers.WhileHeaderLexer(ctx)[source]

Bases: robot.parsing.lexer.statementlexers.StatementLexer

token_type = 'WHILE'
handles(statement)[source]
lex()[source]
accepts_more(statement)
input(statement)
class robot.parsing.lexer.statementlexers.EndLexer(ctx)[source]

Bases: robot.parsing.lexer.statementlexers.TypeAndArguments

token_type = 'END'
handles(statement)[source]
accepts_more(statement)
input(statement)
lex()
class robot.parsing.lexer.statementlexers.ReturnLexer(ctx)[source]

Bases: robot.parsing.lexer.statementlexers.TypeAndArguments

token_type = 'RETURN STATEMENT'
handles(statement)[source]
accepts_more(statement)
input(statement)
lex()
class robot.parsing.lexer.statementlexers.ContinueLexer(ctx)[source]

Bases: robot.parsing.lexer.statementlexers.TypeAndArguments

token_type = 'CONTINUE'
handles(statement)[source]
accepts_more(statement)
input(statement)
lex()
class robot.parsing.lexer.statementlexers.BreakLexer(ctx)[source]

Bases: robot.parsing.lexer.statementlexers.TypeAndArguments

token_type = 'BREAK'
handles(statement)[source]
accepts_more(statement)
input(statement)
lex()

robot.parsing.lexer.tokenizer module

class robot.parsing.lexer.tokenizer.Tokenizer[source]

Bases: object

tokenize(data, data_only=False)[source]

robot.parsing.lexer.tokens module

class robot.parsing.lexer.tokens.Token(type=None, value=None, lineno=-1, col_offset=-1, error=None)[source]

Bases: object

Token representing piece of Robot Framework data.

Each token has type, value, line number, column offset and end column offset in type, value, lineno, col_offset and end_col_offset attributes, respectively. Tokens representing error also have their error message in error attribute.

Token types are declared as class attributes such as SETTING_HEADER and EOL. Values of these constants have changed slightly in Robot Framework 4.0 and they may change again in the future. It is thus safer to use the constants, not their values, when types are needed. For example, use Token(Token.EOL) instead of Token('EOL') and token.type == Token.EOL instead of token.type == 'EOL'.

If value is not given when Token is initialized and type is IF, ELSE_IF, ELSE, FOR, END, WITH_NAME or CONTINUATION, the value is automatically set to the correct marker value like 'IF' or 'ELSE IF'. If type is EOL in this case, the value is set to '\n'.

SETTING_HEADER = 'SETTING HEADER'
VARIABLE_HEADER = 'VARIABLE HEADER'
TESTCASE_HEADER = 'TESTCASE HEADER'
KEYWORD_HEADER = 'KEYWORD HEADER'
COMMENT_HEADER = 'COMMENT HEADER'
TESTCASE_NAME = 'TESTCASE NAME'
KEYWORD_NAME = 'KEYWORD NAME'
DOCUMENTATION = 'DOCUMENTATION'
SUITE_SETUP = 'SUITE SETUP'
SUITE_TEARDOWN = 'SUITE TEARDOWN'
METADATA = 'METADATA'
TEST_SETUP = 'TEST SETUP'
TEST_TEARDOWN = 'TEST TEARDOWN'
TEST_TEMPLATE = 'TEST TEMPLATE'
TEST_TIMEOUT = 'TEST TIMEOUT'
FORCE_TAGS = 'FORCE TAGS'
DEFAULT_TAGS = 'DEFAULT TAGS'
LIBRARY = 'LIBRARY'
RESOURCE = 'RESOURCE'
VARIABLES = 'VARIABLES'
SETUP = 'SETUP'
TEARDOWN = 'TEARDOWN'
TEMPLATE = 'TEMPLATE'
TIMEOUT = 'TIMEOUT'
TAGS = 'TAGS'
ARGUMENTS = 'ARGUMENTS'
RETURN = 'RETURN'
RETURN_SETTING = 'RETURN'
NAME = 'NAME'
VARIABLE = 'VARIABLE'
ARGUMENT = 'ARGUMENT'
ASSIGN = 'ASSIGN'
KEYWORD = 'KEYWORD'
WITH_NAME = 'WITH NAME'
FOR = 'FOR'
FOR_SEPARATOR = 'FOR SEPARATOR'
END = 'END'
IF = 'IF'
INLINE_IF = 'INLINE IF'
ELSE_IF = 'ELSE IF'
ELSE = 'ELSE'
TRY = 'TRY'
EXCEPT = 'EXCEPT'
FINALLY = 'FINALLY'
AS = 'AS'
WHILE = 'WHILE'
RETURN_STATEMENT = 'RETURN STATEMENT'
CONTINUE = 'CONTINUE'
BREAK = 'BREAK'
OPTION = 'OPTION'
SEPARATOR = 'SEPARATOR'
COMMENT = 'COMMENT'
CONTINUATION = 'CONTINUATION'
EOL = 'EOL'
EOS = 'EOS'
ERROR = 'ERROR'
FATAL_ERROR = 'FATAL ERROR'
NON_DATA_TOKENS = frozenset({'COMMENT', 'CONTINUATION', 'EOS', 'SEPARATOR', 'EOL'})
SETTING_TOKENS = frozenset({'SETUP', 'TEST TEMPLATE', 'LIBRARY', 'FORCE TAGS', 'RESOURCE', 'TEMPLATE', 'TEST SETUP', 'VARIABLES', 'SUITE SETUP', 'TEARDOWN', 'TEST TIMEOUT', 'DEFAULT TAGS', 'METADATA', 'ARGUMENTS', 'DOCUMENTATION', 'TEST TEARDOWN', 'RETURN', 'TIMEOUT', 'SUITE TEARDOWN', 'TAGS'})
HEADER_TOKENS = frozenset({'SETTING HEADER', 'COMMENT HEADER', 'VARIABLE HEADER', 'TESTCASE HEADER', 'KEYWORD HEADER'})
ALLOW_VARIABLES = frozenset({'TESTCASE NAME', 'ARGUMENT', 'KEYWORD NAME', 'NAME'})
type
value
lineno
col_offset
error
end_col_offset
set_error(error, fatal=False)[source]
tokenize_variables()[source]

Tokenizes possible variables in token value.

Yields the token itself if the token does not allow variables (see Token.ALLOW_VARIABLES) or its value does not contain variables. Otherwise yields variable tokens as well as tokens before, after, or between variables so that they have the same type as the original token.

class robot.parsing.lexer.tokens.EOS(lineno=-1, col_offset=-1)[source]

Bases: robot.parsing.lexer.tokens.Token

Token representing end of a statement.

classmethod from_token(token, before=False)[source]
ALLOW_VARIABLES = frozenset({'TESTCASE NAME', 'ARGUMENT', 'KEYWORD NAME', 'NAME'})
ARGUMENT = 'ARGUMENT'
ARGUMENTS = 'ARGUMENTS'
AS = 'AS'
ASSIGN = 'ASSIGN'
BREAK = 'BREAK'
COMMENT = 'COMMENT'
COMMENT_HEADER = 'COMMENT HEADER'
CONTINUATION = 'CONTINUATION'
CONTINUE = 'CONTINUE'
DEFAULT_TAGS = 'DEFAULT TAGS'
DOCUMENTATION = 'DOCUMENTATION'
ELSE = 'ELSE'
ELSE_IF = 'ELSE IF'
END = 'END'
EOL = 'EOL'
EOS = 'EOS'
ERROR = 'ERROR'
EXCEPT = 'EXCEPT'
FATAL_ERROR = 'FATAL ERROR'
FINALLY = 'FINALLY'
FOR = 'FOR'
FORCE_TAGS = 'FORCE TAGS'
FOR_SEPARATOR = 'FOR SEPARATOR'
HEADER_TOKENS = frozenset({'SETTING HEADER', 'COMMENT HEADER', 'VARIABLE HEADER', 'TESTCASE HEADER', 'KEYWORD HEADER'})
IF = 'IF'
INLINE_IF = 'INLINE IF'
KEYWORD = 'KEYWORD'
KEYWORD_HEADER = 'KEYWORD HEADER'
KEYWORD_NAME = 'KEYWORD NAME'
LIBRARY = 'LIBRARY'
METADATA = 'METADATA'
NAME = 'NAME'
NON_DATA_TOKENS = frozenset({'COMMENT', 'CONTINUATION', 'EOS', 'SEPARATOR', 'EOL'})
OPTION = 'OPTION'
RESOURCE = 'RESOURCE'
RETURN = 'RETURN'
RETURN_SETTING = 'RETURN'
RETURN_STATEMENT = 'RETURN STATEMENT'
SEPARATOR = 'SEPARATOR'
SETTING_HEADER = 'SETTING HEADER'
SETTING_TOKENS = frozenset({'SETUP', 'TEST TEMPLATE', 'LIBRARY', 'FORCE TAGS', 'RESOURCE', 'TEMPLATE', 'TEST SETUP', 'VARIABLES', 'SUITE SETUP', 'TEARDOWN', 'TEST TIMEOUT', 'DEFAULT TAGS', 'METADATA', 'ARGUMENTS', 'DOCUMENTATION', 'TEST TEARDOWN', 'RETURN', 'TIMEOUT', 'SUITE TEARDOWN', 'TAGS'})
SETUP = 'SETUP'
SUITE_SETUP = 'SUITE SETUP'
SUITE_TEARDOWN = 'SUITE TEARDOWN'
TAGS = 'TAGS'
TEARDOWN = 'TEARDOWN'
TEMPLATE = 'TEMPLATE'
TESTCASE_HEADER = 'TESTCASE HEADER'
TESTCASE_NAME = 'TESTCASE NAME'
TEST_SETUP = 'TEST SETUP'
TEST_TEARDOWN = 'TEST TEARDOWN'
TEST_TEMPLATE = 'TEST TEMPLATE'
TEST_TIMEOUT = 'TEST TIMEOUT'
TIMEOUT = 'TIMEOUT'
TRY = 'TRY'
VARIABLE = 'VARIABLE'
VARIABLES = 'VARIABLES'
VARIABLE_HEADER = 'VARIABLE HEADER'
WHILE = 'WHILE'
WITH_NAME = 'WITH NAME'
col_offset
end_col_offset
error
lineno
set_error(error, fatal=False)
tokenize_variables()

Tokenizes possible variables in token value.

Yields the token itself if the token does not allow variables (see Token.ALLOW_VARIABLES) or its value does not contain variables. Otherwise yields variable tokens as well as tokens before, after, or between variables so that they have the same type as the original token.

type
value
class robot.parsing.lexer.tokens.END(lineno=-1, col_offset=-1, virtual=False)[source]

Bases: robot.parsing.lexer.tokens.Token

Token representing END token used to signify block ending.

Virtual END tokens have ‘’ as their value, with “real” END tokens the value is ‘END’.

classmethod from_token(token, virtual=False)[source]
ALLOW_VARIABLES = frozenset({'TESTCASE NAME', 'ARGUMENT', 'KEYWORD NAME', 'NAME'})
ARGUMENT = 'ARGUMENT'
ARGUMENTS = 'ARGUMENTS'
AS = 'AS'
ASSIGN = 'ASSIGN'
BREAK = 'BREAK'
COMMENT = 'COMMENT'
COMMENT_HEADER = 'COMMENT HEADER'
CONTINUATION = 'CONTINUATION'
CONTINUE = 'CONTINUE'
DEFAULT_TAGS = 'DEFAULT TAGS'
DOCUMENTATION = 'DOCUMENTATION'
ELSE = 'ELSE'
ELSE_IF = 'ELSE IF'
END = 'END'
EOL = 'EOL'
EOS = 'EOS'
ERROR = 'ERROR'
EXCEPT = 'EXCEPT'
FATAL_ERROR = 'FATAL ERROR'
FINALLY = 'FINALLY'
FOR = 'FOR'
FORCE_TAGS = 'FORCE TAGS'
FOR_SEPARATOR = 'FOR SEPARATOR'
HEADER_TOKENS = frozenset({'SETTING HEADER', 'COMMENT HEADER', 'VARIABLE HEADER', 'TESTCASE HEADER', 'KEYWORD HEADER'})
IF = 'IF'
INLINE_IF = 'INLINE IF'
KEYWORD = 'KEYWORD'
KEYWORD_HEADER = 'KEYWORD HEADER'
KEYWORD_NAME = 'KEYWORD NAME'
LIBRARY = 'LIBRARY'
METADATA = 'METADATA'
NAME = 'NAME'
NON_DATA_TOKENS = frozenset({'COMMENT', 'CONTINUATION', 'EOS', 'SEPARATOR', 'EOL'})
OPTION = 'OPTION'
RESOURCE = 'RESOURCE'
RETURN = 'RETURN'
RETURN_SETTING = 'RETURN'
RETURN_STATEMENT = 'RETURN STATEMENT'
SEPARATOR = 'SEPARATOR'
SETTING_HEADER = 'SETTING HEADER'
SETTING_TOKENS = frozenset({'SETUP', 'TEST TEMPLATE', 'LIBRARY', 'FORCE TAGS', 'RESOURCE', 'TEMPLATE', 'TEST SETUP', 'VARIABLES', 'SUITE SETUP', 'TEARDOWN', 'TEST TIMEOUT', 'DEFAULT TAGS', 'METADATA', 'ARGUMENTS', 'DOCUMENTATION', 'TEST TEARDOWN', 'RETURN', 'TIMEOUT', 'SUITE TEARDOWN', 'TAGS'})
SETUP = 'SETUP'
SUITE_SETUP = 'SUITE SETUP'
SUITE_TEARDOWN = 'SUITE TEARDOWN'
TAGS = 'TAGS'
TEARDOWN = 'TEARDOWN'
TEMPLATE = 'TEMPLATE'
TESTCASE_HEADER = 'TESTCASE HEADER'
TESTCASE_NAME = 'TESTCASE NAME'
TEST_SETUP = 'TEST SETUP'
TEST_TEARDOWN = 'TEST TEARDOWN'
TEST_TEMPLATE = 'TEST TEMPLATE'
TEST_TIMEOUT = 'TEST TIMEOUT'
TIMEOUT = 'TIMEOUT'
TRY = 'TRY'
VARIABLE = 'VARIABLE'
VARIABLES = 'VARIABLES'
VARIABLE_HEADER = 'VARIABLE HEADER'
WHILE = 'WHILE'
WITH_NAME = 'WITH NAME'
col_offset
end_col_offset
error
lineno
set_error(error, fatal=False)
tokenize_variables()

Tokenizes possible variables in token value.

Yields the token itself if the token does not allow variables (see Token.ALLOW_VARIABLES) or its value does not contain variables. Otherwise yields variable tokens as well as tokens before, after, or between variables so that they have the same type as the original token.

type
value