robot.parsing.lexer package¶
Submodules¶
robot.parsing.lexer.blocklexers module¶
-
class
robot.parsing.lexer.blocklexers.
BlockLexer
(ctx)[source]¶ Bases:
robot.parsing.lexer.statementlexers.Lexer
-
handles
(statement)¶
-
-
class
robot.parsing.lexer.blocklexers.
FileLexer
(ctx)[source]¶ Bases:
robot.parsing.lexer.blocklexers.BlockLexer
-
accepts_more
(statement)¶
-
handles
(statement)¶
-
input
(statement)¶
-
lexer_for
(statement)¶
-
-
class
robot.parsing.lexer.blocklexers.
SectionLexer
(ctx)[source]¶ Bases:
robot.parsing.lexer.blocklexers.BlockLexer
-
handles
(statement)¶
-
input
(statement)¶
-
lex
()¶
-
lexer_classes
()¶
-
lexer_for
(statement)¶
-
-
class
robot.parsing.lexer.blocklexers.
SettingSectionLexer
(ctx)[source]¶ Bases:
robot.parsing.lexer.blocklexers.SectionLexer
-
accepts_more
(statement)¶
-
input
(statement)¶
-
lex
()¶
-
lexer_for
(statement)¶
-
-
class
robot.parsing.lexer.blocklexers.
VariableSectionLexer
(ctx)[source]¶ Bases:
robot.parsing.lexer.blocklexers.SectionLexer
-
accepts_more
(statement)¶
-
input
(statement)¶
-
lex
()¶
-
lexer_for
(statement)¶
-
-
class
robot.parsing.lexer.blocklexers.
TestCaseSectionLexer
(ctx)[source]¶ Bases:
robot.parsing.lexer.blocklexers.SectionLexer
-
accepts_more
(statement)¶
-
input
(statement)¶
-
lex
()¶
-
lexer_for
(statement)¶
-
-
class
robot.parsing.lexer.blocklexers.
KeywordSectionLexer
(ctx)[source]¶ Bases:
robot.parsing.lexer.blocklexers.SettingSectionLexer
-
accepts_more
(statement)¶
-
input
(statement)¶
-
lex
()¶
-
lexer_for
(statement)¶
-
-
class
robot.parsing.lexer.blocklexers.
CommentSectionLexer
(ctx)[source]¶ Bases:
robot.parsing.lexer.blocklexers.SectionLexer
-
accepts_more
(statement)¶
-
input
(statement)¶
-
lex
()¶
-
lexer_for
(statement)¶
-
-
class
robot.parsing.lexer.blocklexers.
ImplicitCommentSectionLexer
(ctx)[source]¶ Bases:
robot.parsing.lexer.blocklexers.SectionLexer
-
accepts_more
(statement)¶
-
input
(statement)¶
-
lex
()¶
-
lexer_for
(statement)¶
-
-
class
robot.parsing.lexer.blocklexers.
ErrorSectionLexer
(ctx)[source]¶ Bases:
robot.parsing.lexer.blocklexers.SectionLexer
-
accepts_more
(statement)¶
-
input
(statement)¶
-
lex
()¶
-
lexer_for
(statement)¶
-
-
class
robot.parsing.lexer.blocklexers.
TestOrKeywordLexer
(ctx)[source]¶ Bases:
robot.parsing.lexer.blocklexers.BlockLexer
-
name_type
= NotImplemented¶
-
handles
(statement)¶
-
lex
()¶
-
lexer_for
(statement)¶
-
-
class
robot.parsing.lexer.blocklexers.
TestCaseLexer
(ctx)[source]¶ Bases:
robot.parsing.lexer.blocklexers.TestOrKeywordLexer
-
name_type
= 'TESTCASE NAME'¶
-
accepts_more
(statement)¶
-
handles
(statement)¶
-
input
(statement)¶
-
lexer_classes
()¶
-
lexer_for
(statement)¶
-
-
class
robot.parsing.lexer.blocklexers.
KeywordLexer
(ctx)[source]¶ Bases:
robot.parsing.lexer.blocklexers.TestOrKeywordLexer
-
name_type
= 'KEYWORD NAME'¶
-
accepts_more
(statement)¶
-
handles
(statement)¶
-
input
(statement)¶
-
lex
()¶
-
lexer_classes
()¶
-
lexer_for
(statement)¶
-
-
class
robot.parsing.lexer.blocklexers.
NestedBlockLexer
(ctx)[source]¶ Bases:
robot.parsing.lexer.blocklexers.BlockLexer
-
handles
(statement)¶
-
lex
()¶
-
lexer_classes
()¶
-
lexer_for
(statement)¶
-
-
class
robot.parsing.lexer.blocklexers.
ForLexer
(ctx)[source]¶ Bases:
robot.parsing.lexer.blocklexers.NestedBlockLexer
-
accepts_more
(statement)¶
-
input
(statement)¶
-
lex
()¶
-
lexer_for
(statement)¶
-
-
class
robot.parsing.lexer.blocklexers.
WhileLexer
(ctx)[source]¶ Bases:
robot.parsing.lexer.blocklexers.NestedBlockLexer
-
accepts_more
(statement)¶
-
input
(statement)¶
-
lex
()¶
-
lexer_for
(statement)¶
-
-
class
robot.parsing.lexer.blocklexers.
IfLexer
(ctx)[source]¶ Bases:
robot.parsing.lexer.blocklexers.NestedBlockLexer
-
accepts_more
(statement)¶
-
input
(statement)¶
-
lex
()¶
-
lexer_for
(statement)¶
-
-
class
robot.parsing.lexer.blocklexers.
InlineIfLexer
(ctx)[source]¶ Bases:
robot.parsing.lexer.blocklexers.BlockLexer
-
lex
()¶
-
lexer_for
(statement)¶
-
robot.parsing.lexer.context module¶
-
class
robot.parsing.lexer.context.
LexingContext
(settings=None)[source]¶ Bases:
object
-
settings_class
= None¶
-
-
class
robot.parsing.lexer.context.
FileContext
(settings=None)[source]¶ Bases:
robot.parsing.lexer.context.LexingContext
-
sections_class
= None¶
-
lex_setting
(statement)¶
-
settings_class
= None¶
-
-
class
robot.parsing.lexer.context.
TestCaseFileContext
(settings=None)[source]¶ Bases:
robot.parsing.lexer.context.FileContext
-
sections_class
¶
-
settings_class
¶
-
comment_section
(statement)¶
-
keyword_context
()¶
-
keyword_section
(statement)¶
-
lex_invalid_section
(statement)¶
-
lex_setting
(statement)¶
-
setting_section
(statement)¶
-
test_case_section
(statement)¶
-
variable_section
(statement)¶
-
-
class
robot.parsing.lexer.context.
ResourceFileContext
(settings=None)[source]¶ Bases:
robot.parsing.lexer.context.FileContext
-
sections_class
¶
-
settings_class
¶
-
comment_section
(statement)¶
-
keyword_context
()¶
-
keyword_section
(statement)¶
-
lex_invalid_section
(statement)¶
-
lex_setting
(statement)¶
-
setting_section
(statement)¶
-
test_case_section
(statement)¶
-
variable_section
(statement)¶
-
-
class
robot.parsing.lexer.context.
InitFileContext
(settings=None)[source]¶ Bases:
robot.parsing.lexer.context.FileContext
-
sections_class
¶
-
settings_class
¶
-
comment_section
(statement)¶
-
keyword_context
()¶
-
keyword_section
(statement)¶
-
lex_invalid_section
(statement)¶
-
lex_setting
(statement)¶
-
setting_section
(statement)¶
-
test_case_section
(statement)¶
-
variable_section
(statement)¶
-
robot.parsing.lexer.lexer module¶
-
robot.parsing.lexer.lexer.
get_tokens
(source, data_only=False, tokenize_variables=False)[source]¶ Parses the given source to tokens.
Parameters:
- source – The source where to read the data. Can be a path to a source file
  as a string or as a pathlib.Path object, an already opened file object, or
  Unicode text containing the data directly. Source files must be UTF-8
  encoded.
- data_only – When False (default), returns all tokens. When set to True,
  omits separators, comments, continuation markers, and other non-data tokens.
- tokenize_variables – When True, possible variables in keyword arguments and
  elsewhere are tokenized. See the tokenize_variables() method for details.
Returns a generator that yields Token instances.
-
robot.parsing.lexer.lexer.
get_resource_tokens
(source, data_only=False, tokenize_variables=False)[source]¶ Parses the given source to resource file tokens.
Otherwise same as
get_tokens()
but the source is considered to be a resource file. This affects, for example, what settings are valid.
-
robot.parsing.lexer.lexer.
get_init_tokens
(source, data_only=False, tokenize_variables=False)[source]¶ Parses the given source to init file tokens.
Otherwise same as
get_tokens()
but the source is considered to be a suite initialization file. This affects, for example, what settings are valid.
robot.parsing.lexer.sections module¶
-
class
robot.parsing.lexer.sections.
Sections
[source]¶ Bases:
object
-
setting_markers
= ('Settings', 'Setting')¶
-
variable_markers
= ('Variables', 'Variable')¶
-
test_case_markers
= ('Test Cases', 'Test Case', 'Tasks', 'Task')¶
-
keyword_markers
= ('Keywords', 'Keyword')¶
-
comment_markers
= ('Comments', 'Comment')¶
-
-
class
robot.parsing.lexer.sections.
TestCaseFileSections
[source]¶ Bases:
robot.parsing.lexer.sections.Sections
-
comment
(statement)¶
-
comment_markers
= ('Comments', 'Comment')¶
-
keyword
(statement)¶
-
keyword_markers
= ('Keywords', 'Keyword')¶
-
lex_invalid
(statement)¶
-
setting
(statement)¶
-
setting_markers
= ('Settings', 'Setting')¶
-
test_case_markers
= ('Test Cases', 'Test Case', 'Tasks', 'Task')¶
-
variable
(statement)¶
-
variable_markers
= ('Variables', 'Variable')¶
-
-
class
robot.parsing.lexer.sections.
ResourceFileSections
[source]¶ Bases:
robot.parsing.lexer.sections.Sections
-
comment
(statement)¶
-
comment_markers
= ('Comments', 'Comment')¶
-
keyword
(statement)¶
-
keyword_markers
= ('Keywords', 'Keyword')¶
-
lex_invalid
(statement)¶
-
setting
(statement)¶
-
setting_markers
= ('Settings', 'Setting')¶
-
test_case
(statement)¶
-
test_case_markers
= ('Test Cases', 'Test Case', 'Tasks', 'Task')¶
-
variable
(statement)¶
-
variable_markers
= ('Variables', 'Variable')¶
-
-
class
robot.parsing.lexer.sections.
InitFileSections
[source]¶ Bases:
robot.parsing.lexer.sections.Sections
-
comment
(statement)¶
-
comment_markers
= ('Comments', 'Comment')¶
-
keyword
(statement)¶
-
keyword_markers
= ('Keywords', 'Keyword')¶
-
lex_invalid
(statement)¶
-
setting
(statement)¶
-
setting_markers
= ('Settings', 'Setting')¶
-
test_case
(statement)¶
-
test_case_markers
= ('Test Cases', 'Test Case', 'Tasks', 'Task')¶
-
variable
(statement)¶
-
variable_markers
= ('Variables', 'Variable')¶
-
robot.parsing.lexer.settings module¶
-
class
robot.parsing.lexer.settings.
Settings
[source]¶ Bases:
object
-
names
= ()¶
-
aliases
= {}¶
-
multi_use
= ('Metadata', 'Library', 'Resource', 'Variables')¶
-
single_value
= ('Resource', 'Test Timeout', 'Test Template', 'Timeout', 'Template')¶
-
name_and_arguments
= ('Metadata', 'Suite Setup', 'Suite Teardown', 'Test Setup', 'Test Teardown', 'Test Template', 'Setup', 'Teardown', 'Template', 'Resource', 'Variables')¶
-
name_arguments_and_with_name
= ('Library',)¶
-
-
class
robot.parsing.lexer.settings.
TestCaseFileSettings
[source]¶ Bases:
robot.parsing.lexer.settings.Settings
-
names
= ('Documentation', 'Metadata', 'Suite Setup', 'Suite Teardown', 'Test Setup', 'Test Teardown', 'Test Template', 'Test Timeout', 'Force Tags', 'Default Tags', 'Library', 'Resource', 'Variables')¶
-
aliases
= {'Task Setup': 'Test Setup', 'Task Teardown': 'Test Teardown', 'Task Template': 'Test Template', 'Task Timeout': 'Test Timeout'}¶
-
lex
(statement)¶
-
multi_use
= ('Metadata', 'Library', 'Resource', 'Variables')¶
-
name_and_arguments
= ('Metadata', 'Suite Setup', 'Suite Teardown', 'Test Setup', 'Test Teardown', 'Test Template', 'Setup', 'Teardown', 'Template', 'Resource', 'Variables')¶
-
name_arguments_and_with_name
= ('Library',)¶
-
single_value
= ('Resource', 'Test Timeout', 'Test Template', 'Timeout', 'Template')¶
-
-
class
robot.parsing.lexer.settings.
InitFileSettings
[source]¶ Bases:
robot.parsing.lexer.settings.Settings
-
names
= ('Documentation', 'Metadata', 'Suite Setup', 'Suite Teardown', 'Test Setup', 'Test Teardown', 'Test Timeout', 'Force Tags', 'Library', 'Resource', 'Variables')¶
-
aliases
= {}¶
-
lex
(statement)¶
-
multi_use
= ('Metadata', 'Library', 'Resource', 'Variables')¶
-
name_and_arguments
= ('Metadata', 'Suite Setup', 'Suite Teardown', 'Test Setup', 'Test Teardown', 'Test Template', 'Setup', 'Teardown', 'Template', 'Resource', 'Variables')¶
-
name_arguments_and_with_name
= ('Library',)¶
-
single_value
= ('Resource', 'Test Timeout', 'Test Template', 'Timeout', 'Template')¶
-
-
class
robot.parsing.lexer.settings.
ResourceFileSettings
[source]¶ Bases:
robot.parsing.lexer.settings.Settings
-
names
= ('Documentation', 'Library', 'Resource', 'Variables')¶
-
aliases
= {}¶
-
lex
(statement)¶
-
multi_use
= ('Metadata', 'Library', 'Resource', 'Variables')¶
-
name_and_arguments
= ('Metadata', 'Suite Setup', 'Suite Teardown', 'Test Setup', 'Test Teardown', 'Test Template', 'Setup', 'Teardown', 'Template', 'Resource', 'Variables')¶
-
name_arguments_and_with_name
= ('Library',)¶
-
single_value
= ('Resource', 'Test Timeout', 'Test Template', 'Timeout', 'Template')¶
-
-
class
robot.parsing.lexer.settings.
TestCaseSettings
(parent)[source]¶ Bases:
robot.parsing.lexer.settings.Settings
-
names
= ('Documentation', 'Tags', 'Setup', 'Teardown', 'Template', 'Timeout')¶
-
template_set
¶
-
aliases
= {}¶
-
lex
(statement)¶
-
multi_use
= ('Metadata', 'Library', 'Resource', 'Variables')¶
-
name_and_arguments
= ('Metadata', 'Suite Setup', 'Suite Teardown', 'Test Setup', 'Test Teardown', 'Test Template', 'Setup', 'Teardown', 'Template', 'Resource', 'Variables')¶
-
name_arguments_and_with_name
= ('Library',)¶
-
single_value
= ('Resource', 'Test Timeout', 'Test Template', 'Timeout', 'Template')¶
-
-
class
robot.parsing.lexer.settings.
KeywordSettings
[source]¶ Bases:
robot.parsing.lexer.settings.Settings
-
names
= ('Documentation', 'Arguments', 'Teardown', 'Timeout', 'Tags', 'Return')¶
-
aliases
= {}¶
-
lex
(statement)¶
-
multi_use
= ('Metadata', 'Library', 'Resource', 'Variables')¶
-
name_and_arguments
= ('Metadata', 'Suite Setup', 'Suite Teardown', 'Test Setup', 'Test Teardown', 'Test Template', 'Setup', 'Teardown', 'Template', 'Resource', 'Variables')¶
-
name_arguments_and_with_name
= ('Library',)¶
-
single_value
= ('Resource', 'Test Timeout', 'Test Template', 'Timeout', 'Template')¶
-
robot.parsing.lexer.statementlexers module¶
-
class
robot.parsing.lexer.statementlexers.
StatementLexer
(ctx)[source]¶ Bases:
robot.parsing.lexer.statementlexers.Lexer
-
token_type
= None¶
-
handles
(statement)¶
-
-
class
robot.parsing.lexer.statementlexers.
SingleType
(ctx)[source]¶ Bases:
robot.parsing.lexer.statementlexers.StatementLexer
-
accepts_more
(statement)¶
-
handles
(statement)¶
-
input
(statement)¶
-
token_type
= None¶
-
-
class
robot.parsing.lexer.statementlexers.
TypeAndArguments
(ctx)[source]¶ Bases:
robot.parsing.lexer.statementlexers.StatementLexer
-
accepts_more
(statement)¶
-
handles
(statement)¶
-
input
(statement)¶
-
token_type
= None¶
-
-
class
robot.parsing.lexer.statementlexers.
SectionHeaderLexer
(ctx)[source]¶ Bases:
robot.parsing.lexer.statementlexers.SingleType
-
accepts_more
(statement)¶
-
input
(statement)¶
-
lex
()¶
-
token_type
= None¶
-
-
class
robot.parsing.lexer.statementlexers.
SettingSectionHeaderLexer
(ctx)[source]¶ Bases:
robot.parsing.lexer.statementlexers.SectionHeaderLexer
-
token_type
= 'SETTING HEADER'¶
-
accepts_more
(statement)¶
-
handles
(statement)¶
-
input
(statement)¶
-
lex
()¶
-
-
class
robot.parsing.lexer.statementlexers.
VariableSectionHeaderLexer
(ctx)[source]¶ Bases:
robot.parsing.lexer.statementlexers.SectionHeaderLexer
-
token_type
= 'VARIABLE HEADER'¶
-
accepts_more
(statement)¶
-
handles
(statement)¶
-
input
(statement)¶
-
lex
()¶
-
-
class
robot.parsing.lexer.statementlexers.
TestCaseSectionHeaderLexer
(ctx)[source]¶ Bases:
robot.parsing.lexer.statementlexers.SectionHeaderLexer
-
token_type
= 'TESTCASE HEADER'¶
-
accepts_more
(statement)¶
-
handles
(statement)¶
-
input
(statement)¶
-
lex
()¶
-
-
class
robot.parsing.lexer.statementlexers.
KeywordSectionHeaderLexer
(ctx)[source]¶ Bases:
robot.parsing.lexer.statementlexers.SectionHeaderLexer
-
token_type
= 'KEYWORD HEADER'¶
-
accepts_more
(statement)¶
-
handles
(statement)¶
-
input
(statement)¶
-
lex
()¶
-
-
class
robot.parsing.lexer.statementlexers.
CommentSectionHeaderLexer
(ctx)[source]¶ Bases:
robot.parsing.lexer.statementlexers.SectionHeaderLexer
-
token_type
= 'COMMENT HEADER'¶
-
accepts_more
(statement)¶
-
handles
(statement)¶
-
input
(statement)¶
-
lex
()¶
-
-
class
robot.parsing.lexer.statementlexers.
ErrorSectionHeaderLexer
(ctx)[source]¶ Bases:
robot.parsing.lexer.statementlexers.SectionHeaderLexer
-
accepts_more
(statement)¶
-
handles
(statement)¶
-
input
(statement)¶
-
token_type
= None¶
-
-
class
robot.parsing.lexer.statementlexers.
CommentLexer
(ctx)[source]¶ Bases:
robot.parsing.lexer.statementlexers.SingleType
-
token_type
= 'COMMENT'¶
-
accepts_more
(statement)¶
-
handles
(statement)¶
-
input
(statement)¶
-
lex
()¶
-
-
class
robot.parsing.lexer.statementlexers.
SettingLexer
(ctx)[source]¶ Bases:
robot.parsing.lexer.statementlexers.StatementLexer
-
accepts_more
(statement)¶
-
handles
(statement)¶
-
input
(statement)¶
-
token_type
= None¶
-
-
class
robot.parsing.lexer.statementlexers.
TestOrKeywordSettingLexer
(ctx)[source]¶ Bases:
robot.parsing.lexer.statementlexers.SettingLexer
-
accepts_more
(statement)¶
-
input
(statement)¶
-
lex
()¶
-
token_type
= None¶
-
-
class
robot.parsing.lexer.statementlexers.
VariableLexer
(ctx)[source]¶ Bases:
robot.parsing.lexer.statementlexers.TypeAndArguments
-
token_type
= 'VARIABLE'¶
-
accepts_more
(statement)¶
-
handles
(statement)¶
-
input
(statement)¶
-
lex
()¶
-
-
class
robot.parsing.lexer.statementlexers.
KeywordCallLexer
(ctx)[source]¶ Bases:
robot.parsing.lexer.statementlexers.StatementLexer
-
accepts_more
(statement)¶
-
handles
(statement)¶
-
input
(statement)¶
-
token_type
= None¶
-
-
class
robot.parsing.lexer.statementlexers.
ForHeaderLexer
(ctx)[source]¶ Bases:
robot.parsing.lexer.statementlexers.StatementLexer
-
separators
= ('IN', 'IN RANGE', 'IN ENUMERATE', 'IN ZIP')¶
-
accepts_more
(statement)¶
-
input
(statement)¶
-
token_type
= None¶
-
-
class
robot.parsing.lexer.statementlexers.
IfHeaderLexer
(ctx)[source]¶ Bases:
robot.parsing.lexer.statementlexers.TypeAndArguments
-
token_type
= 'IF'¶
-
accepts_more
(statement)¶
-
input
(statement)¶
-
lex
()¶
-
-
class
robot.parsing.lexer.statementlexers.
InlineIfHeaderLexer
(ctx)[source]¶ Bases:
robot.parsing.lexer.statementlexers.StatementLexer
-
token_type
= 'INLINE IF'¶
-
accepts_more
(statement)¶
-
input
(statement)¶
-
-
class
robot.parsing.lexer.statementlexers.
ElseIfHeaderLexer
(ctx)[source]¶ Bases:
robot.parsing.lexer.statementlexers.TypeAndArguments
-
token_type
= 'ELSE IF'¶
-
accepts_more
(statement)¶
-
input
(statement)¶
-
lex
()¶
-
-
class
robot.parsing.lexer.statementlexers.
ElseHeaderLexer
(ctx)[source]¶ Bases:
robot.parsing.lexer.statementlexers.TypeAndArguments
-
token_type
= 'ELSE'¶
-
accepts_more
(statement)¶
-
input
(statement)¶
-
lex
()¶
-
-
class
robot.parsing.lexer.statementlexers.
TryHeaderLexer
(ctx)[source]¶ Bases:
robot.parsing.lexer.statementlexers.TypeAndArguments
-
token_type
= 'TRY'¶
-
accepts_more
(statement)¶
-
input
(statement)¶
-
lex
()¶
-
-
class
robot.parsing.lexer.statementlexers.
ExceptHeaderLexer
(ctx)[source]¶ Bases:
robot.parsing.lexer.statementlexers.StatementLexer
-
token_type
= 'EXCEPT'¶
-
accepts_more
(statement)¶
-
input
(statement)¶
-
-
class
robot.parsing.lexer.statementlexers.
FinallyHeaderLexer
(ctx)[source]¶ Bases:
robot.parsing.lexer.statementlexers.TypeAndArguments
-
token_type
= 'FINALLY'¶
-
accepts_more
(statement)¶
-
input
(statement)¶
-
lex
()¶
-
-
class
robot.parsing.lexer.statementlexers.
WhileHeaderLexer
(ctx)[source]¶ Bases:
robot.parsing.lexer.statementlexers.StatementLexer
-
token_type
= 'WHILE'¶
-
accepts_more
(statement)¶
-
input
(statement)¶
-
-
class
robot.parsing.lexer.statementlexers.
EndLexer
(ctx)[source]¶ Bases:
robot.parsing.lexer.statementlexers.TypeAndArguments
-
token_type
= 'END'¶
-
accepts_more
(statement)¶
-
input
(statement)¶
-
lex
()¶
-
-
class
robot.parsing.lexer.statementlexers.
ReturnLexer
(ctx)[source]¶ Bases:
robot.parsing.lexer.statementlexers.TypeAndArguments
-
token_type
= 'RETURN STATEMENT'¶
-
accepts_more
(statement)¶
-
input
(statement)¶
-
lex
()¶
-
robot.parsing.lexer.tokenizer module¶
robot.parsing.lexer.tokens module¶
-
class
robot.parsing.lexer.tokens.
Token
(type=None, value=None, lineno=-1, col_offset=-1, error=None)[source]¶ Bases:
object
Token representing piece of Robot Framework data.
Each token has type, value, line number, column offset and end column offset
in the type, value, lineno, col_offset and end_col_offset attributes,
respectively. Tokens representing errors also have their error message in the
error attribute.

Token types are declared as class attributes such as SETTING_HEADER and EOL.
Values of these constants have changed slightly in Robot Framework 4.0 and
they may change again in the future. It is thus safer to use the constants,
not their values, when types are needed. For example, use Token(Token.EOL)
instead of Token('EOL') and token.type == Token.EOL instead of
token.type == 'EOL'.
If value is not given when Token is initialized and type is IF, ELSE_IF,
ELSE, FOR, END, WITH_NAME or CONTINUATION, the value is automatically set to
the correct marker value like 'IF' or 'ELSE IF'. If type is EOL in this case,
the value is set to '\n'.
.-
SETTING_HEADER
= 'SETTING HEADER'¶
-
VARIABLE_HEADER
= 'VARIABLE HEADER'¶
-
TESTCASE_HEADER
= 'TESTCASE HEADER'¶
-
KEYWORD_HEADER
= 'KEYWORD HEADER'¶
-
COMMENT_HEADER
= 'COMMENT HEADER'¶
-
TESTCASE_NAME
= 'TESTCASE NAME'¶
-
KEYWORD_NAME
= 'KEYWORD NAME'¶
-
DOCUMENTATION
= 'DOCUMENTATION'¶
-
SUITE_SETUP
= 'SUITE SETUP'¶
-
SUITE_TEARDOWN
= 'SUITE TEARDOWN'¶
-
METADATA
= 'METADATA'¶
-
TEST_SETUP
= 'TEST SETUP'¶
-
TEST_TEARDOWN
= 'TEST TEARDOWN'¶
-
TEST_TEMPLATE
= 'TEST TEMPLATE'¶
-
TEST_TIMEOUT
= 'TEST TIMEOUT'¶
-
FORCE_TAGS
= 'FORCE TAGS'¶
-
DEFAULT_TAGS
= 'DEFAULT TAGS'¶
-
LIBRARY
= 'LIBRARY'¶
-
RESOURCE
= 'RESOURCE'¶
-
VARIABLES
= 'VARIABLES'¶
-
SETUP
= 'SETUP'¶
-
TEARDOWN
= 'TEARDOWN'¶
-
TEMPLATE
= 'TEMPLATE'¶
-
TIMEOUT
= 'TIMEOUT'¶
-
TAGS
= 'TAGS'¶
-
ARGUMENTS
= 'ARGUMENTS'¶
-
RETURN
= 'RETURN'¶
-
RETURN_SETTING
= 'RETURN'¶
-
NAME
= 'NAME'¶
-
VARIABLE
= 'VARIABLE'¶
-
ARGUMENT
= 'ARGUMENT'¶
-
ASSIGN
= 'ASSIGN'¶
-
KEYWORD
= 'KEYWORD'¶
-
WITH_NAME
= 'WITH NAME'¶
-
FOR
= 'FOR'¶
-
FOR_SEPARATOR
= 'FOR SEPARATOR'¶
-
END
= 'END'¶
-
IF
= 'IF'¶
-
INLINE_IF
= 'INLINE IF'¶
-
ELSE_IF
= 'ELSE IF'¶
-
ELSE
= 'ELSE'¶
-
TRY
= 'TRY'¶
-
EXCEPT
= 'EXCEPT'¶
-
FINALLY
= 'FINALLY'¶
-
AS
= 'AS'¶
-
WHILE
= 'WHILE'¶
-
RETURN_STATEMENT
= 'RETURN STATEMENT'¶
-
CONTINUE
= 'CONTINUE'¶
-
BREAK
= 'BREAK'¶
-
OPTION
= 'OPTION'¶
-
SEPARATOR
= 'SEPARATOR'¶
-
COMMENT
= 'COMMENT'¶
-
CONTINUATION
= 'CONTINUATION'¶
-
EOL
= 'EOL'¶
-
EOS
= 'EOS'¶
-
ERROR
= 'ERROR'¶
-
FATAL_ERROR
= 'FATAL ERROR'¶
-
NON_DATA_TOKENS
= frozenset({'EOL', 'COMMENT', 'EOS', 'CONTINUATION', 'SEPARATOR'})¶
-
SETTING_TOKENS
= frozenset({'DEFAULT TAGS', 'TEMPLATE', 'METADATA', 'TEST TEARDOWN', 'RETURN', 'SUITE TEARDOWN', 'ARGUMENTS', 'RESOURCE', 'TAGS', 'LIBRARY', 'TEST TIMEOUT', 'SUITE SETUP', 'FORCE TAGS', 'SETUP', 'TEST TEMPLATE', 'TEARDOWN', 'TEST SETUP', 'TIMEOUT', 'DOCUMENTATION', 'VARIABLES'})¶
-
HEADER_TOKENS
= frozenset({'COMMENT HEADER', 'KEYWORD HEADER', 'TESTCASE HEADER', 'VARIABLE HEADER', 'SETTING HEADER'})¶
-
ALLOW_VARIABLES
= frozenset({'NAME', 'KEYWORD NAME', 'TESTCASE NAME', 'ARGUMENT'})¶
-
type
¶
-
value
¶
-
lineno
¶
-
col_offset
¶
-
error
¶
-
end_col_offset
¶
-
tokenize_variables
()[source]¶ Tokenizes possible variables in token value.
Yields the token itself if the token does not allow variables (see
Token.ALLOW_VARIABLES
) or its value does not contain variables. Otherwise yields variable tokens as well as tokens before, after, or between variables so that they have the same type as the original token.
-
-
class
robot.parsing.lexer.tokens.
EOS
(lineno=-1, col_offset=-1)[source]¶ Bases:
robot.parsing.lexer.tokens.Token
Token representing end of a statement.
-
ALLOW_VARIABLES
= frozenset({'NAME', 'KEYWORD NAME', 'TESTCASE NAME', 'ARGUMENT'})¶
-
ARGUMENT
= 'ARGUMENT'¶
-
ARGUMENTS
= 'ARGUMENTS'¶
-
AS
= 'AS'¶
-
ASSIGN
= 'ASSIGN'¶
-
BREAK
= 'BREAK'¶
-
COMMENT
= 'COMMENT'¶
-
COMMENT_HEADER
= 'COMMENT HEADER'¶
-
CONTINUATION
= 'CONTINUATION'¶
-
CONTINUE
= 'CONTINUE'¶
-
DEFAULT_TAGS
= 'DEFAULT TAGS'¶
-
DOCUMENTATION
= 'DOCUMENTATION'¶
-
ELSE
= 'ELSE'¶
-
ELSE_IF
= 'ELSE IF'¶
-
END
= 'END'¶
-
EOL
= 'EOL'¶
-
EOS
= 'EOS'¶
-
ERROR
= 'ERROR'¶
-
EXCEPT
= 'EXCEPT'¶
-
FATAL_ERROR
= 'FATAL ERROR'¶
-
FINALLY
= 'FINALLY'¶
-
FOR
= 'FOR'¶
-
FORCE_TAGS
= 'FORCE TAGS'¶
-
FOR_SEPARATOR
= 'FOR SEPARATOR'¶
-
HEADER_TOKENS
= frozenset({'COMMENT HEADER', 'KEYWORD HEADER', 'TESTCASE HEADER', 'VARIABLE HEADER', 'SETTING HEADER'})¶
-
IF
= 'IF'¶
-
INLINE_IF
= 'INLINE IF'¶
-
KEYWORD
= 'KEYWORD'¶
-
KEYWORD_HEADER
= 'KEYWORD HEADER'¶
-
KEYWORD_NAME
= 'KEYWORD NAME'¶
-
LIBRARY
= 'LIBRARY'¶
-
METADATA
= 'METADATA'¶
-
NAME
= 'NAME'¶
-
NON_DATA_TOKENS
= frozenset({'EOL', 'COMMENT', 'EOS', 'CONTINUATION', 'SEPARATOR'})¶
-
OPTION
= 'OPTION'¶
-
RESOURCE
= 'RESOURCE'¶
-
RETURN
= 'RETURN'¶
-
RETURN_SETTING
= 'RETURN'¶
-
RETURN_STATEMENT
= 'RETURN STATEMENT'¶
-
SEPARATOR
= 'SEPARATOR'¶
-
SETTING_HEADER
= 'SETTING HEADER'¶
-
SETTING_TOKENS
= frozenset({'DEFAULT TAGS', 'TEMPLATE', 'METADATA', 'TEST TEARDOWN', 'RETURN', 'SUITE TEARDOWN', 'ARGUMENTS', 'RESOURCE', 'TAGS', 'LIBRARY', 'TEST TIMEOUT', 'SUITE SETUP', 'FORCE TAGS', 'SETUP', 'TEST TEMPLATE', 'TEARDOWN', 'TEST SETUP', 'TIMEOUT', 'DOCUMENTATION', 'VARIABLES'})¶
-
SETUP
= 'SETUP'¶
-
SUITE_SETUP
= 'SUITE SETUP'¶
-
SUITE_TEARDOWN
= 'SUITE TEARDOWN'¶
-
TAGS
= 'TAGS'¶
-
TEARDOWN
= 'TEARDOWN'¶
-
TEMPLATE
= 'TEMPLATE'¶
-
TESTCASE_HEADER
= 'TESTCASE HEADER'¶
-
TESTCASE_NAME
= 'TESTCASE NAME'¶
-
TEST_SETUP
= 'TEST SETUP'¶
-
TEST_TEARDOWN
= 'TEST TEARDOWN'¶
-
TEST_TEMPLATE
= 'TEST TEMPLATE'¶
-
TEST_TIMEOUT
= 'TEST TIMEOUT'¶
-
TIMEOUT
= 'TIMEOUT'¶
-
TRY
= 'TRY'¶
-
VARIABLE
= 'VARIABLE'¶
-
VARIABLES
= 'VARIABLES'¶
-
VARIABLE_HEADER
= 'VARIABLE HEADER'¶
-
WHILE
= 'WHILE'¶
-
WITH_NAME
= 'WITH NAME'¶
-
col_offset
¶
-
end_col_offset
¶
-
error
¶
-
lineno
¶
-
set_error
(error, fatal=False)¶
-
tokenize_variables
()¶ Tokenizes possible variables in token value.
Yields the token itself if the token does not allow variables (see
Token.ALLOW_VARIABLES
) or its value does not contain variables. Otherwise yields variable tokens as well as tokens before, after, or between variables so that they have the same type as the original token.
-
type
¶
-
value
¶
-
-
class
robot.parsing.lexer.tokens.
END
(lineno=-1, col_offset=-1, virtual=False)[source]¶ Bases:
robot.parsing.lexer.tokens.Token
Token representing END token used to signify block ending.
Virtual END tokens have '' (an empty string) as their value, whereas "real" END tokens have the value 'END'.
-
ALLOW_VARIABLES
= frozenset({'NAME', 'KEYWORD NAME', 'TESTCASE NAME', 'ARGUMENT'})¶
-
ARGUMENT
= 'ARGUMENT'¶
-
ARGUMENTS
= 'ARGUMENTS'¶
-
AS
= 'AS'¶
-
ASSIGN
= 'ASSIGN'¶
-
BREAK
= 'BREAK'¶
-
COMMENT
= 'COMMENT'¶
-
COMMENT_HEADER
= 'COMMENT HEADER'¶
-
CONTINUATION
= 'CONTINUATION'¶
-
CONTINUE
= 'CONTINUE'¶
-
DEFAULT_TAGS
= 'DEFAULT TAGS'¶
-
DOCUMENTATION
= 'DOCUMENTATION'¶
-
ELSE
= 'ELSE'¶
-
ELSE_IF
= 'ELSE IF'¶
-
END
= 'END'¶
-
EOL
= 'EOL'¶
-
EOS
= 'EOS'¶
-
ERROR
= 'ERROR'¶
-
EXCEPT
= 'EXCEPT'¶
-
FATAL_ERROR
= 'FATAL ERROR'¶
-
FINALLY
= 'FINALLY'¶
-
FOR
= 'FOR'¶
-
FORCE_TAGS
= 'FORCE TAGS'¶
-
FOR_SEPARATOR
= 'FOR SEPARATOR'¶
-
HEADER_TOKENS
= frozenset({'COMMENT HEADER', 'KEYWORD HEADER', 'TESTCASE HEADER', 'VARIABLE HEADER', 'SETTING HEADER'})¶
-
IF
= 'IF'¶
-
INLINE_IF
= 'INLINE IF'¶
-
KEYWORD
= 'KEYWORD'¶
-
KEYWORD_HEADER
= 'KEYWORD HEADER'¶
-
KEYWORD_NAME
= 'KEYWORD NAME'¶
-
LIBRARY
= 'LIBRARY'¶
-
METADATA
= 'METADATA'¶
-
NAME
= 'NAME'¶
-
NON_DATA_TOKENS
= frozenset({'EOL', 'COMMENT', 'EOS', 'CONTINUATION', 'SEPARATOR'})¶
-
OPTION
= 'OPTION'¶
-
RESOURCE
= 'RESOURCE'¶
-
RETURN
= 'RETURN'¶
-
RETURN_SETTING
= 'RETURN'¶
-
RETURN_STATEMENT
= 'RETURN STATEMENT'¶
-
SEPARATOR
= 'SEPARATOR'¶
-
SETTING_HEADER
= 'SETTING HEADER'¶
-
SETTING_TOKENS
= frozenset({'DEFAULT TAGS', 'TEMPLATE', 'METADATA', 'TEST TEARDOWN', 'RETURN', 'SUITE TEARDOWN', 'ARGUMENTS', 'RESOURCE', 'TAGS', 'LIBRARY', 'TEST TIMEOUT', 'SUITE SETUP', 'FORCE TAGS', 'SETUP', 'TEST TEMPLATE', 'TEARDOWN', 'TEST SETUP', 'TIMEOUT', 'DOCUMENTATION', 'VARIABLES'})¶
-
SETUP
= 'SETUP'¶
-
SUITE_SETUP
= 'SUITE SETUP'¶
-
SUITE_TEARDOWN
= 'SUITE TEARDOWN'¶
-
TAGS
= 'TAGS'¶
-
TEARDOWN
= 'TEARDOWN'¶
-
TEMPLATE
= 'TEMPLATE'¶
-
TESTCASE_HEADER
= 'TESTCASE HEADER'¶
-
TESTCASE_NAME
= 'TESTCASE NAME'¶
-
TEST_SETUP
= 'TEST SETUP'¶
-
TEST_TEARDOWN
= 'TEST TEARDOWN'¶
-
TEST_TEMPLATE
= 'TEST TEMPLATE'¶
-
TEST_TIMEOUT
= 'TEST TIMEOUT'¶
-
TIMEOUT
= 'TIMEOUT'¶
-
TRY
= 'TRY'¶
-
VARIABLE
= 'VARIABLE'¶
-
VARIABLES
= 'VARIABLES'¶
-
VARIABLE_HEADER
= 'VARIABLE HEADER'¶
-
WHILE
= 'WHILE'¶
-
WITH_NAME
= 'WITH NAME'¶
-
col_offset
¶
-
end_col_offset
¶
-
error
¶
-
lineno
¶
-
set_error
(error, fatal=False)¶
-
tokenize_variables
()¶ Tokenizes possible variables in token value.
Yields the token itself if the token does not allow variables (see
Token.ALLOW_VARIABLES
) or its value does not contain variables. Otherwise yields variable tokens as well as tokens before, after, or between variables so that they have the same type as the original token.
-
type
¶
-
value
¶
-