robot.parsing.lexer package

Submodules

robot.parsing.lexer.blocklexers module

class robot.parsing.lexer.blocklexers.BlockLexer(ctx: robot.parsing.lexer.context.LexingContext)[source]

Bases: robot.parsing.lexer.statementlexers.Lexer, abc.ABC

accepts_more(statement: List[Token]) → bool[source]
input(statement: List[Token])[source]
lexer_for(statement: List[Token]) → robot.parsing.lexer.statementlexers.Lexer[source]
lexer_classes() → tuple[source]
lex()[source]
handles(statement: List[Token]) → bool
class robot.parsing.lexer.blocklexers.FileLexer(ctx: robot.parsing.lexer.context.LexingContext)[source]

Bases: robot.parsing.lexer.blocklexers.BlockLexer

lex()[source]
lexer_classes() → tuple[source]
accepts_more(statement: List[Token]) → bool
handles(statement: List[Token]) → bool
input(statement: List[Token])
lexer_for(statement: List[Token]) → robot.parsing.lexer.statementlexers.Lexer
class robot.parsing.lexer.blocklexers.SectionLexer(ctx: robot.parsing.lexer.context.LexingContext)[source]

Bases: robot.parsing.lexer.blocklexers.BlockLexer, abc.ABC

accepts_more(statement: List[Token]) → bool[source]
handles(statement: List[Token]) → bool
input(statement: List[Token])
lex()
lexer_classes() → tuple
lexer_for(statement: List[Token]) → robot.parsing.lexer.statementlexers.Lexer
class robot.parsing.lexer.blocklexers.SettingSectionLexer(ctx: robot.parsing.lexer.context.LexingContext)[source]

Bases: robot.parsing.lexer.blocklexers.SectionLexer

handles(statement: List[Token]) → bool[source]
lexer_classes() → tuple[source]
accepts_more(statement: List[Token]) → bool
input(statement: List[Token])
lex()
lexer_for(statement: List[Token]) → robot.parsing.lexer.statementlexers.Lexer
class robot.parsing.lexer.blocklexers.VariableSectionLexer(ctx: robot.parsing.lexer.context.LexingContext)[source]

Bases: robot.parsing.lexer.blocklexers.SectionLexer

handles(statement: List[Token]) → bool[source]
lexer_classes() → tuple[source]
accepts_more(statement: List[Token]) → bool
input(statement: List[Token])
lex()
lexer_for(statement: List[Token]) → robot.parsing.lexer.statementlexers.Lexer
class robot.parsing.lexer.blocklexers.TestCaseSectionLexer(ctx: robot.parsing.lexer.context.LexingContext)[source]

Bases: robot.parsing.lexer.blocklexers.SectionLexer

handles(statement: List[Token]) → bool[source]
lexer_classes() → tuple[source]
accepts_more(statement: List[Token]) → bool
input(statement: List[Token])
lex()
lexer_for(statement: List[Token]) → robot.parsing.lexer.statementlexers.Lexer
class robot.parsing.lexer.blocklexers.TaskSectionLexer(ctx: robot.parsing.lexer.context.LexingContext)[source]

Bases: robot.parsing.lexer.blocklexers.SectionLexer

handles(statement: List[Token]) → bool[source]
lexer_classes() → tuple[source]
accepts_more(statement: List[Token]) → bool
input(statement: List[Token])
lex()
lexer_for(statement: List[Token]) → robot.parsing.lexer.statementlexers.Lexer
class robot.parsing.lexer.blocklexers.KeywordSectionLexer(ctx: robot.parsing.lexer.context.LexingContext)[source]

Bases: robot.parsing.lexer.blocklexers.SettingSectionLexer

handles(statement: List[Token]) → bool[source]
lexer_classes() → tuple[source]
accepts_more(statement: List[Token]) → bool
input(statement: List[Token])
lex()
lexer_for(statement: List[Token]) → robot.parsing.lexer.statementlexers.Lexer
class robot.parsing.lexer.blocklexers.CommentSectionLexer(ctx: robot.parsing.lexer.context.LexingContext)[source]

Bases: robot.parsing.lexer.blocklexers.SectionLexer

handles(statement: List[Token]) → bool[source]
lexer_classes() → tuple[source]
accepts_more(statement: List[Token]) → bool
input(statement: List[Token])
lex()
lexer_for(statement: List[Token]) → robot.parsing.lexer.statementlexers.Lexer
class robot.parsing.lexer.blocklexers.ImplicitCommentSectionLexer(ctx: robot.parsing.lexer.context.LexingContext)[source]

Bases: robot.parsing.lexer.blocklexers.SectionLexer

handles(statement: List[Token]) → bool[source]
lexer_classes() → tuple[source]
accepts_more(statement: List[Token]) → bool
input(statement: List[Token])
lex()
lexer_for(statement: List[Token]) → robot.parsing.lexer.statementlexers.Lexer
class robot.parsing.lexer.blocklexers.InvalidSectionLexer(ctx: robot.parsing.lexer.context.LexingContext)[source]

Bases: robot.parsing.lexer.blocklexers.SectionLexer

handles(statement: List[Token]) → bool[source]
lexer_classes() → tuple[source]
accepts_more(statement: List[Token]) → bool
input(statement: List[Token])
lex()
lexer_for(statement: List[Token]) → robot.parsing.lexer.statementlexers.Lexer
class robot.parsing.lexer.blocklexers.TestOrKeywordLexer(ctx: robot.parsing.lexer.context.LexingContext)[source]

Bases: robot.parsing.lexer.blocklexers.BlockLexer, abc.ABC

accepts_more(statement: List[Token]) → bool[source]
input(statement: List[Token])[source]
handles(statement: List[Token]) → bool
lex()
lexer_classes() → tuple
lexer_for(statement: List[Token]) → robot.parsing.lexer.statementlexers.Lexer
class robot.parsing.lexer.blocklexers.TestCaseLexer(ctx: robot.parsing.lexer.context.SuiteFileContext)[source]

Bases: robot.parsing.lexer.blocklexers.TestOrKeywordLexer

name_type = 'TESTCASE NAME'
lex()[source]
lexer_classes() → tuple[source]
accepts_more(statement: List[Token]) → bool
handles(statement: List[Token]) → bool
input(statement: List[Token])
lexer_for(statement: List[Token]) → robot.parsing.lexer.statementlexers.Lexer
class robot.parsing.lexer.blocklexers.KeywordLexer(ctx: robot.parsing.lexer.context.FileContext)[source]

Bases: robot.parsing.lexer.blocklexers.TestOrKeywordLexer

name_type = 'KEYWORD NAME'
lexer_classes() → tuple[source]
accepts_more(statement: List[Token]) → bool
handles(statement: List[Token]) → bool
input(statement: List[Token])
lex()
lexer_for(statement: List[Token]) → robot.parsing.lexer.statementlexers.Lexer
class robot.parsing.lexer.blocklexers.NestedBlockLexer(ctx: robot.parsing.lexer.context.TestCaseContext | robot.parsing.lexer.context.KeywordContext)[source]

Bases: robot.parsing.lexer.blocklexers.BlockLexer, abc.ABC

accepts_more(statement: List[Token]) → bool[source]
input(statement: List[Token])[source]
handles(statement: List[Token]) → bool
lex()
lexer_classes() → tuple
lexer_for(statement: List[Token]) → robot.parsing.lexer.statementlexers.Lexer
class robot.parsing.lexer.blocklexers.ForLexer(ctx: robot.parsing.lexer.context.TestCaseContext | robot.parsing.lexer.context.KeywordContext)[source]

Bases: robot.parsing.lexer.blocklexers.NestedBlockLexer

handles(statement: List[Token]) → bool[source]
lexer_classes() → tuple[source]
accepts_more(statement: List[Token]) → bool
input(statement: List[Token])
lex()
lexer_for(statement: List[Token]) → robot.parsing.lexer.statementlexers.Lexer
class robot.parsing.lexer.blocklexers.WhileLexer(ctx: robot.parsing.lexer.context.TestCaseContext | robot.parsing.lexer.context.KeywordContext)[source]

Bases: robot.parsing.lexer.blocklexers.NestedBlockLexer

handles(statement: List[Token]) → bool[source]
lexer_classes() → tuple[source]
accepts_more(statement: List[Token]) → bool
input(statement: List[Token])
lex()
lexer_for(statement: List[Token]) → robot.parsing.lexer.statementlexers.Lexer
class robot.parsing.lexer.blocklexers.TryLexer(ctx: robot.parsing.lexer.context.TestCaseContext | robot.parsing.lexer.context.KeywordContext)[source]

Bases: robot.parsing.lexer.blocklexers.NestedBlockLexer

handles(statement: List[Token]) → bool[source]
lexer_classes() → tuple[source]
accepts_more(statement: List[Token]) → bool
input(statement: List[Token])
lex()
lexer_for(statement: List[Token]) → robot.parsing.lexer.statementlexers.Lexer
class robot.parsing.lexer.blocklexers.IfLexer(ctx: robot.parsing.lexer.context.TestCaseContext | robot.parsing.lexer.context.KeywordContext)[source]

Bases: robot.parsing.lexer.blocklexers.NestedBlockLexer

handles(statement: List[Token]) → bool[source]
lexer_classes() → tuple[source]
accepts_more(statement: List[Token]) → bool
input(statement: List[Token])
lex()
lexer_for(statement: List[Token]) → robot.parsing.lexer.statementlexers.Lexer
class robot.parsing.lexer.blocklexers.InlineIfLexer(ctx: robot.parsing.lexer.context.TestCaseContext | robot.parsing.lexer.context.KeywordContext)[source]

Bases: robot.parsing.lexer.blocklexers.NestedBlockLexer

handles(statement: List[Token]) → bool[source]
accepts_more(statement: List[Token]) → bool[source]
lexer_classes() → tuple[source]
input(statement: List[Token])[source]
lex()
lexer_for(statement: List[Token]) → robot.parsing.lexer.statementlexers.Lexer

robot.parsing.lexer.context module

class robot.parsing.lexer.context.LexingContext(settings: robot.parsing.lexer.settings.Settings, languages: robot.conf.languages.Languages)[source]

Bases: object

lex_setting(statement: List[Token])[source]
class robot.parsing.lexer.context.FileContext(lang: Union[Languages, Language, str, pathlib.Path, Iterable[Union[Language, str, pathlib.Path]], None] = None)[source]

Bases: robot.parsing.lexer.context.LexingContext

add_language(lang: Union[Language, str, pathlib.Path])[source]
keyword_context() → robot.parsing.lexer.context.KeywordContext[source]
setting_section(statement: List[Token]) → bool[source]
variable_section(statement: List[Token]) → bool[source]
test_case_section(statement: List[Token]) → bool[source]
task_section(statement: List[Token]) → bool[source]
keyword_section(statement: List[Token]) → bool[source]
comment_section(statement: List[Token]) → bool[source]
lex_invalid_section(statement: List[Token])[source]
lex_setting(statement: List[Token])
class robot.parsing.lexer.context.SuiteFileContext(lang: Union[Languages, Language, str, pathlib.Path, Iterable[Union[Language, str, pathlib.Path]], None] = None)[source]

Bases: robot.parsing.lexer.context.FileContext

test_case_context() → robot.parsing.lexer.context.TestCaseContext[source]
test_case_section(statement: List[Token]) → bool[source]
task_section(statement: List[Token]) → bool[source]
add_language(lang: Union[Language, str, pathlib.Path])
comment_section(statement: List[Token]) → bool
keyword_context() → robot.parsing.lexer.context.KeywordContext
keyword_section(statement: List[Token]) → bool
lex_invalid_section(statement: List[Token])
lex_setting(statement: List[Token])
setting_section(statement: List[Token]) → bool
variable_section(statement: List[Token]) → bool
class robot.parsing.lexer.context.ResourceFileContext(lang: Union[Languages, Language, str, pathlib.Path, Iterable[Union[Language, str, pathlib.Path]], None] = None)[source]

Bases: robot.parsing.lexer.context.FileContext

add_language(lang: Union[Language, str, pathlib.Path])
comment_section(statement: List[Token]) → bool
keyword_context() → robot.parsing.lexer.context.KeywordContext
keyword_section(statement: List[Token]) → bool
lex_invalid_section(statement: List[Token])
lex_setting(statement: List[Token])
setting_section(statement: List[Token]) → bool
task_section(statement: List[Token]) → bool
test_case_section(statement: List[Token]) → bool
variable_section(statement: List[Token]) → bool
class robot.parsing.lexer.context.InitFileContext(lang: Union[Languages, Language, str, pathlib.Path, Iterable[Union[Language, str, pathlib.Path]], None] = None)[source]

Bases: robot.parsing.lexer.context.FileContext

add_language(lang: Union[Language, str, pathlib.Path])
comment_section(statement: List[Token]) → bool
keyword_context() → robot.parsing.lexer.context.KeywordContext
keyword_section(statement: List[Token]) → bool
lex_invalid_section(statement: List[Token])
lex_setting(statement: List[Token])
setting_section(statement: List[Token]) → bool
task_section(statement: List[Token]) → bool
test_case_section(statement: List[Token]) → bool
variable_section(statement: List[Token]) → bool
class robot.parsing.lexer.context.TestCaseContext(settings: robot.parsing.lexer.settings.TestCaseSettings)[source]

Bases: robot.parsing.lexer.context.LexingContext

template_set
lex_setting(statement: List[Token])
class robot.parsing.lexer.context.KeywordContext(settings: robot.parsing.lexer.settings.KeywordSettings)[source]

Bases: robot.parsing.lexer.context.LexingContext

template_set
lex_setting(statement: List[Token])

robot.parsing.lexer.lexer module

robot.parsing.lexer.lexer.get_tokens(source: Union[pathlib.Path, str, TextIO], data_only: bool = False, tokenize_variables: bool = False, lang: Union[Languages, Language, str, pathlib.Path, Iterable[Union[Language, str, pathlib.Path]], None] = None) → collections.abc.Iterator[robot.parsing.lexer.tokens.Token][source]

Parses the given source to tokens.

Parameters:
  • source – The source to read the data from. Can be a path to a source file as a string or as a pathlib.Path object, an already opened file object, or Unicode text containing the data directly. Source files must be UTF-8 encoded.
  • data_only – When False (default), returns all tokens. When set to True, omits separators, comments, continuation markers, and other non-data tokens.
  • tokenize_variables – When True, possible variables in keyword arguments and elsewhere are tokenized. See the tokenize_variables() method for details.
  • lang – Additional languages to be supported during parsing. Can be a string matching any of the supported language codes or names, an initialized Language subclass, a list containing such strings or instances, or a Languages instance.

Returns a generator that yields Token instances.
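
For example, lexing in-memory data (a minimal sketch; a path or an open file object works the same way, and multi-line text is treated as the data itself rather than as a path):

    from robot.parsing import get_tokens

    data = ('*** Test Cases ***\n'
            'Example\n'
            '    Log    Hello, world!\n')

    # data_only=True omits separators, comments and other non-data tokens.
    for token in get_tokens(data, data_only=True):
        print(token.lineno, token.type, repr(token.value))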

robot.parsing.lexer.lexer.get_resource_tokens(source: Union[pathlib.Path, str, TextIO], data_only: bool = False, tokenize_variables: bool = False, lang: Union[Languages, Language, str, pathlib.Path, Iterable[Union[Language, str, pathlib.Path]], None] = None) → collections.abc.Iterator[robot.parsing.lexer.tokens.Token][source]

Parses the given source to resource file tokens.

Same as get_tokens() otherwise, but the source is considered to be a resource file. This affects, for example, what settings are valid.

robot.parsing.lexer.lexer.get_init_tokens(source: Union[pathlib.Path, str, TextIO], data_only: bool = False, tokenize_variables: bool = False, lang: Union[Languages, Language, str, pathlib.Path, Iterable[Union[Language, str, pathlib.Path]], None] = None) → collections.abc.Iterator[robot.parsing.lexer.tokens.Token][source]

Parses the given source to init file tokens.

Same as get_tokens() otherwise, but the source is considered to be a suite initialization file. This affects, for example, what settings are valid.
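
As with get_tokens(), the source can be in-memory text. A minimal sketch for the resource variant (get_init_tokens() is used the same way for suite initialization files):

    from robot.parsing import get_resource_tokens

    resource = ('*** Keywords ***\n'
                'Greet\n'
                '    [Arguments]    ${name}\n'
                '    Log    Hello, ${name}!\n')

    for token in get_resource_tokens(resource, data_only=True):
        print(token.type, repr(token.value))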

class robot.parsing.lexer.lexer.Lexer(ctx: robot.parsing.lexer.context.LexingContext, data_only: bool = False, tokenize_variables: bool = False)[source]

Bases: object

input(source: Union[pathlib.Path, str, TextIO])[source]
get_tokens() → collections.abc.Iterator[robot.parsing.lexer.tokens.Token][source]
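
The module-level get_tokens(), get_resource_tokens() and get_init_tokens() functions are thin wrappers around this class. A sketch of the equivalent direct usage, based on the signatures documented here:

    from robot.parsing.lexer.context import SuiteFileContext
    from robot.parsing.lexer.lexer import Lexer

    # Roughly what get_tokens(source, data_only=True) does internally.
    lexer = Lexer(SuiteFileContext(), data_only=True)
    lexer.input('*** Settings ***\nLibrary    Collections\n')
    tokens = list(lexer.get_tokens())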

robot.parsing.lexer.settings module

class robot.parsing.lexer.settings.Settings(languages: robot.conf.languages.Languages)[source]

Bases: abc.ABC

names = ()
aliases = {}
multi_use = ('Metadata', 'Library', 'Resource', 'Variables')
single_value = ('Resource', 'Test Timeout', 'Test Template', 'Timeout', 'Template', 'Name')
name_and_arguments = ('Metadata', 'Suite Setup', 'Suite Teardown', 'Test Setup', 'Test Teardown', 'Test Template', 'Setup', 'Teardown', 'Template', 'Resource', 'Variables')
name_arguments_and_with_name = ('Library',)
lex(statement: List[Token])[source]
class robot.parsing.lexer.settings.FileSettings(languages: robot.conf.languages.Languages)[source]

Bases: robot.parsing.lexer.settings.Settings, abc.ABC

aliases = {}
lex(statement: List[Token])
multi_use = ('Metadata', 'Library', 'Resource', 'Variables')
name_and_arguments = ('Metadata', 'Suite Setup', 'Suite Teardown', 'Test Setup', 'Test Teardown', 'Test Template', 'Setup', 'Teardown', 'Template', 'Resource', 'Variables')
name_arguments_and_with_name = ('Library',)
names = ()
single_value = ('Resource', 'Test Timeout', 'Test Template', 'Timeout', 'Template', 'Name')
class robot.parsing.lexer.settings.SuiteFileSettings(languages: robot.conf.languages.Languages)[source]

Bases: robot.parsing.lexer.settings.FileSettings

names = ('Documentation', 'Metadata', 'Name', 'Suite Setup', 'Suite Teardown', 'Test Setup', 'Test Teardown', 'Test Template', 'Test Timeout', 'Test Tags', 'Default Tags', 'Keyword Tags', 'Library', 'Resource', 'Variables')
aliases = {'Force Tags': 'Test Tags', 'Task Setup': 'Test Setup', 'Task Tags': 'Test Tags', 'Task Teardown': 'Test Teardown', 'Task Template': 'Test Template', 'Task Timeout': 'Test Timeout'}
lex(statement: List[Token])
multi_use = ('Metadata', 'Library', 'Resource', 'Variables')
name_and_arguments = ('Metadata', 'Suite Setup', 'Suite Teardown', 'Test Setup', 'Test Teardown', 'Test Template', 'Setup', 'Teardown', 'Template', 'Resource', 'Variables')
name_arguments_and_with_name = ('Library',)
single_value = ('Resource', 'Test Timeout', 'Test Template', 'Timeout', 'Template', 'Name')
class robot.parsing.lexer.settings.InitFileSettings(languages: robot.conf.languages.Languages)[source]

Bases: robot.parsing.lexer.settings.FileSettings

names = ('Documentation', 'Metadata', 'Name', 'Suite Setup', 'Suite Teardown', 'Test Setup', 'Test Teardown', 'Test Timeout', 'Test Tags', 'Keyword Tags', 'Library', 'Resource', 'Variables')
aliases = {'Force Tags': 'Test Tags', 'Task Setup': 'Test Setup', 'Task Tags': 'Test Tags', 'Task Teardown': 'Test Teardown', 'Task Timeout': 'Test Timeout'}
lex(statement: List[Token])
multi_use = ('Metadata', 'Library', 'Resource', 'Variables')
name_and_arguments = ('Metadata', 'Suite Setup', 'Suite Teardown', 'Test Setup', 'Test Teardown', 'Test Template', 'Setup', 'Teardown', 'Template', 'Resource', 'Variables')
name_arguments_and_with_name = ('Library',)
single_value = ('Resource', 'Test Timeout', 'Test Template', 'Timeout', 'Template', 'Name')
class robot.parsing.lexer.settings.ResourceFileSettings(languages: robot.conf.languages.Languages)[source]

Bases: robot.parsing.lexer.settings.FileSettings

names = ('Documentation', 'Keyword Tags', 'Library', 'Resource', 'Variables')
aliases = {}
lex(statement: List[Token])
multi_use = ('Metadata', 'Library', 'Resource', 'Variables')
name_and_arguments = ('Metadata', 'Suite Setup', 'Suite Teardown', 'Test Setup', 'Test Teardown', 'Test Template', 'Setup', 'Teardown', 'Template', 'Resource', 'Variables')
name_arguments_and_with_name = ('Library',)
single_value = ('Resource', 'Test Timeout', 'Test Template', 'Timeout', 'Template', 'Name')
class robot.parsing.lexer.settings.TestCaseSettings(parent: robot.parsing.lexer.settings.SuiteFileSettings)[source]

Bases: robot.parsing.lexer.settings.Settings

names = ('Documentation', 'Tags', 'Setup', 'Teardown', 'Template', 'Timeout')
template_set
aliases = {}
lex(statement: List[Token])
multi_use = ('Metadata', 'Library', 'Resource', 'Variables')
name_and_arguments = ('Metadata', 'Suite Setup', 'Suite Teardown', 'Test Setup', 'Test Teardown', 'Test Template', 'Setup', 'Teardown', 'Template', 'Resource', 'Variables')
name_arguments_and_with_name = ('Library',)
single_value = ('Resource', 'Test Timeout', 'Test Template', 'Timeout', 'Template', 'Name')
class robot.parsing.lexer.settings.KeywordSettings(parent: robot.parsing.lexer.settings.FileSettings)[source]

Bases: robot.parsing.lexer.settings.Settings

names = ('Documentation', 'Arguments', 'Teardown', 'Timeout', 'Tags', 'Return')
aliases = {}
lex(statement: List[Token])
multi_use = ('Metadata', 'Library', 'Resource', 'Variables')
name_and_arguments = ('Metadata', 'Suite Setup', 'Suite Teardown', 'Test Setup', 'Test Teardown', 'Test Template', 'Setup', 'Teardown', 'Template', 'Resource', 'Variables')
name_arguments_and_with_name = ('Library',)
single_value = ('Resource', 'Test Timeout', 'Test Template', 'Timeout', 'Template', 'Name')

robot.parsing.lexer.statementlexers module

class robot.parsing.lexer.statementlexers.Lexer(ctx: robot.parsing.lexer.context.LexingContext)[source]

Bases: abc.ABC

handles(statement: List[Token]) → bool[source]
accepts_more(statement: List[Token]) → bool[source]
input(statement: List[Token])[source]
lex()[source]
class robot.parsing.lexer.statementlexers.StatementLexer(ctx: robot.parsing.lexer.context.LexingContext)[source]

Bases: robot.parsing.lexer.statementlexers.Lexer, abc.ABC

accepts_more(statement: List[Token]) → bool[source]
input(statement: List[Token])[source]
lex()[source]
handles(statement: List[Token]) → bool
class robot.parsing.lexer.statementlexers.SingleType(ctx: robot.parsing.lexer.context.LexingContext)[source]

Bases: robot.parsing.lexer.statementlexers.StatementLexer, abc.ABC

lex()[source]
accepts_more(statement: List[Token]) → bool
handles(statement: List[Token]) → bool
input(statement: List[Token])
class robot.parsing.lexer.statementlexers.TypeAndArguments(ctx: robot.parsing.lexer.context.LexingContext)[source]

Bases: robot.parsing.lexer.statementlexers.StatementLexer, abc.ABC

lex()[source]
accepts_more(statement: List[Token]) → bool
handles(statement: List[Token]) → bool
input(statement: List[Token])
class robot.parsing.lexer.statementlexers.SectionHeaderLexer(ctx: robot.parsing.lexer.context.LexingContext)[source]

Bases: robot.parsing.lexer.statementlexers.SingleType, abc.ABC

handles(statement: List[Token]) → bool[source]
accepts_more(statement: List[Token]) → bool
input(statement: List[Token])
lex()
class robot.parsing.lexer.statementlexers.SettingSectionHeaderLexer(ctx: robot.parsing.lexer.context.LexingContext)[source]

Bases: robot.parsing.lexer.statementlexers.SectionHeaderLexer

token_type = 'SETTING HEADER'
accepts_more(statement: List[Token]) → bool
handles(statement: List[Token]) → bool
input(statement: List[Token])
lex()
class robot.parsing.lexer.statementlexers.VariableSectionHeaderLexer(ctx: robot.parsing.lexer.context.LexingContext)[source]

Bases: robot.parsing.lexer.statementlexers.SectionHeaderLexer

token_type = 'VARIABLE HEADER'
accepts_more(statement: List[Token]) → bool
handles(statement: List[Token]) → bool
input(statement: List[Token])
lex()
class robot.parsing.lexer.statementlexers.TestCaseSectionHeaderLexer(ctx: robot.parsing.lexer.context.LexingContext)[source]

Bases: robot.parsing.lexer.statementlexers.SectionHeaderLexer

token_type = 'TESTCASE HEADER'
accepts_more(statement: List[Token]) → bool
handles(statement: List[Token]) → bool
input(statement: List[Token])
lex()
class robot.parsing.lexer.statementlexers.TaskSectionHeaderLexer(ctx: robot.parsing.lexer.context.LexingContext)[source]

Bases: robot.parsing.lexer.statementlexers.SectionHeaderLexer

token_type = 'TASK HEADER'
accepts_more(statement: List[Token]) → bool
handles(statement: List[Token]) → bool
input(statement: List[Token])
lex()
class robot.parsing.lexer.statementlexers.KeywordSectionHeaderLexer(ctx: robot.parsing.lexer.context.LexingContext)[source]

Bases: robot.parsing.lexer.statementlexers.SectionHeaderLexer

token_type = 'KEYWORD HEADER'
accepts_more(statement: List[Token]) → bool
handles(statement: List[Token]) → bool
input(statement: List[Token])
lex()
class robot.parsing.lexer.statementlexers.CommentSectionHeaderLexer(ctx: robot.parsing.lexer.context.LexingContext)[source]

Bases: robot.parsing.lexer.statementlexers.SectionHeaderLexer

token_type = 'COMMENT HEADER'
accepts_more(statement: List[Token]) → bool
handles(statement: List[Token]) → bool
input(statement: List[Token])
lex()
class robot.parsing.lexer.statementlexers.InvalidSectionHeaderLexer(ctx: robot.parsing.lexer.context.LexingContext)[source]

Bases: robot.parsing.lexer.statementlexers.SectionHeaderLexer

token_type = 'INVALID HEADER'
lex()[source]
accepts_more(statement: List[Token]) → bool
handles(statement: List[Token]) → bool
input(statement: List[Token])
class robot.parsing.lexer.statementlexers.CommentLexer(ctx: robot.parsing.lexer.context.LexingContext)[source]

Bases: robot.parsing.lexer.statementlexers.SingleType

token_type = 'COMMENT'
accepts_more(statement: List[Token]) → bool
handles(statement: List[Token]) → bool
input(statement: List[Token])
lex()
class robot.parsing.lexer.statementlexers.ImplicitCommentLexer(ctx: robot.parsing.lexer.context.LexingContext)[source]

Bases: robot.parsing.lexer.statementlexers.CommentLexer

input(statement: List[Token])[source]
lex()[source]
accepts_more(statement: List[Token]) → bool
handles(statement: List[Token]) → bool
token_type = 'COMMENT'
class robot.parsing.lexer.statementlexers.SettingLexer(ctx: robot.parsing.lexer.context.LexingContext)[source]

Bases: robot.parsing.lexer.statementlexers.StatementLexer

lex()[source]
accepts_more(statement: List[Token]) → bool
handles(statement: List[Token]) → bool
input(statement: List[Token])
class robot.parsing.lexer.statementlexers.TestCaseSettingLexer(ctx: robot.parsing.lexer.context.LexingContext)[source]

Bases: robot.parsing.lexer.statementlexers.StatementLexer

lex()[source]
handles(statement: List[Token]) → bool[source]
accepts_more(statement: List[Token]) → bool
input(statement: List[Token])
class robot.parsing.lexer.statementlexers.KeywordSettingLexer(ctx: robot.parsing.lexer.context.LexingContext)[source]

Bases: robot.parsing.lexer.statementlexers.StatementLexer

lex()[source]
handles(statement: List[Token]) → bool[source]
accepts_more(statement: List[Token]) → bool
input(statement: List[Token])
class robot.parsing.lexer.statementlexers.VariableLexer(ctx: robot.parsing.lexer.context.LexingContext)[source]

Bases: robot.parsing.lexer.statementlexers.TypeAndArguments

token_type = 'VARIABLE'
accepts_more(statement: List[Token]) → bool
handles(statement: List[Token]) → bool
input(statement: List[Token])
lex()
class robot.parsing.lexer.statementlexers.KeywordCallLexer(ctx: robot.parsing.lexer.context.LexingContext)[source]

Bases: robot.parsing.lexer.statementlexers.StatementLexer

lex()[source]
accepts_more(statement: List[Token]) → bool
handles(statement: List[Token]) → bool
input(statement: List[Token])
class robot.parsing.lexer.statementlexers.ForHeaderLexer(ctx: robot.parsing.lexer.context.LexingContext)[source]

Bases: robot.parsing.lexer.statementlexers.StatementLexer

separators = ('IN', 'IN RANGE', 'IN ENUMERATE', 'IN ZIP')
handles(statement: List[Token]) → bool[source]
lex()[source]
accepts_more(statement: List[Token]) → bool
input(statement: List[Token])
class robot.parsing.lexer.statementlexers.IfHeaderLexer(ctx: robot.parsing.lexer.context.LexingContext)[source]

Bases: robot.parsing.lexer.statementlexers.TypeAndArguments

token_type = 'IF'
handles(statement: List[Token]) → bool[source]
accepts_more(statement: List[Token]) → bool
input(statement: List[Token])
lex()
class robot.parsing.lexer.statementlexers.InlineIfHeaderLexer(ctx: robot.parsing.lexer.context.LexingContext)[source]

Bases: robot.parsing.lexer.statementlexers.StatementLexer

token_type = 'INLINE IF'
handles(statement: List[Token]) → bool[source]
lex()[source]
accepts_more(statement: List[Token]) → bool
input(statement: List[Token])
class robot.parsing.lexer.statementlexers.ElseIfHeaderLexer(ctx: robot.parsing.lexer.context.LexingContext)[source]

Bases: robot.parsing.lexer.statementlexers.TypeAndArguments

token_type = 'ELSE IF'
handles(statement: List[Token]) → bool[source]
accepts_more(statement: List[Token]) → bool
input(statement: List[Token])
lex()
class robot.parsing.lexer.statementlexers.ElseHeaderLexer(ctx: robot.parsing.lexer.context.LexingContext)[source]

Bases: robot.parsing.lexer.statementlexers.TypeAndArguments

token_type = 'ELSE'
handles(statement: List[Token]) → bool[source]
accepts_more(statement: List[Token]) → bool
input(statement: List[Token])
lex()
class robot.parsing.lexer.statementlexers.TryHeaderLexer(ctx: robot.parsing.lexer.context.LexingContext)[source]

Bases: robot.parsing.lexer.statementlexers.TypeAndArguments

token_type = 'TRY'
handles(statement: List[Token]) → bool[source]
accepts_more(statement: List[Token]) → bool
input(statement: List[Token])
lex()
class robot.parsing.lexer.statementlexers.ExceptHeaderLexer(ctx: robot.parsing.lexer.context.LexingContext)[source]

Bases: robot.parsing.lexer.statementlexers.StatementLexer

token_type = 'EXCEPT'
handles(statement: List[Token]) → bool[source]
lex()[source]
accepts_more(statement: List[Token]) → bool
input(statement: List[Token])
class robot.parsing.lexer.statementlexers.FinallyHeaderLexer(ctx: robot.parsing.lexer.context.LexingContext)[source]

Bases: robot.parsing.lexer.statementlexers.TypeAndArguments

token_type = 'FINALLY'
handles(statement: List[Token]) → bool[source]
accepts_more(statement: List[Token]) → bool
input(statement: List[Token])
lex()
class robot.parsing.lexer.statementlexers.WhileHeaderLexer(ctx: robot.parsing.lexer.context.LexingContext)[source]

Bases: robot.parsing.lexer.statementlexers.StatementLexer

token_type = 'WHILE'
handles(statement: List[Token]) → bool[source]
lex()[source]
accepts_more(statement: List[Token]) → bool
input(statement: List[Token])
class robot.parsing.lexer.statementlexers.EndLexer(ctx: robot.parsing.lexer.context.LexingContext)[source]

Bases: robot.parsing.lexer.statementlexers.TypeAndArguments

token_type = 'END'
handles(statement: List[Token]) → bool[source]
accepts_more(statement: List[Token]) → bool
input(statement: List[Token])
lex()
class robot.parsing.lexer.statementlexers.ReturnLexer(ctx: robot.parsing.lexer.context.LexingContext)[source]

Bases: robot.parsing.lexer.statementlexers.TypeAndArguments

token_type = 'RETURN STATEMENT'
handles(statement: List[Token]) → bool[source]
accepts_more(statement: List[Token]) → bool
input(statement: List[Token])
lex()
class robot.parsing.lexer.statementlexers.ContinueLexer(ctx: robot.parsing.lexer.context.LexingContext)[source]

Bases: robot.parsing.lexer.statementlexers.TypeAndArguments

token_type = 'CONTINUE'
handles(statement: List[Token]) → bool[source]
accepts_more(statement: List[Token]) → bool
input(statement: List[Token])
lex()
class robot.parsing.lexer.statementlexers.BreakLexer(ctx: robot.parsing.lexer.context.LexingContext)[source]

Bases: robot.parsing.lexer.statementlexers.TypeAndArguments

token_type = 'BREAK'
handles(statement: List[Token]) → bool[source]
accepts_more(statement: List[Token]) → bool
input(statement: List[Token])
lex()
class robot.parsing.lexer.statementlexers.SyntaxErrorLexer(ctx: robot.parsing.lexer.context.LexingContext)[source]

Bases: robot.parsing.lexer.statementlexers.TypeAndArguments

token_type = 'ERROR'
handles(statement: List[Token]) → bool[source]
lex()[source]
accepts_more(statement: List[Token]) → bool
input(statement: List[Token])

robot.parsing.lexer.tokenizer module

class robot.parsing.lexer.tokenizer.Tokenizer[source]

Bases: object

tokenize(data: str, data_only: bool = False) → collections.abc.Iterator[list][source]
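
A sketch of direct usage (normally the Lexer drives this class internally). tokenize() yields one list of tokens per statement; data tokens get their final types later from the lexers:

    from robot.parsing.lexer.tokenizer import Tokenizer

    data = '*** Settings ***\nLibrary    Collections\n'
    # With data_only=True, separators and end-of-line tokens are omitted.
    for statement in Tokenizer().tokenize(data, data_only=True):
        print([token.value for token in statement])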

robot.parsing.lexer.tokens module

class robot.parsing.lexer.tokens.Token(type: str | None = None, value: str | None = None, lineno: int = -1, col_offset: int = -1, error: str | None = None)[source]

Bases: object

Token representing a piece of Robot Framework data.

Each token has a type, a value, a line number, a column offset and an end column offset, available via the type, value, lineno, col_offset and end_col_offset attributes, respectively. Tokens representing an error also have the error message in the error attribute.

Token types are declared as class attributes such as SETTING_HEADER and EOL. Values of these constants have changed slightly in Robot Framework 4.0, and they may change again in the future. It is thus safer to use the constants, not their values, when types are needed. For example, use Token(Token.EOL) instead of Token('EOL') and token.type == Token.EOL instead of token.type == 'EOL'.

If value is not given and type is a special marker like IF or EOL, the value is set automatically.
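
For example (a short sketch; the EOL value shown is the automatically assigned default):

    from robot.parsing.lexer.tokens import Token

    eol = Token(Token.EOL)    # value is set automatically to '\n'
    call = Token(Token.KEYWORD, 'Log', lineno=3, col_offset=4)

    # Compare against the constants, not against their string values.
    assert call.type == Token.KEYWORD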

SETTING_HEADER = 'SETTING HEADER'
VARIABLE_HEADER = 'VARIABLE HEADER'
TESTCASE_HEADER = 'TESTCASE HEADER'
TASK_HEADER = 'TASK HEADER'
KEYWORD_HEADER = 'KEYWORD HEADER'
COMMENT_HEADER = 'COMMENT HEADER'
INVALID_HEADER = 'INVALID HEADER'
FATAL_INVALID_HEADER = 'FATAL INVALID HEADER'
TESTCASE_NAME = 'TESTCASE NAME'
KEYWORD_NAME = 'KEYWORD NAME'
SUITE_NAME = 'SUITE NAME'
DOCUMENTATION = 'DOCUMENTATION'
SUITE_SETUP = 'SUITE SETUP'
SUITE_TEARDOWN = 'SUITE TEARDOWN'
METADATA = 'METADATA'
TEST_SETUP = 'TEST SETUP'
TEST_TEARDOWN = 'TEST TEARDOWN'
TEST_TEMPLATE = 'TEST TEMPLATE'
TEST_TIMEOUT = 'TEST TIMEOUT'
FORCE_TAGS = 'FORCE TAGS'
DEFAULT_TAGS = 'DEFAULT TAGS'
KEYWORD_TAGS = 'KEYWORD TAGS'
LIBRARY = 'LIBRARY'
RESOURCE = 'RESOURCE'
VARIABLES = 'VARIABLES'
SETUP = 'SETUP'
TEARDOWN = 'TEARDOWN'
TEMPLATE = 'TEMPLATE'
TIMEOUT = 'TIMEOUT'
TAGS = 'TAGS'
ARGUMENTS = 'ARGUMENTS'
RETURN = 'RETURN'
RETURN_SETTING = 'RETURN'
WITH_NAME = 'WITH NAME'
AS = 'AS'
NAME = 'NAME'
VARIABLE = 'VARIABLE'
ARGUMENT = 'ARGUMENT'
ASSIGN = 'ASSIGN'
KEYWORD = 'KEYWORD'
FOR = 'FOR'
FOR_SEPARATOR = 'FOR SEPARATOR'
END = 'END'
IF = 'IF'
INLINE_IF = 'INLINE IF'
ELSE_IF = 'ELSE IF'
ELSE = 'ELSE'
TRY = 'TRY'
EXCEPT = 'EXCEPT'
FINALLY = 'FINALLY'
WHILE = 'WHILE'
RETURN_STATEMENT = 'RETURN STATEMENT'
CONTINUE = 'CONTINUE'
BREAK = 'BREAK'
OPTION = 'OPTION'
SEPARATOR = 'SEPARATOR'
COMMENT = 'COMMENT'
CONTINUATION = 'CONTINUATION'
CONFIG = 'CONFIG'
EOL = 'EOL'
EOS = 'EOS'
ERROR = 'ERROR'
FATAL_ERROR = 'FATAL ERROR'
NON_DATA_TOKENS = frozenset({'SEPARATOR', 'EOS', 'COMMENT', 'CONTINUATION', 'EOL'})
SETTING_TOKENS = frozenset({'FORCE TAGS', 'TEST TEARDOWN', 'TEST TEMPLATE', 'SUITE TEARDOWN', 'SUITE NAME', 'TEST SETUP', 'TEARDOWN', 'DEFAULT TAGS', 'KEYWORD TAGS', 'ARGUMENTS', 'SETUP', 'RESOURCE', 'TEMPLATE', 'LIBRARY', 'TEST TIMEOUT', 'TIMEOUT', 'VARIABLES', 'DOCUMENTATION', 'RETURN', 'SUITE SETUP', 'METADATA', 'TAGS'})
HEADER_TOKENS = frozenset({'SETTING HEADER', 'COMMENT HEADER', 'INVALID HEADER', 'KEYWORD HEADER', 'VARIABLE HEADER', 'TESTCASE HEADER', 'TASK HEADER'})
ALLOW_VARIABLES = frozenset({'NAME', 'ARGUMENT', 'KEYWORD NAME', 'TESTCASE NAME'})
type
value
lineno
col_offset
error
end_col_offset
set_error(error: str)[source]
tokenize_variables() → collections.abc.Iterator[robot.parsing.lexer.tokens.Token][source]

Tokenizes possible variables in the token value.

Yields the token itself if the token does not allow variables (see Token.ALLOW_VARIABLES) or its value does not contain variables. Otherwise, yields variable tokens as well as tokens before, after, or between variables so that they have the same type as the original token.
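
For example, an ARGUMENT token whose value contains a variable is expected to split roughly as follows (a sketch; the exact tokens may vary between versions):

    from robot.parsing.lexer.tokens import Token

    token = Token(Token.ARGUMENT, 'Hello, ${name}!', lineno=3, col_offset=10)
    for part in token.tokenize_variables():
        print(part.type, repr(part.value))
    # Expected output, roughly:
    #   ARGUMENT 'Hello, '
    #   VARIABLE '${name}'
    #   ARGUMENT '!'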

class robot.parsing.lexer.tokens.EOS(lineno: int = -1, col_offset: int = -1)[source]

Bases: robot.parsing.lexer.tokens.Token

Token representing the end of a statement.

classmethod from_token(token: robot.parsing.lexer.tokens.Token, before: bool = False) → robot.parsing.lexer.tokens.EOS[source]
ALLOW_VARIABLES = frozenset({'NAME', 'ARGUMENT', 'KEYWORD NAME', 'TESTCASE NAME'})
ARGUMENT = 'ARGUMENT'
ARGUMENTS = 'ARGUMENTS'
AS = 'AS'
ASSIGN = 'ASSIGN'
BREAK = 'BREAK'
COMMENT = 'COMMENT'
COMMENT_HEADER = 'COMMENT HEADER'
CONFIG = 'CONFIG'
CONTINUATION = 'CONTINUATION'
CONTINUE = 'CONTINUE'
DEFAULT_TAGS = 'DEFAULT TAGS'
DOCUMENTATION = 'DOCUMENTATION'
ELSE = 'ELSE'
ELSE_IF = 'ELSE IF'
END = 'END'
EOL = 'EOL'
EOS = 'EOS'
ERROR = 'ERROR'
EXCEPT = 'EXCEPT'
FATAL_ERROR = 'FATAL ERROR'
FATAL_INVALID_HEADER = 'FATAL INVALID HEADER'
FINALLY = 'FINALLY'
FOR = 'FOR'
FORCE_TAGS = 'FORCE TAGS'
FOR_SEPARATOR = 'FOR SEPARATOR'
HEADER_TOKENS = frozenset({'SETTING HEADER', 'COMMENT HEADER', 'INVALID HEADER', 'KEYWORD HEADER', 'VARIABLE HEADER', 'TESTCASE HEADER', 'TASK HEADER'})
IF = 'IF'
INLINE_IF = 'INLINE IF'
INVALID_HEADER = 'INVALID HEADER'
KEYWORD = 'KEYWORD'
KEYWORD_HEADER = 'KEYWORD HEADER'
KEYWORD_NAME = 'KEYWORD NAME'
KEYWORD_TAGS = 'KEYWORD TAGS'
LIBRARY = 'LIBRARY'
METADATA = 'METADATA'
NAME = 'NAME'
NON_DATA_TOKENS = frozenset({'SEPARATOR', 'EOS', 'COMMENT', 'CONTINUATION', 'EOL'})
OPTION = 'OPTION'
RESOURCE = 'RESOURCE'
RETURN = 'RETURN'
RETURN_SETTING = 'RETURN'
RETURN_STATEMENT = 'RETURN STATEMENT'
SEPARATOR = 'SEPARATOR'
SETTING_HEADER = 'SETTING HEADER'
SETTING_TOKENS = frozenset({'FORCE TAGS', 'TEST TEARDOWN', 'TEST TEMPLATE', 'SUITE TEARDOWN', 'SUITE NAME', 'TEST SETUP', 'TEARDOWN', 'DEFAULT TAGS', 'KEYWORD TAGS', 'ARGUMENTS', 'SETUP', 'RESOURCE', 'TEMPLATE', 'LIBRARY', 'TEST TIMEOUT', 'TIMEOUT', 'VARIABLES', 'DOCUMENTATION', 'RETURN', 'SUITE SETUP', 'METADATA', 'TAGS'})
SETUP = 'SETUP'
SUITE_NAME = 'SUITE NAME'
SUITE_SETUP = 'SUITE SETUP'
SUITE_TEARDOWN = 'SUITE TEARDOWN'
TAGS = 'TAGS'
TASK_HEADER = 'TASK HEADER'
TEARDOWN = 'TEARDOWN'
TEMPLATE = 'TEMPLATE'
TESTCASE_HEADER = 'TESTCASE HEADER'
TESTCASE_NAME = 'TESTCASE NAME'
TEST_SETUP = 'TEST SETUP'
TEST_TEARDOWN = 'TEST TEARDOWN'
TEST_TEMPLATE = 'TEST TEMPLATE'
TEST_TIMEOUT = 'TEST TIMEOUT'
TIMEOUT = 'TIMEOUT'
TRY = 'TRY'
VARIABLE = 'VARIABLE'
VARIABLES = 'VARIABLES'
VARIABLE_HEADER = 'VARIABLE HEADER'
WHILE = 'WHILE'
WITH_NAME = 'WITH NAME'
col_offset
end_col_offset
error
lineno
set_error(error: str)
tokenize_variables() → collections.abc.Iterator[robot.parsing.lexer.tokens.Token]

Tokenizes possible variables in the token value.

Yields the token itself if the token does not allow variables (see Token.ALLOW_VARIABLES) or its value does not contain variables. Otherwise, yields variable tokens as well as tokens before, after, or between variables so that they have the same type as the original token.

type
value
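
As a usage sketch, from_token() documented above creates an end-of-statement marker positioned after an existing token, or at its start when before=True:

    from robot.parsing.lexer.tokens import EOS, Token

    last = Token(Token.ARGUMENT, 'message', lineno=2, col_offset=11)
    eos = EOS.from_token(last)
    # The EOS token is placed at the end column of the given token.
    print(eos.type, eos.lineno, eos.col_offset)   # EOS 2 18
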
class robot.parsing.lexer.tokens.END(lineno: int = -1, col_offset: int = -1, virtual: bool = False)[source]

Bases: robot.parsing.lexer.tokens.Token

Token representing the END token used to signify the end of a block.

Virtual END tokens have an empty string as their value, whereas real END tokens have the value 'END'.

classmethod from_token(token: robot.parsing.lexer.tokens.Token, virtual: bool = False) → robot.parsing.lexer.tokens.END[source]
ALLOW_VARIABLES = frozenset({'NAME', 'ARGUMENT', 'KEYWORD NAME', 'TESTCASE NAME'})
ARGUMENT = 'ARGUMENT'
ARGUMENTS = 'ARGUMENTS'
AS = 'AS'
ASSIGN = 'ASSIGN'
BREAK = 'BREAK'
COMMENT = 'COMMENT'
COMMENT_HEADER = 'COMMENT HEADER'
CONFIG = 'CONFIG'
CONTINUATION = 'CONTINUATION'
CONTINUE = 'CONTINUE'
DEFAULT_TAGS = 'DEFAULT TAGS'
DOCUMENTATION = 'DOCUMENTATION'
ELSE = 'ELSE'
ELSE_IF = 'ELSE IF'
END = 'END'
EOL = 'EOL'
EOS = 'EOS'
ERROR = 'ERROR'
EXCEPT = 'EXCEPT'
FATAL_ERROR = 'FATAL ERROR'
FATAL_INVALID_HEADER = 'FATAL INVALID HEADER'
FINALLY = 'FINALLY'
FOR = 'FOR'
FORCE_TAGS = 'FORCE TAGS'
FOR_SEPARATOR = 'FOR SEPARATOR'
HEADER_TOKENS = frozenset({'SETTING HEADER', 'COMMENT HEADER', 'INVALID HEADER', 'KEYWORD HEADER', 'VARIABLE HEADER', 'TESTCASE HEADER', 'TASK HEADER'})
IF = 'IF'
INLINE_IF = 'INLINE IF'
INVALID_HEADER = 'INVALID HEADER'
KEYWORD = 'KEYWORD'
KEYWORD_HEADER = 'KEYWORD HEADER'
KEYWORD_NAME = 'KEYWORD NAME'
KEYWORD_TAGS = 'KEYWORD TAGS'
LIBRARY = 'LIBRARY'
METADATA = 'METADATA'
NAME = 'NAME'
NON_DATA_TOKENS = frozenset({'SEPARATOR', 'EOS', 'COMMENT', 'CONTINUATION', 'EOL'})
OPTION = 'OPTION'
RESOURCE = 'RESOURCE'
RETURN = 'RETURN'
RETURN_SETTING = 'RETURN'
RETURN_STATEMENT = 'RETURN STATEMENT'
SEPARATOR = 'SEPARATOR'
SETTING_HEADER = 'SETTING HEADER'
SETTING_TOKENS = frozenset({'FORCE TAGS', 'TEST TEARDOWN', 'TEST TEMPLATE', 'SUITE TEARDOWN', 'SUITE NAME', 'TEST SETUP', 'TEARDOWN', 'DEFAULT TAGS', 'KEYWORD TAGS', 'ARGUMENTS', 'SETUP', 'RESOURCE', 'TEMPLATE', 'LIBRARY', 'TEST TIMEOUT', 'TIMEOUT', 'VARIABLES', 'DOCUMENTATION', 'RETURN', 'SUITE SETUP', 'METADATA', 'TAGS'})
SETUP = 'SETUP'
SUITE_NAME = 'SUITE NAME'
SUITE_SETUP = 'SUITE SETUP'
SUITE_TEARDOWN = 'SUITE TEARDOWN'
TAGS = 'TAGS'
TASK_HEADER = 'TASK HEADER'
TEARDOWN = 'TEARDOWN'
TEMPLATE = 'TEMPLATE'
TESTCASE_HEADER = 'TESTCASE HEADER'
TESTCASE_NAME = 'TESTCASE NAME'
TEST_SETUP = 'TEST SETUP'
TEST_TEARDOWN = 'TEST TEARDOWN'
TEST_TEMPLATE = 'TEST TEMPLATE'
TEST_TIMEOUT = 'TEST TIMEOUT'
TIMEOUT = 'TIMEOUT'
TRY = 'TRY'
VARIABLE = 'VARIABLE'
VARIABLES = 'VARIABLES'
VARIABLE_HEADER = 'VARIABLE HEADER'
WHILE = 'WHILE'
WITH_NAME = 'WITH NAME'
col_offset
end_col_offset
error
lineno
set_error(error: str)
tokenize_variables() → collections.abc.Iterator[robot.parsing.lexer.tokens.Token]

Tokenizes possible variables in the token value.

Yields the token itself if the token does not allow variables (see Token.ALLOW_VARIABLES) or its value does not contain variables. Otherwise, yields variable tokens as well as tokens before, after, or between variables so that they have the same type as the original token.

type
value
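
As a usage sketch, from_token() documented above creates a real or virtual END token based on an existing token's position:

    from robot.parsing.lexer.tokens import END, Token

    header = Token(Token.IF, 'IF', lineno=5, col_offset=4)
    real = END.from_token(header)                   # value is 'END'
    virtual = END.from_token(header, virtual=True)  # value is ''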