kubernetes/vendor/github.com/antlr4-go/antlr/v4/lexer.go

type Lexer

type BaseLexer

func NewBaseLexer(input CharStream) *BaseLexer {}
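
BaseLexer is rarely used on its own: a generated lexer embeds *BaseLexer and wires in its ATN and interpreter. A trimmed sketch of that embedding pattern, in which MyLexer and NewMyLexer are hypothetical stand-ins for what `antlr -Dlanguage=Go` generates (wiring details elided):

	package parser

	import "github.com/antlr4-go/antlr/v4"

	// MyLexer sketches the shape of a generated lexer.
	type MyLexer struct {
		*antlr.BaseLexer
		// generated code adds mode/channel/token-type constants and
		// the serialized ATN here
	}

	func NewMyLexer(input antlr.CharStream) *MyLexer {
		l := &MyLexer{BaseLexer: antlr.NewBaseLexer(input)}
		// generated code also installs a LexerATNSimulator on
		// l.Interpreter and fills in the literal/symbolic name tables
		return l
	}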

const LexerDefaultMode

const LexerMore

const LexerSkip

const LexerDefaultTokenChannel

const LexerHidden

const LexerMinCharValue

const LexerMaxCharValue

func (b *BaseLexer) Reset() {}

func (b *BaseLexer) GetInterpreter() ILexerATNSimulator {}

func (b *BaseLexer) GetInputStream() CharStream {}

func (b *BaseLexer) GetSourceName() string {}

func (b *BaseLexer) SetChannel(v int) {}

func (b *BaseLexer) GetTokenFactory() TokenFactory {}

func (b *BaseLexer) setTokenFactory(f TokenFactory) {}

func (b *BaseLexer) safeMatch() (ret int) {}

// NextToken returns the next token from the lexer input source, i.e., it matches a token on the source char stream.
func (b *BaseLexer) NextToken() Token {}
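
A minimal token-pulling loop over the hypothetical generated lexer sketched above (imports assumed: fmt and the antlr package):

	func dumpTokens(src string) {
		lexer := NewMyLexer(antlr.NewInputStream(src))
		// keep pulling until the lexer reports EOF
		for t := lexer.NextToken(); t.GetTokenType() != antlr.TokenEOF; t = lexer.NextToken() {
			fmt.Printf("%d:%d type=%d %q\n", t.GetLine(), t.GetColumn(), t.GetTokenType(), t.GetText())
		}
	}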

// Skip instructs the lexer to skip creating a token for the current lexer rule
// and look for another token. [NextToken] knows to keep looking when
// a lexer rule finishes with the token set to [SKIPTOKEN]. Recall that
// if token==nil at the end of any token rule, [NextToken] creates one for you
// and emits it.
func (b *BaseLexer) Skip() {}
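
For illustration, the grammar command `-> skip` (as in `WS : [ \t\r\n]+ -> skip ;`) boils down to the runtime calling Skip when the rule matches; a hand-written action can do the same. A sketch:

	// skipAction discards the current rule's match.
	func skipAction(l *antlr.BaseLexer) {
		l.Skip() // token becomes SKIPTOKEN, so NextToken keeps scanning
	}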

func (b *BaseLexer) More() {}

// SetMode changes the lexer to a new mode. The lexer will use this mode from here on, and the rules for that mode
// will be in force.
func (b *BaseLexer) SetMode(m int) {}

// PushMode saves the current lexer mode so that it can be restored later (see [PopMode]), then sets the
// current lexer mode to the supplied mode m.
func (b *BaseLexer) PushMode(m int) {}

// PopMode restores the lexer mode saved by a call to [PushMode]. It panics if there is no saved mode to
// return to.
func (b *BaseLexer) PopMode() int {}
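
The mode stack can be exercised even on a bare BaseLexer; stringMode below is a hypothetical constant a grammar might generate:

	func modeStackDemo() {
		lex := antlr.NewBaseLexer(antlr.NewInputStream(""))

		const stringMode = 1 // hypothetical generated mode number

		lex.PushMode(stringMode) // saves LexerDefaultMode, switches to stringMode
		_ = lex.PopMode()        // restores and returns LexerDefaultMode
		// calling PopMode again here would panic: no saved mode remains
	}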

func (b *BaseLexer) inputStream() CharStream {}

// SetInputStream resets the lexer input stream and associated lexer state.
func (b *BaseLexer) SetInputStream(input CharStream) {}
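
One lexer instance can be pointed at successive inputs; per the doc above, SetInputStream also resets the associated lexer state, so each input starts clean. A sketch, again using the hypothetical NewMyLexer:

	func lexTwice() {
		lexer := NewMyLexer(antlr.NewInputStream("first input"))
		for t := lexer.NextToken(); t.GetTokenType() != antlr.TokenEOF; t = lexer.NextToken() {
			_ = t // consume the first input's tokens
		}

		lexer.SetInputStream(antlr.NewInputStream("second input"))
		// subsequent NextToken calls lex the new stream from a fresh state
	}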

func (b *BaseLexer) GetTokenSourceCharStreamPair() *TokenSourceCharStreamPair {}

// EmitToken by default does not support multiple emits per [NextToken] invocation
// for efficiency reasons. Subclass and override this func, [NextToken],
// and [GetToken] to push tokens into a list and pull from that list
// rather than from the single variable this implementation uses.
func (b *BaseLexer) EmitToken(token Token) {}

// Emit is the standard method called to automatically emit a token at the
// outermost lexical rule. The token object should point into the
// char buffer start..stop. If there is a text override in 'text',
// use that to set the token's text. Override this method to emit
// custom [Token] objects or provide a new factory.
func (b *BaseLexer) Emit() Token {}
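
A sketch of emitting customized tokens, assuming the derived lexer is assigned to the BaseLexer's Virt field so that [NextToken]'s virtual dispatch reaches the override (upperLexer and its wiring are hypothetical):

	// upperLexer upper-cases every token's text before emission.
	type upperLexer struct {
		*antlr.BaseLexer
	}

	func (u *upperLexer) Emit() antlr.Token {
		u.SetText(strings.ToUpper(u.GetText())) // override the matched text
		return u.BaseLexer.Emit()               // default factory builds the token
	}

	// construction sketch: u := &upperLexer{BaseLexer: antlr.NewBaseLexer(input)},
	// then u.Virt = u so dispatch reaches this Emit.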

// EmitEOF emits an EOF token. By default, this is the last token emitted.
func (b *BaseLexer) EmitEOF() Token {}

// GetCharPositionInLine returns the current position in the current line as far as the lexer is concerned.
func (b *BaseLexer) GetCharPositionInLine() int {}

func (b *BaseLexer) GetLine() int {}

func (b *BaseLexer) GetType() int {}

func (b *BaseLexer) SetType(t int) {}

// GetCharIndex returns the index of the current character of lookahead.
func (b *BaseLexer) GetCharIndex() int {}

// GetText returns the text matched so far for the current token, or any text override.
func (b *BaseLexer) GetText() string {}

// SetText sets the complete text of this token; it wipes any previous changes to the text.
func (b *BaseLexer) SetText(text string) {}

// GetATN returns the ATN used by the lexer.
func (b *BaseLexer) GetATN() *ATN {}

// GetAllTokens returns a list of all [Token] objects in the input char stream.
// It forces a load of all tokens that can be made from the input char stream.
//
// The returned list does not include the EOF token.
func (b *BaseLexer) GetAllTokens() []Token {}
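
Eager tokenization with the hypothetical generated lexer; note the returned slice stops short of EOF:

	func printAll() {
		lexer := NewMyLexer(antlr.NewInputStream("a = 1"))
		for _, t := range lexer.GetAllTokens() {
			fmt.Printf("type=%d %q\n", t.GetTokenType(), t.GetText())
		}
	}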

func (b *BaseLexer) notifyListeners(e RecognitionException) {}

func (b *BaseLexer) getErrorDisplayForChar(c rune) string {}

func (b *BaseLexer) getCharErrorDisplay(c rune) string {}

// Recover attempts to recover from a lexing error. A lexer can normally match
// any char in its vocabulary after matching a token, so here we do the easy
// thing and just kill a character and hope it all works out. You can instead
// use the rule invocation stack to do sophisticated error recovery if you are
// in a fragment rule.
//
// In general, lexers should not need to recover and should have rules that cover any eventuality, such as
// a character that makes no sense to the recognizer.
func (b *BaseLexer) Recover(re RecognitionException) {}
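
Since Recover silently drops a character, it is common to attach an error listener so the loss is at least reported; a sketch using the runtime's [ErrorListener] hooks ([DefaultErrorListener] supplies no-op defaults):

	// collectingListener records lexer syntax errors instead of printing them.
	type collectingListener struct {
		*antlr.DefaultErrorListener
		errs []string
	}

	func (c *collectingListener) SyntaxError(_ antlr.Recognizer, _ interface{},
		line, column int, msg string, _ antlr.RecognitionException) {
		c.errs = append(c.errs, fmt.Sprintf("%d:%d %s", line, column, msg))
	}

	// usage with the hypothetical generated lexer:
	//	lexer.RemoveErrorListeners()
	//	lexer.AddErrorListener(&collectingListener{DefaultErrorListener: antlr.NewDefaultErrorListener()})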