kubernetes/vendor/github.com/mailru/easyjson/jlexer/lexer.go

// tokenKind identifies the kind of a scanned token.
type tokenKind

// tokenUndef means no token is currently scanned.
const tokenUndef

// tokenDelim marks a delimiter token (presumably '{', '}', '[', ']', ':' or ',' — confirm against implementation).
const tokenDelim

// tokenString marks a string literal token.
const tokenString

// tokenNumber marks a number literal token.
const tokenNumber

// tokenBool marks a true/false keyword token.
const tokenBool

// tokenNull marks a null keyword token.
const tokenNull

// token holds the state of the most recently scanned token.
type token

// Lexer is a JSON lexer: it scans tokens from input bytes and provides
// typed accessors (String, Bool, Int64, ...) for reading values.
type Lexer

// FetchToken scans the input for the next token.
func (r *Lexer) FetchToken() {}

// isTokenEnd returns true if the char can follow a non-delimiter token.
func isTokenEnd(c byte) bool {}

// fetchNull fetches and checks remaining bytes of null keyword.
func (r *Lexer) fetchNull() {}

// fetchTrue fetches and checks remaining bytes of true keyword.
func (r *Lexer) fetchTrue() {}

// fetchFalse fetches and checks remaining bytes of false keyword.
func (r *Lexer) fetchFalse() {}

// fetchNumber scans a number literal token.
func (r *Lexer) fetchNumber() {}

// findStringLen tries to scan into the string literal for ending quote char to determine required size.
// The size will be exact if no escapes are present and may be inexact if there are escaped chars.
func findStringLen(data []byte) (isValid bool, length int) {}

// unescapeStringToken performs unescaping of string token.
// If no escaping is needed, the original string is returned; otherwise a new one is allocated.
func (r *Lexer) unescapeStringToken() (err error) {}

// getu4 decodes \uXXXX from the beginning of s, returning the hex value,
// or it returns -1.
func getu4(s []byte) rune {}

// decodeEscape processes a single escape sequence and returns number of bytes processed.
func decodeEscape(data []byte) (decoded rune, bytesProcessed int, err error) {}

// fetchString scans a string literal token.
func (r *Lexer) fetchString() {}

// scanToken scans the next token if no token is currently available in the lexer.
func (r *Lexer) scanToken() {}

// consume resets the current token to allow scanning the next one.
func (r *Lexer) consume() {}

// Ok returns true if no error (including io.EOF) was encountered during scanning.
func (r *Lexer) Ok() bool {}

// maxErrorContextLen presumably limits how much surrounding input is quoted
// in error messages — confirm against implementation.
const maxErrorContextLen

// errParse records a parse error described by what.
func (r *Lexer) errParse(what string) {}

// errSyntax records a generic syntax error.
func (r *Lexer) errSyntax() {}

// errInvalidToken records an error noting that the expected token was not found.
func (r *Lexer) errInvalidToken(expected string) {}

// GetPos returns the current scanning position in the input.
func (r *Lexer) GetPos() int {}

// Delim consumes a token and verifies that it is the given delimiter.
func (r *Lexer) Delim(c byte) {}

// IsDelim returns true if there was no scanning error and next token is the given delimiter.
func (r *Lexer) IsDelim(c byte) bool {}

// Null verifies that the next token is null and consumes it.
func (r *Lexer) Null() {}

// IsNull returns true if the next token is a null keyword.
func (r *Lexer) IsNull() bool {}

// Skip skips a single token.
func (r *Lexer) Skip() {}

// SkipRecursive skips next array or object completely, or just skips a single token if not
// an array/object.
//
// Note: no syntax validation is performed on the skipped data.
func (r *Lexer) SkipRecursive() {}

// Raw fetches the next item recursively as a data slice.
func (r *Lexer) Raw() []byte {}

// IsStart returns whether the lexer is positioned at the start
// of an input string.
func (r *Lexer) IsStart() bool {}

// Consumed reads all remaining bytes from the input, publishing an error if
// there is anything but whitespace remaining.
func (r *Lexer) Consumed() {}

// unsafeString is the shared implementation behind the Unsafe* accessors;
// presumably returns the token as a string plus its raw bytes, with
// skipUnescape controlling whether escape processing is skipped — confirm
// against implementation.
func (r *Lexer) unsafeString(skipUnescape bool) (string, []byte) {}

// UnsafeString returns the string value if the token is a string literal.
//
// Warning: returned string may point to the input buffer, so the string should not outlive
// the input buffer. Intended pattern of usage is as an argument to a switch statement.
func (r *Lexer) UnsafeString() string {}

// UnsafeBytes returns the byte slice if the token is a string literal.
func (r *Lexer) UnsafeBytes() []byte {}

// UnsafeFieldName returns current member name string token.
func (r *Lexer) UnsafeFieldName(skipUnescape bool) string {}

// String reads a string literal.
func (r *Lexer) String() string {}

// StringIntern reads a string literal, and performs string interning on it.
func (r *Lexer) StringIntern() string {}

// Bytes reads a string literal and base64 decodes it into a byte slice.
func (r *Lexer) Bytes() []byte {}

// Bool reads a true or false boolean keyword.
func (r *Lexer) Bool() bool {}

// number presumably returns the raw text of a number token — confirm against
// implementation.
//
// NOTE(review): the typed readers below are named from their signatures; the
// *Str variants presumably read numbers wrapped in JSON string literals —
// confirm against implementation.
func (r *Lexer) number() string {}

// Uint8 reads a number token as a uint8.
func (r *Lexer) Uint8() uint8 {}

// Uint16 reads a number token as a uint16.
func (r *Lexer) Uint16() uint16 {}

// Uint32 reads a number token as a uint32.
func (r *Lexer) Uint32() uint32 {}

// Uint64 reads a number token as a uint64.
func (r *Lexer) Uint64() uint64 {}

// Uint reads a number token as a uint.
func (r *Lexer) Uint() uint {}

// Int8 reads a number token as an int8.
func (r *Lexer) Int8() int8 {}

// Int16 reads a number token as an int16.
func (r *Lexer) Int16() int16 {}

// Int32 reads a number token as an int32.
func (r *Lexer) Int32() int32 {}

// Int64 reads a number token as an int64.
func (r *Lexer) Int64() int64 {}

// Int reads a number token as an int.
func (r *Lexer) Int() int {}

// Uint8Str reads a string-quoted number as a uint8.
func (r *Lexer) Uint8Str() uint8 {}

// Uint16Str reads a string-quoted number as a uint16.
func (r *Lexer) Uint16Str() uint16 {}

// Uint32Str reads a string-quoted number as a uint32.
func (r *Lexer) Uint32Str() uint32 {}

// Uint64Str reads a string-quoted number as a uint64.
func (r *Lexer) Uint64Str() uint64 {}

// UintStr reads a string-quoted number as a uint.
func (r *Lexer) UintStr() uint {}

// UintptrStr reads a string-quoted number as a uintptr.
func (r *Lexer) UintptrStr() uintptr {}

// Int8Str reads a string-quoted number as an int8.
func (r *Lexer) Int8Str() int8 {}

// Int16Str reads a string-quoted number as an int16.
func (r *Lexer) Int16Str() int16 {}

// Int32Str reads a string-quoted number as an int32.
func (r *Lexer) Int32Str() int32 {}

// Int64Str reads a string-quoted number as an int64.
func (r *Lexer) Int64Str() int64 {}

// IntStr reads a string-quoted number as an int.
func (r *Lexer) IntStr() int {}

// Float32 reads a number token as a float32.
func (r *Lexer) Float32() float32 {}

// Float32Str reads a string-quoted number as a float32.
func (r *Lexer) Float32Str() float32 {}

// Float64 reads a number token as a float64.
func (r *Lexer) Float64() float64 {}

// Float64Str reads a string-quoted number as a float64.
func (r *Lexer) Float64Str() float64 {}

// Error returns the fatal lexer error, if any.
func (r *Lexer) Error() error {}

// AddError records e as the lexer's fatal error (presumably only if none is
// set already — confirm against implementation).
func (r *Lexer) AddError(e error) {}

// AddNonFatalError records an error that does not stop scanning.
func (r *Lexer) AddNonFatalError(e error) {}

// addNonfatalError presumably appends err to the collected non-fatal errors —
// confirm against implementation.
func (r *Lexer) addNonfatalError(err *LexerError) {}

// GetNonFatalErrors returns all recorded non-fatal errors.
func (r *Lexer) GetNonFatalErrors() []*LexerError {}

// JsonNumber fetches a json.Number, as in the 'encoding/json' package.
// Int, float, and string tokens are all valid values for it.
func (r *Lexer) JsonNumber() json.Number {}

// Interface fetches an interface{} analogous to the 'encoding/json' package.
func (r *Lexer) Interface() interface{} {}

// WantComma requires a comma to be present before fetching next token
// (presumably between array/object elements — confirm against implementation).
func (r *Lexer) WantComma() {}

// WantColon requires a colon to be present before fetching next token
// (presumably between an object key and its value — confirm against implementation).
func (r *Lexer) WantColon() {}