/* cpython/Parser/lexer/lexer.c */

#include "Python.h"
#include "pycore_token.h"
#include "pycore_unicodeobject.h"
#include "errcode.h"

#include "state.h"
#include "../tokenizer/helpers.h"

/* Alternate tab spacing */
#define ALTTABSIZE 1

/* True if `c` may begin an identifier: ASCII letter, underscore, or any
   non-ASCII byte (>= 128, validated later by verify_identifier). */
#define is_potential_identifier_start(c) (\
              (c >= 'a' && c <= 'z')\
               || (c >= 'A' && c <= 'Z')\
               || c == '_'\
               || (c >= 128))

/* True if `c` may continue an identifier: like the start set, plus digits. */
#define is_potential_identifier_char(c) (\
              (c >= 'a' && c <= 'z')\
               || (c >= 'A' && c <= 'Z')\
               || (c >= '0' && c <= '9')\
               || c == '_'\
               || (c >= 128))

#ifdef Py_DEBUG
/* Debug builds bounds-check the f-string mode stack before indexing it. */
static inline tokenizer_mode* TOK_GET_MODE(struct tok_state* tok) {
    assert(tok->tok_mode_stack_index >= 0);
    assert(tok->tok_mode_stack_index < MAXFSTRINGLEVEL);
    return &(tok->tok_mode_stack[tok->tok_mode_stack_index]);
}
static inline tokenizer_mode* TOK_NEXT_MODE(struct tok_state* tok) {
    assert(tok->tok_mode_stack_index >= 0);
    assert(tok->tok_mode_stack_index + 1 < MAXFSTRINGLEVEL);
    return &(tok->tok_mode_stack[++tok->tok_mode_stack_index]);
}
#else
/* Release builds: same semantics as the debug inline functions above,
   without the bounds checks.  An empty expansion here would make every
   TOK_GET_MODE/TOK_NEXT_MODE call site a syntax error in non-debug builds. */
#define TOK_GET_MODE(tok) (&(tok->tok_mode_stack[tok->tok_mode_stack_index]))
#define TOK_NEXT_MODE(tok) (&(tok->tok_mode_stack[++tok->tok_mode_stack_index]))
#endif

/* Fill in `token` (type + p_start/p_end span) and yield the tokenizer's
   return code; both expand in functions where tok/token/p_start/p_end
   are in scope.  Empty expansions would make `return MAKE_TOKEN(x)`
   return no value from an int function. */
#define MAKE_TOKEN(token_type) _PyLexer_token_setup(tok, token, token_type, p_start, p_end)
#define MAKE_TYPE_COMMENT_TOKEN(token_type, col_offset, end_col_offset) (\
                _PyLexer_type_comment_token_setup(tok, token, token_type, col_offset, end_col_offset, p_start, p_end))

/* Spaces in this constant are treated as "zero or more spaces or tabs" when
   tokenizing. */
static const char* type_comment_prefix = "# type: ";

/* Return nonzero iff the first `size` bytes of `str` contain a NUL byte.
   (The original stub had an empty body: falling off the end of a non-void
   function is undefined behavior when the result is used.) */
static inline int
contains_null_bytes(const char* str, size_t size)
{
    return memchr(str, 0, size) != NULL;
}

/* Get next char, updating state; error code goes into tok->done */
/* NOTE(review): body elided in this stub — as written, this int function
   returns no value (UB if the result is used).  Restore the real
   implementation before building. */
static int
tok_nextc(struct tok_state *tok)
{}

/* Back-up one character */
/* NOTE(review): body elided in this stub — presumably pushes `c` back so
   the next tok_nextc() returns it again; confirm against the full source. */
static void
tok_backup(struct tok_state *tok, int c)
{}

/* NOTE(review): stub — the name suggests it records the source text of the
   current f-string expression on `token`; body elided here, so the int
   return is undefined.  TODO: restore implementation. */
static int
set_fstring_expr(struct tok_state* tok, struct token *token, char c) {}

/* NOTE(review): stub — exported helper (non-static, _PyLexer_ prefix) that
   presumably accumulates f-string expression text as `cur` is consumed;
   body elided, so the int return is undefined.  TODO: restore. */
int
_PyLexer_update_fstring_expr(struct tok_state *tok, char cur)
{}

/* NOTE(review): stub — name and signature suggest it checks whether the
   upcoming input matches the string `test` without consuming it; body
   elided, return value undefined.  TODO: restore. */
static int
lookahead(struct tok_state *tok, const char *test)
{}

/* NOTE(review): stub — presumably validates the character `c` following a
   numeric literal of the given `kind` and reports an error otherwise;
   body elided, return value undefined.  TODO: restore. */
static int
verify_end_of_number(struct tok_state *tok, int c, const char *kind) {}

/* Verify that the identifier follows PEP 3131.
   All identifier strings are guaranteed to be "ready" unicode objects.
 */
/* NOTE(review): body elided in this stub — int return is undefined as
   written.  TODO: restore implementation. */
static int
verify_identifier(struct tok_state *tok)
{}

/* NOTE(review): stub — name suggests it consumes the remaining digits
   (and underscores) of a decimal literal; body elided, return value
   undefined.  TODO: restore. */
static int
tok_decimal_tail(struct tok_state *tok)
{}

/* NOTE(review): stub — name suggests it handles an explicit line
   continuation (backslash-newline); body elided, return value undefined.
   TODO: restore. */
static inline int
tok_continuation_line(struct tok_state *tok) {}

/* NOTE(review): stub — the main tokenizing state machine for regular
   (non-f-string) code, filling `token` and returning a token type;
   body elided here and too large to reconstruct safely.  TODO: restore. */
static int
tok_get_normal_mode(struct tok_state *tok, tokenizer_mode* current_tok, struct token *token)
{}

/* NOTE(review): stub — counterpart of tok_get_normal_mode for input inside
   an f-string (see tok_mode_stack above); body elided.  TODO: restore. */
static int
tok_get_fstring_mode(struct tok_state *tok, tokenizer_mode* current_tok, struct token *token)
{}

/* NOTE(review): stub — presumably dispatches to tok_get_normal_mode or
   tok_get_fstring_mode based on the current tokenizer mode; body elided,
   return value undefined.  TODO: restore. */
static int
tok_get(struct tok_state *tok, struct token *token)
{}

/* NOTE(review): stub — the public entry point of this lexer (non-static);
   presumably wraps tok_get() with error post-processing.  Body elided,
   return value undefined.  TODO: restore. */
int
_PyTokenizer_Get(struct tok_state *tok, struct token *token)
{}