cpython/Python/flowgraph.c


#include <stdbool.h>

#include "Python.h"
#include "pycore_flowgraph.h"
#include "pycore_compile.h"
#include "pycore_pymem.h"         // _PyMem_IsPtrFreed()

#include "pycore_opcode_utils.h"
#include "pycore_opcode_metadata.h" // OPCODE_HAS_ARG, etc


#undef SUCCESS
#undef ERROR
#define SUCCESS
#define ERROR

#define RETURN_IF_ERROR(X)

#define DEFAULT_BLOCK_SIZE

location;
jump_target_label;

cfg_instr;

basicblock;


struct _PyCfgBuilder {};

cfg_builder;

#define SAME_LABEL(L1, L2)
#define IS_LABEL(L)

#define LOCATION(LNO, END_LNO, COL, END_COL)

static inline int
is_block_push(cfg_instr *i)
{}

static inline int
is_jump(cfg_instr *i)
{}

/* One arg */
#define INSTR_SET_OP1(I, OP, ARG)

/* No args */
#define INSTR_SET_OP0(I, OP)

/***** Blocks *****/

/* Returns the offset of the next instruction in the current block's
   b_instr array.  Resizes the b_instr array as necessary.
   Returns -1 on failure.
*/
static int
basicblock_next_instr(basicblock *b)
{}
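
/* A minimal sketch of the grow-and-reserve pattern described in the comment
 * above, using the b_instr/b_iused/b_ialloc fields that also appear in
 * cfg_builder_check() below, and assuming DEFAULT_BLOCK_SIZE (value elided
 * above) seeds the first allocation. Illustrative only, not the real body. */
#if 0
static int
sketch_next_instr(basicblock *b)
{
    if (b->b_iused == b->b_ialloc) {
        int new_alloc = b->b_ialloc ? b->b_ialloc * 2 : DEFAULT_BLOCK_SIZE;
        cfg_instr *instr = PyMem_Realloc(b->b_instr,
                                         new_alloc * sizeof(cfg_instr));
        if (instr == NULL) {
            PyErr_NoMemory();
            return -1;
        }
        b->b_instr = instr;
        b->b_ialloc = new_alloc;
    }
    return b->b_iused++;   /* offset of the newly reserved slot */
}
#endif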

static cfg_instr *
basicblock_last_instr(const basicblock *b) {}

/* Allocate a new block and return a pointer to it.
   Returns NULL on error.
*/

static basicblock *
cfg_builder_new_block(cfg_builder *g)
{}
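
/* Sketch of the allocate-and-link step implied by the comment above, assuming
 * the g_block_list/b_list chain used in cfg_builder_check() and a NO_LABEL
 * sentinel for unlabeled blocks (an assumption here). Illustrative only. */
#if 0
basicblock *b = PyMem_Calloc(1, sizeof(basicblock));
if (b == NULL) {
    PyErr_NoMemory();
    return NULL;
}
/* Thread the block onto the list of all blocks owned by the builder. */
b->b_list = g->g_block_list;
g->g_block_list = b;
b->b_label = NO_LABEL;   /* assumed sentinel */
return b;
#endif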

static int
basicblock_addop(basicblock *b, int opcode, int oparg, location loc)
{}

static int
basicblock_add_jump(basicblock *b, int opcode, basicblock *target, location loc)
{}

static inline int
basicblock_append_instructions(basicblock *to, basicblock *from)
{}

static inline int
basicblock_nofallthrough(const basicblock *b) {}

#define BB_NO_FALLTHROUGH(B)
#define BB_HAS_FALLTHROUGH(B)

static basicblock *
copy_basicblock(cfg_builder *g, basicblock *block)
{}

static int
basicblock_insert_instruction(basicblock *block, int pos, cfg_instr *instr) {}

/* For debugging purposes only */
#if 0
static void
dump_instr(cfg_instr *i)
{
    const char *jump = is_jump(i) ? "jump " : "";

    char arg[128];

    *arg = '\0';
    if (OPCODE_HAS_ARG(i->i_opcode)) {
        sprintf(arg, "arg: %d ", i->i_oparg);
    }
    if (HAS_TARGET(i->i_opcode)) {
        sprintf(arg, "target: %p [%d] ", i->i_target, i->i_oparg);
    }
    fprintf(stderr, "line: %d, %s (%d)  %s%s\n",
                    i->i_loc.lineno, _PyOpcode_OpName[i->i_opcode], i->i_opcode, arg, jump);
}

static inline int
basicblock_returns(const basicblock *b) {
    cfg_instr *last = basicblock_last_instr(b);
    return last && (last->i_opcode == RETURN_VALUE || last->i_opcode == RETURN_CONST);
}

static void
dump_basicblock(const basicblock *b)
{
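    /* Flags: EH = except handler, CLD = cold, WRM = warm,
       NO_FT = no fallthrough. */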
    const char *b_return = basicblock_returns(b) ? "return " : "";
    fprintf(stderr, "%d: [EH=%d CLD=%d WRM=%d NO_FT=%d %p] used: %d, depth: %d, preds: %d %s\n",
        b->b_label.id, b->b_except_handler, b->b_cold, b->b_warm, BB_NO_FALLTHROUGH(b), b, b->b_iused,
        b->b_startdepth, b->b_predecessors, b_return);
    if (b->b_instr) {
        int i;
        for (i = 0; i < b->b_iused; i++) {
            fprintf(stderr, "  [%02d] ", i);
            dump_instr(b->b_instr + i);
        }
    }
}

void
_PyCfgBuilder_DumpGraph(const basicblock *entryblock)
{
    for (const basicblock *b = entryblock; b != NULL; b = b->b_next) {
        dump_basicblock(b);
    }
}

#endif


/***** CFG construction and modification *****/

static basicblock *
cfg_builder_use_next_block(cfg_builder *g, basicblock *block)
{}

static inline int
basicblock_exits_scope(const basicblock *b) {}

static inline int
basicblock_has_eval_break(const basicblock *b) {}

static bool
cfg_builder_current_block_is_terminated(cfg_builder *g)
{}

static int
cfg_builder_maybe_start_new_block(cfg_builder *g)
{}

#ifndef NDEBUG
static bool
cfg_builder_check(cfg_builder *g)
{
    assert(g->g_entryblock->b_iused > 0);
    for (basicblock *block = g->g_block_list; block != NULL; block = block->b_list) {
        assert(!_PyMem_IsPtrFreed(block));
        if (block->b_instr != NULL) {
            assert(block->b_ialloc > 0);
            assert(block->b_iused >= 0);
            assert(block->b_ialloc >= block->b_iused);
        }
        else {
            assert (block->b_iused == 0);
            assert (block->b_ialloc == 0);
        }
    }
    return true;
}
#endif

static int
init_cfg_builder(cfg_builder *g)
{}

cfg_builder *
_PyCfgBuilder_New(void)
{}

void
_PyCfgBuilder_Free(cfg_builder *g)
{}

int
_PyCfgBuilder_CheckSize(cfg_builder *g)
{}

int
_PyCfgBuilder_UseLabel(cfg_builder *g, jump_target_label lbl)
{}

int
_PyCfgBuilder_Addop(cfg_builder *g, int opcode, int oparg, location loc)
{}


static basicblock *
next_nonempty_block(basicblock *b)
{}

/***** debugging helpers *****/

#ifndef NDEBUG
static int remove_redundant_nops(cfg_builder *g);

static bool
no_redundant_nops(cfg_builder *g) {
    if (remove_redundant_nops(g) != 0) {
        return false;
    }
    return true;
}

static bool
no_redundant_jumps(cfg_builder *g) {
    for (basicblock *b = g->g_entryblock; b != NULL; b = b->b_next) {
        cfg_instr *last = basicblock_last_instr(b);
        if (last != NULL) {
            if (IS_UNCONDITIONAL_JUMP_OPCODE(last->i_opcode)) {
                basicblock *next = next_nonempty_block(b->b_next);
                basicblock *jump_target = next_nonempty_block(last->i_target);
                if (jump_target == next) {
                    assert(next);
                    if (last->i_loc.lineno == next->b_instr[0].i_loc.lineno) {
                        assert(0);
                        return false;
                    }
                }
            }
        }
    }
    return true;
}

static bool
all_exits_have_lineno(basicblock *entryblock) {
    for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
        for (int i = 0; i < b->b_iused; i++) {
            cfg_instr *instr = &b->b_instr[i];
            if (instr->i_opcode == RETURN_VALUE) {
                if (instr->i_loc.lineno < 0) {
                    assert(0);
                    return false;
                }
            }
        }
    }
    return true;
}
#endif

/***** CFG preprocessing (jump targets and exceptions) *****/

static int
normalize_jumps_in_block(cfg_builder *g, basicblock *b) {}


static int
normalize_jumps(cfg_builder *g)
{}

static int
check_cfg(cfg_builder *g) {}

static int
get_max_label(basicblock *entryblock)
{}

/* Calculate the actual jump target from the target_label */
static int
translate_jump_labels_to_targets(basicblock *entryblock)
{}
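
/* A sketch of the label-to-target translation described above: build a
 * label->block table, then rewrite each jump's label oparg into a block
 * pointer. Illustrative only; error paths are simplified. */
#if 0
static int
sketch_translate_labels(basicblock *entryblock)
{
    int max_label = get_max_label(entryblock);
    basicblock **label2block =
        PyMem_Calloc(max_label + 1, sizeof(basicblock *));
    if (label2block == NULL) {
        PyErr_NoMemory();
        return -1;
    }
    for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
        if (b->b_label.id >= 0) {
            label2block[b->b_label.id] = b;
        }
    }
    for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
        for (int i = 0; i < b->b_iused; i++) {
            cfg_instr *instr = &b->b_instr[i];
            if (HAS_TARGET(instr->i_opcode)) {
                /* Before translation, i_oparg holds the label id. */
                instr->i_target = label2block[instr->i_oparg];
            }
        }
    }
    PyMem_Free(label2block);
    return 0;
}
#endif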

static int
mark_except_handlers(basicblock *entryblock) {}


struct _PyCfgExceptStack {};


static basicblock *
push_except_block(struct _PyCfgExceptStack *stack, cfg_instr *setup) {}

static basicblock *
pop_except_block(struct _PyCfgExceptStack *stack) {}

static basicblock *
except_stack_top(struct _PyCfgExceptStack *stack) {}

static struct _PyCfgExceptStack *
make_except_stack(void) {}

static struct _PyCfgExceptStack *
copy_except_stack(struct _PyCfgExceptStack *stack) {}

static basicblock**
make_cfg_traversal_stack(basicblock *entryblock) {}

/* Return the stack effect of opcode with argument oparg.

   Some opcodes have a different stack effect when jumping to the
   target than when not jumping. The 'jump' parameter selects the case:

   * 0 -- when not jumping
   * 1 -- when jumping
   * -1 -- maximal
 */
Py_LOCAL(int)
stack_effect(int opcode, int oparg, int jump)
{}
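
/* Usage sketch for the 'jump' convention documented above (hypothetical
 * caller, not part of this file): */
#if 0
int nojump  = stack_effect(instr->i_opcode, instr->i_oparg, 0);   /* fallthrough */
int onjump  = stack_effect(instr->i_opcode, instr->i_oparg, 1);   /* branch taken */
int maximal = stack_effect(instr->i_opcode, instr->i_oparg, -1);  /* max of both */
assert(maximal >= nojump && maximal >= onjump);
#endif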

Py_LOCAL_INLINE(int)
stackdepth_push(basicblock ***sp, basicblock *b, int depth)
{}

/* Find the flow path that needs the largest stack.  We assume that
 * cycles in the flow graph have no net effect on the stack depth.
 */
static int
calculate_stackdepth(cfg_builder *g)
{}
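
/* A sketch of the worklist traversal implied by the comment above: propagate
 * an entry depth to each successor and track the maximum depth reached.
 * Illustrative only; exception targets and error handling are omitted. */
#if 0
static int
sketch_stackdepth(basicblock *entryblock, basicblock **stack)
{
    int maxdepth = 0;
    basicblock **sp = stack;
    stackdepth_push(&sp, entryblock, 0);
    while (sp != stack) {
        basicblock *b = *--sp;
        int depth = b->b_startdepth;
        for (int i = 0; i < b->b_iused; i++) {
            cfg_instr *instr = &b->b_instr[i];
            if (is_jump(instr)) {
                int target_depth =
                    depth + stack_effect(instr->i_opcode, instr->i_oparg, 1);
                if (target_depth > maxdepth) {
                    maxdepth = target_depth;
                }
                stackdepth_push(&sp, instr->i_target, target_depth);
            }
            depth += stack_effect(instr->i_opcode, instr->i_oparg, 0);
            if (depth > maxdepth) {
                maxdepth = depth;
            }
        }
        if (BB_HAS_FALLTHROUGH(b) && b->b_next != NULL) {
            stackdepth_push(&sp, b->b_next, depth);
        }
    }
    return maxdepth;
}
#endif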

static int
label_exception_targets(basicblock *entryblock) {}

/***** CFG optimizations *****/

static int
remove_unreachable(basicblock *entryblock) {}

static int
basicblock_remove_redundant_nops(basicblock *bb) {}

static int
remove_redundant_nops(cfg_builder *g) {}

static int
remove_redundant_nops_and_pairs(basicblock *entryblock)
{}

static int
remove_redundant_jumps(cfg_builder *g) {}

static inline bool
basicblock_has_no_lineno(basicblock *b) {}

/* Maximum size of a basic block that should be copied by the optimizer */
#define MAX_COPY_SIZE

/* If this block ends with an unconditional jump to a small exit block or
 * a block that has no line numbers (and no fallthrough), then
 * remove the jump and extend this block with the target.
 * Returns 1 if extended, 0 if no change, and -1 on error.
 */
static int
basicblock_inline_small_or_no_lineno_blocks(basicblock *bb) {}
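
/* Sketch of the rewrite described above: if the jump target qualifies, turn
 * the jump into a NOP and splice in the target's instructions. MAX_COPY_SIZE
 * (value elided above) bounds the "small" case. Illustrative only. */
#if 0
cfg_instr *last = basicblock_last_instr(bb);
basicblock *target = last->i_target;
if ((basicblock_exits_scope(target) && target->b_iused <= MAX_COPY_SIZE)
    || (basicblock_has_no_lineno(target) && BB_NO_FALLTHROUGH(target))) {
    INSTR_SET_OP0(last, NOP);   /* drop the now-redundant jump */
    if (basicblock_append_instructions(bb, target) < 0) {
        return -1;
    }
    return 1;   /* extended */
}
return 0;       /* no change */
#endif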

static int
inline_small_or_no_lineno_blocks(basicblock *entryblock) {}

// Attempt to eliminate jumps to jumps by updating inst to jump to
// target->i_target using the provided opcode. Return whether or not the
// optimization was successful.
static bool
jump_thread(basicblock *bb, cfg_instr *inst, cfg_instr *target, int opcode)
{}
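
/* The shape of the jump-threading rewrite described above. If the two targets
 * already match, nothing would change and repeated threading could loop
 * forever, so that case is left alone. Illustrative; the real body (elided)
 * also deals with line numbers. */
#if 0
if (inst->i_target != target->i_target) {
    inst->i_target = target->i_target;   /* skip the intermediate jump */
    inst->i_opcode = opcode;
    return true;
}
return false;
#endif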

static PyObject*
get_const_value(int opcode, int oparg, PyObject *co_consts)
{}

// Steals a reference to newconst.
static int
add_const(PyObject *newconst, PyObject *consts, PyObject *const_cache)
{}

/* Replace LOAD_CONST c1, LOAD_CONST c2 ... LOAD_CONST cn, BUILD_TUPLE n
   with    LOAD_CONST (c1, c2, ... cn).
   The consts table must still be in list form so that the
   new constant (c1, c2, ... cn) can be appended.
   Called with codestr pointing to the first LOAD_CONST.
*/
static int
fold_tuple_on_constants(PyObject *const_cache,
                        cfg_instr *inst,
                        int n, PyObject *consts)
{}
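
/* Sketch of the fold described above: materialize the tuple, register it via
 * add_const() (assumed here to return the new constant's index), NOP out the
 * dead loads, and retarget the final instruction. Error handling simplified. */
#if 0
PyObject *newconst = PyTuple_New(n);
for (int i = 0; i < n; i++) {
    PyObject *c = get_const_value(inst[i].i_opcode, inst[i].i_oparg, consts);
    PyTuple_SET_ITEM(newconst, i, c);   /* steals the reference */
}
int index = add_const(newconst, consts, const_cache);
if (index < 0) {
    return -1;
}
for (int i = 0; i < n; i++) {
    INSTR_SET_OP0(&inst[i], NOP);       /* the LOAD_CONSTs are now dead */
}
INSTR_SET_OP1(&inst[n], LOAD_CONST, index);  /* replaces BUILD_TUPLE */
#endif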

#define VISITED

// Replace an arbitrary run of SWAPs and NOPs with an optimal one that has the
// same effect.
static int
swaptimize(basicblock *block, int *ix)
{}


// This list is pretty small, since it's only okay to reorder opcodes that:
// - can't affect control flow (like jumping or raising exceptions)
// - can't invoke arbitrary code (besides finalizers)
// - only touch the TOS (and pop it when finished)
#define SWAPPABLE(opcode)

#define STORES_TO(instr)

static int
next_swappable_instruction(basicblock *block, int i, int lineno)
{}

// Attempt to apply SWAPs statically by swapping *instructions* rather than
// stack items. For example, we can replace SWAP(2), POP_TOP, STORE_FAST(42)
// with the more efficient NOP, STORE_FAST(42), POP_TOP.
static void
apply_static_swaps(basicblock *block, int i)
{}
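
/* The core move, once two swappable instructions have been located at
 * hypothetical offsets j and k: exchanging the instructions themselves makes
 * the runtime SWAP (here at offset i) unnecessary, so it becomes a NOP. */
#if 0
cfg_instr tmp = block->b_instr[j];
block->b_instr[j] = block->b_instr[k];
block->b_instr[k] = tmp;
INSTR_SET_OP0(&block->b_instr[i], NOP);   /* the SWAP itself */
#endif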

static int
basicblock_optimize_load_const(PyObject *const_cache, basicblock *bb, PyObject *consts)
{}

static int
optimize_load_const(PyObject *const_cache, cfg_builder *g, PyObject *consts) {}

static int
optimize_basic_block(PyObject *const_cache, basicblock *bb, PyObject *consts)
{}

static int resolve_line_numbers(cfg_builder *g, int firstlineno);

static int
remove_redundant_nops_and_jumps(cfg_builder *g)
{}

/* Perform optimizations on a control flow graph.
   The consts object should still be in list form to allow new constants
   to be appended.

   Code transformations that reduce code size initially fill the gaps with
   NOPs.  Later those NOPs are removed.
*/
static int
optimize_cfg(cfg_builder *g, PyObject *consts, PyObject *const_cache, int firstlineno)
{}
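
/* A plausible ordering of the passes declared in this file (the actual
 * sequence lives in the elided body above; this is illustrative only): */
#if 0
RETURN_IF_ERROR(check_cfg(g));
RETURN_IF_ERROR(optimize_load_const(const_cache, g, consts));
for (basicblock *b = g->g_entryblock; b != NULL; b = b->b_next) {
    RETURN_IF_ERROR(optimize_basic_block(const_cache, b, consts));
}
RETURN_IF_ERROR(remove_redundant_nops_and_pairs(g->g_entryblock));
RETURN_IF_ERROR(remove_unreachable(g->g_entryblock));
RETURN_IF_ERROR(remove_redundant_nops_and_jumps(g));
RETURN_IF_ERROR(resolve_line_numbers(g, firstlineno));
return SUCCESS;
#endif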

static void
make_super_instruction(cfg_instr *inst1, cfg_instr *inst2, int super_op)
{}

static int
insert_superinstructions(cfg_builder *g)
{}

// helper functions for add_checks_for_loads_of_uninitialized_variables
static inline void
maybe_push(basicblock *b, uint64_t unsafe_mask, basicblock ***sp)
{}

static void
scan_block_for_locals(basicblock *b, basicblock ***sp)
{}

static int
fast_scan_many_locals(basicblock *entryblock, int nlocals)
{}

static int
remove_unused_consts(basicblock *entryblock, PyObject *consts)
{}



static int
add_checks_for_loads_of_uninitialized_variables(basicblock *entryblock,
                                                int nlocals,
                                                int nparams)
{}
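
/* The per-instruction idea behind the helpers above, for frames with at most
 * 64 locals (fast_scan_many_locals() handles larger frames): track a bitmask
 * of locals that may still be unbound; stores clear a bit, and loads of a
 * possibly-unbound local keep the runtime check. Illustrative only. */
#if 0
switch (instr->i_opcode) {
    case DELETE_FAST:
        unsafe_mask |= (uint64_t)1 << instr->i_oparg;    /* may be unbound */
        break;
    case STORE_FAST:
        unsafe_mask &= ~((uint64_t)1 << instr->i_oparg); /* definitely bound */
        break;
    case LOAD_FAST:
        if (unsafe_mask & ((uint64_t)1 << instr->i_oparg)) {
            instr->i_opcode = LOAD_FAST_CHECK;           /* keep the check */
        }
        break;
}
#endif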


static int
mark_warm(basicblock *entryblock) {}

static int
mark_cold(basicblock *entryblock) {}


static int
push_cold_blocks_to_end(cfg_builder *g) {}

static int
convert_pseudo_conditional_jumps(cfg_builder *g)
{}

static int
convert_pseudo_ops(cfg_builder *g)
{}

static inline bool
is_exit_or_eval_check_without_lineno(basicblock *b) {}


/* PEP 626 mandates that the f_lineno of a frame is correct
 * after a frame terminates. It would be prohibitively expensive
 * to continuously update the f_lineno field at runtime,
 * so we make sure that all exiting instructions (raises and returns)
 * have a valid line number, allowing us to compute f_lineno lazily.
 * We can do this by duplicating the exit blocks that have no line
 * number, so that none has more than one predecessor. We can then
 * safely copy the line number from the sole predecessor block.
 */
static int
duplicate_exits_without_lineno(cfg_builder *g)
{}
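
/* Sketch of the duplication step, given a jump instruction `last` in some
 * predecessor and its no-lineno exit block `target` (hypothetical locals):
 * the copy has exactly one predecessor, so it can safely inherit the jump's
 * location. Illustrative only. */
#if 0
basicblock *new_target = copy_basicblock(g, target);
if (new_target == NULL) {
    return -1;
}
new_target->b_instr[0].i_loc = last->i_loc;   /* inherit from predecessor */
last->i_target = new_target;
#endif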


/* If an instruction has no line number, but its predecessor in the BB does,
 * then copy the line number. If a successor block has no line number, and
 * only one predecessor, then inherit the line number.
 * This ensures that all exit blocks (with one predecessor) receive a line
 * number. It also reduces the size of the line number table, but has no
 * impact on the generated line number events.
 */
static void
propagate_line_numbers(basicblock *entryblock) {}
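
/* Within a single block, the first rule above amounts to carrying the last
 * seen location forward (assuming a NO_LOCATION sentinel with a negative
 * lineno; illustrative only): */
#if 0
location prev_location = NO_LOCATION;   /* assumed sentinel */
for (int i = 0; i < b->b_iused; i++) {
    if (b->b_instr[i].i_loc.lineno < 0) {
        b->b_instr[i].i_loc = prev_location;
    }
    else {
        prev_location = b->b_instr[i].i_loc;
    }
}
#endif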

static int
resolve_line_numbers(cfg_builder *g, int firstlineno)
{}

int
_PyCfg_OptimizeCodeUnit(cfg_builder *g, PyObject *consts, PyObject *const_cache,
                        int nlocals, int nparams, int firstlineno)
{}

static int *
build_cellfixedoffsets(_PyCompile_CodeUnitMetadata *umd)
{}

#define IS_GENERATOR(CF)

static int
insert_prefix_instructions(_PyCompile_CodeUnitMetadata *umd, basicblock *entryblock,
                           int *fixed, int nfreevars, int code_flags)
{}

static int
fix_cell_offsets(_PyCompile_CodeUnitMetadata *umd, basicblock *entryblock, int *fixedmap)
{}

static int
prepare_localsplus(_PyCompile_CodeUnitMetadata *umd, cfg_builder *g, int code_flags)
{}

cfg_builder *
_PyCfg_FromInstructionSequence(_PyInstructionSequence *seq)
{}

int
_PyCfg_ToInstructionSequence(cfg_builder *g, _PyInstructionSequence *seq)
{}


int
_PyCfg_OptimizedCfgToInstructionSequence(cfg_builder *g,
                                     _PyCompile_CodeUnitMetadata *umd, int code_flags,
                                     int *stackdepth, int *nlocalsplus,
                                     _PyInstructionSequence *seq)
{}

/* This is used by _PyCompile_Assemble to fill in the jump and exception
 * targets in a synthetic CFG (which is not the output of the builtin compiler).
 */
int
_PyCfg_JumpLabelsToTargets(cfg_builder *g)
{}

/* Exported API functions */

int
PyCompile_OpcodeStackEffectWithJump(int opcode, int oparg, int jump)
{}

int
PyCompile_OpcodeStackEffect(int opcode, int oparg)
{}
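
/* Example use of the exported API (illustrative): BINARY_OP pops two operands
 * and pushes one result, so its net stack effect is -1. */
#if 0
assert(PyCompile_OpcodeStackEffect(BINARY_OP, 0) == -1);
#endif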

/* Access to compiler optimizations for unit tests.
 *
 * _PyCompile_OptimizeCfg takes an instruction list, constructs
 * a CFG, optimizes it, and converts it back to an instruction list.
 */

static PyObject *
cfg_to_instruction_sequence(cfg_builder *g)
{}

PyObject *
_PyCompile_OptimizeCfg(PyObject *seq, PyObject *consts, int nlocals)
{}