go/src/cmd/compile/internal/amd64/ssa.go

// ssaMarkMoves marks any MOVXconst ops that need to avoid clobbering flags.
func ssaMarkMoves(s *ssagen.State, b *ssa.Block) {}
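
// Hedged sketch of the flag-liveness scan described above: walk the block
// backwards, tracking whether the CPU flags are live, and record any
// MOVLconst/MOVQconst that must avoid flag-clobbering encodings (such as
// XOR reg,reg for zero). ssaMarkMovesSketch and the returned slice are
// illustrative stand-ins, not the actual marking mechanism.
func ssaMarkMovesSketch(b *ssa.Block) []*ssa.Value {
	var marked []*ssa.Value
	flive := b.FlagsLiveAtEnd // flags live on exit from b
	for _, c := range b.ControlValues() {
		flive = flive || c.Type.IsFlags() // a flags-typed control value keeps them live
	}
	for i := len(b.Values) - 1; i >= 0; i-- {
		v := b.Values[i]
		if flive && (v.Op == ssa.OpAMD64MOVLconst || v.Op == ssa.OpAMD64MOVQconst) {
			marked = append(marked, v) // must not clobber live flags
		}
		if v.Type.IsFlags() {
			flive = false // v redefines the flags
		}
		for _, a := range v.Args {
			if a.Type.IsFlags() {
				flive = true // v consumes flags defined earlier in the block
			}
		}
	}
	return marked
}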

// loadByType returns the load instruction of the given type.
func loadByType(t *types.Type) obj.As {}

// storeByType returns the store instruction of the given type.
func storeByType(t *types.Type) obj.As {}

// moveByType returns the reg->reg move instruction of the given type.
func moveByType(t *types.Type) obj.As {}
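
// Hedged sketch of the size/float dispatch the three *ByType helpers above
// perform, using the store case as the example. storeByTypeSketch and the
// exact opcode choices are illustrative assumptions, not a copy of the
// implementation.
func storeByTypeSketch(t *types.Type) obj.As {
	if t.IsFloat() {
		switch t.Size() {
		case 4:
			return x86.AMOVSS
		case 8:
			return x86.AMOVSD
		}
	} else {
		switch t.Size() {
		case 1:
			return x86.AMOVB
		case 2:
			return x86.AMOVW
		case 4:
			return x86.AMOVL
		case 8:
			return x86.AMOVQ
		}
	}
	panic("bad store type")
}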

// opregreg emits instructions for
//
//	dest := dest(To) op src(From)
//
// and also returns the created obj.Prog so it
// may be further adjusted (offset, scale, etc).
func opregreg(s *ssagen.State, op obj.As, dest, src int16) *obj.Prog {}
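
// Hedged sketch of emitting a two-operand register/register instruction
// through the ssagen.State prog stream, following the dest := dest op src
// convention documented above. opregregSketch is a hypothetical name; the
// field usage is the usual obj.Prog setup, assumed rather than verified.
func opregregSketch(s *ssagen.State, op obj.As, dest, src int16) *obj.Prog {
	p := s.Prog(op) // append a new instruction of kind op
	p.From.Type = obj.TYPE_REG
	p.From.Reg = src // source operand (From)
	p.To.Type = obj.TYPE_REG
	p.To.Reg = dest // destination operand (To), also an input
	return p        // caller may adjust offset, scale, etc.
}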

// memIdx fills out a as an indexed memory reference for v.
// It assumes that the base register and the index register
// are v.Args[0].Reg() and v.Args[1].Reg(), respectively.
// The caller must still use gc.AddAux/gc.AddAux2 to handle v.Aux as necessary.
func memIdx(a *obj.Addr, v *ssa.Value) {}
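
// Hedged sketch of filling an obj.Addr as a base+index memory operand from
// v's arguments, per the comment above. memIdxSketch is a hypothetical name,
// and the fixed scale of 1 is an assumption; the real helper may derive the
// scale from the specific indexed op.
func memIdxSketch(a *obj.Addr, v *ssa.Value) {
	base, index := v.Args[0].Reg(), v.Args[1].Reg()
	a.Type = obj.TYPE_MEM
	a.Scale = 1     // assumption: byte-scaled index
	a.Reg = base    // base register
	a.Index = index // index register
	// Handling v.Aux (symbol/offset) remains the caller's job via gc.AddAux/gc.AddAux2.
}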

// DUFFZERO consists of repeated blocks of 4 MOVUPSs + LEAQ.
// See runtime/mkduff.go.

// duffStart returns the offset from duffzero at which to start executing
// for a block of the given size.
func duffStart(size int64) int64 {}

// duffAdj returns the pointer adjustment (in bytes) required before jumping
// into duffzero for a block of the given size.
func duffAdj(size int64) int64 {}

// duff returns the offset (from duffzero, in bytes) and pointer adjust (in bytes)
// required to use the duffzero mechanism for a block of the given size.
func duff(size int64) (int64, int64) {}
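
// Hedged sketch of how an offset into DUFFZERO and a pointer adjustment could
// be derived from the block size. duffSketch is a hypothetical name, and the
// byte-size constants below (per-instruction encodings, number of blocks) are
// illustrative assumptions; see runtime/mkduff.go for the generated layout.
func duffSketch(size int64) (offset, adjust int64) {
	const (
		clearStep = 16 // bytes cleared by each MOVUPS
		blockLen  = 4  // MOVUPS instructions per block
		movSize   = 5  // assumed encoded size of a MOVUPS with displacement, in bytes
		leaqSize  = 4  // assumed encoded size of the trailing LEAQ, in bytes
		blockSize = 23 // assumed code bytes per block (the first MOVUPS encodes shorter)
		blocks    = 16 // assumed number of blocks emitted by mkduff.go
	)
	if size <= 0 || size%clearStep != 0 {
		panic("bad duffzero size")
	}
	steps := size / clearStep // 16-byte clears still needed
	whole := steps / blockLen // full blocks to execute
	steps %= blockLen         // leftover clears from a partial block
	offset = blockSize * (blocks - whole) // skip the blocks we don't need
	if steps != 0 {
		// Back up into the previous block so its last `steps` MOVUPSs (and its
		// LEAQ) also run, and pre-adjust the pointer so they clear the right bytes.
		offset -= leaqSize + movSize*steps
		adjust = -clearStep * (blockLen - steps)
	}
	return offset, adjust
}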

// getgFromTLS loads the g (current goroutine) pointer from thread-local
// storage into register r.
func getgFromTLS(s *ssagen.State, r int16) {}

// ssaGenValue generates machine code for a single SSA value v.
func ssaGenValue(s *ssagen.State, v *ssa.Value) {}

// blockJump maps a conditional block kind to its jump instruction and to the
// inverted jump used when the block's first successor immediately follows it.
var blockJump = [...]struct{ asm, invasm obj.As }{}

// eqfJumps describes the two-jump sequence emitted for floating-point equality
// branches, which must also check the parity flag set by unordered (NaN)
// comparisons.
var eqfJumps [2][2]ssagen.IndexJump

// nefJumps is the corresponding two-jump sequence for floating-point
// inequality branches.
var nefJumps [2][2]ssagen.IndexJump

// ssaGenBlock generates the control-flow instructions at the end of block b.
// next is the block that will be laid out immediately after b, so a jump to
// next can be omitted.
func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {}

// loadRegResult emits an instruction that loads the result value n of type t,
// stored at frame offset off, into register reg.
func loadRegResult(s *ssagen.State, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {}

// spillArgReg emits a store of argument register reg into the stack slot for
// parameter n at offset off, appending the instruction after p.
func spillArgReg(pp *objw.Progs, p *obj.Prog, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {}