go/src/cmd/internal/obj/x86/asm6.go

var plan9privates

const loopAlign

const maxLoopPad

const branchBackwards

const branchShort

const branchLoopHead

type opBytes

type Optab

type movtab

const Yxxx

const Ynone

const Yi0

const Yi1

const Yu2

const Yi8

const Yu8

const Yu7

const Ys32

const Yi32

const Yi64

const Yiauto

const Yal

const Ycl

const Yax

const Ycx

const Yrb

const Yrl

const Yrl32

const Yrf

const Yf0

const Yrx

const Ymb

const Yml

const Ym

const Ybr

const Ycs

const Yss

const Yds

const Yes

const Yfs

const Ygs

const Ygdtr

const Yidtr

const Yldtr

const Ymsw

const Ytask

const Ycr0

const Ycr1

const Ycr2

const Ycr3

const Ycr4

const Ycr5

const Ycr6

const Ycr7

const Ycr8

const Ydr0

const Ydr1

const Ydr2

const Ydr3

const Ydr4

const Ydr5

const Ydr6

const Ydr7

const Ytr0

const Ytr1

const Ytr2

const Ytr3

const Ytr4

const Ytr5

const Ytr6

const Ytr7

const Ymr

const Ymm

const Yxr0

const YxrEvexMulti4

const Yxr

const YxrEvex

const Yxm

const YxmEvex

const Yxvm

const YxvmEvex

const YyrEvexMulti4

const Yyr

const YyrEvex

const Yym

const YymEvex

const Yyvm

const YyvmEvex

const YzrMulti4

const Yzr

const Yzm

const Yzvm

const Yk0

const Yknot0

const Yk

const Ykm

const Ytls

const Ytextsize

const Yindir

const Ymax

const Zxxx

const Zlit

const Zlitm_r

const Zlitr_m

const Zlit_m_r

const Z_rp

const Zbr

const Zcall

const Zcallcon

const Zcallduff

const Zcallind

const Zcallindreg

const Zib_

const Zib_rp

const Zibo_m

const Zibo_m_xm

const Zil_

const Zil_rp

const Ziq_rp

const Zilo_m

const Zjmp

const Zjmpcon

const Zloop

const Zo_iw

const Zm_o

const Zm_r

const Z_m_r

const Zm2_r

const Zm_r_xm

const Zm_r_i_xm

const Zm_r_xm_nr

const Zr_m_xm_nr

const Zibm_r

const Zibr_m

const Zmb_r

const Zaut_r

const Zo_m

const Zo_m64

const Zpseudo

const Zr_m

const Zr_m_xm

const Zrp_

const Z_ib

const Z_il

const Zm_ibo

const Zm_ilo

const Zib_rr

const Zil_rr

const Zbyte

const Zvex_rm_v_r

const Zvex_rm_v_ro

const Zvex_r_v_rm

const Zvex_i_rm_vo

const Zvex_v_rm_r

const Zvex_i_rm_r

const Zvex_i_r_v

const Zvex_i_rm_v_r

const Zvex

const Zvex_rm_r_vo

const Zvex_i_r_rm

const Zvex_hr_rm_v_r

const Zevex_first

const Zevex_i_r_k_rm

const Zevex_i_r_rm

const Zevex_i_rm_k_r

const Zevex_i_rm_k_vo

const Zevex_i_rm_r

const Zevex_i_rm_v_k_r

const Zevex_i_rm_v_r

const Zevex_i_rm_vo

const Zevex_k_rmo

const Zevex_r_k_rm

const Zevex_r_v_k_rm

const Zevex_r_v_rm

const Zevex_rm_k_r

const Zevex_rm_v_k_r

const Zevex_rm_v_r

const Zevex_last

const Zmax

const Px

const Px1

const P32

const Pe

const Pm

const Pq

const Pb

const Pf2

const Pf3

const Pef3

const Pq3

const Pq4

const Pq4w

const Pq5

const Pq5w

const Pfw

const Pw

const Pw8

const Py

const Py1

const Py3

const Pavx

const RxrEvex

const Rxw

const Rxr

const Rxx

const Rxb

const avxEscape

const vex66

const vexF3

const vexF2

const vexLZ

const vexLIG

const vex128

const vex256

const vexWIG

const vexW0

const vexW1

const vex0F

const vex0F38

const vex0F3A

var ycover

var reg

var regrex

var ynone

var ytext

var ynop

var yfuncdata

var ypcdata

var yxorb

var yaddl

var yincl

var yincq

var ycmpb

var ycmpl

var yshb

var yshl

var ytestl

var ymovb

var ybtl

var ymovw

var ymovl

var yret

var ymovq

var ymovbe

var ym_rl

var yrl_m

var ymb_rl

var yml_rl

var yrl_ml

var yml_mb

var yrb_mb

var yxchg

var ydivl

var ydivb

var yimul

var yimul3

var ybyte

var yin

var yint

var ypushl

var ypopl

var ywrfsbase

var yrdrand

var yclflush

var ybswap

var yscond

var yjcond

var yloop

var ycall

var yduff

var yjmp

var yfmvd

var yfmvdp

var yfmvf

var yfmvx

var yfmvp

var yfcmv

var yfadd

var yfxch

var ycompp

var ystsw

var ysvrs_mo

var ysvrs_om

var ymm

var yxm

var yxm_q4

var yxcvm1

var yxcvm2

var yxr

var yxr_ml

var ymr

var ymr_ml

var yxcmpi

var yxmov

var yxcvfl

var yxcvlf

var yxcvfq

var yxcvqf

var yps

var yxrrl

var ymrxr

var ymshuf

var ymshufb

var yxshuf

var yextrw

var yextr

var yinsrw

var yinsr

var ypsdq

var ymskb

var ycrc32l

var ycrc32b

var yprefetch

var yaes

var yxbegin

var yxabort

var ylddqu

var ypalignr

var ysha256rnds2

var yblendvpd

var ymmxmm0f38

var yextractps

var ysha1rnds4

var optab

var opindex

// useAbs reports whether s describes a symbol that must avoid pc-relative addressing.
// This happens on systems like Solaris that call .so functions instead of system calls.
// It does not seem to be necessary for any other systems. This is probably working
// around a Solaris-specific bug that should be fixed differently, but we don't know
// what that bug is. And this does fix it.
func useAbs(ctxt *obj.Link, s *obj.LSym) bool {}

var nop

// Native Client rejects the repeated 0x66 prefix.
// {0x66, 0x66, 0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
func fillnop(p []byte, n int) {}
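
// The padding idea behind fillnop can be illustrated with a small, hypothetical
// sketch: pick the largest NOP encoding that still fits and repeat until n bytes
// are consumed. The table below lists the standard Intel multi-byte NOP forms
// (entry i-1 is an i-byte NOP); it mirrors, but is not, the package's nop table.
var nopSketchTable = [][]byte{
	{0x90},
	{0x66, 0x90},
	{0x0F, 0x1F, 0x00},
	{0x0F, 0x1F, 0x40, 0x00},
	{0x0F, 0x1F, 0x44, 0x00, 0x00},
	{0x66, 0x0F, 0x1F, 0x44, 0x00, 0x00},
	{0x0F, 0x1F, 0x80, 0x00, 0x00, 0x00, 0x00},
	{0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
	{0x66, 0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
}

func fillnopSketch(p []byte, n int) {
	for n > 0 {
		m := n
		if m > len(nopSketchTable) {
			m = len(nopSketchTable)
		}
		// Copy an m-byte NOP and advance.
		copy(p, nopSketchTable[m-1])
		p = p[m:]
		n -= m
	}
}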

func noppad(ctxt *obj.Link, s *obj.LSym, c int32, pad int32) int32 {}

func spadjop(ctxt *obj.Link, l, q obj.As) obj.As {}

// isJump returns whether p is a jump instruction.
// It is used to ensure that no standalone or macro-fused jump will straddle
// or end on a 32-byte boundary by inserting NOPs before the jumps.
func isJump(p *obj.Prog) bool {}

// lookForJCC returns the first real instruction starting from p, if that instruction is a conditional
// jump. Otherwise, nil is returned.
func lookForJCC(p *obj.Prog) *obj.Prog {}

// fusedJump determines whether p can be fused with a subsequent conditional jump instruction.
// If it can, we return true followed by the total size of the fused jump. If it can't, we return false.
// Macro fusion rules are derived from the Intel Optimization Manual (April 2019) section 3.4.2.2.
func fusedJump(p *obj.Prog) (bool, uint8) {}

type padJumpsCtx

func makePjcCtx(ctxt *obj.Link) padJumpsCtx {}

// padJump detects whether the instruction being assembled is a standalone or a macro-fused
// jump that needs to be padded. If it is, NOPs are inserted to ensure that the jump does
// not cross or end on a 32-byte boundary.
func (pjc padJumpsCtx) padJump(ctxt *obj.Link, s *obj.LSym, p *obj.Prog, c int32) int32 {}
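
// A minimal sketch of the boundary test described above, assuming a fixed
// 32-byte window; the names are hypothetical, and the real logic in padJump
// uses the context's configured jump alignment and fusedJump's size.
func wouldCrossOrEnd32(off, size int32) bool {
	const window = 32
	// The jump is problematic if, starting at off, it spills past or
	// stops exactly on the next 32-byte boundary.
	return (off%window)+size >= window
}

// padTo32 returns the padding needed to move an instruction at off up to the
// next 32-byte boundary (zero if it is already aligned).
func padTo32(off int32) int32 {
	const window = 32
	return (window - off%window) % window
}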

// reAssemble is called if an instruction's size changes during assembly. If
// it does, and the instruction is a standalone or a macro-fused jump, we need
// to reassemble.
func (pjc padJumpsCtx) reAssemble(p *obj.Prog) bool {}

type nopPad

// requireAlignment ensures that the function alignment is at
// least as high as a, which should be a power of two
// and between 8 and 2048, inclusive.
//
// The boolean result indicates whether the alignment meets those constraints.
func requireAlignment(a int64, ctxt *obj.Link, cursym *obj.LSym) bool {}
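
// A one-line sketch of the validity test requireAlignment performs, per the
// comment above: a must be a power of two between 8 and 2048, inclusive.
func validFuncAlign(a int64) bool {
	return a >= 8 && a <= 2048 && a&(a-1) == 0
}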

func span6(ctxt *obj.Link, s *obj.LSym, newprog obj.ProgAlloc) {}

func instinit(ctxt *obj.Link) {}

var isAndroid

func prefixof(ctxt *obj.Link, a *obj.Addr) int {}

// oclassRegList returns the multi-source operand class for addr.
func oclassRegList(ctxt *obj.Link, addr *obj.Addr) int {}

// oclassVMem returns the V-mem (vector memory with VSIB) operand class.
// For an addr that is not V-mem, it returns (Yxxx, false).
func oclassVMem(ctxt *obj.Link, addr *obj.Addr) (int, bool) {}

func oclass(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) int {}

type AsmBuf

// Put1 appends one byte to the end of the buffer.
func (ab *AsmBuf) Put1(x byte) {}

// Put2 appends two bytes to the end of the buffer.
func (ab *AsmBuf) Put2(x, y byte) {}

// Put3 appends three bytes to the end of the buffer.
func (ab *AsmBuf) Put3(x, y, z byte) {}

// Put4 appends four bytes to the end of the buffer.
func (ab *AsmBuf) Put4(x, y, z, w byte) {}

// PutInt16 writes v into the buffer using little-endian encoding.
func (ab *AsmBuf) PutInt16(v int16) {}

// PutInt32 writes v into the buffer using little-endian encoding.
func (ab *AsmBuf) PutInt32(v int32) {}

// PutInt64 writes v into the buffer using little-endian encoding.
func (ab *AsmBuf) PutInt64(v int64) {}
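
// The PutInt* methods write little-endian values. A minimal sketch of the
// 32-bit case, using a plain slice instead of the AsmBuf internals:
func putInt32LE(dst []byte, v int32) {
	dst[0] = byte(v)
	dst[1] = byte(v >> 8)
	dst[2] = byte(v >> 16)
	dst[3] = byte(v >> 24)
}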

// Put copies b into the buffer.
func (ab *AsmBuf) Put(b []byte) {}

// PutOpBytesLit writes the zero-terminated sequence of bytes from op,
// starting at the specified offset (e.g. the z counter value).
// The trailing 0 is not written.
//
// Intended to be used for literal Z cases.
// Literal Z cases usually have "Zlit" in their name (Zlit, Zlitr_m, Zlitm_r).
func (ab *AsmBuf) PutOpBytesLit(offset int, op *opBytes) {}
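
// A sketch of the zero-terminated copy PutOpBytesLit performs; op is treated
// here as a plain byte slice rather than the opBytes array type.
func putZeroTerminated(dst []byte, offset int, op []byte) []byte {
	for _, b := range op[offset:] {
		if b == 0 {
			break // the trailing 0 is a terminator, not emitted
		}
		dst = append(dst, b)
	}
	return dst
}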

// Insert inserts b at offset i.
func (ab *AsmBuf) Insert(i int, b byte) {}

// Last returns the byte at the end of the buffer.
func (ab *AsmBuf) Last() byte {}

// Len returns the length of the buffer.
func (ab *AsmBuf) Len() int {}

// Bytes returns the contents of the buffer.
func (ab *AsmBuf) Bytes() []byte {}

// Reset empties the buffer.
func (ab *AsmBuf) Reset() {}

// At returns the byte at offset i.
func (ab *AsmBuf) At(i int) byte {}

// asmidx emits the SIB byte.
func (ab *AsmBuf) asmidx(ctxt *obj.Link, scale int, index int, base int) {}
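
// The SIB byte packs scale, index, and base as ss<<6 | index<<3 | base, with
// ss = log2(scale). A hedged sketch, ignoring the REX extension bits and the
// error reporting that asmidx also handles:
func sibByte(scale, index, base byte) byte {
	var ss byte
	switch scale {
	case 1:
		ss = 0
	case 2:
		ss = 1
	case 4:
		ss = 2
	case 8:
		ss = 3
	}
	return ss<<6 | (index&7)<<3 | base&7
}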

func (ab *AsmBuf) relput4(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog, a *obj.Addr) {}

func vaddr(ctxt *obj.Link, p *obj.Prog, a *obj.Addr, r *obj.Reloc) int64 {}

func (ab *AsmBuf) asmandsz(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog, a *obj.Addr, r int, rex int, m64 int) {}

func (ab *AsmBuf) asmand(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog, a *obj.Addr, ra *obj.Addr) {}

func (ab *AsmBuf) asmando(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog, a *obj.Addr, o int) {}

func bytereg(a *obj.Addr, t *uint8) {}

func unbytereg(a *obj.Addr, t *uint8) {}

const movLit

const movRegMem

const movMemReg

const movRegMem2op

const movMemReg2op

const movFullPtr

const movDoubleShift

const movTLSReg

var ymovtab

func isax(a *obj.Addr) bool {}

func subreg(p *obj.Prog, from int, to int) {}

func (ab *AsmBuf) mediaop(ctxt *obj.Link, o *Optab, op int, osize int, z int) int {}

var bpduff1

var bpduff2

// asmevex emits the EVEX prefix and opcode byte.
// In addition to the r/m, vvvv, and reg fields used by asmvex, it also takes
// an optional K-masking register.
//
// Expects asmbuf.evex to be properly initialized.
func (ab *AsmBuf) asmevex(ctxt *obj.Link, p *obj.Prog, rm, v, r, k *obj.Addr) {}

// asmvex emits the VEX prefix and opcode byte.
// The three addresses are the r/m, vvvv, and reg fields.
// The reg and rm arguments appear in the same order as the
// arguments to asmand, which typically follows the call to asmvex.
// The final two arguments are the VEX prefix (see encoding above)
// and the opcode byte.
// For details about the VEX prefix, see:
// https://en.wikipedia.org/wiki/VEX_prefix#Technical_description
func (ab *AsmBuf) asmvex(ctxt *obj.Link, rm, v, r *obj.Addr, vex, opcode uint8) {}
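
// A hedged sketch of the 2-byte vs 3-byte VEX decision described at the link
// above: the short C5 form is only legal when the X, B, and W bits are clear
// and the opcode map is 0F. This packs the public VEX layout directly and does
// not reflect how asmvex decodes the package's vex* constants.
func vexBytes(r, x, b, w bool, mMap, vvvv, l, pp byte) []byte {
	not := func(v bool) byte { // R, X, B, and vvvv are stored inverted
		if v {
			return 0
		}
		return 1
	}
	var wBit byte
	if w {
		wBit = 1
	}
	if !x && !b && !w && mMap == 1 { // map 1 is the 0F escape
		return []byte{0xC5, not(r)<<7 | (^vvvv&0xF)<<3 | l<<2 | pp}
	}
	return []byte{
		0xC4,
		not(r)<<7 | not(x)<<6 | not(b)<<5 | mMap&0x1F,
		wBit<<7 | (^vvvv&0xF)<<3 | l<<2 | pp,
	}
}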

// regIndex returns a register index that fits in 5 bits.
//
//	R         : 3 bit | legacy instructions     | N/A
//	[R/V]EX.R : 1 bit | REX / VEX extension bit | Rxr
//	EVEX.R    : 1 bit | EVEX extension bit      | RxrEvex
//
// Examples:
//
//	REG_Z30 => 30
//	REG_X15 => 15
//	REG_R9  => 9
//	REG_AX  => 0
func regIndex(r int16) int {}
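
// Per the table above, the 5-bit index is the 3-bit legacy field plus one
// REX/VEX extension bit and one EVEX extension bit. A hypothetical sketch
// (regIndex itself derives these bits from the reg/regrex tables):
func regIndex5(low3, rexBit, evexBit int) int {
	return low3&7 | (rexBit&1)<<3 | (evexBit&1)<<4
}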

// avx2gatherValid reports whether p satisfies AVX2 gather constraints.
// Reports errors via ctxt.
func avx2gatherValid(ctxt *obj.Link, p *obj.Prog) bool {}

// avx512gatherValid reports whether p satisfies AVX512 gather constraints.
// Reports errors via ctxt.
func avx512gatherValid(ctxt *obj.Link, p *obj.Prog) bool {}

func (ab *AsmBuf) doasm(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog) {}

// byteswapreg returns a byte-addressable register (AX, BX, CX, DX)
// which is not referenced in a.
// If a is empty, it returns BX to account for MULB-like instructions
// that might use DX and AX.
func byteswapreg(ctxt *obj.Link, a *obj.Addr) int {}

func isbadbyte(a *obj.Addr) bool {}

func (ab *AsmBuf) asmins(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog) {}

// unpackOps4 extracts 4 operands from p.
func unpackOps4(p *obj.Prog) (arg0, arg1, arg2, dst *obj.Addr) {}

// unpackOps5 extracts 5 operands from p.
func unpackOps5(p *obj.Prog) (arg0, arg1, arg2, arg3, dst *obj.Addr) {}