go/src/cmd/compile/internal/ssa/rewrite.go

type deadValueChoice

const leaveDeadValues

const removeDeadValues

// deadcode indicates whether rewrite should try to remove any values that become dead.
func applyRewrite(f *Func, rb blockRewriter, rv valueRewriter, deadcode deadValueChoice) {}

func is64BitFloat(t *types.Type) bool {}

func is32BitFloat(t *types.Type) bool {}

func is64BitInt(t *types.Type) bool {}

func is32BitInt(t *types.Type) bool {}

func is16BitInt(t *types.Type) bool {}

func is8BitInt(t *types.Type) bool {}

func isPtr(t *types.Type) bool {}
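
// These type predicates reduce to size and kind checks on *types.Type.
// A minimal sketch of the pattern, assuming the Size, IsFloat,
// IsInteger, and IsPtrShaped methods from cmd/compile/internal/types
// (a sketch, not the verbatim implementation):
//
//	func is64BitFloat(t *types.Type) bool {
//		return t.Size() == 8 && t.IsFloat()
//	}
//
//	func is32BitInt(t *types.Type) bool {
//		return t.Size() == 4 && t.IsInteger()
//	}
//
//	func isPtr(t *types.Type) bool {
//		return t.IsPtrShaped()
//	}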

// mergeSym merges two symbolic offsets. There is no real merging of
// offsets; we just pick the non-nil one.
func mergeSym(x, y Sym) Sym {}

func canMergeSym(x, y Sym) bool {}

// canMergeLoadClobber reports whether the load can be merged into target without
// invalidating the schedule.
// It also checks that the other non-load argument x is something we
// are ok with clobbering.
func canMergeLoadClobber(target, load, x *Value) bool {}

// canMergeLoad reports whether the load can be merged into target without
// invalidating the schedule.
func canMergeLoad(target, load *Value) bool {}

// isSameCall reports whether sym is the same as the given named symbol.
func isSameCall(sym interface{}, name string) bool {}

// canLoadUnaligned reports if the architecture supports unaligned load operations.
func canLoadUnaligned(c *Config) bool {}

// nlzX returns the number of leading zeros.
func nlz64(x int64) int {}

func nlz32(x int32) int {}

func nlz16(x int16) int {}

func nlz8(x int8) int   {}

// ntzX returns the number of trailing zeros.
func ntz64(x int64) int {}

func ntz32(x int32) int {}

func ntz16(x int16) int {}

func ntz8(x int8) int   {}

func oneBit(x int64) bool   {}

func oneBit8(x int8) bool   {}

func oneBit16(x int16) bool {}

func oneBit32(x int32) bool {}

func oneBit64(x int64) bool {}

// nto returns the number of trailing ones.
func nto(x int64) int64 {}
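
// The nlz/ntz families are thin wrappers over math/bits, and oneBit and
// nto follow from them with standard bit tricks. A plausible sketch,
// assuming math/bits is imported:
//
//	func nlz64(x int64) int { return bits.LeadingZeros64(uint64(x)) }
//	func ntz64(x int64) int { return bits.TrailingZeros64(uint64(x)) }
//
//	// oneBit reports whether exactly one bit of x is set.
//	func oneBit(x int64) bool { return x&(x-1) == 0 && x != 0 }
//
//	// The trailing ones of x are the trailing zeros of ^x.
//	func nto(x int64) int64 { return int64(ntz64(^x)) }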

// logX returns the logarithm of n, base 2.
// n must be a positive power of 2 (isPowerOfTwo returns true).
func log8(n int8) int64 {}

func log16(n int16) int64 {}

func log32(n int32) int64 {}

func log64(n int64) int64 {}

// log2uint32 returns logarithm in base 2 of uint32(n), with log2(0) = -1.
// Rounds down.
func log2uint32(n int64) int64 {}

// isPowerOfTwo reports whether n is a power of 2.
func isPowerOfTwo[T int8 | int16 | int32 | int64](n T) bool {}
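
// A minimal sketch of the generic power-of-two test, using the classic
// n&(n-1) trick (clearing the lowest set bit leaves zero exactly when
// a single bit was set):
//
//	func isPowerOfTwo[T int8 | int16 | int32 | int64](n T) bool {
//		return n > 0 && n&(n-1) == 0
//	}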

// isUint64PowerOfTwo reports whether uint64(n) is a power of 2.
func isUint64PowerOfTwo(in int64) bool {}

// isUint32PowerOfTwo reports whether uint32(n) is a power of 2.
func isUint32PowerOfTwo(in int64) bool {}

// is32Bit reports whether n can be represented as a signed 32 bit integer.
func is32Bit(n int64) bool {}

// is16Bit reports whether n can be represented as a signed 16 bit integer.
func is16Bit(n int64) bool {}

// is8Bit reports whether n can be represented as a signed 8 bit integer.
func is8Bit(n int64) bool {}

// isU8Bit reports whether n can be represented as an unsigned 8 bit integer.
func isU8Bit(n int64) bool {}

// isU12Bit reports whether n can be represented as an unsigned 12 bit integer.
func isU12Bit(n int64) bool {}

// isU16Bit reports whether n can be represented as an unsigned 16 bit integer.
func isU16Bit(n int64) bool {}

// isU32Bit reports whether n can be represented as an unsigned 32 bit integer.
func isU32Bit(n int64) bool {}

// is20Bit reports whether n can be represented as a signed 20 bit integer.
func is20Bit(n int64) bool {}
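
// The signed isXBit predicates can be written as a truncate-and-extend
// round trip, and the unsigned isUXBit ones as interval tests. A sketch
// of the pattern (assumed, not verbatim):
//
//	func is32Bit(n int64) bool  { return n == int64(int32(n)) }
//	func is16Bit(n int64) bool  { return n == int64(int16(n)) }
//	func isU12Bit(n int64) bool { return 0 <= n && n < (1 << 12) }
//	func is20Bit(n int64) bool  { return -(1 << 19) <= n && n < (1 << 19) }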

// b2i translates a boolean value to 0 or 1 for assigning to auxInt.
func b2i(b bool) int64 {}

// b2i32 translates a boolean value to 0 or 1.
func b2i32(b bool) int32 {}

// shiftIsBounded reports whether (left/right) shift Value v is known to be bounded.
// A shift is bounded if it is shifting by less than the width of the shifted value.
func shiftIsBounded(v *Value) bool {}

// canonLessThan returns whether x is "ordered" less than y, for purposes of normalizing
// generated code as much as possible.
func canonLessThan(x, y *Value) bool {}

// truncate64Fto32F converts a float64 value to a float32 preserving the bit pattern
// of the mantissa. It will panic if the truncation results in lost information.
func truncate64Fto32F(f float64) float32 {}

// extend32Fto64F converts a float32 value to a float64 value preserving the bit
// pattern of the mantissa.
func extend32Fto64F(f float32) float64 {}
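
// Ignoring the NaN handling the real converter needs (NaN payloads are
// not necessarily preserved by conversion instructions), a simplified
// sketch of the lossless truncation check:
//
//	func truncate64Fto32F(f float64) float32 {
//		g := float32(f) // round to float32
//		if float64(g) != f {
//			panic("truncate64Fto32F: truncation is not exact")
//		}
//		return g
//	}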

// DivisionNeedsFixUp reports whether the division needs fix-up code.
func DivisionNeedsFixUp(v *Value) bool {}

// auxFrom64F encodes a float64 value so it can be stored in an AuxInt.
func auxFrom64F(f float64) int64 {}

// auxFrom32F encodes a float32 value so it can be stored in an AuxInt.
func auxFrom32F(f float32) int64 {}

// auxTo32F decodes a float32 from the AuxInt value provided.
func auxTo32F(i int64) float32 {}

// auxTo64F decodes a float64 from the AuxInt value provided.
func auxTo64F(i int64) float64 {}

func auxIntToBool(i int64) bool {}

func auxIntToInt8(i int64) int8 {}

func auxIntToInt16(i int64) int16 {}

func auxIntToInt32(i int64) int32 {}

func auxIntToInt64(i int64) int64 {}

func auxIntToUint8(i int64) uint8 {}

func auxIntToFloat32(i int64) float32 {}

func auxIntToFloat64(i int64) float64 {}

func auxIntToValAndOff(i int64) ValAndOff {}

func auxIntToArm64BitField(i int64) arm64BitField {}

func auxIntToInt128(x int64) int128 {}

func auxIntToFlagConstant(x int64) flagConstant {}

func auxIntToOp(cc int64) Op {}

func boolToAuxInt(b bool) int64 {}

func int8ToAuxInt(i int8) int64 {}

func int16ToAuxInt(i int16) int64 {}

func int32ToAuxInt(i int32) int64 {}

func int64ToAuxInt(i int64) int64 {}

func uint8ToAuxInt(i uint8) int64 {}

func float32ToAuxInt(f float32) int64 {}

func float64ToAuxInt(f float64) int64 {}

func valAndOffToAuxInt(v ValAndOff) int64 {}

func arm64BitFieldToAuxInt(v arm64BitField) int64 {}

func int128ToAuxInt(x int128) int64 {}

func flagConstantToAuxInt(x flagConstant) int64 {}

func opToAuxInt(o Op) int64 {}
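
// The auxIntToX/xToAuxInt families are simple casts in and out of the
// int64 AuxInt field. A sketch of the round-trip pattern:
//
//	func int8ToAuxInt(i int8) int64 { return int64(i) }
//	func auxIntToInt8(i int64) int8 { return int8(i) }
//
//	func boolToAuxInt(b bool) int64 { return b2i(b) }
//	func auxIntToBool(i int64) bool { return i != 0 }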

type Aux

type auxMark

func (auxMark) CanBeAnSSAAux() {}

var AuxMark

type stringAux

func (stringAux) CanBeAnSSAAux() {}

func auxToString(i Aux) string {}

func auxToSym(i Aux) Sym {}

func auxToType(i Aux) *types.Type {}

func auxToCall(i Aux) *AuxCall {}

func auxToS390xCCMask(i Aux) s390x.CCMask {}

func auxToS390xRotateParams(i Aux) s390x.RotateParams {}

func StringToAux(s string) Aux {}

func symToAux(s Sym) Aux {}

func callToAux(s *AuxCall) Aux {}

func typeToAux(t *types.Type) Aux {}

func s390xCCMaskToAux(c s390x.CCMask) Aux {}

func s390xRotateParamsToAux(r s390x.RotateParams) Aux {}

// uaddOvf reports whether unsigned a+b would overflow.
func uaddOvf(a, b int64) bool {}
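
// A minimal sketch: an unsigned addition overflowed iff the sum wrapped
// around below one of the operands.
//
//	func uaddOvf(a, b int64) bool {
//		return uint64(a)+uint64(b) < uint64(a)
//	}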

// loadLSymOffset simulates reading a word at an offset into a
// read-only symbol's runtime memory. If it would read a pointer to
// another symbol, that symbol is returned. Otherwise, it returns nil.
func loadLSymOffset(lsym *obj.LSym, offset int64) *obj.LSym {}

func devirtLECall(v *Value, sym *obj.LSym) *Value {}

// isSamePtr reports whether p1 and p2 point to the same address.
func isSamePtr(p1, p2 *Value) bool {}

func isStackPtr(v *Value) bool {}

// disjoint reports whether the memory region specified by [p1:p1+n1)
// does not overlap with [p2:p2+n2).
// A return value of false does not imply the regions overlap.
func disjoint(p1 *Value, n1 int64, p2 *Value, n2 int64) bool {}

// moveSize returns the number of bytes an aligned MOV instruction moves.
func moveSize(align int64, c *Config) int64 {}

// mergePoint finds a block among a's blocks which dominates b and is itself
// dominated by all of a's blocks. Returns nil if it can't find one.
// Might return nil even if one does exist.
func mergePoint(b *Block, a ...*Value) *Block {}

// clobber invalidates values. Returns true.
// clobber is used by rewrite rules to:
//
//	A) make sure the values are really dead and never used again.
//	B) decrement use counts of the values' args.
func clobber(vv ...*Value) bool {}
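
// A plausible sketch, assuming Value.reset performs the use-count
// bookkeeping of B):
//
//	func clobber(vv ...*Value) bool {
//		for _, v := range vv {
//			v.reset(OpInvalid)
//		}
//		return true // so it can be anded into rule conditions
//	}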

// clobberIfDead resets v when use count is 1. Returns true.
// clobberIfDead is used by rewrite rules to decrement
// use counts of v's args when v is dead and never used.
func clobberIfDead(v *Value) bool {}

// noteRule is an easy way to track if a rule is matched when writing
// new ones.  Make the rule of interest also conditional on
//
//	noteRule("note to self: rule of interest matched")
//
// and that message will print when the rule matches.
func noteRule(s string) bool {}
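
// A minimal sketch; returning true keeps the rule's condition intact:
//
//	func noteRule(s string) bool {
//		fmt.Println(s)
//		return true
//	}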

// countRule increments Func.ruleMatches[key].
// If Func.ruleMatches is non-nil at the end
// of compilation, it will be printed to stdout.
// This is intended to make it easier to find which functions
// contain lots of rule matches when developing new rules.
func countRule(v *Value, key string) bool {}

// warnRule generates compiler debug output with string s when
// v is not in autogenerated code, cond is true and the rule has fired.
func warnRule(cond bool, v *Value, s string) bool {}

// for a pseudo-op like (LessThan x), extract x.
func flagArg(v *Value) *Value {}

// arm64Negate finds the complement to an ARM64 condition code,
// for example !Equal -> NotEqual or !LessThan -> GreaterEqual
//
// For floating point, it's more subtle because NaN is unordered. We do
// !LessThanF -> NotLessThanF, the latter takes care of NaNs.
func arm64Negate(op Op) Op {}

// arm64Invert evaluates (InvertFlags op), which
// is the same as altering the condition codes such
// that the same result would be produced if the arguments
// to the flag-generating instruction were reversed, e.g.
// (InvertFlags (CMP x y)) -> (CMP y x)
func arm64Invert(op Op) Op {}

// evaluate an ARM64 op against a flags value
// that is potentially constant; return 1 for true,
// -1 for false, and 0 for not constant.
func ccARM64Eval(op Op, flags *Value) int {}

// logRule logs the use of the rule s. This will only be enabled if
// rewrite rules were generated with the -log option, see _gen/rulegen.go.
func logRule(s string) {}

var ruleFile

func isConstZero(v *Value) bool {}

// reciprocalExact64 reports whether 1/c is exactly representable.
func reciprocalExact64(c float64) bool {}

// reciprocalExact32 reports whether 1/c is exactly representable.
func reciprocalExact32(c float32) bool {}

// check if an immediate can be directly encoded into an ARM instruction.
func isARMImmRot(v uint32) bool {}

// overlap reports whether the ranges given by the given offset and
// size pairs overlap.
func overlap(offset1, size1, offset2, size2 int64) bool {}
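
// A sketch of the interval test: the ranges [offset1,offset1+size1)
// and [offset2,offset2+size2) overlap iff either start lies inside the
// other range.
//
//	func overlap(offset1, size1, offset2, size2 int64) bool {
//		if offset1 >= offset2 && offset2+size2 > offset1 {
//			return true
//		}
//		if offset2 >= offset1 && offset1+size1 > offset2 {
//			return true
//		}
//		return false
//	}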

// check if a value zeroes out the upper 32 bits of a 64-bit register.
// depth limits the recursion depth. In AMD64.rules 3 is used as the limit,
// because it catches the same number of cases as 4.
func zeroUpper32Bits(x *Value, depth int) bool {}

// zeroUpper48Bits is similar to zeroUpper32Bits, but for upper 48 bits.
func zeroUpper48Bits(x *Value, depth int) bool {}

// zeroUpper56Bits is similar to zeroUpper32Bits, but for upper 56 bits.
func zeroUpper56Bits(x *Value, depth int) bool {}

func isInlinableMemclr(c *Config, sz int64) bool {}

// isInlinableMemmove reports whether the given arch performs a Move of the given size
// faster than memmove. It will only return true if replacing the memmove with a Move is
// safe, either because Move will do all of its loads before any of its stores, or
// because the arguments are known to be disjoint.
// This is used as a check for replacing memmove with Move ops.
func isInlinableMemmove(dst, src *Value, sz int64, c *Config) bool {}

func IsInlinableMemmove(dst, src *Value, sz int64, c *Config) bool {}

// logLargeCopy logs the occurrence of a large copy.
// The best place to do this is in the rewrite rules where the size of the move is easy to find.
// "Large" is arbitrarily chosen to be 128 bytes; this may change.
func logLargeCopy(v *Value, s int64) bool {}

func LogLargeCopy(funcName string, pos src.XPos, s int64) {}

// hasSmallRotate reports whether the architecture has rotate instructions
// for sizes < 32-bit.  This is used to decide whether to promote some rotations.
func hasSmallRotate(c *Config) bool {}

func supportsPPC64PCRel() bool {}

func newPPC64ShiftAuxInt(sh, mb, me, sz int64) int32 {}

func GetPPC64Shiftsh(auxint int64) int64 {}

func GetPPC64Shiftmb(auxint int64) int64 {}

func GetPPC64Shiftme(auxint int64) int64 {}

// Test if this value can be encoded as a mask for an rlwinm-like
// operation. Masks can also extend from the msb and wrap around
// to the lsb. That is, the valid masks are 32 bit strings
// of the form: 0..01..10..0 or 1..10..01..1 or 1...1
func isPPC64WordRotateMask(v64 int64) bool {}

// Compress mask and shift into a single value of the form
// me | mb<<8 | rotate<<16 | nbits<<24, where me and mb can
// be used to regenerate the input mask.
func encodePPC64RotateMask(rotate, mask, nbits int64) int64 {}

// Merge (RLDICL [encoded] (SRDconst [s] x)) into (RLDICL [new_encoded] x)
// SRDconst on PPC64 is an extended mnemonic of RLDICL. If the input to an
// RLDICL is an SRDconst, and the RLDICL does not rotate its value, the two
// operations can be combined. This function assumes the two opcodes can
// be merged, and returns an encoded rotate+mask value of the combined RLDICL.
func mergePPC64RLDICLandSRDconst(encoded, s int64) int64 {}

// DecodePPC64RotateMask is the inverse operation of encodePPC64RotateMask.  The values returned as
// mb and me satisfy the POWER ISA definition of MASK(x,y) where MASK(mb,me) = mask.
func DecodePPC64RotateMask(sauxint int64) (rotate, mb, me int64, mask uint64) {}
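
// Given the layout me | mb<<8 | rotate<<16 | nbits<<24 described above,
// the individual fields unpack with shifts and masks. A simplified
// sketch (unpackPPC64RotateMask is a hypothetical helper; the real
// decoder additionally rebuilds mask from mb and me per the POWER ISA
// MASK definition):
//
//	func unpackPPC64RotateMask(encoded int64) (rotate, mb, me, nbits int64) {
//		rotate = (encoded >> 16) & 0xFF
//		mb = (encoded >> 8) & 0xFF
//		me = encoded & 0xFF
//		nbits = (encoded >> 24) & 0xFF
//		return
//	}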

// This verifies that the mask is a set of
// consecutive bits including the least
// significant bit.
func isPPC64ValidShiftMask(v int64) bool {}

func getPPC64ShiftMaskLength(v int64) int64 {}

// Decompose a shift right into an equivalent rotate/mask,
// and return mask & m.
func mergePPC64RShiftMask(m, s, nbits int64) int64 {}

// Combine (ANDconst [m] (SRWconst [s])) into (RLWINM [y]) or return 0
func mergePPC64AndSrwi(m, s int64) int64 {}

// Combine (ANDconst [m] (SRDconst [s])) into (RLWINM [y]) or return 0
func mergePPC64AndSrdi(m, s int64) int64 {}

// Combine (ANDconst [m] (SLDconst [s])) into (RLWINM [y]) or return 0
func mergePPC64AndSldi(m, s int64) int64 {}

// Test if a word shift right feeding into a CLRLSLDI can be merged into RLWINM.
// Return the encoded RLWINM constant, or 0 if they cannot be merged.
func mergePPC64ClrlsldiSrw(sld, srw int64) int64 {}

// Test if a doubleword shift right feeding into a CLRLSLDI can be merged into RLWINM.
// Return the encoded RLWINM constant, or 0 if they cannot be merged.
func mergePPC64ClrlsldiSrd(sld, srd int64) int64 {}

// Test if a RLWINM feeding into a CLRLSLDI can be merged into RLWINM.  Return
// the encoded RLWINM constant, or 0 if they cannot be merged.
func mergePPC64ClrlsldiRlwinm(sld int32, rlw int64) int64 {}

// Test if RLWINM feeding into an ANDconst can be merged. Return the encoded RLWINM constant,
// or 0 if they cannot be merged.
func mergePPC64AndRlwinm(mask uint32, rlw int64) int64 {}

// Test if RLWINM opcode rlw clears the upper 32 bits of the
// result. Return rlw if it does, 0 otherwise.
func mergePPC64MovwzregRlwinm(rlw int64) int64 {}

// Test if AND feeding into an ANDconst can be merged. Return the encoded RLWINM constant,
// or 0 if they cannot be merged.
func mergePPC64RlwinmAnd(rlw int64, mask uint32) int64 {}

// Test if RLWINM feeding into SLDconst can be merged. Return the encoded RLWINM constant,
// or 0 if they cannot be merged.
func mergePPC64SldiRlwinm(sldi, rlw int64) int64 {}

// Compute the encoded RLWINM constant from combining (SLDconst [sld] (SRWconst [srw] x)),
// or return 0 if they cannot be combined.
func mergePPC64SldiSrw(sld, srw int64) int64 {}

// Convert a PPC64 opcode from the Op to OpCC form. This converts (op x y)
// to (Select0 (opCC x y)) without having to explicitly fixup every user
// of op.
//
// E.g. consider the case:
// a = (ADD x y)
// b = (CMPconst [0] a)
// c = (OR a z)
//
// A rule like (CMPconst [0] (ADD x y)) => (CMPconst [0] (Select0 (ADDCC x y)))
// would produce:
// a  = (ADD x y)
// a' = (ADDCC x y)
// a” = (Select0 a')
// b  = (CMPconst [0] a”)
// c  = (OR a z)
//
// which makes it impossible to rewrite the second user. Instead the result
// of this conversion is:
// a' = (ADDCC x y)
// a  = (Select0 a')
// b  = (CMPconst [0] a)
// c  = (OR a z)
//
// Which makes it trivial to rewrite b using a lowering rule.
func convertPPC64OpToOpCC(op *Value) *Value {}

// Try converting an RLDICL to ANDCC. If successful, return the mask; otherwise return 0.
func convertPPC64RldiclAndccconst(sauxint int64) int64 {}

// Convenience function to rotate a 32 bit constant value by another constant.
func rotateLeft32(v, rotate int64) int64 {}

func rotateRight64(v, rotate int64) int64 {}
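
// Both helpers can lean on math/bits; a rotate right is a rotate left
// by the negated count. A plausible sketch, assuming math/bits is
// imported:
//
//	func rotateLeft32(v, rotate int64) int64 {
//		return int64(bits.RotateLeft32(uint32(v), int(rotate)))
//	}
//
//	func rotateRight64(v, rotate int64) int64 {
//		return int64(bits.RotateLeft64(uint64(v), int(-rotate)))
//	}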

// encodes the lsb and width for arm(64) bitfield ops into the expected auxInt format.
func armBFAuxInt(lsb, width int64) arm64BitField {}

// returns the lsb part of the auxInt field of arm64 bitfield ops.
func (bfc arm64BitField) lsb() int64 {}

// returns the width part of the auxInt field of arm64 bitfield ops.
func (bfc arm64BitField) width() int64 {}
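
// One plausible packing for the bitfield auxInt: width in the low 8
// bits, lsb in the next 8 (a sketch, with the panics guarding the
// encodable range):
//
//	func armBFAuxInt(lsb, width int64) arm64BitField {
//		if lsb < 0 || lsb > 63 {
//			panic("ARM(64) bit field lsb constant out of range")
//		}
//		if width < 1 || lsb+width > 64 {
//			panic("ARM(64) bit field width constant out of range")
//		}
//		return arm64BitField(width | lsb<<8)
//	}
//
//	func (bfc arm64BitField) lsb() int64   { return int64(uint64(bfc) >> 8) }
//	func (bfc arm64BitField) width() int64 { return int64(bfc) & 0xff }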

// checks if mask >> rshift applied at lsb is a valid arm64 bitfield op mask.
func isARM64BFMask(lsb, mask, rshift int64) bool {}

// returns the bitfield width of mask >> rshift for arm64 bitfield ops.
func arm64BFWidth(mask, rshift int64) int64 {}

// registerizable reports whether t is a primitive type that fits in
// a register. It assumes float64 values will always fit into registers
// even if that isn't strictly true.
func registerizable(b *Block, typ *types.Type) bool {}

// needRaceCleanup reports whether this call to racefuncenter/exit isn't needed.
func needRaceCleanup(sym *AuxCall, v *Value) bool {}

// symIsRO reports whether sym is a read-only global.
func symIsRO(sym interface{}) bool {}

// symIsROZero reports whether sym is a read-only global whose data contains all zeros.
func symIsROZero(sym Sym) bool {}

// isFixed32 returns true if the int32 at offset off in symbol sym
// is known and constant.
func isFixed32(c *Config, sym Sym, off int64) bool {}

// isFixed returns true if the range [off,off+size] of the symbol sym
// is known and constant.
func isFixed(c *Config, sym Sym, off, size int64) bool {}

func fixed32(c *Config, sym Sym, off int64) int32 {}

// isFixedSym returns true if the content of sym at the given offset
// is known and is the constant address of another symbol.
func isFixedSym(sym Sym, off int64) bool {}

func fixedSym(f *Func, sym Sym, off int64) Sym {}

// read8 reads one byte from the read-only global sym at offset off.
func read8(sym interface{}, off int64) uint8 {}

// read16 reads two bytes from the read-only global sym at offset off.
func read16(sym interface{}, off int64, byteorder binary.ByteOrder) uint16 {}

// read32 reads four bytes from the read-only global sym at offset off.
func read32(sym interface{}, off int64, byteorder binary.ByteOrder) uint32 {}

// read64 reads eight bytes from the read-only global sym at offset off.
func read64(sym interface{}, off int64, byteorder binary.ByteOrder) uint64 {}

// sequentialAddresses reports true if it can prove that x + n == y.
func sequentialAddresses(x, y *Value, n int64) bool {}

type flagConstant

// N reports whether the result of an operation is negative (high bit set).
func (fc flagConstant) N() bool {}

// Z reports whether the result of an operation is 0.
func (fc flagConstant) Z() bool {}

// C reports whether an unsigned add overflowed (carry), or an
// unsigned subtract did not underflow (borrow).
func (fc flagConstant) C() bool {}

// V reports whether a signed operation overflowed or underflowed.
func (fc flagConstant) V() bool {}
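
// A plausible layout packs one bit per flag into the flagConstant, and
// the predicates below combine them in the usual way (a sketch, assuming
// this bit assignment):
//
//	func (fc flagConstant) N() bool { return fc&1 != 0 }
//	func (fc flagConstant) Z() bool { return fc&2 != 0 }
//	func (fc flagConstant) C() bool { return fc&4 != 0 }
//	func (fc flagConstant) V() bool { return fc&8 != 0 }
//
//	func (fc flagConstant) eq() bool  { return fc.Z() }
//	func (fc flagConstant) lt() bool  { return fc.N() != fc.V() }
//	func (fc flagConstant) ult() bool { return !fc.C() } // C means "no borrow"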

func (fc flagConstant) eq() bool {}

func (fc flagConstant) ne() bool {}

func (fc flagConstant) lt() bool {}

func (fc flagConstant) le() bool {}

func (fc flagConstant) gt() bool {}

func (fc flagConstant) ge() bool {}

func (fc flagConstant) ult() bool {}

func (fc flagConstant) ule() bool {}

func (fc flagConstant) ugt() bool {}

func (fc flagConstant) uge() bool {}

func (fc flagConstant) ltNoov() bool {}

func (fc flagConstant) leNoov() bool {}

func (fc flagConstant) gtNoov() bool {}

func (fc flagConstant) geNoov() bool {}

func (fc flagConstant) String() string {}

type flagConstantBuilder

func (fcs flagConstantBuilder) encode() flagConstant {}

// addFlags64 returns the flags that would be set from computing x+y.
func addFlags64(x, y int64) flagConstant {}

// subFlags64 returns the flags that would be set from computing x-y.
func subFlags64(x, y int64) flagConstant {}

// addFlags32 returns the flags that would be set from computing x+y.
func addFlags32(x, y int32) flagConstant {}

// subFlags32 returns the flags that would be set from computing x-y.
func subFlags32(x, y int32) flagConstant {}
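
// A sketch of subFlags64 via the builder, assuming flagConstantBuilder
// has boolean fields N, Z, C, and V (an assumption about its layout):
//
//	func subFlags64(x, y int64) flagConstant {
//		var fcb flagConstantBuilder
//		fcb.Z = x == y
//		fcb.N = x-y < 0
//		fcb.C = uint64(y) <= uint64(x) // ARM convention: carry = no borrow
//		fcb.V = x >= 0 && y < 0 && x-y < 0 || x < 0 && y >= 0 && x-y >= 0
//		return fcb.encode()
//	}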

// logicFlags64 returns flags set to the sign/zeroness of x.
// C and V are set to false.
func logicFlags64(x int64) flagConstant {}

// logicFlags32 returns flags set to the sign/zeroness of x.
// C and V are set to false.
func logicFlags32(x int32) flagConstant {}

func makeJumpTableSym(b *Block) *obj.LSym {}

// canRotate reports whether the architecture supports
// rotates of integer registers with the given number of bits.
func canRotate(c *Config, bits int64) bool {}

// isARM64bitcon reports whether a constant can be encoded into a logical instruction.
func isARM64bitcon(x uint64) bool {}

// sequenceOfOnes tests whether a constant is a sequence of ones in binary, with leading and trailing zeros.
func sequenceOfOnes(x uint64) bool {}
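
// A minimal sketch: adding the lowest set bit to x collapses a single
// run of ones into one carry bit, so x is a sequence of ones iff the
// result is a power of 2 (or zero).
//
//	func sequenceOfOnes(x uint64) bool {
//		y := x & -x // lowest set bit of x
//		y += x
//		return (y-1)&y == 0
//	}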

// isARM64addcon reports whether x can be encoded as the immediate value in an ADD or SUB instruction.
func isARM64addcon(v int64) bool {}
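
// ADD/SUB immediates on ARM64 are a 12-bit unsigned value, optionally
// shifted left by 12 bits. A plausible sketch:
//
//	func isARM64addcon(v int64) bool {
//		if v < 0 {
//			return false
//		}
//		if (v & 0xFFF) == 0 {
//			v >>= 12
//		}
//		return v <= 0xFFF
//	}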

// setPos sets the position of v to pos, then returns true.
// Useful for setting the result of a rewrite's position to
// something other than the default.
func setPos(v *Value, pos src.XPos) bool {}

// isNonNegative reports whether v is known to be greater or equal to zero.
// Note that this is pretty simplistic. The prove pass generates more detailed
// nonnegative information about values.
func isNonNegative(v *Value) bool {}

func rewriteStructLoad(v *Value) *Value {}

func rewriteStructStore(v *Value) *Value {}