llvm/lib/Target/X86/X86ISelLowering.h

//===-- X86ISelLowering.h - X86 DAG Lowering Interface ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_X86_X86ISELLOWERING_H
#define LLVM_LIB_TARGET_X86_X86ISELLOWERING_H

#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/TargetLowering.h"

namespace llvm {
  class X86Subtarget;
  class X86TargetMachine;

  namespace X86ISD {
    // X86 Specific DAG Nodes
    enum NodeType : unsigned {};
  } // end namespace X86ISD

  namespace X86 {
    /// The current rounding mode is represented in bits 11:10 of the x87
    /// FPU control word (FPCW). These values are the same as the
    /// corresponding rounding-mode constants used in glibc.
    enum RoundingMode {};
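    // For reference, glibc's <fenv.h> on x86 already encodes the rounding
    // modes shifted into bits 11:10: FE_TONEAREST == 0, FE_DOWNWARD == 0x400,
    // FE_UPWARD == 0x800, FE_TOWARDZERO == 0xc00.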
  } // end namespace X86

  /// Define some predicates that are used for node matching.
  namespace X86 {
    /// Returns true if Elt is a constant zero or floating-point constant +0.0.
    bool isZeroNode(SDValue Elt);

    /// Returns true if the given offset can fit into the displacement field
    /// of the instruction.
    bool isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
                                      bool hasSymbolicDisplacement);
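
    // Illustrative expectation (a sketch, not taken from the implementation):
    // the x86-64 displacement field is a signed 32-bit immediate, so e.g.
    //   X86::isOffsetSuitableForCodeModel(0x7fffffff, CodeModel::Small,
    //                                     /*hasSymbolicDisplacement=*/false)
    // can plausibly succeed, while an offset of (1LL << 32) cannot be encoded
    // and should be rejected under any code model.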

    /// Determines whether the callee is required to pop its
    /// own arguments. Callee pop is necessary to support tail calls.
    bool isCalleePop(CallingConv::ID CallingConv,
                     bool is64Bit, bool IsVarArg, bool GuaranteeTCO);
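
    // For example, on 32-bit x86 the stdcall and fastcall conventions are
    // callee-pop (the callee returns with `ret $imm`), while the C convention
    // is caller-pop unless GuaranteeTCO forces callee cleanup for tail calls.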

    /// If Op is a constant whose elements are all the same constant or
    /// undefined, return true and set \p SplatVal to that constant value.
    /// If we have undef bits that don't cover an entire element, we treat these
    /// as zero if AllowPartialUndefs is set, else we fail and return false.
    bool isConstantSplat(SDValue Op, APInt &SplatVal,
                         bool AllowPartialUndefs = true);
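
    // Illustrative caller (hypothetical code, not part of this header):
    //   APInt SplatVal;
    //   if (X86::isConstantSplat(Op.getOperand(1), SplatVal) &&
    //       SplatVal.isPowerOf2())
    //     ; // e.g. rewrite a multiply by a splatted power of two as a shift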

    /// Check if Op is a load operation that could be folded into some other x86
    /// instruction as a memory operand. Example: vpaddd (%rdi), %xmm0, %xmm0.
    bool mayFoldLoad(SDValue Op, const X86Subtarget &Subtarget,
                     bool AssumeSingleUse = false);

    /// Check if Op is a load operation that could be folded into a vector splat
    /// instruction as a memory operand. Example: vbroadcastss 16(%rdi), %xmm2.
    bool mayFoldLoadIntoBroadcastFromMem(SDValue Op, MVT EltVT,
                                         const X86Subtarget &Subtarget,
                                         bool AssumeSingleUse = false);

    /// Check if Op is a value that could be used to fold a store into some
    /// other x86 instruction as a memory operand.
    /// Example: pextrb $0, %xmm0, (%rdi).
    bool mayFoldIntoStore(SDValue Op);

    /// Check if Op is an operation that could be folded into a
    /// zero-extending x86 instruction.
    bool mayFoldIntoZeroExtend(SDValue Op);

    /// True if the target supports the extended frame for async Swift
    /// functions.
    bool isExtendedSwiftAsyncFrameSupported(const X86Subtarget &Subtarget,
                                            const MachineFunction &MF);
  } // end namespace X86

  //===--------------------------------------------------------------------===//
  //  X86 Implementation of the TargetLowering interface
  class X86TargetLowering final : public TargetLowering {};

  namespace X86 {
    FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                             const TargetLibraryInfo *libInfo);
  } // end namespace X86
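
  // Illustrative use (a sketch; the exact call site is an assumption): the
  // target hook X86TargetLowering::createFastISel would simply forward here:
  //   FastISel *FI = X86::createFastISel(FuncInfo, LibInfo);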

  // X86-specific Gather/Scatter nodes.
  // The class has the same order of operands as MaskedGatherScatterSDNode for
  // convenience.
  class X86MaskedGatherScatterSDNode : public MemIntrinsicSDNode {};

  class X86MaskedGatherSDNode : public X86MaskedGatherScatterSDNode {};

  class X86MaskedScatterSDNode : public X86MaskedGatherScatterSDNode {};

  /// Generate unpacklo/unpackhi shuffle mask.
  void createUnpackShuffleMask(EVT VT, SmallVectorImpl<int> &Mask, bool Lo,
                               bool Unary);
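
  // For illustration (masks expected from the conventional unpack semantics;
  // the values are an assumption, not taken from the implementation), for
  // v4i32:
  //   Lo, binary: <0, 4, 1, 5>    Hi, binary: <2, 6, 3, 7>
  //   Lo, unary:  <0, 0, 1, 1>    Hi, unary:  <2, 2, 3, 3>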

  /// Similar to unpacklo/unpackhi, but without the 128-bit lane limitation
  /// imposed by AVX and specific to the unary pattern. Example:
  /// v8iX Lo --> <0, 0, 1, 1, 2, 2, 3, 3>
  /// v8iX Hi --> <4, 4, 5, 5, 6, 6, 7, 7>
  void createSplat2ShuffleMask(MVT VT, SmallVectorImpl<int> &Mask, bool Lo);

} // end namespace llvm

#endif // LLVM_LIB_TARGET_X86_X86ISELLOWERING_H