clang/test/CodeGen/LoongArch/inline-asm-operand-modifiers.c

// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple loongarch32 -O2 -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -triple loongarch64 -O2 -emit-llvm %s -o - | FileCheck %s

/// Test LoongArch-specific operand modifiers (i.e., operand codes).
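///
/// Note: at this stage the test only verifies Clang's lowering: %z2 in the
/// source becomes ${2:z} in the IR asm string. The substitution itself
/// happens later, in the LoongArch asm printer, which (like the analogous
/// RISC-V 'z' modifier) is expected to print an immediate zero operand as
/// the register $zero and to leave nonzero immediates and register operands
/// unchanged.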

// CHECK-LABEL: @test_z_zero(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call i32 asm sideeffect "add.w $0, $1, ${2:z}", "=r,r,ri"(i32 [[A:%.*]], i32 0) #[[ATTR1:[0-9]+]], !srcloc !2
// CHECK-NEXT:    ret void
//
void test_z_zero(int a) {
  int tmp;
  asm volatile ("add.w %0, %1, %z2" : "=r" (tmp) : "r" (a), "ri" (0));
}

// CHECK-LABEL: @test_z_nonzero(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call i32 asm sideeffect "add.w $0, $1, ${2:z}", "=r,r,ri"(i32 [[A:%.*]], i32 1) #[[ATTR1]], !srcloc !3
// CHECK-NEXT:    ret void
//
void test_z_nonzero(int a) {
  int tmp;
  asm volatile ("add.w %0, %1, %z2" : "=r" (tmp) : "r" (a), "ri" (1));
}
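
/// A minimal sketch, not part of the upstream test: the 'z' code should be a
/// no-op for a register operand, so dropping the 'i' alternative from the
/// constraint still compiles and ${2:z} is expected to print the register
/// itself. The name test_z_reg is illustrative; CHECK lines are omitted here
/// and would be regenerated with utils/update_cc_test_checks.py.
void test_z_reg(int a, int b) {
  int tmp;
  asm volatile ("add.w %0, %1, %z2" : "=r" (tmp) : "r" (a), "r" (b));
}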