llvm/test/CodeGen/AArch64/rand.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64 -mattr=+v8.5a,+rand -aarch64-enable-sink-fold=true %s -o - | FileCheck %s
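;
; Check that calls to @llvm.aarch64.rndr and @llvm.aarch64.rndrrs (FEAT_RNG,
; enabled by +rand) are selected to MRS reads of the RNDR and RNDRRS system
; registers, with the random value stored through the pointer argument.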

define i32 @rndr(ptr %__addr) {
; CHECK-LABEL: rndr:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mrs x9, RNDR
; CHECK-NEXT:    mov x8, x0
; CHECK-NEXT:    cset w10, eq
; CHECK-NEXT:    str x9, [x8]
; CHECK-NEXT:    and w0, w10, #0x1
; CHECK-NEXT:    ret
  %1 = tail call { i64, i1 } @llvm.aarch64.rndr()
  %2 = extractvalue { i64, i1 } %1, 0
  %3 = extractvalue { i64, i1 } %1, 1
  store i64 %2, ptr %__addr, align 8
  %4 = zext i1 %3 to i32
  ret i32 %4
}
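; The @llvm.aarch64.rndrrs variant reads RNDRRS, the reseeding form of RNDR;
; the generated code differs only in the system register being read.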
define i32 @rndrrs(ptr %__addr) {
; CHECK-LABEL: rndrrs:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mrs x9, RNDRRS
; CHECK-NEXT:    mov x8, x0
; CHECK-NEXT:    cset w10, eq
; CHECK-NEXT:    str x9, [x8]
; CHECK-NEXT:    and w0, w10, #0x1
; CHECK-NEXT:    ret
  %1 = tail call { i64, i1 } @llvm.aarch64.rndrrs()
  %2 = extractvalue { i64, i1 } %1, 0
  %3 = extractvalue { i64, i1 } %1, 1
  store i64 %2, ptr %__addr, align 8
  %4 = zext i1 %3 to i32
  ret i32 %4
}

declare { i64, i1 } @llvm.aarch64.rndr()
declare { i64, i1 } @llvm.aarch64.rndrrs()
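
; Both intrinsics return a { i64, i1 } pair: a 64-bit random value and a
; status bit. The status bit is recovered from the NZCV flags that the MRS
; read sets, hence the cset in the checked output.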