; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --filter-out "\b(sp)\b" --filter "^\s*(ld[^r]|st[^r]|swp|cas|bl|add|and|eor|orn|orr|sub|mvn|sxt|cmp|ccmp|csel|dmb)"
; The base test file was generated by ./llvm/test/CodeGen/AArch64/Atomics/generate-tests.py
; RUN: llc %s -o - -verify-machineinstrs -mtriple=aarch64 -mattr=+outline-atomics -O0 | FileCheck %s --check-prefixes=CHECK,-O0
; RUN: llc %s -o - -verify-machineinstrs -mtriple=aarch64 -mattr=+outline-atomics -O1 | FileCheck %s --check-prefixes=CHECK,-O1
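; This file checks lowering of atomicrmw operations (xchg, add, sub, and, ...)
; on i8 through i128, at natural alignment and at align 1, for every atomic
; ordering. With +outline-atomics, aligned operations become __aarch64_*
; outline-atomic libcalls (aligned i128 uses __aarch64_cas16 at -O0 and an
; LL/SC loop at -O1), while unaligned multi-byte accesses fall back to the
; generic __atomic_* libcalls.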
define dso_local i8 @atomicrmw_xchg_i8_aligned_monotonic(ptr %ptr, i8 %value) {
; CHECK-LABEL: atomicrmw_xchg_i8_aligned_monotonic:
; CHECK: bl __aarch64_swp1_relax
%r = atomicrmw xchg ptr %ptr, i8 %value monotonic, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_xchg_i8_aligned_acquire(ptr %ptr, i8 %value) {
; CHECK-LABEL: atomicrmw_xchg_i8_aligned_acquire:
; CHECK: bl __aarch64_swp1_acq
%r = atomicrmw xchg ptr %ptr, i8 %value acquire, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_xchg_i8_aligned_release(ptr %ptr, i8 %value) {
; CHECK-LABEL: atomicrmw_xchg_i8_aligned_release:
; CHECK: bl __aarch64_swp1_rel
%r = atomicrmw xchg ptr %ptr, i8 %value release, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_xchg_i8_aligned_acq_rel(ptr %ptr, i8 %value) {
; CHECK-LABEL: atomicrmw_xchg_i8_aligned_acq_rel:
; CHECK: bl __aarch64_swp1_acq_rel
%r = atomicrmw xchg ptr %ptr, i8 %value acq_rel, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_xchg_i8_aligned_seq_cst(ptr %ptr, i8 %value) {
; CHECK-LABEL: atomicrmw_xchg_i8_aligned_seq_cst:
; CHECK: bl __aarch64_swp1_acq_rel
%r = atomicrmw xchg ptr %ptr, i8 %value seq_cst, align 1
ret i8 %r
}
define dso_local i16 @atomicrmw_xchg_i16_aligned_monotonic(ptr %ptr, i16 %value) {
; CHECK-LABEL: atomicrmw_xchg_i16_aligned_monotonic:
; CHECK: bl __aarch64_swp2_relax
%r = atomicrmw xchg ptr %ptr, i16 %value monotonic, align 2
ret i16 %r
}
define dso_local i16 @atomicrmw_xchg_i16_aligned_acquire(ptr %ptr, i16 %value) {
; CHECK-LABEL: atomicrmw_xchg_i16_aligned_acquire:
; CHECK: bl __aarch64_swp2_acq
%r = atomicrmw xchg ptr %ptr, i16 %value acquire, align 2
ret i16 %r
}
define dso_local i16 @atomicrmw_xchg_i16_aligned_release(ptr %ptr, i16 %value) {
; CHECK-LABEL: atomicrmw_xchg_i16_aligned_release:
; CHECK: bl __aarch64_swp2_rel
%r = atomicrmw xchg ptr %ptr, i16 %value release, align 2
ret i16 %r
}
define dso_local i16 @atomicrmw_xchg_i16_aligned_acq_rel(ptr %ptr, i16 %value) {
; CHECK-LABEL: atomicrmw_xchg_i16_aligned_acq_rel:
; CHECK: bl __aarch64_swp2_acq_rel
%r = atomicrmw xchg ptr %ptr, i16 %value acq_rel, align 2
ret i16 %r
}
define dso_local i16 @atomicrmw_xchg_i16_aligned_seq_cst(ptr %ptr, i16 %value) {
; CHECK-LABEL: atomicrmw_xchg_i16_aligned_seq_cst:
; CHECK: bl __aarch64_swp2_acq_rel
%r = atomicrmw xchg ptr %ptr, i16 %value seq_cst, align 2
ret i16 %r
}
define dso_local i32 @atomicrmw_xchg_i32_aligned_monotonic(ptr %ptr, i32 %value) {
; CHECK-LABEL: atomicrmw_xchg_i32_aligned_monotonic:
; CHECK: bl __aarch64_swp4_relax
%r = atomicrmw xchg ptr %ptr, i32 %value monotonic, align 4
ret i32 %r
}
define dso_local i32 @atomicrmw_xchg_i32_aligned_acquire(ptr %ptr, i32 %value) {
; CHECK-LABEL: atomicrmw_xchg_i32_aligned_acquire:
; CHECK: bl __aarch64_swp4_acq
%r = atomicrmw xchg ptr %ptr, i32 %value acquire, align 4
ret i32 %r
}
define dso_local i32 @atomicrmw_xchg_i32_aligned_release(ptr %ptr, i32 %value) {
; CHECK-LABEL: atomicrmw_xchg_i32_aligned_release:
; CHECK: bl __aarch64_swp4_rel
%r = atomicrmw xchg ptr %ptr, i32 %value release, align 4
ret i32 %r
}
define dso_local i32 @atomicrmw_xchg_i32_aligned_acq_rel(ptr %ptr, i32 %value) {
; CHECK-LABEL: atomicrmw_xchg_i32_aligned_acq_rel:
; CHECK: bl __aarch64_swp4_acq_rel
%r = atomicrmw xchg ptr %ptr, i32 %value acq_rel, align 4
ret i32 %r
}
define dso_local i32 @atomicrmw_xchg_i32_aligned_seq_cst(ptr %ptr, i32 %value) {
; CHECK-LABEL: atomicrmw_xchg_i32_aligned_seq_cst:
; CHECK: bl __aarch64_swp4_acq_rel
%r = atomicrmw xchg ptr %ptr, i32 %value seq_cst, align 4
ret i32 %r
}
define dso_local i64 @atomicrmw_xchg_i64_aligned_monotonic(ptr %ptr, i64 %value) {
; CHECK-LABEL: atomicrmw_xchg_i64_aligned_monotonic:
; CHECK: bl __aarch64_swp8_relax
%r = atomicrmw xchg ptr %ptr, i64 %value monotonic, align 8
ret i64 %r
}
define dso_local i64 @atomicrmw_xchg_i64_aligned_acquire(ptr %ptr, i64 %value) {
; CHECK-LABEL: atomicrmw_xchg_i64_aligned_acquire:
; CHECK: bl __aarch64_swp8_acq
%r = atomicrmw xchg ptr %ptr, i64 %value acquire, align 8
ret i64 %r
}
define dso_local i64 @atomicrmw_xchg_i64_aligned_release(ptr %ptr, i64 %value) {
; CHECK-LABEL: atomicrmw_xchg_i64_aligned_release:
; CHECK: bl __aarch64_swp8_rel
%r = atomicrmw xchg ptr %ptr, i64 %value release, align 8
ret i64 %r
}
define dso_local i64 @atomicrmw_xchg_i64_aligned_acq_rel(ptr %ptr, i64 %value) {
; CHECK-LABEL: atomicrmw_xchg_i64_aligned_acq_rel:
; CHECK: bl __aarch64_swp8_acq_rel
%r = atomicrmw xchg ptr %ptr, i64 %value acq_rel, align 8
ret i64 %r
}
define dso_local i64 @atomicrmw_xchg_i64_aligned_seq_cst(ptr %ptr, i64 %value) {
; CHECK-LABEL: atomicrmw_xchg_i64_aligned_seq_cst:
; CHECK: bl __aarch64_swp8_acq_rel
%r = atomicrmw xchg ptr %ptr, i64 %value seq_cst, align 8
ret i64 %r
}
define dso_local i128 @atomicrmw_xchg_i128_aligned_monotonic(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_xchg_i128_aligned_monotonic:
; -O0: bl __aarch64_cas16_relax
; -O0: eor x8, x0, x8
; -O0: eor x9, x1, x9
; -O0: orr x8, x8, x9
; -O0: subs x8, x8, #0
;
; -O1-LABEL: atomicrmw_xchg_i128_aligned_monotonic:
; -O1: ldxp x8, x1, [x0]
; -O1: stxp w9, x2, x3, [x0]
%r = atomicrmw xchg ptr %ptr, i128 %value monotonic, align 16
ret i128 %r
}
define dso_local i128 @atomicrmw_xchg_i128_aligned_acquire(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_xchg_i128_aligned_acquire:
; -O0: bl __aarch64_cas16_acq
; -O0: eor x8, x0, x8
; -O0: eor x9, x1, x9
; -O0: orr x8, x8, x9
; -O0: subs x8, x8, #0
;
; -O1-LABEL: atomicrmw_xchg_i128_aligned_acquire:
; -O1: ldaxp x8, x1, [x0]
; -O1: stxp w9, x2, x3, [x0]
%r = atomicrmw xchg ptr %ptr, i128 %value acquire, align 16
ret i128 %r
}
define dso_local i128 @atomicrmw_xchg_i128_aligned_release(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_xchg_i128_aligned_release:
; -O0: bl __aarch64_cas16_rel
; -O0: eor x8, x0, x8
; -O0: eor x9, x1, x9
; -O0: orr x8, x8, x9
; -O0: subs x8, x8, #0
;
; -O1-LABEL: atomicrmw_xchg_i128_aligned_release:
; -O1: ldxp x8, x1, [x0]
; -O1: stlxp w9, x2, x3, [x0]
%r = atomicrmw xchg ptr %ptr, i128 %value release, align 16
ret i128 %r
}
define dso_local i128 @atomicrmw_xchg_i128_aligned_acq_rel(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_xchg_i128_aligned_acq_rel:
; -O0: bl __aarch64_cas16_acq_rel
; -O0: eor x8, x0, x8
; -O0: eor x9, x1, x9
; -O0: orr x8, x8, x9
; -O0: subs x8, x8, #0
;
; -O1-LABEL: atomicrmw_xchg_i128_aligned_acq_rel:
; -O1: ldaxp x8, x1, [x0]
; -O1: stlxp w9, x2, x3, [x0]
%r = atomicrmw xchg ptr %ptr, i128 %value acq_rel, align 16
ret i128 %r
}
define dso_local i128 @atomicrmw_xchg_i128_aligned_seq_cst(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_xchg_i128_aligned_seq_cst:
; -O0: bl __aarch64_cas16_acq_rel
; -O0: eor x8, x0, x8
; -O0: eor x9, x1, x9
; -O0: orr x8, x8, x9
; -O0: subs x8, x8, #0
;
; -O1-LABEL: atomicrmw_xchg_i128_aligned_seq_cst:
; -O1: ldaxp x8, x1, [x0]
; -O1: stlxp w9, x2, x3, [x0]
%r = atomicrmw xchg ptr %ptr, i128 %value seq_cst, align 16
ret i128 %r
}
define dso_local i8 @atomicrmw_xchg_i8_unaligned_monotonic(ptr %ptr, i8 %value) {
; CHECK-LABEL: atomicrmw_xchg_i8_unaligned_monotonic:
; CHECK: bl __aarch64_swp1_relax
%r = atomicrmw xchg ptr %ptr, i8 %value monotonic, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_xchg_i8_unaligned_acquire(ptr %ptr, i8 %value) {
; CHECK-LABEL: atomicrmw_xchg_i8_unaligned_acquire:
; CHECK: bl __aarch64_swp1_acq
%r = atomicrmw xchg ptr %ptr, i8 %value acquire, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_xchg_i8_unaligned_release(ptr %ptr, i8 %value) {
; CHECK-LABEL: atomicrmw_xchg_i8_unaligned_release:
; CHECK: bl __aarch64_swp1_rel
%r = atomicrmw xchg ptr %ptr, i8 %value release, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_xchg_i8_unaligned_acq_rel(ptr %ptr, i8 %value) {
; CHECK-LABEL: atomicrmw_xchg_i8_unaligned_acq_rel:
; CHECK: bl __aarch64_swp1_acq_rel
%r = atomicrmw xchg ptr %ptr, i8 %value acq_rel, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_xchg_i8_unaligned_seq_cst(ptr %ptr, i8 %value) {
; CHECK-LABEL: atomicrmw_xchg_i8_unaligned_seq_cst:
; CHECK: bl __aarch64_swp1_acq_rel
%r = atomicrmw xchg ptr %ptr, i8 %value seq_cst, align 1
ret i8 %r
}
define dso_local i16 @atomicrmw_xchg_i16_unaligned_monotonic(ptr %ptr, i16 %value) {
; CHECK-LABEL: atomicrmw_xchg_i16_unaligned_monotonic:
; CHECK: bl __atomic_exchange
%r = atomicrmw xchg ptr %ptr, i16 %value monotonic, align 1
ret i16 %r
}
define dso_local i16 @atomicrmw_xchg_i16_unaligned_acquire(ptr %ptr, i16 %value) {
; CHECK-LABEL: atomicrmw_xchg_i16_unaligned_acquire:
; CHECK: bl __atomic_exchange
%r = atomicrmw xchg ptr %ptr, i16 %value acquire, align 1
ret i16 %r
}
define dso_local i16 @atomicrmw_xchg_i16_unaligned_release(ptr %ptr, i16 %value) {
; CHECK-LABEL: atomicrmw_xchg_i16_unaligned_release:
; CHECK: bl __atomic_exchange
%r = atomicrmw xchg ptr %ptr, i16 %value release, align 1
ret i16 %r
}
define dso_local i16 @atomicrmw_xchg_i16_unaligned_acq_rel(ptr %ptr, i16 %value) {
; CHECK-LABEL: atomicrmw_xchg_i16_unaligned_acq_rel:
; CHECK: bl __atomic_exchange
%r = atomicrmw xchg ptr %ptr, i16 %value acq_rel, align 1
ret i16 %r
}
define dso_local i16 @atomicrmw_xchg_i16_unaligned_seq_cst(ptr %ptr, i16 %value) {
; CHECK-LABEL: atomicrmw_xchg_i16_unaligned_seq_cst:
; CHECK: bl __atomic_exchange
%r = atomicrmw xchg ptr %ptr, i16 %value seq_cst, align 1
ret i16 %r
}
define dso_local i32 @atomicrmw_xchg_i32_unaligned_monotonic(ptr %ptr, i32 %value) {
; CHECK-LABEL: atomicrmw_xchg_i32_unaligned_monotonic:
; CHECK: bl __atomic_exchange
%r = atomicrmw xchg ptr %ptr, i32 %value monotonic, align 1
ret i32 %r
}
define dso_local i32 @atomicrmw_xchg_i32_unaligned_acquire(ptr %ptr, i32 %value) {
; CHECK-LABEL: atomicrmw_xchg_i32_unaligned_acquire:
; CHECK: bl __atomic_exchange
%r = atomicrmw xchg ptr %ptr, i32 %value acquire, align 1
ret i32 %r
}
define dso_local i32 @atomicrmw_xchg_i32_unaligned_release(ptr %ptr, i32 %value) {
; CHECK-LABEL: atomicrmw_xchg_i32_unaligned_release:
; CHECK: bl __atomic_exchange
%r = atomicrmw xchg ptr %ptr, i32 %value release, align 1
ret i32 %r
}
define dso_local i32 @atomicrmw_xchg_i32_unaligned_acq_rel(ptr %ptr, i32 %value) {
; CHECK-LABEL: atomicrmw_xchg_i32_unaligned_acq_rel:
; CHECK: bl __atomic_exchange
%r = atomicrmw xchg ptr %ptr, i32 %value acq_rel, align 1
ret i32 %r
}
define dso_local i32 @atomicrmw_xchg_i32_unaligned_seq_cst(ptr %ptr, i32 %value) {
; CHECK-LABEL: atomicrmw_xchg_i32_unaligned_seq_cst:
; CHECK: bl __atomic_exchange
%r = atomicrmw xchg ptr %ptr, i32 %value seq_cst, align 1
ret i32 %r
}
define dso_local i64 @atomicrmw_xchg_i64_unaligned_monotonic(ptr %ptr, i64 %value) {
; CHECK-LABEL: atomicrmw_xchg_i64_unaligned_monotonic:
; CHECK: bl __atomic_exchange
%r = atomicrmw xchg ptr %ptr, i64 %value monotonic, align 1
ret i64 %r
}
define dso_local i64 @atomicrmw_xchg_i64_unaligned_acquire(ptr %ptr, i64 %value) {
; CHECK-LABEL: atomicrmw_xchg_i64_unaligned_acquire:
; CHECK: bl __atomic_exchange
%r = atomicrmw xchg ptr %ptr, i64 %value acquire, align 1
ret i64 %r
}
define dso_local i64 @atomicrmw_xchg_i64_unaligned_release(ptr %ptr, i64 %value) {
; CHECK-LABEL: atomicrmw_xchg_i64_unaligned_release:
; CHECK: bl __atomic_exchange
%r = atomicrmw xchg ptr %ptr, i64 %value release, align 1
ret i64 %r
}
define dso_local i64 @atomicrmw_xchg_i64_unaligned_acq_rel(ptr %ptr, i64 %value) {
; CHECK-LABEL: atomicrmw_xchg_i64_unaligned_acq_rel:
; CHECK: bl __atomic_exchange
%r = atomicrmw xchg ptr %ptr, i64 %value acq_rel, align 1
ret i64 %r
}
define dso_local i64 @atomicrmw_xchg_i64_unaligned_seq_cst(ptr %ptr, i64 %value) {
; CHECK-LABEL: atomicrmw_xchg_i64_unaligned_seq_cst:
; CHECK: bl __atomic_exchange
%r = atomicrmw xchg ptr %ptr, i64 %value seq_cst, align 1
ret i64 %r
}
define dso_local i128 @atomicrmw_xchg_i128_unaligned_monotonic(ptr %ptr, i128 %value) {
; CHECK-LABEL: atomicrmw_xchg_i128_unaligned_monotonic:
; CHECK: bl __atomic_exchange
%r = atomicrmw xchg ptr %ptr, i128 %value monotonic, align 1
ret i128 %r
}
define dso_local i128 @atomicrmw_xchg_i128_unaligned_acquire(ptr %ptr, i128 %value) {
; CHECK-LABEL: atomicrmw_xchg_i128_unaligned_acquire:
; CHECK: bl __atomic_exchange
%r = atomicrmw xchg ptr %ptr, i128 %value acquire, align 1
ret i128 %r
}
define dso_local i128 @atomicrmw_xchg_i128_unaligned_release(ptr %ptr, i128 %value) {
; CHECK-LABEL: atomicrmw_xchg_i128_unaligned_release:
; CHECK: bl __atomic_exchange
%r = atomicrmw xchg ptr %ptr, i128 %value release, align 1
ret i128 %r
}
define dso_local i128 @atomicrmw_xchg_i128_unaligned_acq_rel(ptr %ptr, i128 %value) {
; CHECK-LABEL: atomicrmw_xchg_i128_unaligned_acq_rel:
; CHECK: bl __atomic_exchange
%r = atomicrmw xchg ptr %ptr, i128 %value acq_rel, align 1
ret i128 %r
}
define dso_local i128 @atomicrmw_xchg_i128_unaligned_seq_cst(ptr %ptr, i128 %value) {
; CHECK-LABEL: atomicrmw_xchg_i128_unaligned_seq_cst:
; CHECK: bl __atomic_exchange
%r = atomicrmw xchg ptr %ptr, i128 %value seq_cst, align 1
ret i128 %r
}
define dso_local i8 @atomicrmw_add_i8_aligned_monotonic(ptr %ptr, i8 %value) {
; CHECK-LABEL: atomicrmw_add_i8_aligned_monotonic:
; CHECK: bl __aarch64_ldadd1_relax
%r = atomicrmw add ptr %ptr, i8 %value monotonic, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_add_i8_aligned_acquire(ptr %ptr, i8 %value) {
; CHECK-LABEL: atomicrmw_add_i8_aligned_acquire:
; CHECK: bl __aarch64_ldadd1_acq
%r = atomicrmw add ptr %ptr, i8 %value acquire, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_add_i8_aligned_release(ptr %ptr, i8 %value) {
; CHECK-LABEL: atomicrmw_add_i8_aligned_release:
; CHECK: bl __aarch64_ldadd1_rel
%r = atomicrmw add ptr %ptr, i8 %value release, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_add_i8_aligned_acq_rel(ptr %ptr, i8 %value) {
; CHECK-LABEL: atomicrmw_add_i8_aligned_acq_rel:
; CHECK: bl __aarch64_ldadd1_acq_rel
%r = atomicrmw add ptr %ptr, i8 %value acq_rel, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_add_i8_aligned_seq_cst(ptr %ptr, i8 %value) {
; CHECK-LABEL: atomicrmw_add_i8_aligned_seq_cst:
; CHECK: bl __aarch64_ldadd1_acq_rel
%r = atomicrmw add ptr %ptr, i8 %value seq_cst, align 1
ret i8 %r
}
define dso_local i16 @atomicrmw_add_i16_aligned_monotonic(ptr %ptr, i16 %value) {
; CHECK-LABEL: atomicrmw_add_i16_aligned_monotonic:
; CHECK: bl __aarch64_ldadd2_relax
%r = atomicrmw add ptr %ptr, i16 %value monotonic, align 2
ret i16 %r
}
define dso_local i16 @atomicrmw_add_i16_aligned_acquire(ptr %ptr, i16 %value) {
; CHECK-LABEL: atomicrmw_add_i16_aligned_acquire:
; CHECK: bl __aarch64_ldadd2_acq
%r = atomicrmw add ptr %ptr, i16 %value acquire, align 2
ret i16 %r
}
define dso_local i16 @atomicrmw_add_i16_aligned_release(ptr %ptr, i16 %value) {
; CHECK-LABEL: atomicrmw_add_i16_aligned_release:
; CHECK: bl __aarch64_ldadd2_rel
%r = atomicrmw add ptr %ptr, i16 %value release, align 2
ret i16 %r
}
define dso_local i16 @atomicrmw_add_i16_aligned_acq_rel(ptr %ptr, i16 %value) {
; CHECK-LABEL: atomicrmw_add_i16_aligned_acq_rel:
; CHECK: bl __aarch64_ldadd2_acq_rel
%r = atomicrmw add ptr %ptr, i16 %value acq_rel, align 2
ret i16 %r
}
define dso_local i16 @atomicrmw_add_i16_aligned_seq_cst(ptr %ptr, i16 %value) {
; CHECK-LABEL: atomicrmw_add_i16_aligned_seq_cst:
; CHECK: bl __aarch64_ldadd2_acq_rel
%r = atomicrmw add ptr %ptr, i16 %value seq_cst, align 2
ret i16 %r
}
define dso_local i32 @atomicrmw_add_i32_aligned_monotonic(ptr %ptr, i32 %value) {
; CHECK-LABEL: atomicrmw_add_i32_aligned_monotonic:
; CHECK: bl __aarch64_ldadd4_relax
%r = atomicrmw add ptr %ptr, i32 %value monotonic, align 4
ret i32 %r
}
define dso_local i32 @atomicrmw_add_i32_aligned_acquire(ptr %ptr, i32 %value) {
; CHECK-LABEL: atomicrmw_add_i32_aligned_acquire:
; CHECK: bl __aarch64_ldadd4_acq
%r = atomicrmw add ptr %ptr, i32 %value acquire, align 4
ret i32 %r
}
define dso_local i32 @atomicrmw_add_i32_aligned_release(ptr %ptr, i32 %value) {
; CHECK-LABEL: atomicrmw_add_i32_aligned_release:
; CHECK: bl __aarch64_ldadd4_rel
%r = atomicrmw add ptr %ptr, i32 %value release, align 4
ret i32 %r
}
define dso_local i32 @atomicrmw_add_i32_aligned_acq_rel(ptr %ptr, i32 %value) {
; CHECK-LABEL: atomicrmw_add_i32_aligned_acq_rel:
; CHECK: bl __aarch64_ldadd4_acq_rel
%r = atomicrmw add ptr %ptr, i32 %value acq_rel, align 4
ret i32 %r
}
define dso_local i32 @atomicrmw_add_i32_aligned_seq_cst(ptr %ptr, i32 %value) {
; CHECK-LABEL: atomicrmw_add_i32_aligned_seq_cst:
; CHECK: bl __aarch64_ldadd4_acq_rel
%r = atomicrmw add ptr %ptr, i32 %value seq_cst, align 4
ret i32 %r
}
define dso_local i64 @atomicrmw_add_i64_aligned_monotonic(ptr %ptr, i64 %value) {
; CHECK-LABEL: atomicrmw_add_i64_aligned_monotonic:
; CHECK: bl __aarch64_ldadd8_relax
%r = atomicrmw add ptr %ptr, i64 %value monotonic, align 8
ret i64 %r
}
define dso_local i64 @atomicrmw_add_i64_aligned_acquire(ptr %ptr, i64 %value) {
; CHECK-LABEL: atomicrmw_add_i64_aligned_acquire:
; CHECK: bl __aarch64_ldadd8_acq
%r = atomicrmw add ptr %ptr, i64 %value acquire, align 8
ret i64 %r
}
define dso_local i64 @atomicrmw_add_i64_aligned_release(ptr %ptr, i64 %value) {
; CHECK-LABEL: atomicrmw_add_i64_aligned_release:
; CHECK: bl __aarch64_ldadd8_rel
%r = atomicrmw add ptr %ptr, i64 %value release, align 8
ret i64 %r
}
define dso_local i64 @atomicrmw_add_i64_aligned_acq_rel(ptr %ptr, i64 %value) {
; CHECK-LABEL: atomicrmw_add_i64_aligned_acq_rel:
; CHECK: bl __aarch64_ldadd8_acq_rel
%r = atomicrmw add ptr %ptr, i64 %value acq_rel, align 8
ret i64 %r
}
define dso_local i64 @atomicrmw_add_i64_aligned_seq_cst(ptr %ptr, i64 %value) {
; CHECK-LABEL: atomicrmw_add_i64_aligned_seq_cst:
; CHECK: bl __aarch64_ldadd8_acq_rel
%r = atomicrmw add ptr %ptr, i64 %value seq_cst, align 8
ret i64 %r
}
define dso_local i128 @atomicrmw_add_i128_aligned_monotonic(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_add_i128_aligned_monotonic:
; -O0: adds x2, x8, x10
; -O0: subs w10, w10, #1
; -O0: bl __aarch64_cas16_relax
; -O0: eor x8, x0, x8
; -O0: eor x9, x1, x9
; -O0: orr x8, x8, x9
; -O0: subs x8, x8, #0
;
; -O1-LABEL: atomicrmw_add_i128_aligned_monotonic:
; -O1: ldxp x0, x1, [x8]
; -O1: adds x9, x0, x2
; -O1: stxp w11, x9, x10, [x8]
%r = atomicrmw add ptr %ptr, i128 %value monotonic, align 16
ret i128 %r
}
define dso_local i128 @atomicrmw_add_i128_aligned_acquire(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_add_i128_aligned_acquire:
; -O0: adds x2, x8, x10
; -O0: subs w10, w10, #1
; -O0: bl __aarch64_cas16_acq
; -O0: eor x8, x0, x8
; -O0: eor x9, x1, x9
; -O0: orr x8, x8, x9
; -O0: subs x8, x8, #0
;
; -O1-LABEL: atomicrmw_add_i128_aligned_acquire:
; -O1: ldaxp x0, x1, [x8]
; -O1: adds x9, x0, x2
; -O1: stxp w11, x9, x10, [x8]
%r = atomicrmw add ptr %ptr, i128 %value acquire, align 16
ret i128 %r
}
define dso_local i128 @atomicrmw_add_i128_aligned_release(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_add_i128_aligned_release:
; -O0: adds x2, x8, x10
; -O0: subs w10, w10, #1
; -O0: bl __aarch64_cas16_rel
; -O0: eor x8, x0, x8
; -O0: eor x9, x1, x9
; -O0: orr x8, x8, x9
; -O0: subs x8, x8, #0
;
; -O1-LABEL: atomicrmw_add_i128_aligned_release:
; -O1: ldxp x0, x1, [x8]
; -O1: adds x9, x0, x2
; -O1: stlxp w11, x9, x10, [x8]
%r = atomicrmw add ptr %ptr, i128 %value release, align 16
ret i128 %r
}
define dso_local i128 @atomicrmw_add_i128_aligned_acq_rel(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_add_i128_aligned_acq_rel:
; -O0: adds x2, x8, x10
; -O0: subs w10, w10, #1
; -O0: bl __aarch64_cas16_acq_rel
; -O0: eor x8, x0, x8
; -O0: eor x9, x1, x9
; -O0: orr x8, x8, x9
; -O0: subs x8, x8, #0
;
; -O1-LABEL: atomicrmw_add_i128_aligned_acq_rel:
; -O1: ldaxp x0, x1, [x8]
; -O1: adds x9, x0, x2
; -O1: stlxp w11, x9, x10, [x8]
%r = atomicrmw add ptr %ptr, i128 %value acq_rel, align 16
ret i128 %r
}
define dso_local i128 @atomicrmw_add_i128_aligned_seq_cst(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_add_i128_aligned_seq_cst:
; -O0: adds x2, x8, x10
; -O0: subs w10, w10, #1
; -O0: bl __aarch64_cas16_acq_rel
; -O0: eor x8, x0, x8
; -O0: eor x9, x1, x9
; -O0: orr x8, x8, x9
; -O0: subs x8, x8, #0
;
; -O1-LABEL: atomicrmw_add_i128_aligned_seq_cst:
; -O1: ldaxp x0, x1, [x8]
; -O1: adds x9, x0, x2
; -O1: stlxp w11, x9, x10, [x8]
%r = atomicrmw add ptr %ptr, i128 %value seq_cst, align 16
ret i128 %r
}
define dso_local i8 @atomicrmw_add_i8_unaligned_monotonic(ptr %ptr, i8 %value) {
; CHECK-LABEL: atomicrmw_add_i8_unaligned_monotonic:
; CHECK: bl __aarch64_ldadd1_relax
%r = atomicrmw add ptr %ptr, i8 %value monotonic, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_add_i8_unaligned_acquire(ptr %ptr, i8 %value) {
; CHECK-LABEL: atomicrmw_add_i8_unaligned_acquire:
; CHECK: bl __aarch64_ldadd1_acq
%r = atomicrmw add ptr %ptr, i8 %value acquire, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_add_i8_unaligned_release(ptr %ptr, i8 %value) {
; CHECK-LABEL: atomicrmw_add_i8_unaligned_release:
; CHECK: bl __aarch64_ldadd1_rel
%r = atomicrmw add ptr %ptr, i8 %value release, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_add_i8_unaligned_acq_rel(ptr %ptr, i8 %value) {
; CHECK-LABEL: atomicrmw_add_i8_unaligned_acq_rel:
; CHECK: bl __aarch64_ldadd1_acq_rel
%r = atomicrmw add ptr %ptr, i8 %value acq_rel, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_add_i8_unaligned_seq_cst(ptr %ptr, i8 %value) {
; CHECK-LABEL: atomicrmw_add_i8_unaligned_seq_cst:
; CHECK: bl __aarch64_ldadd1_acq_rel
%r = atomicrmw add ptr %ptr, i8 %value seq_cst, align 1
ret i8 %r
}
define dso_local i16 @atomicrmw_add_i16_unaligned_monotonic(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_add_i16_unaligned_monotonic:
; -O0: add w8, w8, w9, uxth
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_add_i16_unaligned_monotonic:
; -O1: add w8, w0, w20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw add ptr %ptr, i16 %value monotonic, align 1
ret i16 %r
}
define dso_local i16 @atomicrmw_add_i16_unaligned_acquire(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_add_i16_unaligned_acquire:
; -O0: add w8, w8, w9, uxth
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_add_i16_unaligned_acquire:
; -O1: add w8, w0, w20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw add ptr %ptr, i16 %value acquire, align 1
ret i16 %r
}
define dso_local i16 @atomicrmw_add_i16_unaligned_release(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_add_i16_unaligned_release:
; -O0: add w8, w8, w9, uxth
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_add_i16_unaligned_release:
; -O1: add w8, w0, w20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw add ptr %ptr, i16 %value release, align 1
ret i16 %r
}
define dso_local i16 @atomicrmw_add_i16_unaligned_acq_rel(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_add_i16_unaligned_acq_rel:
; -O0: add w8, w8, w9, uxth
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_add_i16_unaligned_acq_rel:
; -O1: add w8, w0, w20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw add ptr %ptr, i16 %value acq_rel, align 1
ret i16 %r
}
define dso_local i16 @atomicrmw_add_i16_unaligned_seq_cst(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_add_i16_unaligned_seq_cst:
; -O0: add w8, w8, w9, uxth
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_add_i16_unaligned_seq_cst:
; -O1: add w8, w0, w20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw add ptr %ptr, i16 %value seq_cst, align 1
ret i16 %r
}
define dso_local i32 @atomicrmw_add_i32_unaligned_monotonic(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_add_i32_unaligned_monotonic:
; -O0: add w8, w9, w8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_add_i32_unaligned_monotonic:
; -O1: add w8, w0, w20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw add ptr %ptr, i32 %value monotonic, align 1
ret i32 %r
}
define dso_local i32 @atomicrmw_add_i32_unaligned_acquire(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_add_i32_unaligned_acquire:
; -O0: add w8, w9, w8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_add_i32_unaligned_acquire:
; -O1: add w8, w0, w20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw add ptr %ptr, i32 %value acquire, align 1
ret i32 %r
}
define dso_local i32 @atomicrmw_add_i32_unaligned_release(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_add_i32_unaligned_release:
; -O0: add w8, w9, w8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_add_i32_unaligned_release:
; -O1: add w8, w0, w20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw add ptr %ptr, i32 %value release, align 1
ret i32 %r
}
define dso_local i32 @atomicrmw_add_i32_unaligned_acq_rel(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_add_i32_unaligned_acq_rel:
; -O0: add w8, w9, w8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_add_i32_unaligned_acq_rel:
; -O1: add w8, w0, w20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw add ptr %ptr, i32 %value acq_rel, align 1
ret i32 %r
}
define dso_local i32 @atomicrmw_add_i32_unaligned_seq_cst(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_add_i32_unaligned_seq_cst:
; -O0: add w8, w9, w8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_add_i32_unaligned_seq_cst:
; -O1: add w8, w0, w20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw add ptr %ptr, i32 %value seq_cst, align 1
ret i32 %r
}
define dso_local i64 @atomicrmw_add_i64_unaligned_monotonic(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_add_i64_unaligned_monotonic:
; -O0: add x8, x9, x8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_add_i64_unaligned_monotonic:
; -O1: add x8, x0, x20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw add ptr %ptr, i64 %value monotonic, align 1
ret i64 %r
}
define dso_local i64 @atomicrmw_add_i64_unaligned_acquire(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_add_i64_unaligned_acquire:
; -O0: add x8, x9, x8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_add_i64_unaligned_acquire:
; -O1: add x8, x0, x20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw add ptr %ptr, i64 %value acquire, align 1
ret i64 %r
}
define dso_local i64 @atomicrmw_add_i64_unaligned_release(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_add_i64_unaligned_release:
; -O0: add x8, x9, x8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_add_i64_unaligned_release:
; -O1: add x8, x0, x20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw add ptr %ptr, i64 %value release, align 1
ret i64 %r
}
define dso_local i64 @atomicrmw_add_i64_unaligned_acq_rel(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_add_i64_unaligned_acq_rel:
; -O0: add x8, x9, x8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_add_i64_unaligned_acq_rel:
; -O1: add x8, x0, x20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw add ptr %ptr, i64 %value acq_rel, align 1
ret i64 %r
}
define dso_local i64 @atomicrmw_add_i64_unaligned_seq_cst(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_add_i64_unaligned_seq_cst:
; -O0: add x8, x9, x8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_add_i64_unaligned_seq_cst:
; -O1: add x8, x0, x20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw add ptr %ptr, i64 %value seq_cst, align 1
ret i64 %r
}
define dso_local i128 @atomicrmw_add_i128_unaligned_monotonic(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_add_i128_unaligned_monotonic:
; -O0: adds x9, x8, x9
; -O0: subs w11, w11, #1
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_add_i128_unaligned_monotonic:
; -O1: ldp x0, x1, [x0]
; -O1: adds x8, x0, x21
; -O1: bl __atomic_compare_exchange
%r = atomicrmw add ptr %ptr, i128 %value monotonic, align 1
ret i128 %r
}
define dso_local i128 @atomicrmw_add_i128_unaligned_acquire(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_add_i128_unaligned_acquire:
; -O0: adds x9, x8, x9
; -O0: subs w11, w11, #1
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_add_i128_unaligned_acquire:
; -O1: ldp x0, x1, [x0]
; -O1: adds x8, x0, x21
; -O1: bl __atomic_compare_exchange
%r = atomicrmw add ptr %ptr, i128 %value acquire, align 1
ret i128 %r
}
define dso_local i128 @atomicrmw_add_i128_unaligned_release(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_add_i128_unaligned_release:
; -O0: adds x9, x8, x9
; -O0: subs w11, w11, #1
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_add_i128_unaligned_release:
; -O1: ldp x0, x1, [x0]
; -O1: adds x8, x0, x21
; -O1: bl __atomic_compare_exchange
%r = atomicrmw add ptr %ptr, i128 %value release, align 1
ret i128 %r
}
define dso_local i128 @atomicrmw_add_i128_unaligned_acq_rel(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_add_i128_unaligned_acq_rel:
; -O0: adds x9, x8, x9
; -O0: subs w11, w11, #1
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_add_i128_unaligned_acq_rel:
; -O1: ldp x0, x1, [x0]
; -O1: adds x8, x0, x21
; -O1: bl __atomic_compare_exchange
%r = atomicrmw add ptr %ptr, i128 %value acq_rel, align 1
ret i128 %r
}
define dso_local i128 @atomicrmw_add_i128_unaligned_seq_cst(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_add_i128_unaligned_seq_cst:
; -O0: adds x9, x8, x9
; -O0: subs w11, w11, #1
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_add_i128_unaligned_seq_cst:
; -O1: ldp x0, x1, [x0]
; -O1: adds x8, x0, x21
; -O1: bl __atomic_compare_exchange
%r = atomicrmw add ptr %ptr, i128 %value seq_cst, align 1
ret i128 %r
}
define dso_local i8 @atomicrmw_sub_i8_aligned_monotonic(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_sub_i8_aligned_monotonic:
; -O0: subs w0, w8, w9
; -O0: bl __aarch64_ldadd1_relax
;
; -O1-LABEL: atomicrmw_sub_i8_aligned_monotonic:
; -O1: bl __aarch64_ldadd1_relax
%r = atomicrmw sub ptr %ptr, i8 %value monotonic, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_sub_i8_aligned_acquire(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_sub_i8_aligned_acquire:
; -O0: subs w0, w8, w9
; -O0: bl __aarch64_ldadd1_acq
;
; -O1-LABEL: atomicrmw_sub_i8_aligned_acquire:
; -O1: bl __aarch64_ldadd1_acq
%r = atomicrmw sub ptr %ptr, i8 %value acquire, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_sub_i8_aligned_release(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_sub_i8_aligned_release:
; -O0: subs w0, w8, w9
; -O0: bl __aarch64_ldadd1_rel
;
; -O1-LABEL: atomicrmw_sub_i8_aligned_release:
; -O1: bl __aarch64_ldadd1_rel
%r = atomicrmw sub ptr %ptr, i8 %value release, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_sub_i8_aligned_acq_rel(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_sub_i8_aligned_acq_rel:
; -O0: subs w0, w8, w9
; -O0: bl __aarch64_ldadd1_acq_rel
;
; -O1-LABEL: atomicrmw_sub_i8_aligned_acq_rel:
; -O1: bl __aarch64_ldadd1_acq_rel
%r = atomicrmw sub ptr %ptr, i8 %value acq_rel, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_sub_i8_aligned_seq_cst(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_sub_i8_aligned_seq_cst:
; -O0: subs w0, w8, w9
; -O0: bl __aarch64_ldadd1_acq_rel
;
; -O1-LABEL: atomicrmw_sub_i8_aligned_seq_cst:
; -O1: bl __aarch64_ldadd1_acq_rel
%r = atomicrmw sub ptr %ptr, i8 %value seq_cst, align 1
ret i8 %r
}
define dso_local i16 @atomicrmw_sub_i16_aligned_monotonic(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_sub_i16_aligned_monotonic:
; -O0: subs w0, w8, w9
; -O0: bl __aarch64_ldadd2_relax
;
; -O1-LABEL: atomicrmw_sub_i16_aligned_monotonic:
; -O1: bl __aarch64_ldadd2_relax
%r = atomicrmw sub ptr %ptr, i16 %value monotonic, align 2
ret i16 %r
}
define dso_local i16 @atomicrmw_sub_i16_aligned_acquire(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_sub_i16_aligned_acquire:
; -O0: subs w0, w8, w9
; -O0: bl __aarch64_ldadd2_acq
;
; -O1-LABEL: atomicrmw_sub_i16_aligned_acquire:
; -O1: bl __aarch64_ldadd2_acq
%r = atomicrmw sub ptr %ptr, i16 %value acquire, align 2
ret i16 %r
}
define dso_local i16 @atomicrmw_sub_i16_aligned_release(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_sub_i16_aligned_release:
; -O0: subs w0, w8, w9
; -O0: bl __aarch64_ldadd2_rel
;
; -O1-LABEL: atomicrmw_sub_i16_aligned_release:
; -O1: bl __aarch64_ldadd2_rel
%r = atomicrmw sub ptr %ptr, i16 %value release, align 2
ret i16 %r
}
define dso_local i16 @atomicrmw_sub_i16_aligned_acq_rel(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_sub_i16_aligned_acq_rel:
; -O0: subs w0, w8, w9
; -O0: bl __aarch64_ldadd2_acq_rel
;
; -O1-LABEL: atomicrmw_sub_i16_aligned_acq_rel:
; -O1: bl __aarch64_ldadd2_acq_rel
%r = atomicrmw sub ptr %ptr, i16 %value acq_rel, align 2
ret i16 %r
}
define dso_local i16 @atomicrmw_sub_i16_aligned_seq_cst(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_sub_i16_aligned_seq_cst:
; -O0: subs w0, w8, w9
; -O0: bl __aarch64_ldadd2_acq_rel
;
; -O1-LABEL: atomicrmw_sub_i16_aligned_seq_cst:
; -O1: bl __aarch64_ldadd2_acq_rel
%r = atomicrmw sub ptr %ptr, i16 %value seq_cst, align 2
ret i16 %r
}
define dso_local i32 @atomicrmw_sub_i32_aligned_monotonic(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_sub_i32_aligned_monotonic:
; -O0: subs w0, w8, w9
; -O0: bl __aarch64_ldadd4_relax
;
; -O1-LABEL: atomicrmw_sub_i32_aligned_monotonic:
; -O1: bl __aarch64_ldadd4_relax
%r = atomicrmw sub ptr %ptr, i32 %value monotonic, align 4
ret i32 %r
}
define dso_local i32 @atomicrmw_sub_i32_aligned_acquire(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_sub_i32_aligned_acquire:
; -O0: subs w0, w8, w9
; -O0: bl __aarch64_ldadd4_acq
;
; -O1-LABEL: atomicrmw_sub_i32_aligned_acquire:
; -O1: bl __aarch64_ldadd4_acq
%r = atomicrmw sub ptr %ptr, i32 %value acquire, align 4
ret i32 %r
}
define dso_local i32 @atomicrmw_sub_i32_aligned_release(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_sub_i32_aligned_release:
; -O0: subs w0, w8, w9
; -O0: bl __aarch64_ldadd4_rel
;
; -O1-LABEL: atomicrmw_sub_i32_aligned_release:
; -O1: bl __aarch64_ldadd4_rel
%r = atomicrmw sub ptr %ptr, i32 %value release, align 4
ret i32 %r
}
define dso_local i32 @atomicrmw_sub_i32_aligned_acq_rel(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_sub_i32_aligned_acq_rel:
; -O0: subs w0, w8, w9
; -O0: bl __aarch64_ldadd4_acq_rel
;
; -O1-LABEL: atomicrmw_sub_i32_aligned_acq_rel:
; -O1: bl __aarch64_ldadd4_acq_rel
%r = atomicrmw sub ptr %ptr, i32 %value acq_rel, align 4
ret i32 %r
}
define dso_local i32 @atomicrmw_sub_i32_aligned_seq_cst(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_sub_i32_aligned_seq_cst:
; -O0: subs w0, w8, w9
; -O0: bl __aarch64_ldadd4_acq_rel
;
; -O1-LABEL: atomicrmw_sub_i32_aligned_seq_cst:
; -O1: bl __aarch64_ldadd4_acq_rel
%r = atomicrmw sub ptr %ptr, i32 %value seq_cst, align 4
ret i32 %r
}
define dso_local i64 @atomicrmw_sub_i64_aligned_monotonic(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_sub_i64_aligned_monotonic:
; -O0: subs x0, x8, x9
; -O0: bl __aarch64_ldadd8_relax
;
; -O1-LABEL: atomicrmw_sub_i64_aligned_monotonic:
; -O1: bl __aarch64_ldadd8_relax
%r = atomicrmw sub ptr %ptr, i64 %value monotonic, align 8
ret i64 %r
}
define dso_local i64 @atomicrmw_sub_i64_aligned_acquire(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_sub_i64_aligned_acquire:
; -O0: subs x0, x8, x9
; -O0: bl __aarch64_ldadd8_acq
;
; -O1-LABEL: atomicrmw_sub_i64_aligned_acquire:
; -O1: bl __aarch64_ldadd8_acq
%r = atomicrmw sub ptr %ptr, i64 %value acquire, align 8
ret i64 %r
}
define dso_local i64 @atomicrmw_sub_i64_aligned_release(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_sub_i64_aligned_release:
; -O0: subs x0, x8, x9
; -O0: bl __aarch64_ldadd8_rel
;
; -O1-LABEL: atomicrmw_sub_i64_aligned_release:
; -O1: bl __aarch64_ldadd8_rel
%r = atomicrmw sub ptr %ptr, i64 %value release, align 8
ret i64 %r
}
define dso_local i64 @atomicrmw_sub_i64_aligned_acq_rel(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_sub_i64_aligned_acq_rel:
; -O0: subs x0, x8, x9
; -O0: bl __aarch64_ldadd8_acq_rel
;
; -O1-LABEL: atomicrmw_sub_i64_aligned_acq_rel:
; -O1: bl __aarch64_ldadd8_acq_rel
%r = atomicrmw sub ptr %ptr, i64 %value acq_rel, align 8
ret i64 %r
}
define dso_local i64 @atomicrmw_sub_i64_aligned_seq_cst(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_sub_i64_aligned_seq_cst:
; -O0: subs x0, x8, x9
; -O0: bl __aarch64_ldadd8_acq_rel
;
; -O1-LABEL: atomicrmw_sub_i64_aligned_seq_cst:
; -O1: bl __aarch64_ldadd8_acq_rel
%r = atomicrmw sub ptr %ptr, i64 %value seq_cst, align 8
ret i64 %r
}
define dso_local i128 @atomicrmw_sub_i128_aligned_monotonic(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_sub_i128_aligned_monotonic:
; -O0: subs x2, x8, x10
; -O0: bl __aarch64_cas16_relax
; -O0: eor x8, x0, x8
; -O0: eor x9, x1, x9
; -O0: orr x8, x8, x9
; -O0: subs x8, x8, #0
;
; -O1-LABEL: atomicrmw_sub_i128_aligned_monotonic:
; -O1: ldxp x0, x1, [x8]
; -O1: subs x9, x0, x2
; -O1: stxp w11, x9, x10, [x8]
%r = atomicrmw sub ptr %ptr, i128 %value monotonic, align 16
ret i128 %r
}
define dso_local i128 @atomicrmw_sub_i128_aligned_acquire(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_sub_i128_aligned_acquire:
; -O0: subs x2, x8, x10
; -O0: bl __aarch64_cas16_acq
; -O0: eor x8, x0, x8
; -O0: eor x9, x1, x9
; -O0: orr x8, x8, x9
; -O0: subs x8, x8, #0
;
; -O1-LABEL: atomicrmw_sub_i128_aligned_acquire:
; -O1: ldaxp x0, x1, [x8]
; -O1: subs x9, x0, x2
; -O1: stxp w11, x9, x10, [x8]
%r = atomicrmw sub ptr %ptr, i128 %value acquire, align 16
ret i128 %r
}
define dso_local i128 @atomicrmw_sub_i128_aligned_release(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_sub_i128_aligned_release:
; -O0: subs x2, x8, x10
; -O0: bl __aarch64_cas16_rel
; -O0: eor x8, x0, x8
; -O0: eor x9, x1, x9
; -O0: orr x8, x8, x9
; -O0: subs x8, x8, #0
;
; -O1-LABEL: atomicrmw_sub_i128_aligned_release:
; -O1: ldxp x0, x1, [x8]
; -O1: subs x9, x0, x2
; -O1: stlxp w11, x9, x10, [x8]
%r = atomicrmw sub ptr %ptr, i128 %value release, align 16
ret i128 %r
}
define dso_local i128 @atomicrmw_sub_i128_aligned_acq_rel(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_sub_i128_aligned_acq_rel:
; -O0: subs x2, x8, x10
; -O0: bl __aarch64_cas16_acq_rel
; -O0: eor x8, x0, x8
; -O0: eor x9, x1, x9
; -O0: orr x8, x8, x9
; -O0: subs x8, x8, #0
;
; -O1-LABEL: atomicrmw_sub_i128_aligned_acq_rel:
; -O1: ldaxp x0, x1, [x8]
; -O1: subs x9, x0, x2
; -O1: stlxp w11, x9, x10, [x8]
%r = atomicrmw sub ptr %ptr, i128 %value acq_rel, align 16
ret i128 %r
}
define dso_local i128 @atomicrmw_sub_i128_aligned_seq_cst(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_sub_i128_aligned_seq_cst:
; -O0: subs x2, x8, x10
; -O0: bl __aarch64_cas16_acq_rel
; -O0: eor x8, x0, x8
; -O0: eor x9, x1, x9
; -O0: orr x8, x8, x9
; -O0: subs x8, x8, #0
;
; -O1-LABEL: atomicrmw_sub_i128_aligned_seq_cst:
; -O1: ldaxp x0, x1, [x8]
; -O1: subs x9, x0, x2
; -O1: stlxp w11, x9, x10, [x8]
%r = atomicrmw sub ptr %ptr, i128 %value seq_cst, align 16
ret i128 %r
}
define dso_local i8 @atomicrmw_sub_i8_unaligned_monotonic(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_sub_i8_unaligned_monotonic:
; -O0: subs w0, w8, w9
; -O0: bl __aarch64_ldadd1_relax
;
; -O1-LABEL: atomicrmw_sub_i8_unaligned_monotonic:
; -O1: bl __aarch64_ldadd1_relax
%r = atomicrmw sub ptr %ptr, i8 %value monotonic, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_sub_i8_unaligned_acquire(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_sub_i8_unaligned_acquire:
; -O0: subs w0, w8, w9
; -O0: bl __aarch64_ldadd1_acq
;
; -O1-LABEL: atomicrmw_sub_i8_unaligned_acquire:
; -O1: bl __aarch64_ldadd1_acq
%r = atomicrmw sub ptr %ptr, i8 %value acquire, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_sub_i8_unaligned_release(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_sub_i8_unaligned_release:
; -O0: subs w0, w8, w9
; -O0: bl __aarch64_ldadd1_rel
;
; -O1-LABEL: atomicrmw_sub_i8_unaligned_release:
; -O1: bl __aarch64_ldadd1_rel
%r = atomicrmw sub ptr %ptr, i8 %value release, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_sub_i8_unaligned_acq_rel(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_sub_i8_unaligned_acq_rel:
; -O0: subs w0, w8, w9
; -O0: bl __aarch64_ldadd1_acq_rel
;
; -O1-LABEL: atomicrmw_sub_i8_unaligned_acq_rel:
; -O1: bl __aarch64_ldadd1_acq_rel
%r = atomicrmw sub ptr %ptr, i8 %value acq_rel, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_sub_i8_unaligned_seq_cst(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_sub_i8_unaligned_seq_cst:
; -O0: subs w0, w8, w9
; -O0: bl __aarch64_ldadd1_acq_rel
;
; -O1-LABEL: atomicrmw_sub_i8_unaligned_seq_cst:
; -O1: bl __aarch64_ldadd1_acq_rel
%r = atomicrmw sub ptr %ptr, i8 %value seq_cst, align 1
ret i8 %r
}
define dso_local i16 @atomicrmw_sub_i16_unaligned_monotonic(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_sub_i16_unaligned_monotonic:
; -O0: subs w8, w9, w8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_sub_i16_unaligned_monotonic:
; -O1: sub w8, w0, w20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw sub ptr %ptr, i16 %value monotonic, align 1
ret i16 %r
}
define dso_local i16 @atomicrmw_sub_i16_unaligned_acquire(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_sub_i16_unaligned_acquire:
; -O0: subs w8, w9, w8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_sub_i16_unaligned_acquire:
; -O1: sub w8, w0, w20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw sub ptr %ptr, i16 %value acquire, align 1
ret i16 %r
}
define dso_local i16 @atomicrmw_sub_i16_unaligned_release(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_sub_i16_unaligned_release:
; -O0: subs w8, w9, w8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_sub_i16_unaligned_release:
; -O1: sub w8, w0, w20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw sub ptr %ptr, i16 %value release, align 1
ret i16 %r
}
define dso_local i16 @atomicrmw_sub_i16_unaligned_acq_rel(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_sub_i16_unaligned_acq_rel:
; -O0: subs w8, w9, w8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_sub_i16_unaligned_acq_rel:
; -O1: sub w8, w0, w20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw sub ptr %ptr, i16 %value acq_rel, align 1
ret i16 %r
}
define dso_local i16 @atomicrmw_sub_i16_unaligned_seq_cst(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_sub_i16_unaligned_seq_cst:
; -O0: subs w8, w9, w8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_sub_i16_unaligned_seq_cst:
; -O1: sub w8, w0, w20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw sub ptr %ptr, i16 %value seq_cst, align 1
ret i16 %r
}
define dso_local i32 @atomicrmw_sub_i32_unaligned_monotonic(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_sub_i32_unaligned_monotonic:
; -O0: subs w8, w9, w8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_sub_i32_unaligned_monotonic:
; -O1: sub w8, w0, w20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw sub ptr %ptr, i32 %value monotonic, align 1
ret i32 %r
}
define dso_local i32 @atomicrmw_sub_i32_unaligned_acquire(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_sub_i32_unaligned_acquire:
; -O0: subs w8, w9, w8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_sub_i32_unaligned_acquire:
; -O1: sub w8, w0, w20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw sub ptr %ptr, i32 %value acquire, align 1
ret i32 %r
}
define dso_local i32 @atomicrmw_sub_i32_unaligned_release(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_sub_i32_unaligned_release:
; -O0: subs w8, w9, w8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_sub_i32_unaligned_release:
; -O1: sub w8, w0, w20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw sub ptr %ptr, i32 %value release, align 1
ret i32 %r
}
define dso_local i32 @atomicrmw_sub_i32_unaligned_acq_rel(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_sub_i32_unaligned_acq_rel:
; -O0: subs w8, w9, w8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_sub_i32_unaligned_acq_rel:
; -O1: sub w8, w0, w20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw sub ptr %ptr, i32 %value acq_rel, align 1
ret i32 %r
}
define dso_local i32 @atomicrmw_sub_i32_unaligned_seq_cst(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_sub_i32_unaligned_seq_cst:
; -O0: subs w8, w9, w8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_sub_i32_unaligned_seq_cst:
; -O1: sub w8, w0, w20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw sub ptr %ptr, i32 %value seq_cst, align 1
ret i32 %r
}
define dso_local i64 @atomicrmw_sub_i64_unaligned_monotonic(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_sub_i64_unaligned_monotonic:
; -O0: subs x8, x9, x8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_sub_i64_unaligned_monotonic:
; -O1: sub x8, x0, x20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw sub ptr %ptr, i64 %value monotonic, align 1
ret i64 %r
}
define dso_local i64 @atomicrmw_sub_i64_unaligned_acquire(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_sub_i64_unaligned_acquire:
; -O0: subs x8, x9, x8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_sub_i64_unaligned_acquire:
; -O1: sub x8, x0, x20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw sub ptr %ptr, i64 %value acquire, align 1
ret i64 %r
}
define dso_local i64 @atomicrmw_sub_i64_unaligned_release(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_sub_i64_unaligned_release:
; -O0: subs x8, x9, x8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_sub_i64_unaligned_release:
; -O1: sub x8, x0, x20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw sub ptr %ptr, i64 %value release, align 1
ret i64 %r
}
define dso_local i64 @atomicrmw_sub_i64_unaligned_acq_rel(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_sub_i64_unaligned_acq_rel:
; -O0: subs x8, x9, x8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_sub_i64_unaligned_acq_rel:
; -O1: sub x8, x0, x20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw sub ptr %ptr, i64 %value acq_rel, align 1
ret i64 %r
}
define dso_local i64 @atomicrmw_sub_i64_unaligned_seq_cst(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_sub_i64_unaligned_seq_cst:
; -O0: subs x8, x9, x8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_sub_i64_unaligned_seq_cst:
; -O1: sub x8, x0, x20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw sub ptr %ptr, i64 %value seq_cst, align 1
ret i64 %r
}
define dso_local i128 @atomicrmw_sub_i128_unaligned_monotonic(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_sub_i128_unaligned_monotonic:
; -O0: subs x9, x8, x9
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_sub_i128_unaligned_monotonic:
; -O1: ldp x0, x1, [x0]
; -O1: subs x8, x0, x21
; -O1: bl __atomic_compare_exchange
%r = atomicrmw sub ptr %ptr, i128 %value monotonic, align 1
ret i128 %r
}
define dso_local i128 @atomicrmw_sub_i128_unaligned_acquire(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_sub_i128_unaligned_acquire:
; -O0: subs x9, x8, x9
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_sub_i128_unaligned_acquire:
; -O1: ldp x0, x1, [x0]
; -O1: subs x8, x0, x21
; -O1: bl __atomic_compare_exchange
%r = atomicrmw sub ptr %ptr, i128 %value acquire, align 1
ret i128 %r
}
define dso_local i128 @atomicrmw_sub_i128_unaligned_release(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_sub_i128_unaligned_release:
; -O0: subs x9, x8, x9
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_sub_i128_unaligned_release:
; -O1: ldp x0, x1, [x0]
; -O1: subs x8, x0, x21
; -O1: bl __atomic_compare_exchange
%r = atomicrmw sub ptr %ptr, i128 %value release, align 1
ret i128 %r
}
define dso_local i128 @atomicrmw_sub_i128_unaligned_acq_rel(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_sub_i128_unaligned_acq_rel:
; -O0: subs x9, x8, x9
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_sub_i128_unaligned_acq_rel:
; -O1: ldp x0, x1, [x0]
; -O1: subs x8, x0, x21
; -O1: bl __atomic_compare_exchange
%r = atomicrmw sub ptr %ptr, i128 %value acq_rel, align 1
ret i128 %r
}
define dso_local i128 @atomicrmw_sub_i128_unaligned_seq_cst(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_sub_i128_unaligned_seq_cst:
; -O0: subs x9, x8, x9
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_sub_i128_unaligned_seq_cst:
; -O1: ldp x0, x1, [x0]
; -O1: subs x8, x0, x21
; -O1: bl __atomic_compare_exchange
%r = atomicrmw sub ptr %ptr, i128 %value seq_cst, align 1
ret i128 %r
}
define dso_local i8 @atomicrmw_and_i8_aligned_monotonic(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_and_i8_aligned_monotonic:
; -O0: eor w0, w8, w9
; -O0: bl __aarch64_ldclr1_relax
;
; -O1-LABEL: atomicrmw_and_i8_aligned_monotonic:
; -O1: mvn w0, w1
; -O1: bl __aarch64_ldclr1_relax
%r = atomicrmw and ptr %ptr, i8 %value monotonic, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_and_i8_aligned_acquire(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_and_i8_aligned_acquire:
; -O0: eor w0, w8, w9
; -O0: bl __aarch64_ldclr1_acq
;
; -O1-LABEL: atomicrmw_and_i8_aligned_acquire:
; -O1: mvn w0, w1
; -O1: bl __aarch64_ldclr1_acq
%r = atomicrmw and ptr %ptr, i8 %value acquire, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_and_i8_aligned_release(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_and_i8_aligned_release:
; -O0: eor w0, w8, w9
; -O0: bl __aarch64_ldclr1_rel
;
; -O1-LABEL: atomicrmw_and_i8_aligned_release:
; -O1: mvn w0, w1
; -O1: bl __aarch64_ldclr1_rel
%r = atomicrmw and ptr %ptr, i8 %value release, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_and_i8_aligned_acq_rel(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_and_i8_aligned_acq_rel:
; -O0: eor w0, w8, w9
; -O0: bl __aarch64_ldclr1_acq_rel
;
; -O1-LABEL: atomicrmw_and_i8_aligned_acq_rel:
; -O1: mvn w0, w1
; -O1: bl __aarch64_ldclr1_acq_rel
%r = atomicrmw and ptr %ptr, i8 %value acq_rel, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_and_i8_aligned_seq_cst(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_and_i8_aligned_seq_cst:
; -O0: eor w0, w8, w9
; -O0: bl __aarch64_ldclr1_acq_rel
;
; -O1-LABEL: atomicrmw_and_i8_aligned_seq_cst:
; -O1: mvn w0, w1
; -O1: bl __aarch64_ldclr1_acq_rel
%r = atomicrmw and ptr %ptr, i8 %value seq_cst, align 1
ret i8 %r
}
define dso_local i16 @atomicrmw_and_i16_aligned_monotonic(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_and_i16_aligned_monotonic:
; -O0: eor w0, w8, w9
; -O0: bl __aarch64_ldclr2_relax
;
; -O1-LABEL: atomicrmw_and_i16_aligned_monotonic:
; -O1: mvn w0, w1
; -O1: bl __aarch64_ldclr2_relax
%r = atomicrmw and ptr %ptr, i16 %value monotonic, align 2
ret i16 %r
}
define dso_local i16 @atomicrmw_and_i16_aligned_acquire(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_and_i16_aligned_acquire:
; -O0: eor w0, w8, w9
; -O0: bl __aarch64_ldclr2_acq
;
; -O1-LABEL: atomicrmw_and_i16_aligned_acquire:
; -O1: mvn w0, w1
; -O1: bl __aarch64_ldclr2_acq
%r = atomicrmw and ptr %ptr, i16 %value acquire, align 2
ret i16 %r
}
define dso_local i16 @atomicrmw_and_i16_aligned_release(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_and_i16_aligned_release:
; -O0: eor w0, w8, w9
; -O0: bl __aarch64_ldclr2_rel
;
; -O1-LABEL: atomicrmw_and_i16_aligned_release:
; -O1: mvn w0, w1
; -O1: bl __aarch64_ldclr2_rel
%r = atomicrmw and ptr %ptr, i16 %value release, align 2
ret i16 %r
}
define dso_local i16 @atomicrmw_and_i16_aligned_acq_rel(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_and_i16_aligned_acq_rel:
; -O0: eor w0, w8, w9
; -O0: bl __aarch64_ldclr2_acq_rel
;
; -O1-LABEL: atomicrmw_and_i16_aligned_acq_rel:
; -O1: mvn w0, w1
; -O1: bl __aarch64_ldclr2_acq_rel
%r = atomicrmw and ptr %ptr, i16 %value acq_rel, align 2
ret i16 %r
}
define dso_local i16 @atomicrmw_and_i16_aligned_seq_cst(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_and_i16_aligned_seq_cst:
; -O0: eor w0, w8, w9
; -O0: bl __aarch64_ldclr2_acq_rel
;
; -O1-LABEL: atomicrmw_and_i16_aligned_seq_cst:
; -O1: mvn w0, w1
; -O1: bl __aarch64_ldclr2_acq_rel
%r = atomicrmw and ptr %ptr, i16 %value seq_cst, align 2
ret i16 %r
}
define dso_local i32 @atomicrmw_and_i32_aligned_monotonic(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_and_i32_aligned_monotonic:
; -O0: eor w0, w8, w9
; -O0: bl __aarch64_ldclr4_relax
;
; -O1-LABEL: atomicrmw_and_i32_aligned_monotonic:
; -O1: mvn w0, w1
; -O1: bl __aarch64_ldclr4_relax
%r = atomicrmw and ptr %ptr, i32 %value monotonic, align 4
ret i32 %r
}
define dso_local i32 @atomicrmw_and_i32_aligned_acquire(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_and_i32_aligned_acquire:
; -O0: eor w0, w8, w9
; -O0: bl __aarch64_ldclr4_acq
;
; -O1-LABEL: atomicrmw_and_i32_aligned_acquire:
; -O1: mvn w0, w1
; -O1: bl __aarch64_ldclr4_acq
%r = atomicrmw and ptr %ptr, i32 %value acquire, align 4
ret i32 %r
}
define dso_local i32 @atomicrmw_and_i32_aligned_release(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_and_i32_aligned_release:
; -O0: eor w0, w8, w9
; -O0: bl __aarch64_ldclr4_rel
;
; -O1-LABEL: atomicrmw_and_i32_aligned_release:
; -O1: mvn w0, w1
; -O1: bl __aarch64_ldclr4_rel
%r = atomicrmw and ptr %ptr, i32 %value release, align 4
ret i32 %r
}
define dso_local i32 @atomicrmw_and_i32_aligned_acq_rel(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_and_i32_aligned_acq_rel:
; -O0: eor w0, w8, w9
; -O0: bl __aarch64_ldclr4_acq_rel
;
; -O1-LABEL: atomicrmw_and_i32_aligned_acq_rel:
; -O1: mvn w0, w1
; -O1: bl __aarch64_ldclr4_acq_rel
%r = atomicrmw and ptr %ptr, i32 %value acq_rel, align 4
ret i32 %r
}
define dso_local i32 @atomicrmw_and_i32_aligned_seq_cst(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_and_i32_aligned_seq_cst:
; -O0: eor w0, w8, w9
; -O0: bl __aarch64_ldclr4_acq_rel
;
; -O1-LABEL: atomicrmw_and_i32_aligned_seq_cst:
; -O1: mvn w0, w1
; -O1: bl __aarch64_ldclr4_acq_rel
%r = atomicrmw and ptr %ptr, i32 %value seq_cst, align 4
ret i32 %r
}
define dso_local i64 @atomicrmw_and_i64_aligned_monotonic(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_and_i64_aligned_monotonic:
; -O0: eor x0, x8, x9
; -O0: bl __aarch64_ldclr8_relax
;
; -O1-LABEL: atomicrmw_and_i64_aligned_monotonic:
; -O1: mvn x0, x1
; -O1: bl __aarch64_ldclr8_relax
%r = atomicrmw and ptr %ptr, i64 %value monotonic, align 8
ret i64 %r
}
define dso_local i64 @atomicrmw_and_i64_aligned_acquire(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_and_i64_aligned_acquire:
; -O0: eor x0, x8, x9
; -O0: bl __aarch64_ldclr8_acq
;
; -O1-LABEL: atomicrmw_and_i64_aligned_acquire:
; -O1: mvn x0, x1
; -O1: bl __aarch64_ldclr8_acq
%r = atomicrmw and ptr %ptr, i64 %value acquire, align 8
ret i64 %r
}
define dso_local i64 @atomicrmw_and_i64_aligned_release(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_and_i64_aligned_release:
; -O0: eor x0, x8, x9
; -O0: bl __aarch64_ldclr8_rel
;
; -O1-LABEL: atomicrmw_and_i64_aligned_release:
; -O1: mvn x0, x1
; -O1: bl __aarch64_ldclr8_rel
%r = atomicrmw and ptr %ptr, i64 %value release, align 8
ret i64 %r
}
define dso_local i64 @atomicrmw_and_i64_aligned_acq_rel(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_and_i64_aligned_acq_rel:
; -O0: eor x0, x8, x9
; -O0: bl __aarch64_ldclr8_acq_rel
;
; -O1-LABEL: atomicrmw_and_i64_aligned_acq_rel:
; -O1: mvn x0, x1
; -O1: bl __aarch64_ldclr8_acq_rel
%r = atomicrmw and ptr %ptr, i64 %value acq_rel, align 8
ret i64 %r
}
define dso_local i64 @atomicrmw_and_i64_aligned_seq_cst(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_and_i64_aligned_seq_cst:
; -O0: eor x0, x8, x9
; -O0: bl __aarch64_ldclr8_acq_rel
;
; -O1-LABEL: atomicrmw_and_i64_aligned_seq_cst:
; -O1: mvn x0, x1
; -O1: bl __aarch64_ldclr8_acq_rel
%r = atomicrmw and ptr %ptr, i64 %value seq_cst, align 8
ret i64 %r
}
define dso_local i128 @atomicrmw_and_i128_aligned_monotonic(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_and_i128_aligned_monotonic:
; -O0: and x2, x8, x10
; -O0: and x3, x8, x9
; -O0: bl __aarch64_cas16_relax
; -O0: eor x8, x0, x8
; -O0: eor x9, x1, x9
; -O0: orr x8, x8, x9
; -O0: subs x8, x8, #0
;
; -O1-LABEL: atomicrmw_and_i128_aligned_monotonic:
; -O1: ldxp x0, x1, [x8]
; -O1: and x9, x1, x3
; -O1: and x10, x0, x2
; -O1: stxp w11, x10, x9, [x8]
%r = atomicrmw and ptr %ptr, i128 %value monotonic, align 16
ret i128 %r
}
define dso_local i128 @atomicrmw_and_i128_aligned_acquire(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_and_i128_aligned_acquire:
; -O0: and x2, x8, x10
; -O0: and x3, x8, x9
; -O0: bl __aarch64_cas16_acq
; -O0: eor x8, x0, x8
; -O0: eor x9, x1, x9
; -O0: orr x8, x8, x9
; -O0: subs x8, x8, #0
;
; -O1-LABEL: atomicrmw_and_i128_aligned_acquire:
; -O1: ldaxp x0, x1, [x8]
; -O1: and x9, x1, x3
; -O1: and x10, x0, x2
; -O1: stxp w11, x10, x9, [x8]
%r = atomicrmw and ptr %ptr, i128 %value acquire, align 16
ret i128 %r
}
define dso_local i128 @atomicrmw_and_i128_aligned_release(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_and_i128_aligned_release:
; -O0: and x2, x8, x10
; -O0: and x3, x8, x9
; -O0: bl __aarch64_cas16_rel
; -O0: eor x8, x0, x8
; -O0: eor x9, x1, x9
; -O0: orr x8, x8, x9
; -O0: subs x8, x8, #0
;
; -O1-LABEL: atomicrmw_and_i128_aligned_release:
; -O1: ldxp x0, x1, [x8]
; -O1: and x9, x1, x3
; -O1: and x10, x0, x2
; -O1: stlxp w11, x10, x9, [x8]
%r = atomicrmw and ptr %ptr, i128 %value release, align 16
ret i128 %r
}
define dso_local i128 @atomicrmw_and_i128_aligned_acq_rel(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_and_i128_aligned_acq_rel:
; -O0: and x2, x8, x10
; -O0: and x3, x8, x9
; -O0: bl __aarch64_cas16_acq_rel
; -O0: eor x8, x0, x8
; -O0: eor x9, x1, x9
; -O0: orr x8, x8, x9
; -O0: subs x8, x8, #0
;
; -O1-LABEL: atomicrmw_and_i128_aligned_acq_rel:
; -O1: ldaxp x0, x1, [x8]
; -O1: and x9, x1, x3
; -O1: and x10, x0, x2
; -O1: stlxp w11, x10, x9, [x8]
%r = atomicrmw and ptr %ptr, i128 %value acq_rel, align 16
ret i128 %r
}
define dso_local i128 @atomicrmw_and_i128_aligned_seq_cst(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_and_i128_aligned_seq_cst:
; -O0: and x2, x8, x10
; -O0: and x3, x8, x9
; -O0: bl __aarch64_cas16_acq_rel
; -O0: eor x8, x0, x8
; -O0: eor x9, x1, x9
; -O0: orr x8, x8, x9
; -O0: subs x8, x8, #0
;
; -O1-LABEL: atomicrmw_and_i128_aligned_seq_cst:
; -O1: ldaxp x0, x1, [x8]
; -O1: and x9, x1, x3
; -O1: and x10, x0, x2
; -O1: stlxp w11, x10, x9, [x8]
%r = atomicrmw and ptr %ptr, i128 %value seq_cst, align 16
ret i128 %r
}
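; The "unaligned" variants below use align 1. Single-byte accesses are always
; naturally aligned and still go through the __aarch64_ldclr1 helpers, while
; wider accesses are expanded to a __atomic_compare_exchange libcall loop, as
; the checks that follow show.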
define dso_local i8 @atomicrmw_and_i8_unaligned_monotonic(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_and_i8_unaligned_monotonic:
; -O0: eor w0, w8, w9
; -O0: bl __aarch64_ldclr1_relax
;
; -O1-LABEL: atomicrmw_and_i8_unaligned_monotonic:
; -O1: mvn w0, w1
; -O1: bl __aarch64_ldclr1_relax
%r = atomicrmw and ptr %ptr, i8 %value monotonic, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_and_i8_unaligned_acquire(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_and_i8_unaligned_acquire:
; -O0: eor w0, w8, w9
; -O0: bl __aarch64_ldclr1_acq
;
; -O1-LABEL: atomicrmw_and_i8_unaligned_acquire:
; -O1: mvn w0, w1
; -O1: bl __aarch64_ldclr1_acq
%r = atomicrmw and ptr %ptr, i8 %value acquire, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_and_i8_unaligned_release(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_and_i8_unaligned_release:
; -O0: eor w0, w8, w9
; -O0: bl __aarch64_ldclr1_rel
;
; -O1-LABEL: atomicrmw_and_i8_unaligned_release:
; -O1: mvn w0, w1
; -O1: bl __aarch64_ldclr1_rel
%r = atomicrmw and ptr %ptr, i8 %value release, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_and_i8_unaligned_acq_rel(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_and_i8_unaligned_acq_rel:
; -O0: eor w0, w8, w9
; -O0: bl __aarch64_ldclr1_acq_rel
;
; -O1-LABEL: atomicrmw_and_i8_unaligned_acq_rel:
; -O1: mvn w0, w1
; -O1: bl __aarch64_ldclr1_acq_rel
%r = atomicrmw and ptr %ptr, i8 %value acq_rel, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_and_i8_unaligned_seq_cst(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_and_i8_unaligned_seq_cst:
; -O0: eor w0, w8, w9
; -O0: bl __aarch64_ldclr1_acq_rel
;
; -O1-LABEL: atomicrmw_and_i8_unaligned_seq_cst:
; -O1: mvn w0, w1
; -O1: bl __aarch64_ldclr1_acq_rel
%r = atomicrmw and ptr %ptr, i8 %value seq_cst, align 1
ret i8 %r
}
define dso_local i16 @atomicrmw_and_i16_unaligned_monotonic(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_and_i16_unaligned_monotonic:
; -O0: and w8, w9, w8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_and_i16_unaligned_monotonic:
; -O1: and w8, w0, w20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw and ptr %ptr, i16 %value monotonic, align 1
ret i16 %r
}
define dso_local i16 @atomicrmw_and_i16_unaligned_acquire(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_and_i16_unaligned_acquire:
; -O0: and w8, w9, w8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_and_i16_unaligned_acquire:
; -O1: and w8, w0, w20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw and ptr %ptr, i16 %value acquire, align 1
ret i16 %r
}
define dso_local i16 @atomicrmw_and_i16_unaligned_release(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_and_i16_unaligned_release:
; -O0: and w8, w9, w8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_and_i16_unaligned_release:
; -O1: and w8, w0, w20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw and ptr %ptr, i16 %value release, align 1
ret i16 %r
}
define dso_local i16 @atomicrmw_and_i16_unaligned_acq_rel(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_and_i16_unaligned_acq_rel:
; -O0: and w8, w9, w8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_and_i16_unaligned_acq_rel:
; -O1: and w8, w0, w20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw and ptr %ptr, i16 %value acq_rel, align 1
ret i16 %r
}
define dso_local i16 @atomicrmw_and_i16_unaligned_seq_cst(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_and_i16_unaligned_seq_cst:
; -O0: and w8, w9, w8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_and_i16_unaligned_seq_cst:
; -O1: and w8, w0, w20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw and ptr %ptr, i16 %value seq_cst, align 1
ret i16 %r
}
define dso_local i32 @atomicrmw_and_i32_unaligned_monotonic(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_and_i32_unaligned_monotonic:
; -O0: and w8, w9, w8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_and_i32_unaligned_monotonic:
; -O1: and w8, w0, w20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw and ptr %ptr, i32 %value monotonic, align 1
ret i32 %r
}
define dso_local i32 @atomicrmw_and_i32_unaligned_acquire(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_and_i32_unaligned_acquire:
; -O0: and w8, w9, w8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_and_i32_unaligned_acquire:
; -O1: and w8, w0, w20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw and ptr %ptr, i32 %value acquire, align 1
ret i32 %r
}
define dso_local i32 @atomicrmw_and_i32_unaligned_release(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_and_i32_unaligned_release:
; -O0: and w8, w9, w8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_and_i32_unaligned_release:
; -O1: and w8, w0, w20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw and ptr %ptr, i32 %value release, align 1
ret i32 %r
}
define dso_local i32 @atomicrmw_and_i32_unaligned_acq_rel(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_and_i32_unaligned_acq_rel:
; -O0: and w8, w9, w8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_and_i32_unaligned_acq_rel:
; -O1: and w8, w0, w20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw and ptr %ptr, i32 %value acq_rel, align 1
ret i32 %r
}
define dso_local i32 @atomicrmw_and_i32_unaligned_seq_cst(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_and_i32_unaligned_seq_cst:
; -O0: and w8, w9, w8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_and_i32_unaligned_seq_cst:
; -O1: and w8, w0, w20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw and ptr %ptr, i32 %value seq_cst, align 1
ret i32 %r
}
define dso_local i64 @atomicrmw_and_i64_unaligned_monotonic(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_and_i64_unaligned_monotonic:
; -O0: and x8, x9, x8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_and_i64_unaligned_monotonic:
; -O1: and x8, x0, x20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw and ptr %ptr, i64 %value monotonic, align 1
ret i64 %r
}
define dso_local i64 @atomicrmw_and_i64_unaligned_acquire(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_and_i64_unaligned_acquire:
; -O0: and x8, x9, x8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_and_i64_unaligned_acquire:
; -O1: and x8, x0, x20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw and ptr %ptr, i64 %value acquire, align 1
ret i64 %r
}
define dso_local i64 @atomicrmw_and_i64_unaligned_release(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_and_i64_unaligned_release:
; -O0: and x8, x9, x8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_and_i64_unaligned_release:
; -O1: and x8, x0, x20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw and ptr %ptr, i64 %value release, align 1
ret i64 %r
}
define dso_local i64 @atomicrmw_and_i64_unaligned_acq_rel(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_and_i64_unaligned_acq_rel:
; -O0: and x8, x9, x8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_and_i64_unaligned_acq_rel:
; -O1: and x8, x0, x20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw and ptr %ptr, i64 %value acq_rel, align 1
ret i64 %r
}
define dso_local i64 @atomicrmw_and_i64_unaligned_seq_cst(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_and_i64_unaligned_seq_cst:
; -O0: and x8, x9, x8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_and_i64_unaligned_seq_cst:
; -O1: and x8, x0, x20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw and ptr %ptr, i64 %value seq_cst, align 1
ret i64 %r
}
define dso_local i128 @atomicrmw_and_i128_unaligned_monotonic(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_and_i128_unaligned_monotonic:
; -O0: and x9, x8, x9
; -O0: and x8, x8, x10
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_and_i128_unaligned_monotonic:
; -O1: ldp x0, x1, [x0]
; -O1: and x8, x1, x19
; -O1: and x9, x0, x21
; -O1: bl __atomic_compare_exchange
%r = atomicrmw and ptr %ptr, i128 %value monotonic, align 1
ret i128 %r
}
define dso_local i128 @atomicrmw_and_i128_unaligned_acquire(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_and_i128_unaligned_acquire:
; -O0: and x9, x8, x9
; -O0: and x8, x8, x10
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_and_i128_unaligned_acquire:
; -O1: ldp x0, x1, [x0]
; -O1: and x8, x1, x19
; -O1: and x9, x0, x21
; -O1: bl __atomic_compare_exchange
%r = atomicrmw and ptr %ptr, i128 %value acquire, align 1
ret i128 %r
}
define dso_local i128 @atomicrmw_and_i128_unaligned_release(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_and_i128_unaligned_release:
; -O0: and x9, x8, x9
; -O0: and x8, x8, x10
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_and_i128_unaligned_release:
; -O1: ldp x0, x1, [x0]
; -O1: and x8, x1, x19
; -O1: and x9, x0, x21
; -O1: bl __atomic_compare_exchange
%r = atomicrmw and ptr %ptr, i128 %value release, align 1
ret i128 %r
}
define dso_local i128 @atomicrmw_and_i128_unaligned_acq_rel(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_and_i128_unaligned_acq_rel:
; -O0: and x9, x8, x9
; -O0: and x8, x8, x10
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_and_i128_unaligned_acq_rel:
; -O1: ldp x0, x1, [x0]
; -O1: and x8, x1, x19
; -O1: and x9, x0, x21
; -O1: bl __atomic_compare_exchange
%r = atomicrmw and ptr %ptr, i128 %value acq_rel, align 1
ret i128 %r
}
define dso_local i128 @atomicrmw_and_i128_unaligned_seq_cst(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_and_i128_unaligned_seq_cst:
; -O0: and x9, x8, x9
; -O0: and x8, x8, x10
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_and_i128_unaligned_seq_cst:
; -O1: ldp x0, x1, [x0]
; -O1: and x8, x1, x19
; -O1: and x9, x0, x21
; -O1: bl __atomic_compare_exchange
%r = atomicrmw and ptr %ptr, i128 %value seq_cst, align 1
ret i128 %r
}
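; nand has no outline-atomics helper (the runtime provides only cas, swp,
; ldadd, ldclr, ldeor and ldset), so aligned nand is expanded to a
; __aarch64_casN compare-exchange loop at -O0 and an inline LL/SC
; (ld{a}xr/st{l}xr) loop at -O1.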
define dso_local i8 @atomicrmw_nand_i8_aligned_monotonic(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_nand_i8_aligned_monotonic:
; -O0: and w8, w0, w8
; -O0: mvn w1, w8
; -O0: bl __aarch64_cas1_relax
; -O0: and w8, w0, #0xff
; -O0: subs w8, w8, w9, uxtb
;
; -O1-LABEL: atomicrmw_nand_i8_aligned_monotonic:
; -O1: ldxrb w8, [x0]
; -O1: and w9, w8, w1
; -O1: mvn w9, w9
; -O1: stxrb w10, w9, [x0]
%r = atomicrmw nand ptr %ptr, i8 %value monotonic, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_nand_i8_aligned_acquire(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_nand_i8_aligned_acquire:
; -O0: and w8, w0, w8
; -O0: mvn w1, w8
; -O0: bl __aarch64_cas1_acq
; -O0: and w8, w0, #0xff
; -O0: subs w8, w8, w9, uxtb
;
; -O1-LABEL: atomicrmw_nand_i8_aligned_acquire:
; -O1: ldaxrb w8, [x0]
; -O1: and w9, w8, w1
; -O1: mvn w9, w9
; -O1: stxrb w10, w9, [x0]
%r = atomicrmw nand ptr %ptr, i8 %value acquire, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_nand_i8_aligned_release(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_nand_i8_aligned_release:
; -O0: and w8, w0, w8
; -O0: mvn w1, w8
; -O0: bl __aarch64_cas1_rel
; -O0: and w8, w0, #0xff
; -O0: subs w8, w8, w9, uxtb
;
; -O1-LABEL: atomicrmw_nand_i8_aligned_release:
; -O1: ldxrb w8, [x0]
; -O1: and w9, w8, w1
; -O1: mvn w9, w9
; -O1: stlxrb w10, w9, [x0]
%r = atomicrmw nand ptr %ptr, i8 %value release, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_nand_i8_aligned_acq_rel(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_nand_i8_aligned_acq_rel:
; -O0: and w8, w0, w8
; -O0: mvn w1, w8
; -O0: bl __aarch64_cas1_acq_rel
; -O0: and w8, w0, #0xff
; -O0: subs w8, w8, w9, uxtb
;
; -O1-LABEL: atomicrmw_nand_i8_aligned_acq_rel:
; -O1: ldaxrb w8, [x0]
; -O1: and w9, w8, w1
; -O1: mvn w9, w9
; -O1: stlxrb w10, w9, [x0]
%r = atomicrmw nand ptr %ptr, i8 %value acq_rel, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_nand_i8_aligned_seq_cst(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_nand_i8_aligned_seq_cst:
; -O0: and w8, w0, w8
; -O0: mvn w1, w8
; -O0: bl __aarch64_cas1_acq_rel
; -O0: and w8, w0, #0xff
; -O0: subs w8, w8, w9, uxtb
;
; -O1-LABEL: atomicrmw_nand_i8_aligned_seq_cst:
; -O1: ldaxrb w8, [x0]
; -O1: and w9, w8, w1
; -O1: mvn w9, w9
; -O1: stlxrb w10, w9, [x0]
%r = atomicrmw nand ptr %ptr, i8 %value seq_cst, align 1
ret i8 %r
}
define dso_local i16 @atomicrmw_nand_i16_aligned_monotonic(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_nand_i16_aligned_monotonic:
; -O0: and w8, w0, w8
; -O0: mvn w1, w8
; -O0: bl __aarch64_cas2_relax
; -O0: subs w8, w8, w0, uxth
;
; -O1-LABEL: atomicrmw_nand_i16_aligned_monotonic:
; -O1: ldxrh w8, [x0]
; -O1: and w9, w8, w1
; -O1: mvn w9, w9
; -O1: stxrh w10, w9, [x0]
%r = atomicrmw nand ptr %ptr, i16 %value monotonic, align 2
ret i16 %r
}
define dso_local i16 @atomicrmw_nand_i16_aligned_acquire(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_nand_i16_aligned_acquire:
; -O0: and w8, w0, w8
; -O0: mvn w1, w8
; -O0: bl __aarch64_cas2_acq
; -O0: subs w8, w8, w0, uxth
;
; -O1-LABEL: atomicrmw_nand_i16_aligned_acquire:
; -O1: ldaxrh w8, [x0]
; -O1: and w9, w8, w1
; -O1: mvn w9, w9
; -O1: stxrh w10, w9, [x0]
%r = atomicrmw nand ptr %ptr, i16 %value acquire, align 2
ret i16 %r
}
define dso_local i16 @atomicrmw_nand_i16_aligned_release(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_nand_i16_aligned_release:
; -O0: and w8, w0, w8
; -O0: mvn w1, w8
; -O0: bl __aarch64_cas2_rel
; -O0: subs w8, w8, w0, uxth
;
; -O1-LABEL: atomicrmw_nand_i16_aligned_release:
; -O1: ldxrh w8, [x0]
; -O1: and w9, w8, w1
; -O1: mvn w9, w9
; -O1: stlxrh w10, w9, [x0]
%r = atomicrmw nand ptr %ptr, i16 %value release, align 2
ret i16 %r
}
define dso_local i16 @atomicrmw_nand_i16_aligned_acq_rel(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_nand_i16_aligned_acq_rel:
; -O0: and w8, w0, w8
; -O0: mvn w1, w8
; -O0: bl __aarch64_cas2_acq_rel
; -O0: subs w8, w8, w0, uxth
;
; -O1-LABEL: atomicrmw_nand_i16_aligned_acq_rel:
; -O1: ldaxrh w8, [x0]
; -O1: and w9, w8, w1
; -O1: mvn w9, w9
; -O1: stlxrh w10, w9, [x0]
%r = atomicrmw nand ptr %ptr, i16 %value acq_rel, align 2
ret i16 %r
}
define dso_local i16 @atomicrmw_nand_i16_aligned_seq_cst(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_nand_i16_aligned_seq_cst:
; -O0: and w8, w0, w8
; -O0: mvn w1, w8
; -O0: bl __aarch64_cas2_acq_rel
; -O0: subs w8, w8, w0, uxth
;
; -O1-LABEL: atomicrmw_nand_i16_aligned_seq_cst:
; -O1: ldaxrh w8, [x0]
; -O1: and w9, w8, w1
; -O1: mvn w9, w9
; -O1: stlxrh w10, w9, [x0]
%r = atomicrmw nand ptr %ptr, i16 %value seq_cst, align 2
ret i16 %r
}
define dso_local i32 @atomicrmw_nand_i32_aligned_monotonic(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_nand_i32_aligned_monotonic:
; -O0: and w8, w0, w8
; -O0: mvn w1, w8
; -O0: bl __aarch64_cas4_relax
; -O0: subs w8, w0, w8
;
; -O1-LABEL: atomicrmw_nand_i32_aligned_monotonic:
; -O1: ldxr w8, [x0]
; -O1: and w9, w8, w1
; -O1: mvn w9, w9
; -O1: stxr w10, w9, [x0]
%r = atomicrmw nand ptr %ptr, i32 %value monotonic, align 4
ret i32 %r
}
define dso_local i32 @atomicrmw_nand_i32_aligned_acquire(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_nand_i32_aligned_acquire:
; -O0: and w8, w0, w8
; -O0: mvn w1, w8
; -O0: bl __aarch64_cas4_acq
; -O0: subs w8, w0, w8
;
; -O1-LABEL: atomicrmw_nand_i32_aligned_acquire:
; -O1: ldaxr w8, [x0]
; -O1: and w9, w8, w1
; -O1: mvn w9, w9
; -O1: stxr w10, w9, [x0]
%r = atomicrmw nand ptr %ptr, i32 %value acquire, align 4
ret i32 %r
}
define dso_local i32 @atomicrmw_nand_i32_aligned_release(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_nand_i32_aligned_release:
; -O0: and w8, w0, w8
; -O0: mvn w1, w8
; -O0: bl __aarch64_cas4_rel
; -O0: subs w8, w0, w8
;
; -O1-LABEL: atomicrmw_nand_i32_aligned_release:
; -O1: ldxr w8, [x0]
; -O1: and w9, w8, w1
; -O1: mvn w9, w9
; -O1: stlxr w10, w9, [x0]
%r = atomicrmw nand ptr %ptr, i32 %value release, align 4
ret i32 %r
}
define dso_local i32 @atomicrmw_nand_i32_aligned_acq_rel(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_nand_i32_aligned_acq_rel:
; -O0: and w8, w0, w8
; -O0: mvn w1, w8
; -O0: bl __aarch64_cas4_acq_rel
; -O0: subs w8, w0, w8
;
; -O1-LABEL: atomicrmw_nand_i32_aligned_acq_rel:
; -O1: ldaxr w8, [x0]
; -O1: and w9, w8, w1
; -O1: mvn w9, w9
; -O1: stlxr w10, w9, [x0]
%r = atomicrmw nand ptr %ptr, i32 %value acq_rel, align 4
ret i32 %r
}
define dso_local i32 @atomicrmw_nand_i32_aligned_seq_cst(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_nand_i32_aligned_seq_cst:
; -O0: and w8, w0, w8
; -O0: mvn w1, w8
; -O0: bl __aarch64_cas4_acq_rel
; -O0: subs w8, w0, w8
;
; -O1-LABEL: atomicrmw_nand_i32_aligned_seq_cst:
; -O1: ldaxr w8, [x0]
; -O1: and w9, w8, w1
; -O1: mvn w9, w9
; -O1: stlxr w10, w9, [x0]
%r = atomicrmw nand ptr %ptr, i32 %value seq_cst, align 4
ret i32 %r
}
define dso_local i64 @atomicrmw_nand_i64_aligned_monotonic(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_nand_i64_aligned_monotonic:
; -O0: and x8, x0, x8
; -O0: mvn x1, x8
; -O0: bl __aarch64_cas8_relax
; -O0: subs x8, x0, x8
;
; -O1-LABEL: atomicrmw_nand_i64_aligned_monotonic:
; -O1: ldxr x0, [x8]
; -O1: and x9, x0, x1
; -O1: mvn x9, x9
; -O1: stxr w10, x9, [x8]
%r = atomicrmw nand ptr %ptr, i64 %value monotonic, align 8
ret i64 %r
}
define dso_local i64 @atomicrmw_nand_i64_aligned_acquire(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_nand_i64_aligned_acquire:
; -O0: and x8, x0, x8
; -O0: mvn x1, x8
; -O0: bl __aarch64_cas8_acq
; -O0: subs x8, x0, x8
;
; -O1-LABEL: atomicrmw_nand_i64_aligned_acquire:
; -O1: ldaxr x0, [x8]
; -O1: and x9, x0, x1
; -O1: mvn x9, x9
; -O1: stxr w10, x9, [x8]
%r = atomicrmw nand ptr %ptr, i64 %value acquire, align 8
ret i64 %r
}
define dso_local i64 @atomicrmw_nand_i64_aligned_release(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_nand_i64_aligned_release:
; -O0: and x8, x0, x8
; -O0: mvn x1, x8
; -O0: bl __aarch64_cas8_rel
; -O0: subs x8, x0, x8
;
; -O1-LABEL: atomicrmw_nand_i64_aligned_release:
; -O1: ldxr x0, [x8]
; -O1: and x9, x0, x1
; -O1: mvn x9, x9
; -O1: stlxr w10, x9, [x8]
%r = atomicrmw nand ptr %ptr, i64 %value release, align 8
ret i64 %r
}
define dso_local i64 @atomicrmw_nand_i64_aligned_acq_rel(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_nand_i64_aligned_acq_rel:
; -O0: and x8, x0, x8
; -O0: mvn x1, x8
; -O0: bl __aarch64_cas8_acq_rel
; -O0: subs x8, x0, x8
;
; -O1-LABEL: atomicrmw_nand_i64_aligned_acq_rel:
; -O1: ldaxr x0, [x8]
; -O1: and x9, x0, x1
; -O1: mvn x9, x9
; -O1: stlxr w10, x9, [x8]
%r = atomicrmw nand ptr %ptr, i64 %value acq_rel, align 8
ret i64 %r
}
define dso_local i64 @atomicrmw_nand_i64_aligned_seq_cst(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_nand_i64_aligned_seq_cst:
; -O0: and x8, x0, x8
; -O0: mvn x1, x8
; -O0: bl __aarch64_cas8_acq_rel
; -O0: subs x8, x0, x8
;
; -O1-LABEL: atomicrmw_nand_i64_aligned_seq_cst:
; -O1: ldaxr x0, [x8]
; -O1: and x9, x0, x1
; -O1: mvn x9, x9
; -O1: stlxr w10, x9, [x8]
%r = atomicrmw nand ptr %ptr, i64 %value seq_cst, align 8
ret i64 %r
}
define dso_local i128 @atomicrmw_nand_i128_aligned_monotonic(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_nand_i128_aligned_monotonic:
; -O0: and x9, x8, x9
; -O0: and x8, x8, x10
; -O0: mvn x2, x9
; -O0: mvn x3, x8
; -O0: bl __aarch64_cas16_relax
; -O0: eor x8, x0, x8
; -O0: eor x9, x1, x9
; -O0: orr x8, x8, x9
; -O0: subs x8, x8, #0
;
; -O1-LABEL: atomicrmw_nand_i128_aligned_monotonic:
; -O1: ldxp x0, x1, [x8]
; -O1: and x9, x0, x2
; -O1: and x10, x1, x3
; -O1: mvn x10, x10
; -O1: mvn x9, x9
; -O1: stxp w11, x9, x10, [x8]
%r = atomicrmw nand ptr %ptr, i128 %value monotonic, align 16
ret i128 %r
}
define dso_local i128 @atomicrmw_nand_i128_aligned_acquire(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_nand_i128_aligned_acquire:
; -O0: and x9, x8, x9
; -O0: and x8, x8, x10
; -O0: mvn x2, x9
; -O0: mvn x3, x8
; -O0: bl __aarch64_cas16_acq
; -O0: eor x8, x0, x8
; -O0: eor x9, x1, x9
; -O0: orr x8, x8, x9
; -O0: subs x8, x8, #0
;
; -O1-LABEL: atomicrmw_nand_i128_aligned_acquire:
; -O1: ldaxp x0, x1, [x8]
; -O1: and x9, x0, x2
; -O1: and x10, x1, x3
; -O1: mvn x10, x10
; -O1: mvn x9, x9
; -O1: stxp w11, x9, x10, [x8]
%r = atomicrmw nand ptr %ptr, i128 %value acquire, align 16
ret i128 %r
}
define dso_local i128 @atomicrmw_nand_i128_aligned_release(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_nand_i128_aligned_release:
; -O0: and x9, x8, x9
; -O0: and x8, x8, x10
; -O0: mvn x2, x9
; -O0: mvn x3, x8
; -O0: bl __aarch64_cas16_rel
; -O0: eor x8, x0, x8
; -O0: eor x9, x1, x9
; -O0: orr x8, x8, x9
; -O0: subs x8, x8, #0
;
; -O1-LABEL: atomicrmw_nand_i128_aligned_release:
; -O1: ldxp x0, x1, [x8]
; -O1: and x9, x0, x2
; -O1: and x10, x1, x3
; -O1: mvn x10, x10
; -O1: mvn x9, x9
; -O1: stlxp w11, x9, x10, [x8]
%r = atomicrmw nand ptr %ptr, i128 %value release, align 16
ret i128 %r
}
define dso_local i128 @atomicrmw_nand_i128_aligned_acq_rel(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_nand_i128_aligned_acq_rel:
; -O0: and x9, x8, x9
; -O0: and x8, x8, x10
; -O0: mvn x2, x9
; -O0: mvn x3, x8
; -O0: bl __aarch64_cas16_acq_rel
; -O0: eor x8, x0, x8
; -O0: eor x9, x1, x9
; -O0: orr x8, x8, x9
; -O0: subs x8, x8, #0
;
; -O1-LABEL: atomicrmw_nand_i128_aligned_acq_rel:
; -O1: ldaxp x0, x1, [x8]
; -O1: and x9, x0, x2
; -O1: and x10, x1, x3
; -O1: mvn x10, x10
; -O1: mvn x9, x9
; -O1: stlxp w11, x9, x10, [x8]
%r = atomicrmw nand ptr %ptr, i128 %value acq_rel, align 16
ret i128 %r
}
define dso_local i128 @atomicrmw_nand_i128_aligned_seq_cst(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_nand_i128_aligned_seq_cst:
; -O0: and x9, x8, x9
; -O0: and x8, x8, x10
; -O0: mvn x2, x9
; -O0: mvn x3, x8
; -O0: bl __aarch64_cas16_acq_rel
; -O0: eor x8, x0, x8
; -O0: eor x9, x1, x9
; -O0: orr x8, x8, x9
; -O0: subs x8, x8, #0
;
; -O1-LABEL: atomicrmw_nand_i128_aligned_seq_cst:
; -O1: ldaxp x0, x1, [x8]
; -O1: and x9, x0, x2
; -O1: and x10, x1, x3
; -O1: mvn x10, x10
; -O1: mvn x9, x9
; -O1: stlxp w11, x9, x10, [x8]
%r = atomicrmw nand ptr %ptr, i128 %value seq_cst, align 16
ret i128 %r
}
define dso_local i8 @atomicrmw_nand_i8_unaligned_monotonic(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_nand_i8_unaligned_monotonic:
; -O0: and w8, w0, w8
; -O0: mvn w1, w8
; -O0: bl __aarch64_cas1_relax
; -O0: and w8, w0, #0xff
; -O0: subs w8, w8, w9, uxtb
;
; -O1-LABEL: atomicrmw_nand_i8_unaligned_monotonic:
; -O1: ldxrb w8, [x0]
; -O1: and w9, w8, w1
; -O1: mvn w9, w9
; -O1: stxrb w10, w9, [x0]
%r = atomicrmw nand ptr %ptr, i8 %value monotonic, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_nand_i8_unaligned_acquire(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_nand_i8_unaligned_acquire:
; -O0: and w8, w0, w8
; -O0: mvn w1, w8
; -O0: bl __aarch64_cas1_acq
; -O0: and w8, w0, #0xff
; -O0: subs w8, w8, w9, uxtb
;
; -O1-LABEL: atomicrmw_nand_i8_unaligned_acquire:
; -O1: ldaxrb w8, [x0]
; -O1: and w9, w8, w1
; -O1: mvn w9, w9
; -O1: stxrb w10, w9, [x0]
%r = atomicrmw nand ptr %ptr, i8 %value acquire, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_nand_i8_unaligned_release(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_nand_i8_unaligned_release:
; -O0: and w8, w0, w8
; -O0: mvn w1, w8
; -O0: bl __aarch64_cas1_rel
; -O0: and w8, w0, #0xff
; -O0: subs w8, w8, w9, uxtb
;
; -O1-LABEL: atomicrmw_nand_i8_unaligned_release:
; -O1: ldxrb w8, [x0]
; -O1: and w9, w8, w1
; -O1: mvn w9, w9
; -O1: stlxrb w10, w9, [x0]
%r = atomicrmw nand ptr %ptr, i8 %value release, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_nand_i8_unaligned_acq_rel(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_nand_i8_unaligned_acq_rel:
; -O0: and w8, w0, w8
; -O0: mvn w1, w8
; -O0: bl __aarch64_cas1_acq_rel
; -O0: and w8, w0, #0xff
; -O0: subs w8, w8, w9, uxtb
;
; -O1-LABEL: atomicrmw_nand_i8_unaligned_acq_rel:
; -O1: ldaxrb w8, [x0]
; -O1: and w9, w8, w1
; -O1: mvn w9, w9
; -O1: stlxrb w10, w9, [x0]
%r = atomicrmw nand ptr %ptr, i8 %value acq_rel, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_nand_i8_unaligned_seq_cst(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_nand_i8_unaligned_seq_cst:
; -O0: and w8, w0, w8
; -O0: mvn w1, w8
; -O0: bl __aarch64_cas1_acq_rel
; -O0: and w8, w0, #0xff
; -O0: subs w8, w8, w9, uxtb
;
; -O1-LABEL: atomicrmw_nand_i8_unaligned_seq_cst:
; -O1: ldaxrb w8, [x0]
; -O1: and w9, w8, w1
; -O1: mvn w9, w9
; -O1: stlxrb w10, w9, [x0]
%r = atomicrmw nand ptr %ptr, i8 %value seq_cst, align 1
ret i8 %r
}
define dso_local i16 @atomicrmw_nand_i16_unaligned_monotonic(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_nand_i16_unaligned_monotonic:
; -O0: and w8, w9, w8
; -O0: mvn w8, w8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_nand_i16_unaligned_monotonic:
; -O1: and w8, w0, w20
; -O1: mvn w8, w8
; -O1: bl __atomic_compare_exchange
%r = atomicrmw nand ptr %ptr, i16 %value monotonic, align 1
ret i16 %r
}
define dso_local i16 @atomicrmw_nand_i16_unaligned_acquire(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_nand_i16_unaligned_acquire:
; -O0: and w8, w9, w8
; -O0: mvn w8, w8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_nand_i16_unaligned_acquire:
; -O1: and w8, w0, w20
; -O1: mvn w8, w8
; -O1: bl __atomic_compare_exchange
%r = atomicrmw nand ptr %ptr, i16 %value acquire, align 1
ret i16 %r
}
define dso_local i16 @atomicrmw_nand_i16_unaligned_release(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_nand_i16_unaligned_release:
; -O0: and w8, w9, w8
; -O0: mvn w8, w8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_nand_i16_unaligned_release:
; -O1: and w8, w0, w20
; -O1: mvn w8, w8
; -O1: bl __atomic_compare_exchange
%r = atomicrmw nand ptr %ptr, i16 %value release, align 1
ret i16 %r
}
define dso_local i16 @atomicrmw_nand_i16_unaligned_acq_rel(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_nand_i16_unaligned_acq_rel:
; -O0: and w8, w9, w8
; -O0: mvn w8, w8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_nand_i16_unaligned_acq_rel:
; -O1: and w8, w0, w20
; -O1: mvn w8, w8
; -O1: bl __atomic_compare_exchange
%r = atomicrmw nand ptr %ptr, i16 %value acq_rel, align 1
ret i16 %r
}
define dso_local i16 @atomicrmw_nand_i16_unaligned_seq_cst(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_nand_i16_unaligned_seq_cst:
; -O0: and w8, w9, w8
; -O0: mvn w8, w8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_nand_i16_unaligned_seq_cst:
; -O1: and w8, w0, w20
; -O1: mvn w8, w8
; -O1: bl __atomic_compare_exchange
%r = atomicrmw nand ptr %ptr, i16 %value seq_cst, align 1
ret i16 %r
}
define dso_local i32 @atomicrmw_nand_i32_unaligned_monotonic(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_nand_i32_unaligned_monotonic:
; -O0: and w8, w9, w8
; -O0: mvn w8, w8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_nand_i32_unaligned_monotonic:
; -O1: and w8, w0, w20
; -O1: mvn w8, w8
; -O1: bl __atomic_compare_exchange
%r = atomicrmw nand ptr %ptr, i32 %value monotonic, align 1
ret i32 %r
}
define dso_local i32 @atomicrmw_nand_i32_unaligned_acquire(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_nand_i32_unaligned_acquire:
; -O0: and w8, w9, w8
; -O0: mvn w8, w8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_nand_i32_unaligned_acquire:
; -O1: and w8, w0, w20
; -O1: mvn w8, w8
; -O1: bl __atomic_compare_exchange
%r = atomicrmw nand ptr %ptr, i32 %value acquire, align 1
ret i32 %r
}
define dso_local i32 @atomicrmw_nand_i32_unaligned_release(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_nand_i32_unaligned_release:
; -O0: and w8, w9, w8
; -O0: mvn w8, w8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_nand_i32_unaligned_release:
; -O1: and w8, w0, w20
; -O1: mvn w8, w8
; -O1: bl __atomic_compare_exchange
%r = atomicrmw nand ptr %ptr, i32 %value release, align 1
ret i32 %r
}
define dso_local i32 @atomicrmw_nand_i32_unaligned_acq_rel(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_nand_i32_unaligned_acq_rel:
; -O0: and w8, w9, w8
; -O0: mvn w8, w8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_nand_i32_unaligned_acq_rel:
; -O1: and w8, w0, w20
; -O1: mvn w8, w8
; -O1: bl __atomic_compare_exchange
%r = atomicrmw nand ptr %ptr, i32 %value acq_rel, align 1
ret i32 %r
}
define dso_local i32 @atomicrmw_nand_i32_unaligned_seq_cst(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_nand_i32_unaligned_seq_cst:
; -O0: and w8, w9, w8
; -O0: mvn w8, w8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_nand_i32_unaligned_seq_cst:
; -O1: and w8, w0, w20
; -O1: mvn w8, w8
; -O1: bl __atomic_compare_exchange
%r = atomicrmw nand ptr %ptr, i32 %value seq_cst, align 1
ret i32 %r
}
define dso_local i64 @atomicrmw_nand_i64_unaligned_monotonic(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_nand_i64_unaligned_monotonic:
; -O0: and x8, x9, x8
; -O0: mvn x8, x8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_nand_i64_unaligned_monotonic:
; -O1: and x8, x0, x20
; -O1: mvn x8, x8
; -O1: bl __atomic_compare_exchange
%r = atomicrmw nand ptr %ptr, i64 %value monotonic, align 1
ret i64 %r
}
define dso_local i64 @atomicrmw_nand_i64_unaligned_acquire(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_nand_i64_unaligned_acquire:
; -O0: and x8, x9, x8
; -O0: mvn x8, x8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_nand_i64_unaligned_acquire:
; -O1: and x8, x0, x20
; -O1: mvn x8, x8
; -O1: bl __atomic_compare_exchange
%r = atomicrmw nand ptr %ptr, i64 %value acquire, align 1
ret i64 %r
}
define dso_local i64 @atomicrmw_nand_i64_unaligned_release(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_nand_i64_unaligned_release:
; -O0: and x8, x9, x8
; -O0: mvn x8, x8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_nand_i64_unaligned_release:
; -O1: and x8, x0, x20
; -O1: mvn x8, x8
; -O1: bl __atomic_compare_exchange
%r = atomicrmw nand ptr %ptr, i64 %value release, align 1
ret i64 %r
}
define dso_local i64 @atomicrmw_nand_i64_unaligned_acq_rel(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_nand_i64_unaligned_acq_rel:
; -O0: and x8, x9, x8
; -O0: mvn x8, x8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_nand_i64_unaligned_acq_rel:
; -O1: and x8, x0, x20
; -O1: mvn x8, x8
; -O1: bl __atomic_compare_exchange
%r = atomicrmw nand ptr %ptr, i64 %value acq_rel, align 1
ret i64 %r
}
define dso_local i64 @atomicrmw_nand_i64_unaligned_seq_cst(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_nand_i64_unaligned_seq_cst:
; -O0: and x8, x9, x8
; -O0: mvn x8, x8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_nand_i64_unaligned_seq_cst:
; -O1: and x8, x0, x20
; -O1: mvn x8, x8
; -O1: bl __atomic_compare_exchange
%r = atomicrmw nand ptr %ptr, i64 %value seq_cst, align 1
ret i64 %r
}
define dso_local i128 @atomicrmw_nand_i128_unaligned_monotonic(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_nand_i128_unaligned_monotonic:
; -O0: and x9, x8, x9
; -O0: and x8, x8, x10
; -O0: mvn x9, x9
; -O0: mvn x8, x8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_nand_i128_unaligned_monotonic:
; -O1: ldp x0, x1, [x0]
; -O1: and x8, x1, x19
; -O1: and x9, x0, x21
; -O1: mvn x8, x8
; -O1: mvn x9, x9
; -O1: bl __atomic_compare_exchange
%r = atomicrmw nand ptr %ptr, i128 %value monotonic, align 1
ret i128 %r
}
define dso_local i128 @atomicrmw_nand_i128_unaligned_acquire(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_nand_i128_unaligned_acquire:
; -O0: and x9, x8, x9
; -O0: and x8, x8, x10
; -O0: mvn x9, x9
; -O0: mvn x8, x8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_nand_i128_unaligned_acquire:
; -O1: ldp x0, x1, [x0]
; -O1: and x8, x1, x19
; -O1: and x9, x0, x21
; -O1: mvn x8, x8
; -O1: mvn x9, x9
; -O1: bl __atomic_compare_exchange
%r = atomicrmw nand ptr %ptr, i128 %value acquire, align 1
ret i128 %r
}
define dso_local i128 @atomicrmw_nand_i128_unaligned_release(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_nand_i128_unaligned_release:
; -O0: and x9, x8, x9
; -O0: and x8, x8, x10
; -O0: mvn x9, x9
; -O0: mvn x8, x8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_nand_i128_unaligned_release:
; -O1: ldp x0, x1, [x0]
; -O1: and x8, x1, x19
; -O1: and x9, x0, x21
; -O1: mvn x8, x8
; -O1: mvn x9, x9
; -O1: bl __atomic_compare_exchange
%r = atomicrmw nand ptr %ptr, i128 %value release, align 1
ret i128 %r
}
define dso_local i128 @atomicrmw_nand_i128_unaligned_acq_rel(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_nand_i128_unaligned_acq_rel:
; -O0: and x9, x8, x9
; -O0: and x8, x8, x10
; -O0: mvn x9, x9
; -O0: mvn x8, x8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_nand_i128_unaligned_acq_rel:
; -O1: ldp x0, x1, [x0]
; -O1: and x8, x1, x19
; -O1: and x9, x0, x21
; -O1: mvn x8, x8
; -O1: mvn x9, x9
; -O1: bl __atomic_compare_exchange
%r = atomicrmw nand ptr %ptr, i128 %value acq_rel, align 1
ret i128 %r
}
define dso_local i128 @atomicrmw_nand_i128_unaligned_seq_cst(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_nand_i128_unaligned_seq_cst:
; -O0: and x9, x8, x9
; -O0: and x8, x8, x10
; -O0: mvn x9, x9
; -O0: mvn x8, x8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_nand_i128_unaligned_seq_cst:
; -O1: ldp x0, x1, [x0]
; -O1: and x8, x1, x19
; -O1: and x9, x0, x21
; -O1: mvn x8, x8
; -O1: mvn x9, x9
; -O1: bl __atomic_compare_exchange
%r = atomicrmw nand ptr %ptr, i128 %value seq_cst, align 1
ret i128 %r
}
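; or maps directly onto the __aarch64_ldsetN helpers for aligned 1-8 byte
; accesses; as elsewhere, seq_cst uses the _acq_rel variant because no
; stronger helper exists, and aligned i128 falls back to a cas16 loop at -O0
; or an LL/SC loop at -O1.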
define dso_local i8 @atomicrmw_or_i8_aligned_monotonic(ptr %ptr, i8 %value) {
; CHECK-LABEL: atomicrmw_or_i8_aligned_monotonic:
; CHECK: bl __aarch64_ldset1_relax
%r = atomicrmw or ptr %ptr, i8 %value monotonic, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_or_i8_aligned_acquire(ptr %ptr, i8 %value) {
; CHECK-LABEL: atomicrmw_or_i8_aligned_acquire:
; CHECK: bl __aarch64_ldset1_acq
%r = atomicrmw or ptr %ptr, i8 %value acquire, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_or_i8_aligned_release(ptr %ptr, i8 %value) {
; CHECK-LABEL: atomicrmw_or_i8_aligned_release:
; CHECK: bl __aarch64_ldset1_rel
%r = atomicrmw or ptr %ptr, i8 %value release, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_or_i8_aligned_acq_rel(ptr %ptr, i8 %value) {
; CHECK-LABEL: atomicrmw_or_i8_aligned_acq_rel:
; CHECK: bl __aarch64_ldset1_acq_rel
%r = atomicrmw or ptr %ptr, i8 %value acq_rel, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_or_i8_aligned_seq_cst(ptr %ptr, i8 %value) {
; CHECK-LABEL: atomicrmw_or_i8_aligned_seq_cst:
; CHECK: bl __aarch64_ldset1_acq_rel
%r = atomicrmw or ptr %ptr, i8 %value seq_cst, align 1
ret i8 %r
}
define dso_local i16 @atomicrmw_or_i16_aligned_monotonic(ptr %ptr, i16 %value) {
; CHECK-LABEL: atomicrmw_or_i16_aligned_monotonic:
; CHECK: bl __aarch64_ldset2_relax
%r = atomicrmw or ptr %ptr, i16 %value monotonic, align 2
ret i16 %r
}
define dso_local i16 @atomicrmw_or_i16_aligned_acquire(ptr %ptr, i16 %value) {
; CHECK-LABEL: atomicrmw_or_i16_aligned_acquire:
; CHECK: bl __aarch64_ldset2_acq
%r = atomicrmw or ptr %ptr, i16 %value acquire, align 2
ret i16 %r
}
define dso_local i16 @atomicrmw_or_i16_aligned_release(ptr %ptr, i16 %value) {
; CHECK-LABEL: atomicrmw_or_i16_aligned_release:
; CHECK: bl __aarch64_ldset2_rel
%r = atomicrmw or ptr %ptr, i16 %value release, align 2
ret i16 %r
}
define dso_local i16 @atomicrmw_or_i16_aligned_acq_rel(ptr %ptr, i16 %value) {
; CHECK-LABEL: atomicrmw_or_i16_aligned_acq_rel:
; CHECK: bl __aarch64_ldset2_acq_rel
%r = atomicrmw or ptr %ptr, i16 %value acq_rel, align 2
ret i16 %r
}
define dso_local i16 @atomicrmw_or_i16_aligned_seq_cst(ptr %ptr, i16 %value) {
; CHECK-LABEL: atomicrmw_or_i16_aligned_seq_cst:
; CHECK: bl __aarch64_ldset2_acq_rel
%r = atomicrmw or ptr %ptr, i16 %value seq_cst, align 2
ret i16 %r
}
define dso_local i32 @atomicrmw_or_i32_aligned_monotonic(ptr %ptr, i32 %value) {
; CHECK-LABEL: atomicrmw_or_i32_aligned_monotonic:
; CHECK: bl __aarch64_ldset4_relax
%r = atomicrmw or ptr %ptr, i32 %value monotonic, align 4
ret i32 %r
}
define dso_local i32 @atomicrmw_or_i32_aligned_acquire(ptr %ptr, i32 %value) {
; CHECK-LABEL: atomicrmw_or_i32_aligned_acquire:
; CHECK: bl __aarch64_ldset4_acq
%r = atomicrmw or ptr %ptr, i32 %value acquire, align 4
ret i32 %r
}
define dso_local i32 @atomicrmw_or_i32_aligned_release(ptr %ptr, i32 %value) {
; CHECK-LABEL: atomicrmw_or_i32_aligned_release:
; CHECK: bl __aarch64_ldset4_rel
%r = atomicrmw or ptr %ptr, i32 %value release, align 4
ret i32 %r
}
define dso_local i32 @atomicrmw_or_i32_aligned_acq_rel(ptr %ptr, i32 %value) {
; CHECK-LABEL: atomicrmw_or_i32_aligned_acq_rel:
; CHECK: bl __aarch64_ldset4_acq_rel
%r = atomicrmw or ptr %ptr, i32 %value acq_rel, align 4
ret i32 %r
}
define dso_local i32 @atomicrmw_or_i32_aligned_seq_cst(ptr %ptr, i32 %value) {
; CHECK-LABEL: atomicrmw_or_i32_aligned_seq_cst:
; CHECK: bl __aarch64_ldset4_acq_rel
%r = atomicrmw or ptr %ptr, i32 %value seq_cst, align 4
ret i32 %r
}
define dso_local i64 @atomicrmw_or_i64_aligned_monotonic(ptr %ptr, i64 %value) {
; CHECK-LABEL: atomicrmw_or_i64_aligned_monotonic:
; CHECK: bl __aarch64_ldset8_relax
%r = atomicrmw or ptr %ptr, i64 %value monotonic, align 8
ret i64 %r
}
define dso_local i64 @atomicrmw_or_i64_aligned_acquire(ptr %ptr, i64 %value) {
; CHECK-LABEL: atomicrmw_or_i64_aligned_acquire:
; CHECK: bl __aarch64_ldset8_acq
%r = atomicrmw or ptr %ptr, i64 %value acquire, align 8
ret i64 %r
}
define dso_local i64 @atomicrmw_or_i64_aligned_release(ptr %ptr, i64 %value) {
; CHECK-LABEL: atomicrmw_or_i64_aligned_release:
; CHECK: bl __aarch64_ldset8_rel
%r = atomicrmw or ptr %ptr, i64 %value release, align 8
ret i64 %r
}
define dso_local i64 @atomicrmw_or_i64_aligned_acq_rel(ptr %ptr, i64 %value) {
; CHECK-LABEL: atomicrmw_or_i64_aligned_acq_rel:
; CHECK: bl __aarch64_ldset8_acq_rel
%r = atomicrmw or ptr %ptr, i64 %value acq_rel, align 8
ret i64 %r
}
define dso_local i64 @atomicrmw_or_i64_aligned_seq_cst(ptr %ptr, i64 %value) {
; CHECK-LABEL: atomicrmw_or_i64_aligned_seq_cst:
; CHECK: bl __aarch64_ldset8_acq_rel
%r = atomicrmw or ptr %ptr, i64 %value seq_cst, align 8
ret i64 %r
}
define dso_local i128 @atomicrmw_or_i128_aligned_monotonic(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_or_i128_aligned_monotonic:
; -O0: orr x2, x8, x10
; -O0: orr x3, x8, x9
; -O0: bl __aarch64_cas16_relax
; -O0: eor x8, x0, x8
; -O0: eor x9, x1, x9
; -O0: orr x8, x8, x9
; -O0: subs x8, x8, #0
;
; -O1-LABEL: atomicrmw_or_i128_aligned_monotonic:
; -O1: ldxp x0, x1, [x8]
; -O1: orr x9, x1, x3
; -O1: orr x10, x0, x2
; -O1: stxp w11, x10, x9, [x8]
%r = atomicrmw or ptr %ptr, i128 %value monotonic, align 16
ret i128 %r
}
define dso_local i128 @atomicrmw_or_i128_aligned_acquire(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_or_i128_aligned_acquire:
; -O0: orr x2, x8, x10
; -O0: orr x3, x8, x9
; -O0: bl __aarch64_cas16_acq
; -O0: eor x8, x0, x8
; -O0: eor x9, x1, x9
; -O0: orr x8, x8, x9
; -O0: subs x8, x8, #0
;
; -O1-LABEL: atomicrmw_or_i128_aligned_acquire:
; -O1: ldaxp x0, x1, [x8]
; -O1: orr x9, x1, x3
; -O1: orr x10, x0, x2
; -O1: stxp w11, x10, x9, [x8]
%r = atomicrmw or ptr %ptr, i128 %value acquire, align 16
ret i128 %r
}
define dso_local i128 @atomicrmw_or_i128_aligned_release(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_or_i128_aligned_release:
; -O0: orr x2, x8, x10
; -O0: orr x3, x8, x9
; -O0: bl __aarch64_cas16_rel
; -O0: eor x8, x0, x8
; -O0: eor x9, x1, x9
; -O0: orr x8, x8, x9
; -O0: subs x8, x8, #0
;
; -O1-LABEL: atomicrmw_or_i128_aligned_release:
; -O1: ldxp x0, x1, [x8]
; -O1: orr x9, x1, x3
; -O1: orr x10, x0, x2
; -O1: stlxp w11, x10, x9, [x8]
%r = atomicrmw or ptr %ptr, i128 %value release, align 16
ret i128 %r
}
define dso_local i128 @atomicrmw_or_i128_aligned_acq_rel(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_or_i128_aligned_acq_rel:
; -O0: orr x2, x8, x10
; -O0: orr x3, x8, x9
; -O0: bl __aarch64_cas16_acq_rel
; -O0: eor x8, x0, x8
; -O0: eor x9, x1, x9
; -O0: orr x8, x8, x9
; -O0: subs x8, x8, #0
;
; -O1-LABEL: atomicrmw_or_i128_aligned_acq_rel:
; -O1: ldaxp x0, x1, [x8]
; -O1: orr x9, x1, x3
; -O1: orr x10, x0, x2
; -O1: stlxp w11, x10, x9, [x8]
%r = atomicrmw or ptr %ptr, i128 %value acq_rel, align 16
ret i128 %r
}
define dso_local i128 @atomicrmw_or_i128_aligned_seq_cst(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_or_i128_aligned_seq_cst:
; -O0: orr x2, x8, x10
; -O0: orr x3, x8, x9
; -O0: bl __aarch64_cas16_acq_rel
; -O0: eor x8, x0, x8
; -O0: eor x9, x1, x9
; -O0: orr x8, x8, x9
; -O0: subs x8, x8, #0
;
; -O1-LABEL: atomicrmw_or_i128_aligned_seq_cst:
; -O1: ldaxp x0, x1, [x8]
; -O1: orr x9, x1, x3
; -O1: orr x10, x0, x2
; -O1: stlxp w11, x10, x9, [x8]
%r = atomicrmw or ptr %ptr, i128 %value seq_cst, align 16
ret i128 %r
}
define dso_local i8 @atomicrmw_or_i8_unaligned_monotonic(ptr %ptr, i8 %value) {
; CHECK-LABEL: atomicrmw_or_i8_unaligned_monotonic:
; CHECK: bl __aarch64_ldset1_relax
%r = atomicrmw or ptr %ptr, i8 %value monotonic, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_or_i8_unaligned_acquire(ptr %ptr, i8 %value) {
; CHECK-LABEL: atomicrmw_or_i8_unaligned_acquire:
; CHECK: bl __aarch64_ldset1_acq
%r = atomicrmw or ptr %ptr, i8 %value acquire, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_or_i8_unaligned_release(ptr %ptr, i8 %value) {
; CHECK-LABEL: atomicrmw_or_i8_unaligned_release:
; CHECK: bl __aarch64_ldset1_rel
%r = atomicrmw or ptr %ptr, i8 %value release, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_or_i8_unaligned_acq_rel(ptr %ptr, i8 %value) {
; CHECK-LABEL: atomicrmw_or_i8_unaligned_acq_rel:
; CHECK: bl __aarch64_ldset1_acq_rel
%r = atomicrmw or ptr %ptr, i8 %value acq_rel, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_or_i8_unaligned_seq_cst(ptr %ptr, i8 %value) {
; CHECK-LABEL: atomicrmw_or_i8_unaligned_seq_cst:
; CHECK: bl __aarch64_ldset1_acq_rel
%r = atomicrmw or ptr %ptr, i8 %value seq_cst, align 1
ret i8 %r
}
define dso_local i16 @atomicrmw_or_i16_unaligned_monotonic(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_or_i16_unaligned_monotonic:
; -O0: orr w8, w9, w8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_or_i16_unaligned_monotonic:
; -O1: orr w8, w0, w20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw or ptr %ptr, i16 %value monotonic, align 1
ret i16 %r
}
define dso_local i16 @atomicrmw_or_i16_unaligned_acquire(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_or_i16_unaligned_acquire:
; -O0: orr w8, w9, w8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_or_i16_unaligned_acquire:
; -O1: orr w8, w0, w20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw or ptr %ptr, i16 %value acquire, align 1
ret i16 %r
}
define dso_local i16 @atomicrmw_or_i16_unaligned_release(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_or_i16_unaligned_release:
; -O0: orr w8, w9, w8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_or_i16_unaligned_release:
; -O1: orr w8, w0, w20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw or ptr %ptr, i16 %value release, align 1
ret i16 %r
}
define dso_local i16 @atomicrmw_or_i16_unaligned_acq_rel(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_or_i16_unaligned_acq_rel:
; -O0: orr w8, w9, w8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_or_i16_unaligned_acq_rel:
; -O1: orr w8, w0, w20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw or ptr %ptr, i16 %value acq_rel, align 1
ret i16 %r
}
define dso_local i16 @atomicrmw_or_i16_unaligned_seq_cst(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_or_i16_unaligned_seq_cst:
; -O0: orr w8, w9, w8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_or_i16_unaligned_seq_cst:
; -O1: orr w8, w0, w20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw or ptr %ptr, i16 %value seq_cst, align 1
ret i16 %r
}
define dso_local i32 @atomicrmw_or_i32_unaligned_monotonic(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_or_i32_unaligned_monotonic:
; -O0: orr w8, w9, w8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_or_i32_unaligned_monotonic:
; -O1: orr w8, w0, w20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw or ptr %ptr, i32 %value monotonic, align 1
ret i32 %r
}
define dso_local i32 @atomicrmw_or_i32_unaligned_acquire(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_or_i32_unaligned_acquire:
; -O0: orr w8, w9, w8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_or_i32_unaligned_acquire:
; -O1: orr w8, w0, w20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw or ptr %ptr, i32 %value acquire, align 1
ret i32 %r
}
define dso_local i32 @atomicrmw_or_i32_unaligned_release(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_or_i32_unaligned_release:
; -O0: orr w8, w9, w8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_or_i32_unaligned_release:
; -O1: orr w8, w0, w20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw or ptr %ptr, i32 %value release, align 1
ret i32 %r
}
define dso_local i32 @atomicrmw_or_i32_unaligned_acq_rel(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_or_i32_unaligned_acq_rel:
; -O0: orr w8, w9, w8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_or_i32_unaligned_acq_rel:
; -O1: orr w8, w0, w20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw or ptr %ptr, i32 %value acq_rel, align 1
ret i32 %r
}
define dso_local i32 @atomicrmw_or_i32_unaligned_seq_cst(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_or_i32_unaligned_seq_cst:
; -O0: orr w8, w9, w8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_or_i32_unaligned_seq_cst:
; -O1: orr w8, w0, w20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw or ptr %ptr, i32 %value seq_cst, align 1
ret i32 %r
}
define dso_local i64 @atomicrmw_or_i64_unaligned_monotonic(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_or_i64_unaligned_monotonic:
; -O0: orr x8, x9, x8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_or_i64_unaligned_monotonic:
; -O1: orr x8, x0, x20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw or ptr %ptr, i64 %value monotonic, align 1
ret i64 %r
}
define dso_local i64 @atomicrmw_or_i64_unaligned_acquire(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_or_i64_unaligned_acquire:
; -O0: orr x8, x9, x8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_or_i64_unaligned_acquire:
; -O1: orr x8, x0, x20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw or ptr %ptr, i64 %value acquire, align 1
ret i64 %r
}
define dso_local i64 @atomicrmw_or_i64_unaligned_release(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_or_i64_unaligned_release:
; -O0: orr x8, x9, x8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_or_i64_unaligned_release:
; -O1: orr x8, x0, x20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw or ptr %ptr, i64 %value release, align 1
ret i64 %r
}
define dso_local i64 @atomicrmw_or_i64_unaligned_acq_rel(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_or_i64_unaligned_acq_rel:
; -O0: orr x8, x9, x8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_or_i64_unaligned_acq_rel:
; -O1: orr x8, x0, x20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw or ptr %ptr, i64 %value acq_rel, align 1
ret i64 %r
}
define dso_local i64 @atomicrmw_or_i64_unaligned_seq_cst(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_or_i64_unaligned_seq_cst:
; -O0: orr x8, x9, x8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_or_i64_unaligned_seq_cst:
; -O1: orr x8, x0, x20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw or ptr %ptr, i64 %value seq_cst, align 1
ret i64 %r
}
define dso_local i128 @atomicrmw_or_i128_unaligned_monotonic(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_or_i128_unaligned_monotonic:
; -O0: orr x9, x8, x9
; -O0: orr x8, x8, x10
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_or_i128_unaligned_monotonic:
; -O1: ldp x0, x1, [x0]
; -O1: orr x8, x1, x19
; -O1: orr x9, x0, x21
; -O1: bl __atomic_compare_exchange
%r = atomicrmw or ptr %ptr, i128 %value monotonic, align 1
ret i128 %r
}
define dso_local i128 @atomicrmw_or_i128_unaligned_acquire(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_or_i128_unaligned_acquire:
; -O0: orr x9, x8, x9
; -O0: orr x8, x8, x10
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_or_i128_unaligned_acquire:
; -O1: ldp x0, x1, [x0]
; -O1: orr x8, x1, x19
; -O1: orr x9, x0, x21
; -O1: bl __atomic_compare_exchange
%r = atomicrmw or ptr %ptr, i128 %value acquire, align 1
ret i128 %r
}
define dso_local i128 @atomicrmw_or_i128_unaligned_release(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_or_i128_unaligned_release:
; -O0: orr x9, x8, x9
; -O0: orr x8, x8, x10
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_or_i128_unaligned_release:
; -O1: ldp x0, x1, [x0]
; -O1: orr x8, x1, x19
; -O1: orr x9, x0, x21
; -O1: bl __atomic_compare_exchange
%r = atomicrmw or ptr %ptr, i128 %value release, align 1
ret i128 %r
}
define dso_local i128 @atomicrmw_or_i128_unaligned_acq_rel(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_or_i128_unaligned_acq_rel:
; -O0: orr x9, x8, x9
; -O0: orr x8, x8, x10
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_or_i128_unaligned_acq_rel:
; -O1: ldp x0, x1, [x0]
; -O1: orr x8, x1, x19
; -O1: orr x9, x0, x21
; -O1: bl __atomic_compare_exchange
%r = atomicrmw or ptr %ptr, i128 %value acq_rel, align 1
ret i128 %r
}
define dso_local i128 @atomicrmw_or_i128_unaligned_seq_cst(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_or_i128_unaligned_seq_cst:
; -O0: orr x9, x8, x9
; -O0: orr x8, x8, x10
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_or_i128_unaligned_seq_cst:
; -O1: ldp x0, x1, [x0]
; -O1: orr x8, x1, x19
; -O1: orr x9, x0, x21
; -O1: bl __atomic_compare_exchange
%r = atomicrmw or ptr %ptr, i128 %value seq_cst, align 1
ret i128 %r
}
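; xor follows the same pattern, using the __aarch64_ldeorN helpers.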
define dso_local i8 @atomicrmw_xor_i8_aligned_monotonic(ptr %ptr, i8 %value) {
; CHECK-LABEL: atomicrmw_xor_i8_aligned_monotonic:
; CHECK: bl __aarch64_ldeor1_relax
%r = atomicrmw xor ptr %ptr, i8 %value monotonic, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_xor_i8_aligned_acquire(ptr %ptr, i8 %value) {
; CHECK-LABEL: atomicrmw_xor_i8_aligned_acquire:
; CHECK: bl __aarch64_ldeor1_acq
%r = atomicrmw xor ptr %ptr, i8 %value acquire, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_xor_i8_aligned_release(ptr %ptr, i8 %value) {
; CHECK-LABEL: atomicrmw_xor_i8_aligned_release:
; CHECK: bl __aarch64_ldeor1_rel
%r = atomicrmw xor ptr %ptr, i8 %value release, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_xor_i8_aligned_acq_rel(ptr %ptr, i8 %value) {
; CHECK-LABEL: atomicrmw_xor_i8_aligned_acq_rel:
; CHECK: bl __aarch64_ldeor1_acq_rel
%r = atomicrmw xor ptr %ptr, i8 %value acq_rel, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_xor_i8_aligned_seq_cst(ptr %ptr, i8 %value) {
; CHECK-LABEL: atomicrmw_xor_i8_aligned_seq_cst:
; CHECK: bl __aarch64_ldeor1_acq_rel
%r = atomicrmw xor ptr %ptr, i8 %value seq_cst, align 1
ret i8 %r
}
define dso_local i16 @atomicrmw_xor_i16_aligned_monotonic(ptr %ptr, i16 %value) {
; CHECK-LABEL: atomicrmw_xor_i16_aligned_monotonic:
; CHECK: bl __aarch64_ldeor2_relax
%r = atomicrmw xor ptr %ptr, i16 %value monotonic, align 2
ret i16 %r
}
define dso_local i16 @atomicrmw_xor_i16_aligned_acquire(ptr %ptr, i16 %value) {
; CHECK-LABEL: atomicrmw_xor_i16_aligned_acquire:
; CHECK: bl __aarch64_ldeor2_acq
%r = atomicrmw xor ptr %ptr, i16 %value acquire, align 2
ret i16 %r
}
define dso_local i16 @atomicrmw_xor_i16_aligned_release(ptr %ptr, i16 %value) {
; CHECK-LABEL: atomicrmw_xor_i16_aligned_release:
; CHECK: bl __aarch64_ldeor2_rel
%r = atomicrmw xor ptr %ptr, i16 %value release, align 2
ret i16 %r
}
define dso_local i16 @atomicrmw_xor_i16_aligned_acq_rel(ptr %ptr, i16 %value) {
; CHECK-LABEL: atomicrmw_xor_i16_aligned_acq_rel:
; CHECK: bl __aarch64_ldeor2_acq_rel
%r = atomicrmw xor ptr %ptr, i16 %value acq_rel, align 2
ret i16 %r
}
define dso_local i16 @atomicrmw_xor_i16_aligned_seq_cst(ptr %ptr, i16 %value) {
; CHECK-LABEL: atomicrmw_xor_i16_aligned_seq_cst:
; CHECK: bl __aarch64_ldeor2_acq_rel
%r = atomicrmw xor ptr %ptr, i16 %value seq_cst, align 2
ret i16 %r
}
define dso_local i32 @atomicrmw_xor_i32_aligned_monotonic(ptr %ptr, i32 %value) {
; CHECK-LABEL: atomicrmw_xor_i32_aligned_monotonic:
; CHECK: bl __aarch64_ldeor4_relax
%r = atomicrmw xor ptr %ptr, i32 %value monotonic, align 4
ret i32 %r
}
define dso_local i32 @atomicrmw_xor_i32_aligned_acquire(ptr %ptr, i32 %value) {
; CHECK-LABEL: atomicrmw_xor_i32_aligned_acquire:
; CHECK: bl __aarch64_ldeor4_acq
%r = atomicrmw xor ptr %ptr, i32 %value acquire, align 4
ret i32 %r
}
define dso_local i32 @atomicrmw_xor_i32_aligned_release(ptr %ptr, i32 %value) {
; CHECK-LABEL: atomicrmw_xor_i32_aligned_release:
; CHECK: bl __aarch64_ldeor4_rel
%r = atomicrmw xor ptr %ptr, i32 %value release, align 4
ret i32 %r
}
define dso_local i32 @atomicrmw_xor_i32_aligned_acq_rel(ptr %ptr, i32 %value) {
; CHECK-LABEL: atomicrmw_xor_i32_aligned_acq_rel:
; CHECK: bl __aarch64_ldeor4_acq_rel
%r = atomicrmw xor ptr %ptr, i32 %value acq_rel, align 4
ret i32 %r
}
define dso_local i32 @atomicrmw_xor_i32_aligned_seq_cst(ptr %ptr, i32 %value) {
; CHECK-LABEL: atomicrmw_xor_i32_aligned_seq_cst:
; CHECK: bl __aarch64_ldeor4_acq_rel
%r = atomicrmw xor ptr %ptr, i32 %value seq_cst, align 4
ret i32 %r
}
define dso_local i64 @atomicrmw_xor_i64_aligned_monotonic(ptr %ptr, i64 %value) {
; CHECK-LABEL: atomicrmw_xor_i64_aligned_monotonic:
; CHECK: bl __aarch64_ldeor8_relax
%r = atomicrmw xor ptr %ptr, i64 %value monotonic, align 8
ret i64 %r
}
define dso_local i64 @atomicrmw_xor_i64_aligned_acquire(ptr %ptr, i64 %value) {
; CHECK-LABEL: atomicrmw_xor_i64_aligned_acquire:
; CHECK: bl __aarch64_ldeor8_acq
%r = atomicrmw xor ptr %ptr, i64 %value acquire, align 8
ret i64 %r
}
define dso_local i64 @atomicrmw_xor_i64_aligned_release(ptr %ptr, i64 %value) {
; CHECK-LABEL: atomicrmw_xor_i64_aligned_release:
; CHECK: bl __aarch64_ldeor8_rel
%r = atomicrmw xor ptr %ptr, i64 %value release, align 8
ret i64 %r
}
define dso_local i64 @atomicrmw_xor_i64_aligned_acq_rel(ptr %ptr, i64 %value) {
; CHECK-LABEL: atomicrmw_xor_i64_aligned_acq_rel:
; CHECK: bl __aarch64_ldeor8_acq_rel
%r = atomicrmw xor ptr %ptr, i64 %value acq_rel, align 8
ret i64 %r
}
define dso_local i64 @atomicrmw_xor_i64_aligned_seq_cst(ptr %ptr, i64 %value) {
; CHECK-LABEL: atomicrmw_xor_i64_aligned_seq_cst:
; CHECK: bl __aarch64_ldeor8_acq_rel
%r = atomicrmw xor ptr %ptr, i64 %value seq_cst, align 8
ret i64 %r
}
define dso_local i128 @atomicrmw_xor_i128_aligned_monotonic(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_xor_i128_aligned_monotonic:
; -O0: eor x2, x8, x10
; -O0: eor x3, x8, x9
; -O0: bl __aarch64_cas16_relax
; -O0: eor x8, x0, x8
; -O0: eor x9, x1, x9
; -O0: orr x8, x8, x9
; -O0: subs x8, x8, #0
;
; -O1-LABEL: atomicrmw_xor_i128_aligned_monotonic:
; -O1: ldxp x0, x1, [x8]
; -O1: eor x9, x1, x3
; -O1: eor x10, x0, x2
; -O1: stxp w11, x10, x9, [x8]
%r = atomicrmw xor ptr %ptr, i128 %value monotonic, align 16
ret i128 %r
}
define dso_local i128 @atomicrmw_xor_i128_aligned_acquire(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_xor_i128_aligned_acquire:
; -O0: eor x2, x8, x10
; -O0: eor x3, x8, x9
; -O0: bl __aarch64_cas16_acq
; -O0: eor x8, x0, x8
; -O0: eor x9, x1, x9
; -O0: orr x8, x8, x9
; -O0: subs x8, x8, #0
;
; -O1-LABEL: atomicrmw_xor_i128_aligned_acquire:
; -O1: ldaxp x0, x1, [x8]
; -O1: eor x9, x1, x3
; -O1: eor x10, x0, x2
; -O1: stxp w11, x10, x9, [x8]
%r = atomicrmw xor ptr %ptr, i128 %value acquire, align 16
ret i128 %r
}
define dso_local i128 @atomicrmw_xor_i128_aligned_release(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_xor_i128_aligned_release:
; -O0: eor x2, x8, x10
; -O0: eor x3, x8, x9
; -O0: bl __aarch64_cas16_rel
; -O0: eor x8, x0, x8
; -O0: eor x9, x1, x9
; -O0: orr x8, x8, x9
; -O0: subs x8, x8, #0
;
; -O1-LABEL: atomicrmw_xor_i128_aligned_release:
; -O1: ldxp x0, x1, [x8]
; -O1: eor x9, x1, x3
; -O1: eor x10, x0, x2
; -O1: stlxp w11, x10, x9, [x8]
%r = atomicrmw xor ptr %ptr, i128 %value release, align 16
ret i128 %r
}
define dso_local i128 @atomicrmw_xor_i128_aligned_acq_rel(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_xor_i128_aligned_acq_rel:
; -O0: eor x2, x8, x10
; -O0: eor x3, x8, x9
; -O0: bl __aarch64_cas16_acq_rel
; -O0: eor x8, x0, x8
; -O0: eor x9, x1, x9
; -O0: orr x8, x8, x9
; -O0: subs x8, x8, #0
;
; -O1-LABEL: atomicrmw_xor_i128_aligned_acq_rel:
; -O1: ldaxp x0, x1, [x8]
; -O1: eor x9, x1, x3
; -O1: eor x10, x0, x2
; -O1: stlxp w11, x10, x9, [x8]
%r = atomicrmw xor ptr %ptr, i128 %value acq_rel, align 16
ret i128 %r
}
define dso_local i128 @atomicrmw_xor_i128_aligned_seq_cst(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_xor_i128_aligned_seq_cst:
; -O0: eor x2, x8, x10
; -O0: eor x3, x8, x9
; -O0: bl __aarch64_cas16_acq_rel
; -O0: eor x8, x0, x8
; -O0: eor x9, x1, x9
; -O0: orr x8, x8, x9
; -O0: subs x8, x8, #0
;
; -O1-LABEL: atomicrmw_xor_i128_aligned_seq_cst:
; -O1: ldaxp x0, x1, [x8]
; -O1: eor x9, x1, x3
; -O1: eor x10, x0, x2
; -O1: stlxp w11, x10, x9, [x8]
%r = atomicrmw xor ptr %ptr, i128 %value seq_cst, align 16
ret i128 %r
}
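; Under-aligned forms (align 1) of i16 and wider cannot use the outlined LSE
; helpers and are lowered to loops around the __atomic_compare_exchange
; libcall, with the eor of the loaded value computed inline before each call.
; An i8 with align 1 is still naturally aligned, so it keeps the outlined
; __aarch64_ldeor1 path.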
define dso_local i8 @atomicrmw_xor_i8_unaligned_monotonic(ptr %ptr, i8 %value) {
; CHECK-LABEL: atomicrmw_xor_i8_unaligned_monotonic:
; CHECK: bl __aarch64_ldeor1_relax
%r = atomicrmw xor ptr %ptr, i8 %value monotonic, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_xor_i8_unaligned_acquire(ptr %ptr, i8 %value) {
; CHECK-LABEL: atomicrmw_xor_i8_unaligned_acquire:
; CHECK: bl __aarch64_ldeor1_acq
%r = atomicrmw xor ptr %ptr, i8 %value acquire, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_xor_i8_unaligned_release(ptr %ptr, i8 %value) {
; CHECK-LABEL: atomicrmw_xor_i8_unaligned_release:
; CHECK: bl __aarch64_ldeor1_rel
%r = atomicrmw xor ptr %ptr, i8 %value release, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_xor_i8_unaligned_acq_rel(ptr %ptr, i8 %value) {
; CHECK-LABEL: atomicrmw_xor_i8_unaligned_acq_rel:
; CHECK: bl __aarch64_ldeor1_acq_rel
%r = atomicrmw xor ptr %ptr, i8 %value acq_rel, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_xor_i8_unaligned_seq_cst(ptr %ptr, i8 %value) {
; CHECK-LABEL: atomicrmw_xor_i8_unaligned_seq_cst:
; CHECK: bl __aarch64_ldeor1_acq_rel
%r = atomicrmw xor ptr %ptr, i8 %value seq_cst, align 1
ret i8 %r
}
define dso_local i16 @atomicrmw_xor_i16_unaligned_monotonic(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_xor_i16_unaligned_monotonic:
; -O0: eor w8, w9, w8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_xor_i16_unaligned_monotonic:
; -O1: eor w8, w0, w20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw xor ptr %ptr, i16 %value monotonic, align 1
ret i16 %r
}
define dso_local i16 @atomicrmw_xor_i16_unaligned_acquire(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_xor_i16_unaligned_acquire:
; -O0: eor w8, w9, w8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_xor_i16_unaligned_acquire:
; -O1: eor w8, w0, w20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw xor ptr %ptr, i16 %value acquire, align 1
ret i16 %r
}
define dso_local i16 @atomicrmw_xor_i16_unaligned_release(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_xor_i16_unaligned_release:
; -O0: eor w8, w9, w8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_xor_i16_unaligned_release:
; -O1: eor w8, w0, w20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw xor ptr %ptr, i16 %value release, align 1
ret i16 %r
}
define dso_local i16 @atomicrmw_xor_i16_unaligned_acq_rel(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_xor_i16_unaligned_acq_rel:
; -O0: eor w8, w9, w8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_xor_i16_unaligned_acq_rel:
; -O1: eor w8, w0, w20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw xor ptr %ptr, i16 %value acq_rel, align 1
ret i16 %r
}
define dso_local i16 @atomicrmw_xor_i16_unaligned_seq_cst(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_xor_i16_unaligned_seq_cst:
; -O0: eor w8, w9, w8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_xor_i16_unaligned_seq_cst:
; -O1: eor w8, w0, w20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw xor ptr %ptr, i16 %value seq_cst, align 1
ret i16 %r
}
define dso_local i32 @atomicrmw_xor_i32_unaligned_monotonic(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_xor_i32_unaligned_monotonic:
; -O0: eor w8, w9, w8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_xor_i32_unaligned_monotonic:
; -O1: eor w8, w0, w20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw xor ptr %ptr, i32 %value monotonic, align 1
ret i32 %r
}
define dso_local i32 @atomicrmw_xor_i32_unaligned_acquire(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_xor_i32_unaligned_acquire:
; -O0: eor w8, w9, w8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_xor_i32_unaligned_acquire:
; -O1: eor w8, w0, w20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw xor ptr %ptr, i32 %value acquire, align 1
ret i32 %r
}
define dso_local i32 @atomicrmw_xor_i32_unaligned_release(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_xor_i32_unaligned_release:
; -O0: eor w8, w9, w8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_xor_i32_unaligned_release:
; -O1: eor w8, w0, w20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw xor ptr %ptr, i32 %value release, align 1
ret i32 %r
}
define dso_local i32 @atomicrmw_xor_i32_unaligned_acq_rel(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_xor_i32_unaligned_acq_rel:
; -O0: eor w8, w9, w8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_xor_i32_unaligned_acq_rel:
; -O1: eor w8, w0, w20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw xor ptr %ptr, i32 %value acq_rel, align 1
ret i32 %r
}
define dso_local i32 @atomicrmw_xor_i32_unaligned_seq_cst(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_xor_i32_unaligned_seq_cst:
; -O0: eor w8, w9, w8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_xor_i32_unaligned_seq_cst:
; -O1: eor w8, w0, w20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw xor ptr %ptr, i32 %value seq_cst, align 1
ret i32 %r
}
define dso_local i64 @atomicrmw_xor_i64_unaligned_monotonic(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_xor_i64_unaligned_monotonic:
; -O0: eor x8, x9, x8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_xor_i64_unaligned_monotonic:
; -O1: eor x8, x0, x20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw xor ptr %ptr, i64 %value monotonic, align 1
ret i64 %r
}
define dso_local i64 @atomicrmw_xor_i64_unaligned_acquire(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_xor_i64_unaligned_acquire:
; -O0: eor x8, x9, x8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_xor_i64_unaligned_acquire:
; -O1: eor x8, x0, x20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw xor ptr %ptr, i64 %value acquire, align 1
ret i64 %r
}
define dso_local i64 @atomicrmw_xor_i64_unaligned_release(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_xor_i64_unaligned_release:
; -O0: eor x8, x9, x8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_xor_i64_unaligned_release:
; -O1: eor x8, x0, x20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw xor ptr %ptr, i64 %value release, align 1
ret i64 %r
}
define dso_local i64 @atomicrmw_xor_i64_unaligned_acq_rel(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_xor_i64_unaligned_acq_rel:
; -O0: eor x8, x9, x8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_xor_i64_unaligned_acq_rel:
; -O1: eor x8, x0, x20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw xor ptr %ptr, i64 %value acq_rel, align 1
ret i64 %r
}
define dso_local i64 @atomicrmw_xor_i64_unaligned_seq_cst(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_xor_i64_unaligned_seq_cst:
; -O0: eor x8, x9, x8
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_xor_i64_unaligned_seq_cst:
; -O1: eor x8, x0, x20
; -O1: bl __atomic_compare_exchange
%r = atomicrmw xor ptr %ptr, i64 %value seq_cst, align 1
ret i64 %r
}
define dso_local i128 @atomicrmw_xor_i128_unaligned_monotonic(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_xor_i128_unaligned_monotonic:
; -O0: eor x9, x8, x9
; -O0: eor x8, x8, x10
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_xor_i128_unaligned_monotonic:
; -O1: ldp x0, x1, [x0]
; -O1: eor x8, x1, x19
; -O1: eor x9, x0, x21
; -O1: bl __atomic_compare_exchange
%r = atomicrmw xor ptr %ptr, i128 %value monotonic, align 1
ret i128 %r
}
define dso_local i128 @atomicrmw_xor_i128_unaligned_acquire(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_xor_i128_unaligned_acquire:
; -O0: eor x9, x8, x9
; -O0: eor x8, x8, x10
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_xor_i128_unaligned_acquire:
; -O1: ldp x0, x1, [x0]
; -O1: eor x8, x1, x19
; -O1: eor x9, x0, x21
; -O1: bl __atomic_compare_exchange
%r = atomicrmw xor ptr %ptr, i128 %value acquire, align 1
ret i128 %r
}
define dso_local i128 @atomicrmw_xor_i128_unaligned_release(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_xor_i128_unaligned_release:
; -O0: eor x9, x8, x9
; -O0: eor x8, x8, x10
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_xor_i128_unaligned_release:
; -O1: ldp x0, x1, [x0]
; -O1: eor x8, x1, x19
; -O1: eor x9, x0, x21
; -O1: bl __atomic_compare_exchange
%r = atomicrmw xor ptr %ptr, i128 %value release, align 1
ret i128 %r
}
define dso_local i128 @atomicrmw_xor_i128_unaligned_acq_rel(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_xor_i128_unaligned_acq_rel:
; -O0: eor x9, x8, x9
; -O0: eor x8, x8, x10
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_xor_i128_unaligned_acq_rel:
; -O1: ldp x0, x1, [x0]
; -O1: eor x8, x1, x19
; -O1: eor x9, x0, x21
; -O1: bl __atomic_compare_exchange
%r = atomicrmw xor ptr %ptr, i128 %value acq_rel, align 1
ret i128 %r
}
define dso_local i128 @atomicrmw_xor_i128_unaligned_seq_cst(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_xor_i128_unaligned_seq_cst:
; -O0: eor x9, x8, x9
; -O0: eor x8, x8, x10
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_xor_i128_unaligned_seq_cst:
; -O1: ldp x0, x1, [x0]
; -O1: eor x8, x1, x19
; -O1: eor x9, x0, x21
; -O1: bl __atomic_compare_exchange
%r = atomicrmw xor ptr %ptr, i128 %value seq_cst, align 1
ret i128 %r
}
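; There are no outlined min/max helpers, so signed max is expanded to a
; compare-and-select. At -O0 the new value is picked with subs/csel (after
; sxtb/sxth for the sub-word widths) and committed through a
; __aarch64_cas<size> loop; at -O1 an inline exclusive load/store loop does
; cmp (with sxtb/sxth extension) and csel with the gt condition. For i128 the
; comparison spans both halves with the operands swapped relative to the
; narrower cases, so the selects use lt instead of gt.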
define dso_local i8 @atomicrmw_max_i8_aligned_monotonic(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_max_i8_aligned_monotonic:
; -O0: sxtb w9, w0
; -O0: subs w9, w9, w8, sxtb
; -O0: csel w1, w0, w8, gt
; -O0: bl __aarch64_cas1_relax
; -O0: and w8, w0, #0xff
; -O0: subs w8, w8, w9, uxtb
;
; -O1-LABEL: atomicrmw_max_i8_aligned_monotonic:
; -O1: ldxrb w9, [x0]
; -O1: sxtb w8, w9
; -O1: cmp w8, w1, sxtb
; -O1: csel w9, w9, w1, gt
; -O1: stxrb w10, w9, [x0]
%r = atomicrmw max ptr %ptr, i8 %value monotonic, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_max_i8_aligned_acquire(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_max_i8_aligned_acquire:
; -O0: sxtb w9, w0
; -O0: subs w9, w9, w8, sxtb
; -O0: csel w1, w0, w8, gt
; -O0: bl __aarch64_cas1_acq
; -O0: and w8, w0, #0xff
; -O0: subs w8, w8, w9, uxtb
;
; -O1-LABEL: atomicrmw_max_i8_aligned_acquire:
; -O1: ldaxrb w9, [x0]
; -O1: sxtb w8, w9
; -O1: cmp w8, w1, sxtb
; -O1: csel w9, w9, w1, gt
; -O1: stxrb w10, w9, [x0]
%r = atomicrmw max ptr %ptr, i8 %value acquire, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_max_i8_aligned_release(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_max_i8_aligned_release:
; -O0: sxtb w9, w0
; -O0: subs w9, w9, w8, sxtb
; -O0: csel w1, w0, w8, gt
; -O0: bl __aarch64_cas1_rel
; -O0: and w8, w0, #0xff
; -O0: subs w8, w8, w9, uxtb
;
; -O1-LABEL: atomicrmw_max_i8_aligned_release:
; -O1: ldxrb w9, [x0]
; -O1: sxtb w8, w9
; -O1: cmp w8, w1, sxtb
; -O1: csel w9, w9, w1, gt
; -O1: stlxrb w10, w9, [x0]
%r = atomicrmw max ptr %ptr, i8 %value release, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_max_i8_aligned_acq_rel(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_max_i8_aligned_acq_rel:
; -O0: sxtb w9, w0
; -O0: subs w9, w9, w8, sxtb
; -O0: csel w1, w0, w8, gt
; -O0: bl __aarch64_cas1_acq_rel
; -O0: and w8, w0, #0xff
; -O0: subs w8, w8, w9, uxtb
;
; -O1-LABEL: atomicrmw_max_i8_aligned_acq_rel:
; -O1: ldaxrb w9, [x0]
; -O1: sxtb w8, w9
; -O1: cmp w8, w1, sxtb
; -O1: csel w9, w9, w1, gt
; -O1: stlxrb w10, w9, [x0]
%r = atomicrmw max ptr %ptr, i8 %value acq_rel, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_max_i8_aligned_seq_cst(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_max_i8_aligned_seq_cst:
; -O0: sxtb w9, w0
; -O0: subs w9, w9, w8, sxtb
; -O0: csel w1, w0, w8, gt
; -O0: bl __aarch64_cas1_acq_rel
; -O0: and w8, w0, #0xff
; -O0: subs w8, w8, w9, uxtb
;
; -O1-LABEL: atomicrmw_max_i8_aligned_seq_cst:
; -O1: ldaxrb w9, [x0]
; -O1: sxtb w8, w9
; -O1: cmp w8, w1, sxtb
; -O1: csel w9, w9, w1, gt
; -O1: stlxrb w10, w9, [x0]
%r = atomicrmw max ptr %ptr, i8 %value seq_cst, align 1
ret i8 %r
}
define dso_local i16 @atomicrmw_max_i16_aligned_monotonic(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_max_i16_aligned_monotonic:
; -O0: sxth w9, w0
; -O0: subs w9, w9, w8, sxth
; -O0: csel w1, w0, w8, gt
; -O0: bl __aarch64_cas2_relax
; -O0: subs w8, w8, w0, uxth
;
; -O1-LABEL: atomicrmw_max_i16_aligned_monotonic:
; -O1: ldxrh w9, [x0]
; -O1: sxth w8, w9
; -O1: cmp w8, w1, sxth
; -O1: csel w9, w9, w1, gt
; -O1: stxrh w10, w9, [x0]
%r = atomicrmw max ptr %ptr, i16 %value monotonic, align 2
ret i16 %r
}
define dso_local i16 @atomicrmw_max_i16_aligned_acquire(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_max_i16_aligned_acquire:
; -O0: sxth w9, w0
; -O0: subs w9, w9, w8, sxth
; -O0: csel w1, w0, w8, gt
; -O0: bl __aarch64_cas2_acq
; -O0: subs w8, w8, w0, uxth
;
; -O1-LABEL: atomicrmw_max_i16_aligned_acquire:
; -O1: ldaxrh w9, [x0]
; -O1: sxth w8, w9
; -O1: cmp w8, w1, sxth
; -O1: csel w9, w9, w1, gt
; -O1: stxrh w10, w9, [x0]
%r = atomicrmw max ptr %ptr, i16 %value acquire, align 2
ret i16 %r
}
define dso_local i16 @atomicrmw_max_i16_aligned_release(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_max_i16_aligned_release:
; -O0: sxth w9, w0
; -O0: subs w9, w9, w8, sxth
; -O0: csel w1, w0, w8, gt
; -O0: bl __aarch64_cas2_rel
; -O0: subs w8, w8, w0, uxth
;
; -O1-LABEL: atomicrmw_max_i16_aligned_release:
; -O1: ldxrh w9, [x0]
; -O1: sxth w8, w9
; -O1: cmp w8, w1, sxth
; -O1: csel w9, w9, w1, gt
; -O1: stlxrh w10, w9, [x0]
%r = atomicrmw max ptr %ptr, i16 %value release, align 2
ret i16 %r
}
define dso_local i16 @atomicrmw_max_i16_aligned_acq_rel(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_max_i16_aligned_acq_rel:
; -O0: sxth w9, w0
; -O0: subs w9, w9, w8, sxth
; -O0: csel w1, w0, w8, gt
; -O0: bl __aarch64_cas2_acq_rel
; -O0: subs w8, w8, w0, uxth
;
; -O1-LABEL: atomicrmw_max_i16_aligned_acq_rel:
; -O1: ldaxrh w9, [x0]
; -O1: sxth w8, w9
; -O1: cmp w8, w1, sxth
; -O1: csel w9, w9, w1, gt
; -O1: stlxrh w10, w9, [x0]
%r = atomicrmw max ptr %ptr, i16 %value acq_rel, align 2
ret i16 %r
}
define dso_local i16 @atomicrmw_max_i16_aligned_seq_cst(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_max_i16_aligned_seq_cst:
; -O0: sxth w9, w0
; -O0: subs w9, w9, w8, sxth
; -O0: csel w1, w0, w8, gt
; -O0: bl __aarch64_cas2_acq_rel
; -O0: subs w8, w8, w0, uxth
;
; -O1-LABEL: atomicrmw_max_i16_aligned_seq_cst:
; -O1: ldaxrh w9, [x0]
; -O1: sxth w8, w9
; -O1: cmp w8, w1, sxth
; -O1: csel w9, w9, w1, gt
; -O1: stlxrh w10, w9, [x0]
%r = atomicrmw max ptr %ptr, i16 %value seq_cst, align 2
ret i16 %r
}
define dso_local i32 @atomicrmw_max_i32_aligned_monotonic(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_max_i32_aligned_monotonic:
; -O0: subs w9, w0, w8
; -O0: csel w1, w0, w8, gt
; -O0: bl __aarch64_cas4_relax
; -O0: subs w8, w0, w8
;
; -O1-LABEL: atomicrmw_max_i32_aligned_monotonic:
; -O1: ldxr w8, [x0]
; -O1: cmp w8, w1
; -O1: csel w9, w8, w1, gt
; -O1: stxr w10, w9, [x0]
%r = atomicrmw max ptr %ptr, i32 %value monotonic, align 4
ret i32 %r
}
define dso_local i32 @atomicrmw_max_i32_aligned_acquire(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_max_i32_aligned_acquire:
; -O0: subs w9, w0, w8
; -O0: csel w1, w0, w8, gt
; -O0: bl __aarch64_cas4_acq
; -O0: subs w8, w0, w8
;
; -O1-LABEL: atomicrmw_max_i32_aligned_acquire:
; -O1: ldaxr w8, [x0]
; -O1: cmp w8, w1
; -O1: csel w9, w8, w1, gt
; -O1: stxr w10, w9, [x0]
%r = atomicrmw max ptr %ptr, i32 %value acquire, align 4
ret i32 %r
}
define dso_local i32 @atomicrmw_max_i32_aligned_release(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_max_i32_aligned_release:
; -O0: subs w9, w0, w8
; -O0: csel w1, w0, w8, gt
; -O0: bl __aarch64_cas4_rel
; -O0: subs w8, w0, w8
;
; -O1-LABEL: atomicrmw_max_i32_aligned_release:
; -O1: ldxr w8, [x0]
; -O1: cmp w8, w1
; -O1: csel w9, w8, w1, gt
; -O1: stlxr w10, w9, [x0]
%r = atomicrmw max ptr %ptr, i32 %value release, align 4
ret i32 %r
}
define dso_local i32 @atomicrmw_max_i32_aligned_acq_rel(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_max_i32_aligned_acq_rel:
; -O0: subs w9, w0, w8
; -O0: csel w1, w0, w8, gt
; -O0: bl __aarch64_cas4_acq_rel
; -O0: subs w8, w0, w8
;
; -O1-LABEL: atomicrmw_max_i32_aligned_acq_rel:
; -O1: ldaxr w8, [x0]
; -O1: cmp w8, w1
; -O1: csel w9, w8, w1, gt
; -O1: stlxr w10, w9, [x0]
%r = atomicrmw max ptr %ptr, i32 %value acq_rel, align 4
ret i32 %r
}
define dso_local i32 @atomicrmw_max_i32_aligned_seq_cst(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_max_i32_aligned_seq_cst:
; -O0: subs w9, w0, w8
; -O0: csel w1, w0, w8, gt
; -O0: bl __aarch64_cas4_acq_rel
; -O0: subs w8, w0, w8
;
; -O1-LABEL: atomicrmw_max_i32_aligned_seq_cst:
; -O1: ldaxr w8, [x0]
; -O1: cmp w8, w1
; -O1: csel w9, w8, w1, gt
; -O1: stlxr w10, w9, [x0]
%r = atomicrmw max ptr %ptr, i32 %value seq_cst, align 4
ret i32 %r
}
define dso_local i64 @atomicrmw_max_i64_aligned_monotonic(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_max_i64_aligned_monotonic:
; -O0: subs x9, x0, x8
; -O0: csel x1, x0, x8, gt
; -O0: bl __aarch64_cas8_relax
; -O0: subs x8, x0, x8
;
; -O1-LABEL: atomicrmw_max_i64_aligned_monotonic:
; -O1: ldxr x0, [x8]
; -O1: cmp x0, x1
; -O1: csel x9, x0, x1, gt
; -O1: stxr w10, x9, [x8]
%r = atomicrmw max ptr %ptr, i64 %value monotonic, align 8
ret i64 %r
}
define dso_local i64 @atomicrmw_max_i64_aligned_acquire(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_max_i64_aligned_acquire:
; -O0: subs x9, x0, x8
; -O0: csel x1, x0, x8, gt
; -O0: bl __aarch64_cas8_acq
; -O0: subs x8, x0, x8
;
; -O1-LABEL: atomicrmw_max_i64_aligned_acquire:
; -O1: ldaxr x0, [x8]
; -O1: cmp x0, x1
; -O1: csel x9, x0, x1, gt
; -O1: stxr w10, x9, [x8]
%r = atomicrmw max ptr %ptr, i64 %value acquire, align 8
ret i64 %r
}
define dso_local i64 @atomicrmw_max_i64_aligned_release(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_max_i64_aligned_release:
; -O0: subs x9, x0, x8
; -O0: csel x1, x0, x8, gt
; -O0: bl __aarch64_cas8_rel
; -O0: subs x8, x0, x8
;
; -O1-LABEL: atomicrmw_max_i64_aligned_release:
; -O1: ldxr x0, [x8]
; -O1: cmp x0, x1
; -O1: csel x9, x0, x1, gt
; -O1: stlxr w10, x9, [x8]
%r = atomicrmw max ptr %ptr, i64 %value release, align 8
ret i64 %r
}
define dso_local i64 @atomicrmw_max_i64_aligned_acq_rel(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_max_i64_aligned_acq_rel:
; -O0: subs x9, x0, x8
; -O0: csel x1, x0, x8, gt
; -O0: bl __aarch64_cas8_acq_rel
; -O0: subs x8, x0, x8
;
; -O1-LABEL: atomicrmw_max_i64_aligned_acq_rel:
; -O1: ldaxr x0, [x8]
; -O1: cmp x0, x1
; -O1: csel x9, x0, x1, gt
; -O1: stlxr w10, x9, [x8]
%r = atomicrmw max ptr %ptr, i64 %value acq_rel, align 8
ret i64 %r
}
define dso_local i64 @atomicrmw_max_i64_aligned_seq_cst(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_max_i64_aligned_seq_cst:
; -O0: subs x9, x0, x8
; -O0: csel x1, x0, x8, gt
; -O0: bl __aarch64_cas8_acq_rel
; -O0: subs x8, x0, x8
;
; -O1-LABEL: atomicrmw_max_i64_aligned_seq_cst:
; -O1: ldaxr x0, [x8]
; -O1: cmp x0, x1
; -O1: csel x9, x0, x1, gt
; -O1: stlxr w10, x9, [x8]
%r = atomicrmw max ptr %ptr, i64 %value seq_cst, align 8
ret i64 %r
}
define dso_local i128 @atomicrmw_max_i128_aligned_monotonic(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_max_i128_aligned_monotonic:
; -O0: subs x8, x8, x9
; -O0: subs x8, x8, x11
; -O0: subs x12, x12, x9
; -O0: csel w10, w8, w10, eq
; -O0: ands w12, w10, #0x1
; -O0: csel x2, x8, x11, ne
; -O0: ands w10, w10, #0x1
; -O0: csel x3, x8, x9, ne
; -O0: bl __aarch64_cas16_relax
; -O0: eor x8, x0, x8
; -O0: eor x9, x1, x9
; -O0: orr x8, x8, x9
; -O0: subs x8, x8, #0
;
; -O1-LABEL: atomicrmw_max_i128_aligned_monotonic:
; -O1: ldxp x0, x1, [x8]
; -O1: cmp x2, x0
; -O1: csel x9, x1, x3, lt
; -O1: csel x10, x0, x2, lt
; -O1: stxp w11, x10, x9, [x8]
%r = atomicrmw max ptr %ptr, i128 %value monotonic, align 16
ret i128 %r
}
define dso_local i128 @atomicrmw_max_i128_aligned_acquire(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_max_i128_aligned_acquire:
; -O0: subs x8, x8, x9
; -O0: subs x8, x8, x11
; -O0: subs x12, x12, x9
; -O0: csel w10, w8, w10, eq
; -O0: ands w12, w10, #0x1
; -O0: csel x2, x8, x11, ne
; -O0: ands w10, w10, #0x1
; -O0: csel x3, x8, x9, ne
; -O0: bl __aarch64_cas16_acq
; -O0: eor x8, x0, x8
; -O0: eor x9, x1, x9
; -O0: orr x8, x8, x9
; -O0: subs x8, x8, #0
;
; -O1-LABEL: atomicrmw_max_i128_aligned_acquire:
; -O1: ldaxp x0, x1, [x8]
; -O1: cmp x2, x0
; -O1: csel x9, x1, x3, lt
; -O1: csel x10, x0, x2, lt
; -O1: stxp w11, x10, x9, [x8]
%r = atomicrmw max ptr %ptr, i128 %value acquire, align 16
ret i128 %r
}
define dso_local i128 @atomicrmw_max_i128_aligned_release(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_max_i128_aligned_release:
; -O0: subs x8, x8, x9
; -O0: subs x8, x8, x11
; -O0: subs x12, x12, x9
; -O0: csel w10, w8, w10, eq
; -O0: ands w12, w10, #0x1
; -O0: csel x2, x8, x11, ne
; -O0: ands w10, w10, #0x1
; -O0: csel x3, x8, x9, ne
; -O0: bl __aarch64_cas16_rel
; -O0: eor x8, x0, x8
; -O0: eor x9, x1, x9
; -O0: orr x8, x8, x9
; -O0: subs x8, x8, #0
;
; -O1-LABEL: atomicrmw_max_i128_aligned_release:
; -O1: ldxp x0, x1, [x8]
; -O1: cmp x2, x0
; -O1: csel x9, x1, x3, lt
; -O1: csel x10, x0, x2, lt
; -O1: stlxp w11, x10, x9, [x8]
%r = atomicrmw max ptr %ptr, i128 %value release, align 16
ret i128 %r
}
define dso_local i128 @atomicrmw_max_i128_aligned_acq_rel(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_max_i128_aligned_acq_rel:
; -O0: subs x8, x8, x9
; -O0: subs x8, x8, x11
; -O0: subs x12, x12, x9
; -O0: csel w10, w8, w10, eq
; -O0: ands w12, w10, #0x1
; -O0: csel x2, x8, x11, ne
; -O0: ands w10, w10, #0x1
; -O0: csel x3, x8, x9, ne
; -O0: bl __aarch64_cas16_acq_rel
; -O0: eor x8, x0, x8
; -O0: eor x9, x1, x9
; -O0: orr x8, x8, x9
; -O0: subs x8, x8, #0
;
; -O1-LABEL: atomicrmw_max_i128_aligned_acq_rel:
; -O1: ldaxp x0, x1, [x8]
; -O1: cmp x2, x0
; -O1: csel x9, x1, x3, lt
; -O1: csel x10, x0, x2, lt
; -O1: stlxp w11, x10, x9, [x8]
%r = atomicrmw max ptr %ptr, i128 %value acq_rel, align 16
ret i128 %r
}
define dso_local i128 @atomicrmw_max_i128_aligned_seq_cst(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_max_i128_aligned_seq_cst:
; -O0: subs x8, x8, x9
; -O0: subs x8, x8, x11
; -O0: subs x12, x12, x9
; -O0: csel w10, w8, w10, eq
; -O0: ands w12, w10, #0x1
; -O0: csel x2, x8, x11, ne
; -O0: ands w10, w10, #0x1
; -O0: csel x3, x8, x9, ne
; -O0: bl __aarch64_cas16_acq_rel
; -O0: eor x8, x0, x8
; -O0: eor x9, x1, x9
; -O0: orr x8, x8, x9
; -O0: subs x8, x8, #0
;
; -O1-LABEL: atomicrmw_max_i128_aligned_seq_cst:
; -O1: ldaxp x0, x1, [x8]
; -O1: cmp x2, x0
; -O1: csel x9, x1, x3, lt
; -O1: csel x10, x0, x2, lt
; -O1: stlxp w11, x10, x9, [x8]
%r = atomicrmw max ptr %ptr, i128 %value seq_cst, align 16
ret i128 %r
}
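; The under-aligned max forms follow the same split as xor: i8 is naturally
; aligned and is lowered exactly like the aligned case, while i16 and wider
; fall back to __atomic_compare_exchange loops with the sign-extending
; cmp/csel (or subs/csel at -O0) done inline before each call.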
define dso_local i8 @atomicrmw_max_i8_unaligned_monotonic(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_max_i8_unaligned_monotonic:
; -O0: sxtb w9, w0
; -O0: subs w9, w9, w8, sxtb
; -O0: csel w1, w0, w8, gt
; -O0: bl __aarch64_cas1_relax
; -O0: and w8, w0, #0xff
; -O0: subs w8, w8, w9, uxtb
;
; -O1-LABEL: atomicrmw_max_i8_unaligned_monotonic:
; -O1: ldxrb w9, [x0]
; -O1: sxtb w8, w9
; -O1: cmp w8, w1, sxtb
; -O1: csel w9, w9, w1, gt
; -O1: stxrb w10, w9, [x0]
%r = atomicrmw max ptr %ptr, i8 %value monotonic, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_max_i8_unaligned_acquire(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_max_i8_unaligned_acquire:
; -O0: sxtb w9, w0
; -O0: subs w9, w9, w8, sxtb
; -O0: csel w1, w0, w8, gt
; -O0: bl __aarch64_cas1_acq
; -O0: and w8, w0, #0xff
; -O0: subs w8, w8, w9, uxtb
;
; -O1-LABEL: atomicrmw_max_i8_unaligned_acquire:
; -O1: ldaxrb w9, [x0]
; -O1: sxtb w8, w9
; -O1: cmp w8, w1, sxtb
; -O1: csel w9, w9, w1, gt
; -O1: stxrb w10, w9, [x0]
%r = atomicrmw max ptr %ptr, i8 %value acquire, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_max_i8_unaligned_release(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_max_i8_unaligned_release:
; -O0: sxtb w9, w0
; -O0: subs w9, w9, w8, sxtb
; -O0: csel w1, w0, w8, gt
; -O0: bl __aarch64_cas1_rel
; -O0: and w8, w0, #0xff
; -O0: subs w8, w8, w9, uxtb
;
; -O1-LABEL: atomicrmw_max_i8_unaligned_release:
; -O1: ldxrb w9, [x0]
; -O1: sxtb w8, w9
; -O1: cmp w8, w1, sxtb
; -O1: csel w9, w9, w1, gt
; -O1: stlxrb w10, w9, [x0]
%r = atomicrmw max ptr %ptr, i8 %value release, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_max_i8_unaligned_acq_rel(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_max_i8_unaligned_acq_rel:
; -O0: sxtb w9, w0
; -O0: subs w9, w9, w8, sxtb
; -O0: csel w1, w0, w8, gt
; -O0: bl __aarch64_cas1_acq_rel
; -O0: and w8, w0, #0xff
; -O0: subs w8, w8, w9, uxtb
;
; -O1-LABEL: atomicrmw_max_i8_unaligned_acq_rel:
; -O1: ldaxrb w9, [x0]
; -O1: sxtb w8, w9
; -O1: cmp w8, w1, sxtb
; -O1: csel w9, w9, w1, gt
; -O1: stlxrb w10, w9, [x0]
%r = atomicrmw max ptr %ptr, i8 %value acq_rel, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_max_i8_unaligned_seq_cst(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_max_i8_unaligned_seq_cst:
; -O0: sxtb w9, w0
; -O0: subs w9, w9, w8, sxtb
; -O0: csel w1, w0, w8, gt
; -O0: bl __aarch64_cas1_acq_rel
; -O0: and w8, w0, #0xff
; -O0: subs w8, w8, w9, uxtb
;
; -O1-LABEL: atomicrmw_max_i8_unaligned_seq_cst:
; -O1: ldaxrb w9, [x0]
; -O1: sxtb w8, w9
; -O1: cmp w8, w1, sxtb
; -O1: csel w9, w9, w1, gt
; -O1: stlxrb w10, w9, [x0]
%r = atomicrmw max ptr %ptr, i8 %value seq_cst, align 1
ret i8 %r
}
define dso_local i16 @atomicrmw_max_i16_unaligned_monotonic(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_max_i16_unaligned_monotonic:
; -O0: sxth w10, w9
; -O0: subs w10, w10, w8, sxth
; -O0: csel w8, w9, w8, gt
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_max_i16_unaligned_monotonic:
; -O1: sxth w8, w0
; -O1: cmp w8, w20, sxth
; -O1: csel w8, w0, w20, gt
; -O1: bl __atomic_compare_exchange
%r = atomicrmw max ptr %ptr, i16 %value monotonic, align 1
ret i16 %r
}
define dso_local i16 @atomicrmw_max_i16_unaligned_acquire(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_max_i16_unaligned_acquire:
; -O0: sxth w10, w9
; -O0: subs w10, w10, w8, sxth
; -O0: csel w8, w9, w8, gt
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_max_i16_unaligned_acquire:
; -O1: sxth w8, w0
; -O1: cmp w8, w20, sxth
; -O1: csel w8, w0, w20, gt
; -O1: bl __atomic_compare_exchange
%r = atomicrmw max ptr %ptr, i16 %value acquire, align 1
ret i16 %r
}
define dso_local i16 @atomicrmw_max_i16_unaligned_release(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_max_i16_unaligned_release:
; -O0: sxth w10, w9
; -O0: subs w10, w10, w8, sxth
; -O0: csel w8, w9, w8, gt
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_max_i16_unaligned_release:
; -O1: sxth w8, w0
; -O1: cmp w8, w20, sxth
; -O1: csel w8, w0, w20, gt
; -O1: bl __atomic_compare_exchange
%r = atomicrmw max ptr %ptr, i16 %value release, align 1
ret i16 %r
}
define dso_local i16 @atomicrmw_max_i16_unaligned_acq_rel(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_max_i16_unaligned_acq_rel:
; -O0: sxth w10, w9
; -O0: subs w10, w10, w8, sxth
; -O0: csel w8, w9, w8, gt
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_max_i16_unaligned_acq_rel:
; -O1: sxth w8, w0
; -O1: cmp w8, w20, sxth
; -O1: csel w8, w0, w20, gt
; -O1: bl __atomic_compare_exchange
%r = atomicrmw max ptr %ptr, i16 %value acq_rel, align 1
ret i16 %r
}
define dso_local i16 @atomicrmw_max_i16_unaligned_seq_cst(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_max_i16_unaligned_seq_cst:
; -O0: sxth w10, w9
; -O0: subs w10, w10, w8, sxth
; -O0: csel w8, w9, w8, gt
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_max_i16_unaligned_seq_cst:
; -O1: sxth w8, w0
; -O1: cmp w8, w20, sxth
; -O1: csel w8, w0, w20, gt
; -O1: bl __atomic_compare_exchange
%r = atomicrmw max ptr %ptr, i16 %value seq_cst, align 1
ret i16 %r
}
define dso_local i32 @atomicrmw_max_i32_unaligned_monotonic(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_max_i32_unaligned_monotonic:
; -O0: subs w10, w9, w8
; -O0: csel w8, w9, w8, gt
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_max_i32_unaligned_monotonic:
; -O1: cmp w0, w20
; -O1: csel w8, w0, w20, gt
; -O1: bl __atomic_compare_exchange
%r = atomicrmw max ptr %ptr, i32 %value monotonic, align 1
ret i32 %r
}
define dso_local i32 @atomicrmw_max_i32_unaligned_acquire(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_max_i32_unaligned_acquire:
; -O0: subs w10, w9, w8
; -O0: csel w8, w9, w8, gt
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_max_i32_unaligned_acquire:
; -O1: cmp w0, w20
; -O1: csel w8, w0, w20, gt
; -O1: bl __atomic_compare_exchange
%r = atomicrmw max ptr %ptr, i32 %value acquire, align 1
ret i32 %r
}
define dso_local i32 @atomicrmw_max_i32_unaligned_release(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_max_i32_unaligned_release:
; -O0: subs w10, w9, w8
; -O0: csel w8, w9, w8, gt
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_max_i32_unaligned_release:
; -O1: cmp w0, w20
; -O1: csel w8, w0, w20, gt
; -O1: bl __atomic_compare_exchange
%r = atomicrmw max ptr %ptr, i32 %value release, align 1
ret i32 %r
}
define dso_local i32 @atomicrmw_max_i32_unaligned_acq_rel(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_max_i32_unaligned_acq_rel:
; -O0: subs w10, w9, w8
; -O0: csel w8, w9, w8, gt
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_max_i32_unaligned_acq_rel:
; -O1: cmp w0, w20
; -O1: csel w8, w0, w20, gt
; -O1: bl __atomic_compare_exchange
%r = atomicrmw max ptr %ptr, i32 %value acq_rel, align 1
ret i32 %r
}
define dso_local i32 @atomicrmw_max_i32_unaligned_seq_cst(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_max_i32_unaligned_seq_cst:
; -O0: subs w10, w9, w8
; -O0: csel w8, w9, w8, gt
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_max_i32_unaligned_seq_cst:
; -O1: cmp w0, w20
; -O1: csel w8, w0, w20, gt
; -O1: bl __atomic_compare_exchange
%r = atomicrmw max ptr %ptr, i32 %value seq_cst, align 1
ret i32 %r
}
define dso_local i64 @atomicrmw_max_i64_unaligned_monotonic(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_max_i64_unaligned_monotonic:
; -O0: subs x10, x9, x8
; -O0: csel x8, x9, x8, gt
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_max_i64_unaligned_monotonic:
; -O1: cmp x0, x20
; -O1: csel x8, x0, x20, gt
; -O1: bl __atomic_compare_exchange
%r = atomicrmw max ptr %ptr, i64 %value monotonic, align 1
ret i64 %r
}
define dso_local i64 @atomicrmw_max_i64_unaligned_acquire(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_max_i64_unaligned_acquire:
; -O0: subs x10, x9, x8
; -O0: csel x8, x9, x8, gt
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_max_i64_unaligned_acquire:
; -O1: cmp x0, x20
; -O1: csel x8, x0, x20, gt
; -O1: bl __atomic_compare_exchange
%r = atomicrmw max ptr %ptr, i64 %value acquire, align 1
ret i64 %r
}
define dso_local i64 @atomicrmw_max_i64_unaligned_release(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_max_i64_unaligned_release:
; -O0: subs x10, x9, x8
; -O0: csel x8, x9, x8, gt
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_max_i64_unaligned_release:
; -O1: cmp x0, x20
; -O1: csel x8, x0, x20, gt
; -O1: bl __atomic_compare_exchange
%r = atomicrmw max ptr %ptr, i64 %value release, align 1
ret i64 %r
}
define dso_local i64 @atomicrmw_max_i64_unaligned_acq_rel(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_max_i64_unaligned_acq_rel:
; -O0: subs x10, x9, x8
; -O0: csel x8, x9, x8, gt
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_max_i64_unaligned_acq_rel:
; -O1: cmp x0, x20
; -O1: csel x8, x0, x20, gt
; -O1: bl __atomic_compare_exchange
%r = atomicrmw max ptr %ptr, i64 %value acq_rel, align 1
ret i64 %r
}
define dso_local i64 @atomicrmw_max_i64_unaligned_seq_cst(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_max_i64_unaligned_seq_cst:
; -O0: subs x10, x9, x8
; -O0: csel x8, x9, x8, gt
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_max_i64_unaligned_seq_cst:
; -O1: cmp x0, x20
; -O1: csel x8, x0, x20, gt
; -O1: bl __atomic_compare_exchange
%r = atomicrmw max ptr %ptr, i64 %value seq_cst, align 1
ret i64 %r
}
define dso_local i128 @atomicrmw_max_i128_unaligned_monotonic(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_max_i128_unaligned_monotonic:
; -O0: subs x8, x8, x10
; -O0: subs x8, x8, x9
; -O0: subs x12, x12, x10
; -O0: csel w11, w8, w11, eq
; -O0: ands w12, w11, #0x1
; -O0: csel x9, x8, x9, ne
; -O0: ands w11, w11, #0x1
; -O0: csel x8, x8, x10, ne
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_max_i128_unaligned_monotonic:
; -O1: ldp x0, x1, [x0]
; -O1: cmp x21, x0
; -O1: csel x8, x1, x19, lt
; -O1: csel x9, x0, x21, lt
; -O1: bl __atomic_compare_exchange
%r = atomicrmw max ptr %ptr, i128 %value monotonic, align 1
ret i128 %r
}
define dso_local i128 @atomicrmw_max_i128_unaligned_acquire(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_max_i128_unaligned_acquire:
; -O0: subs x8, x8, x10
; -O0: subs x8, x8, x9
; -O0: subs x12, x12, x10
; -O0: csel w11, w8, w11, eq
; -O0: ands w12, w11, #0x1
; -O0: csel x9, x8, x9, ne
; -O0: ands w11, w11, #0x1
; -O0: csel x8, x8, x10, ne
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_max_i128_unaligned_acquire:
; -O1: ldp x0, x1, [x0]
; -O1: cmp x21, x0
; -O1: csel x8, x1, x19, lt
; -O1: csel x9, x0, x21, lt
; -O1: bl __atomic_compare_exchange
%r = atomicrmw max ptr %ptr, i128 %value acquire, align 1
ret i128 %r
}
define dso_local i128 @atomicrmw_max_i128_unaligned_release(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_max_i128_unaligned_release:
; -O0: subs x8, x8, x10
; -O0: subs x8, x8, x9
; -O0: subs x12, x12, x10
; -O0: csel w11, w8, w11, eq
; -O0: ands w12, w11, #0x1
; -O0: csel x9, x8, x9, ne
; -O0: ands w11, w11, #0x1
; -O0: csel x8, x8, x10, ne
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_max_i128_unaligned_release:
; -O1: ldp x0, x1, [x0]
; -O1: cmp x21, x0
; -O1: csel x8, x1, x19, lt
; -O1: csel x9, x0, x21, lt
; -O1: bl __atomic_compare_exchange
%r = atomicrmw max ptr %ptr, i128 %value release, align 1
ret i128 %r
}
define dso_local i128 @atomicrmw_max_i128_unaligned_acq_rel(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_max_i128_unaligned_acq_rel:
; -O0: subs x8, x8, x10
; -O0: subs x8, x8, x9
; -O0: subs x12, x12, x10
; -O0: csel w11, w8, w11, eq
; -O0: ands w12, w11, #0x1
; -O0: csel x9, x8, x9, ne
; -O0: ands w11, w11, #0x1
; -O0: csel x8, x8, x10, ne
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_max_i128_unaligned_acq_rel:
; -O1: ldp x0, x1, [x0]
; -O1: cmp x21, x0
; -O1: csel x8, x1, x19, lt
; -O1: csel x9, x0, x21, lt
; -O1: bl __atomic_compare_exchange
%r = atomicrmw max ptr %ptr, i128 %value acq_rel, align 1
ret i128 %r
}
define dso_local i128 @atomicrmw_max_i128_unaligned_seq_cst(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_max_i128_unaligned_seq_cst:
; -O0: subs x8, x8, x10
; -O0: subs x8, x8, x9
; -O0: subs x12, x12, x10
; -O0: csel w11, w8, w11, eq
; -O0: ands w12, w11, #0x1
; -O0: csel x9, x8, x9, ne
; -O0: ands w11, w11, #0x1
; -O0: csel x8, x8, x10, ne
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_max_i128_unaligned_seq_cst:
; -O1: ldp x0, x1, [x0]
; -O1: cmp x21, x0
; -O1: csel x8, x1, x19, lt
; -O1: csel x9, x0, x21, lt
; -O1: bl __atomic_compare_exchange
%r = atomicrmw max ptr %ptr, i128 %value seq_cst, align 1
ret i128 %r
}
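; Signed min mirrors max throughout: the lowering is identical except that the
; select condition is le (and ge for the swapped i128 comparison) instead of
; gt (and lt).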
define dso_local i8 @atomicrmw_min_i8_aligned_monotonic(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_min_i8_aligned_monotonic:
; -O0: sxtb w9, w0
; -O0: subs w9, w9, w8, sxtb
; -O0: csel w1, w0, w8, le
; -O0: bl __aarch64_cas1_relax
; -O0: and w8, w0, #0xff
; -O0: subs w8, w8, w9, uxtb
;
; -O1-LABEL: atomicrmw_min_i8_aligned_monotonic:
; -O1: ldxrb w9, [x0]
; -O1: sxtb w8, w9
; -O1: cmp w8, w1, sxtb
; -O1: csel w9, w9, w1, le
; -O1: stxrb w10, w9, [x0]
%r = atomicrmw min ptr %ptr, i8 %value monotonic, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_min_i8_aligned_acquire(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_min_i8_aligned_acquire:
; -O0: sxtb w9, w0
; -O0: subs w9, w9, w8, sxtb
; -O0: csel w1, w0, w8, le
; -O0: bl __aarch64_cas1_acq
; -O0: and w8, w0, #0xff
; -O0: subs w8, w8, w9, uxtb
;
; -O1-LABEL: atomicrmw_min_i8_aligned_acquire:
; -O1: ldaxrb w9, [x0]
; -O1: sxtb w8, w9
; -O1: cmp w8, w1, sxtb
; -O1: csel w9, w9, w1, le
; -O1: stxrb w10, w9, [x0]
%r = atomicrmw min ptr %ptr, i8 %value acquire, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_min_i8_aligned_release(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_min_i8_aligned_release:
; -O0: sxtb w9, w0
; -O0: subs w9, w9, w8, sxtb
; -O0: csel w1, w0, w8, le
; -O0: bl __aarch64_cas1_rel
; -O0: and w8, w0, #0xff
; -O0: subs w8, w8, w9, uxtb
;
; -O1-LABEL: atomicrmw_min_i8_aligned_release:
; -O1: ldxrb w9, [x0]
; -O1: sxtb w8, w9
; -O1: cmp w8, w1, sxtb
; -O1: csel w9, w9, w1, le
; -O1: stlxrb w10, w9, [x0]
%r = atomicrmw min ptr %ptr, i8 %value release, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_min_i8_aligned_acq_rel(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_min_i8_aligned_acq_rel:
; -O0: sxtb w9, w0
; -O0: subs w9, w9, w8, sxtb
; -O0: csel w1, w0, w8, le
; -O0: bl __aarch64_cas1_acq_rel
; -O0: and w8, w0, #0xff
; -O0: subs w8, w8, w9, uxtb
;
; -O1-LABEL: atomicrmw_min_i8_aligned_acq_rel:
; -O1: ldaxrb w9, [x0]
; -O1: sxtb w8, w9
; -O1: cmp w8, w1, sxtb
; -O1: csel w9, w9, w1, le
; -O1: stlxrb w10, w9, [x0]
%r = atomicrmw min ptr %ptr, i8 %value acq_rel, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_min_i8_aligned_seq_cst(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_min_i8_aligned_seq_cst:
; -O0: sxtb w9, w0
; -O0: subs w9, w9, w8, sxtb
; -O0: csel w1, w0, w8, le
; -O0: bl __aarch64_cas1_acq_rel
; -O0: and w8, w0, #0xff
; -O0: subs w8, w8, w9, uxtb
;
; -O1-LABEL: atomicrmw_min_i8_aligned_seq_cst:
; -O1: ldaxrb w9, [x0]
; -O1: sxtb w8, w9
; -O1: cmp w8, w1, sxtb
; -O1: csel w9, w9, w1, le
; -O1: stlxrb w10, w9, [x0]
%r = atomicrmw min ptr %ptr, i8 %value seq_cst, align 1
ret i8 %r
}
define dso_local i16 @atomicrmw_min_i16_aligned_monotonic(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_min_i16_aligned_monotonic:
; -O0: sxth w9, w0
; -O0: subs w9, w9, w8, sxth
; -O0: csel w1, w0, w8, le
; -O0: bl __aarch64_cas2_relax
; -O0: subs w8, w8, w0, uxth
;
; -O1-LABEL: atomicrmw_min_i16_aligned_monotonic:
; -O1: ldxrh w9, [x0]
; -O1: sxth w8, w9
; -O1: cmp w8, w1, sxth
; -O1: csel w9, w9, w1, le
; -O1: stxrh w10, w9, [x0]
%r = atomicrmw min ptr %ptr, i16 %value monotonic, align 2
ret i16 %r
}
define dso_local i16 @atomicrmw_min_i16_aligned_acquire(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_min_i16_aligned_acquire:
; -O0: sxth w9, w0
; -O0: subs w9, w9, w8, sxth
; -O0: csel w1, w0, w8, le
; -O0: bl __aarch64_cas2_acq
; -O0: subs w8, w8, w0, uxth
;
; -O1-LABEL: atomicrmw_min_i16_aligned_acquire:
; -O1: ldaxrh w9, [x0]
; -O1: sxth w8, w9
; -O1: cmp w8, w1, sxth
; -O1: csel w9, w9, w1, le
; -O1: stxrh w10, w9, [x0]
%r = atomicrmw min ptr %ptr, i16 %value acquire, align 2
ret i16 %r
}
define dso_local i16 @atomicrmw_min_i16_aligned_release(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_min_i16_aligned_release:
; -O0: sxth w9, w0
; -O0: subs w9, w9, w8, sxth
; -O0: csel w1, w0, w8, le
; -O0: bl __aarch64_cas2_rel
; -O0: subs w8, w8, w0, uxth
;
; -O1-LABEL: atomicrmw_min_i16_aligned_release:
; -O1: ldxrh w9, [x0]
; -O1: sxth w8, w9
; -O1: cmp w8, w1, sxth
; -O1: csel w9, w9, w1, le
; -O1: stlxrh w10, w9, [x0]
%r = atomicrmw min ptr %ptr, i16 %value release, align 2
ret i16 %r
}
define dso_local i16 @atomicrmw_min_i16_aligned_acq_rel(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_min_i16_aligned_acq_rel:
; -O0: sxth w9, w0
; -O0: subs w9, w9, w8, sxth
; -O0: csel w1, w0, w8, le
; -O0: bl __aarch64_cas2_acq_rel
; -O0: subs w8, w8, w0, uxth
;
; -O1-LABEL: atomicrmw_min_i16_aligned_acq_rel:
; -O1: ldaxrh w9, [x0]
; -O1: sxth w8, w9
; -O1: cmp w8, w1, sxth
; -O1: csel w9, w9, w1, le
; -O1: stlxrh w10, w9, [x0]
%r = atomicrmw min ptr %ptr, i16 %value acq_rel, align 2
ret i16 %r
}
define dso_local i16 @atomicrmw_min_i16_aligned_seq_cst(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_min_i16_aligned_seq_cst:
; -O0: sxth w9, w0
; -O0: subs w9, w9, w8, sxth
; -O0: csel w1, w0, w8, le
; -O0: bl __aarch64_cas2_acq_rel
; -O0: subs w8, w8, w0, uxth
;
; -O1-LABEL: atomicrmw_min_i16_aligned_seq_cst:
; -O1: ldaxrh w9, [x0]
; -O1: sxth w8, w9
; -O1: cmp w8, w1, sxth
; -O1: csel w9, w9, w1, le
; -O1: stlxrh w10, w9, [x0]
%r = atomicrmw min ptr %ptr, i16 %value seq_cst, align 2
ret i16 %r
}
define dso_local i32 @atomicrmw_min_i32_aligned_monotonic(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_min_i32_aligned_monotonic:
; -O0: subs w9, w0, w8
; -O0: csel w1, w0, w8, le
; -O0: bl __aarch64_cas4_relax
; -O0: subs w8, w0, w8
;
; -O1-LABEL: atomicrmw_min_i32_aligned_monotonic:
; -O1: ldxr w8, [x0]
; -O1: cmp w8, w1
; -O1: csel w9, w8, w1, le
; -O1: stxr w10, w9, [x0]
%r = atomicrmw min ptr %ptr, i32 %value monotonic, align 4
ret i32 %r
}
define dso_local i32 @atomicrmw_min_i32_aligned_acquire(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_min_i32_aligned_acquire:
; -O0: subs w9, w0, w8
; -O0: csel w1, w0, w8, le
; -O0: bl __aarch64_cas4_acq
; -O0: subs w8, w0, w8
;
; -O1-LABEL: atomicrmw_min_i32_aligned_acquire:
; -O1: ldaxr w8, [x0]
; -O1: cmp w8, w1
; -O1: csel w9, w8, w1, le
; -O1: stxr w10, w9, [x0]
%r = atomicrmw min ptr %ptr, i32 %value acquire, align 4
ret i32 %r
}
define dso_local i32 @atomicrmw_min_i32_aligned_release(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_min_i32_aligned_release:
; -O0: subs w9, w0, w8
; -O0: csel w1, w0, w8, le
; -O0: bl __aarch64_cas4_rel
; -O0: subs w8, w0, w8
;
; -O1-LABEL: atomicrmw_min_i32_aligned_release:
; -O1: ldxr w8, [x0]
; -O1: cmp w8, w1
; -O1: csel w9, w8, w1, le
; -O1: stlxr w10, w9, [x0]
%r = atomicrmw min ptr %ptr, i32 %value release, align 4
ret i32 %r
}
define dso_local i32 @atomicrmw_min_i32_aligned_acq_rel(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_min_i32_aligned_acq_rel:
; -O0: subs w9, w0, w8
; -O0: csel w1, w0, w8, le
; -O0: bl __aarch64_cas4_acq_rel
; -O0: subs w8, w0, w8
;
; -O1-LABEL: atomicrmw_min_i32_aligned_acq_rel:
; -O1: ldaxr w8, [x0]
; -O1: cmp w8, w1
; -O1: csel w9, w8, w1, le
; -O1: stlxr w10, w9, [x0]
%r = atomicrmw min ptr %ptr, i32 %value acq_rel, align 4
ret i32 %r
}
define dso_local i32 @atomicrmw_min_i32_aligned_seq_cst(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_min_i32_aligned_seq_cst:
; -O0: subs w9, w0, w8
; -O0: csel w1, w0, w8, le
; -O0: bl __aarch64_cas4_acq_rel
; -O0: subs w8, w0, w8
;
; -O1-LABEL: atomicrmw_min_i32_aligned_seq_cst:
; -O1: ldaxr w8, [x0]
; -O1: cmp w8, w1
; -O1: csel w9, w8, w1, le
; -O1: stlxr w10, w9, [x0]
%r = atomicrmw min ptr %ptr, i32 %value seq_cst, align 4
ret i32 %r
}
define dso_local i64 @atomicrmw_min_i64_aligned_monotonic(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_min_i64_aligned_monotonic:
; -O0: subs x9, x0, x8
; -O0: csel x1, x0, x8, le
; -O0: bl __aarch64_cas8_relax
; -O0: subs x8, x0, x8
;
; -O1-LABEL: atomicrmw_min_i64_aligned_monotonic:
; -O1: ldxr x0, [x8]
; -O1: cmp x0, x1
; -O1: csel x9, x0, x1, le
; -O1: stxr w10, x9, [x8]
%r = atomicrmw min ptr %ptr, i64 %value monotonic, align 8
ret i64 %r
}
define dso_local i64 @atomicrmw_min_i64_aligned_acquire(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_min_i64_aligned_acquire:
; -O0: subs x9, x0, x8
; -O0: csel x1, x0, x8, le
; -O0: bl __aarch64_cas8_acq
; -O0: subs x8, x0, x8
;
; -O1-LABEL: atomicrmw_min_i64_aligned_acquire:
; -O1: ldaxr x0, [x8]
; -O1: cmp x0, x1
; -O1: csel x9, x0, x1, le
; -O1: stxr w10, x9, [x8]
%r = atomicrmw min ptr %ptr, i64 %value acquire, align 8
ret i64 %r
}
define dso_local i64 @atomicrmw_min_i64_aligned_release(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_min_i64_aligned_release:
; -O0: subs x9, x0, x8
; -O0: csel x1, x0, x8, le
; -O0: bl __aarch64_cas8_rel
; -O0: subs x8, x0, x8
;
; -O1-LABEL: atomicrmw_min_i64_aligned_release:
; -O1: ldxr x0, [x8]
; -O1: cmp x0, x1
; -O1: csel x9, x0, x1, le
; -O1: stlxr w10, x9, [x8]
%r = atomicrmw min ptr %ptr, i64 %value release, align 8
ret i64 %r
}
define dso_local i64 @atomicrmw_min_i64_aligned_acq_rel(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_min_i64_aligned_acq_rel:
; -O0: subs x9, x0, x8
; -O0: csel x1, x0, x8, le
; -O0: bl __aarch64_cas8_acq_rel
; -O0: subs x8, x0, x8
;
; -O1-LABEL: atomicrmw_min_i64_aligned_acq_rel:
; -O1: ldaxr x0, [x8]
; -O1: cmp x0, x1
; -O1: csel x9, x0, x1, le
; -O1: stlxr w10, x9, [x8]
%r = atomicrmw min ptr %ptr, i64 %value acq_rel, align 8
ret i64 %r
}
define dso_local i64 @atomicrmw_min_i64_aligned_seq_cst(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_min_i64_aligned_seq_cst:
; -O0: subs x9, x0, x8
; -O0: csel x1, x0, x8, le
; -O0: bl __aarch64_cas8_acq_rel
; -O0: subs x8, x0, x8
;
; -O1-LABEL: atomicrmw_min_i64_aligned_seq_cst:
; -O1: ldaxr x0, [x8]
; -O1: cmp x0, x1
; -O1: csel x9, x0, x1, le
; -O1: stlxr w10, x9, [x8]
%r = atomicrmw min ptr %ptr, i64 %value seq_cst, align 8
ret i64 %r
}
define dso_local i128 @atomicrmw_min_i128_aligned_monotonic(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_min_i128_aligned_monotonic:
; -O0: subs x8, x8, x9
; -O0: subs x8, x8, x11
; -O0: subs x12, x12, x9
; -O0: csel w10, w8, w10, eq
; -O0: ands w12, w10, #0x1
; -O0: csel x2, x8, x11, ne
; -O0: ands w10, w10, #0x1
; -O0: csel x3, x8, x9, ne
; -O0: bl __aarch64_cas16_relax
; -O0: eor x8, x0, x8
; -O0: eor x9, x1, x9
; -O0: orr x8, x8, x9
; -O0: subs x8, x8, #0
;
; -O1-LABEL: atomicrmw_min_i128_aligned_monotonic:
; -O1: ldxp x0, x1, [x8]
; -O1: cmp x2, x0
; -O1: csel x9, x1, x3, ge
; -O1: csel x10, x0, x2, ge
; -O1: stxp w11, x10, x9, [x8]
%r = atomicrmw min ptr %ptr, i128 %value monotonic, align 16
ret i128 %r
}
define dso_local i128 @atomicrmw_min_i128_aligned_acquire(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_min_i128_aligned_acquire:
; -O0: subs x8, x8, x9
; -O0: subs x8, x8, x11
; -O0: subs x12, x12, x9
; -O0: csel w10, w8, w10, eq
; -O0: ands w12, w10, #0x1
; -O0: csel x2, x8, x11, ne
; -O0: ands w10, w10, #0x1
; -O0: csel x3, x8, x9, ne
; -O0: bl __aarch64_cas16_acq
; -O0: eor x8, x0, x8
; -O0: eor x9, x1, x9
; -O0: orr x8, x8, x9
; -O0: subs x8, x8, #0
;
; -O1-LABEL: atomicrmw_min_i128_aligned_acquire:
; -O1: ldaxp x0, x1, [x8]
; -O1: cmp x2, x0
; -O1: csel x9, x1, x3, ge
; -O1: csel x10, x0, x2, ge
; -O1: stxp w11, x10, x9, [x8]
%r = atomicrmw min ptr %ptr, i128 %value acquire, align 16
ret i128 %r
}
define dso_local i128 @atomicrmw_min_i128_aligned_release(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_min_i128_aligned_release:
; -O0: subs x8, x8, x9
; -O0: subs x8, x8, x11
; -O0: subs x12, x12, x9
; -O0: csel w10, w8, w10, eq
; -O0: ands w12, w10, #0x1
; -O0: csel x2, x8, x11, ne
; -O0: ands w10, w10, #0x1
; -O0: csel x3, x8, x9, ne
; -O0: bl __aarch64_cas16_rel
; -O0: eor x8, x0, x8
; -O0: eor x9, x1, x9
; -O0: orr x8, x8, x9
; -O0: subs x8, x8, #0
;
; -O1-LABEL: atomicrmw_min_i128_aligned_release:
; -O1: ldxp x0, x1, [x8]
; -O1: cmp x2, x0
; -O1: csel x9, x1, x3, ge
; -O1: csel x10, x0, x2, ge
; -O1: stlxp w11, x10, x9, [x8]
%r = atomicrmw min ptr %ptr, i128 %value release, align 16
ret i128 %r
}
define dso_local i128 @atomicrmw_min_i128_aligned_acq_rel(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_min_i128_aligned_acq_rel:
; -O0: subs x8, x8, x9
; -O0: subs x8, x8, x11
; -O0: subs x12, x12, x9
; -O0: csel w10, w8, w10, eq
; -O0: ands w12, w10, #0x1
; -O0: csel x2, x8, x11, ne
; -O0: ands w10, w10, #0x1
; -O0: csel x3, x8, x9, ne
; -O0: bl __aarch64_cas16_acq_rel
; -O0: eor x8, x0, x8
; -O0: eor x9, x1, x9
; -O0: orr x8, x8, x9
; -O0: subs x8, x8, #0
;
; -O1-LABEL: atomicrmw_min_i128_aligned_acq_rel:
; -O1: ldaxp x0, x1, [x8]
; -O1: cmp x2, x0
; -O1: csel x9, x1, x3, ge
; -O1: csel x10, x0, x2, ge
; -O1: stlxp w11, x10, x9, [x8]
%r = atomicrmw min ptr %ptr, i128 %value acq_rel, align 16
ret i128 %r
}
define dso_local i128 @atomicrmw_min_i128_aligned_seq_cst(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_min_i128_aligned_seq_cst:
; -O0: subs x8, x8, x9
; -O0: subs x8, x8, x11
; -O0: subs x12, x12, x9
; -O0: csel w10, w8, w10, eq
; -O0: ands w12, w10, #0x1
; -O0: csel x2, x8, x11, ne
; -O0: ands w10, w10, #0x1
; -O0: csel x3, x8, x9, ne
; -O0: bl __aarch64_cas16_acq_rel
; -O0: eor x8, x0, x8
; -O0: eor x9, x1, x9
; -O0: orr x8, x8, x9
; -O0: subs x8, x8, #0
;
; -O1-LABEL: atomicrmw_min_i128_aligned_seq_cst:
; -O1: ldaxp x0, x1, [x8]
; -O1: cmp x2, x0
; -O1: csel x9, x1, x3, ge
; -O1: csel x10, x0, x2, ge
; -O1: stlxp w11, x10, x9, [x8]
%r = atomicrmw min ptr %ptr, i128 %value seq_cst, align 16
ret i128 %r
}
define dso_local i8 @atomicrmw_min_i8_unaligned_monotonic(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_min_i8_unaligned_monotonic:
; -O0: sxtb w9, w0
; -O0: subs w9, w9, w8, sxtb
; -O0: csel w1, w0, w8, le
; -O0: bl __aarch64_cas1_relax
; -O0: and w8, w0, #0xff
; -O0: subs w8, w8, w9, uxtb
;
; -O1-LABEL: atomicrmw_min_i8_unaligned_monotonic:
; -O1: ldxrb w9, [x0]
; -O1: sxtb w8, w9
; -O1: cmp w8, w1, sxtb
; -O1: csel w9, w9, w1, le
; -O1: stxrb w10, w9, [x0]
%r = atomicrmw min ptr %ptr, i8 %value monotonic, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_min_i8_unaligned_acquire(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_min_i8_unaligned_acquire:
; -O0: sxtb w9, w0
; -O0: subs w9, w9, w8, sxtb
; -O0: csel w1, w0, w8, le
; -O0: bl __aarch64_cas1_acq
; -O0: and w8, w0, #0xff
; -O0: subs w8, w8, w9, uxtb
;
; -O1-LABEL: atomicrmw_min_i8_unaligned_acquire:
; -O1: ldaxrb w9, [x0]
; -O1: sxtb w8, w9
; -O1: cmp w8, w1, sxtb
; -O1: csel w9, w9, w1, le
; -O1: stxrb w10, w9, [x0]
%r = atomicrmw min ptr %ptr, i8 %value acquire, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_min_i8_unaligned_release(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_min_i8_unaligned_release:
; -O0: sxtb w9, w0
; -O0: subs w9, w9, w8, sxtb
; -O0: csel w1, w0, w8, le
; -O0: bl __aarch64_cas1_rel
; -O0: and w8, w0, #0xff
; -O0: subs w8, w8, w9, uxtb
;
; -O1-LABEL: atomicrmw_min_i8_unaligned_release:
; -O1: ldxrb w9, [x0]
; -O1: sxtb w8, w9
; -O1: cmp w8, w1, sxtb
; -O1: csel w9, w9, w1, le
; -O1: stlxrb w10, w9, [x0]
%r = atomicrmw min ptr %ptr, i8 %value release, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_min_i8_unaligned_acq_rel(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_min_i8_unaligned_acq_rel:
; -O0: sxtb w9, w0
; -O0: subs w9, w9, w8, sxtb
; -O0: csel w1, w0, w8, le
; -O0: bl __aarch64_cas1_acq_rel
; -O0: and w8, w0, #0xff
; -O0: subs w8, w8, w9, uxtb
;
; -O1-LABEL: atomicrmw_min_i8_unaligned_acq_rel:
; -O1: ldaxrb w9, [x0]
; -O1: sxtb w8, w9
; -O1: cmp w8, w1, sxtb
; -O1: csel w9, w9, w1, le
; -O1: stlxrb w10, w9, [x0]
%r = atomicrmw min ptr %ptr, i8 %value acq_rel, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_min_i8_unaligned_seq_cst(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_min_i8_unaligned_seq_cst:
; -O0: sxtb w9, w0
; -O0: subs w9, w9, w8, sxtb
; -O0: csel w1, w0, w8, le
; -O0: bl __aarch64_cas1_acq_rel
; -O0: and w8, w0, #0xff
; -O0: subs w8, w8, w9, uxtb
;
; -O1-LABEL: atomicrmw_min_i8_unaligned_seq_cst:
; -O1: ldaxrb w9, [x0]
; -O1: sxtb w8, w9
; -O1: cmp w8, w1, sxtb
; -O1: csel w9, w9, w1, le
; -O1: stlxrb w10, w9, [x0]
%r = atomicrmw min ptr %ptr, i8 %value seq_cst, align 1
ret i8 %r
}
define dso_local i16 @atomicrmw_min_i16_unaligned_monotonic(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_min_i16_unaligned_monotonic:
; -O0: sxth w10, w9
; -O0: subs w10, w10, w8, sxth
; -O0: csel w8, w9, w8, le
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_min_i16_unaligned_monotonic:
; -O1: sxth w8, w0
; -O1: cmp w8, w20, sxth
; -O1: csel w8, w0, w20, le
; -O1: bl __atomic_compare_exchange
%r = atomicrmw min ptr %ptr, i16 %value monotonic, align 1
ret i16 %r
}
define dso_local i16 @atomicrmw_min_i16_unaligned_acquire(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_min_i16_unaligned_acquire:
; -O0: sxth w10, w9
; -O0: subs w10, w10, w8, sxth
; -O0: csel w8, w9, w8, le
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_min_i16_unaligned_acquire:
; -O1: sxth w8, w0
; -O1: cmp w8, w20, sxth
; -O1: csel w8, w0, w20, le
; -O1: bl __atomic_compare_exchange
%r = atomicrmw min ptr %ptr, i16 %value acquire, align 1
ret i16 %r
}
define dso_local i16 @atomicrmw_min_i16_unaligned_release(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_min_i16_unaligned_release:
; -O0: sxth w10, w9
; -O0: subs w10, w10, w8, sxth
; -O0: csel w8, w9, w8, le
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_min_i16_unaligned_release:
; -O1: sxth w8, w0
; -O1: cmp w8, w20, sxth
; -O1: csel w8, w0, w20, le
; -O1: bl __atomic_compare_exchange
%r = atomicrmw min ptr %ptr, i16 %value release, align 1
ret i16 %r
}
define dso_local i16 @atomicrmw_min_i16_unaligned_acq_rel(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_min_i16_unaligned_acq_rel:
; -O0: sxth w10, w9
; -O0: subs w10, w10, w8, sxth
; -O0: csel w8, w9, w8, le
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_min_i16_unaligned_acq_rel:
; -O1: sxth w8, w0
; -O1: cmp w8, w20, sxth
; -O1: csel w8, w0, w20, le
; -O1: bl __atomic_compare_exchange
%r = atomicrmw min ptr %ptr, i16 %value acq_rel, align 1
ret i16 %r
}
define dso_local i16 @atomicrmw_min_i16_unaligned_seq_cst(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_min_i16_unaligned_seq_cst:
; -O0: sxth w10, w9
; -O0: subs w10, w10, w8, sxth
; -O0: csel w8, w9, w8, le
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_min_i16_unaligned_seq_cst:
; -O1: sxth w8, w0
; -O1: cmp w8, w20, sxth
; -O1: csel w8, w0, w20, le
; -O1: bl __atomic_compare_exchange
%r = atomicrmw min ptr %ptr, i16 %value seq_cst, align 1
ret i16 %r
}
define dso_local i32 @atomicrmw_min_i32_unaligned_monotonic(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_min_i32_unaligned_monotonic:
; -O0: subs w10, w9, w8
; -O0: csel w8, w9, w8, le
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_min_i32_unaligned_monotonic:
; -O1: cmp w0, w20
; -O1: csel w8, w0, w20, le
; -O1: bl __atomic_compare_exchange
%r = atomicrmw min ptr %ptr, i32 %value monotonic, align 1
ret i32 %r
}
define dso_local i32 @atomicrmw_min_i32_unaligned_acquire(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_min_i32_unaligned_acquire:
; -O0: subs w10, w9, w8
; -O0: csel w8, w9, w8, le
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_min_i32_unaligned_acquire:
; -O1: cmp w0, w20
; -O1: csel w8, w0, w20, le
; -O1: bl __atomic_compare_exchange
%r = atomicrmw min ptr %ptr, i32 %value acquire, align 1
ret i32 %r
}
define dso_local i32 @atomicrmw_min_i32_unaligned_release(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_min_i32_unaligned_release:
; -O0: subs w10, w9, w8
; -O0: csel w8, w9, w8, le
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_min_i32_unaligned_release:
; -O1: cmp w0, w20
; -O1: csel w8, w0, w20, le
; -O1: bl __atomic_compare_exchange
%r = atomicrmw min ptr %ptr, i32 %value release, align 1
ret i32 %r
}
define dso_local i32 @atomicrmw_min_i32_unaligned_acq_rel(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_min_i32_unaligned_acq_rel:
; -O0: subs w10, w9, w8
; -O0: csel w8, w9, w8, le
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_min_i32_unaligned_acq_rel:
; -O1: cmp w0, w20
; -O1: csel w8, w0, w20, le
; -O1: bl __atomic_compare_exchange
%r = atomicrmw min ptr %ptr, i32 %value acq_rel, align 1
ret i32 %r
}
define dso_local i32 @atomicrmw_min_i32_unaligned_seq_cst(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_min_i32_unaligned_seq_cst:
; -O0: subs w10, w9, w8
; -O0: csel w8, w9, w8, le
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_min_i32_unaligned_seq_cst:
; -O1: cmp w0, w20
; -O1: csel w8, w0, w20, le
; -O1: bl __atomic_compare_exchange
%r = atomicrmw min ptr %ptr, i32 %value seq_cst, align 1
ret i32 %r
}
define dso_local i64 @atomicrmw_min_i64_unaligned_monotonic(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_min_i64_unaligned_monotonic:
; -O0: subs x10, x9, x8
; -O0: csel x8, x9, x8, le
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_min_i64_unaligned_monotonic:
; -O1: cmp x0, x20
; -O1: csel x8, x0, x20, le
; -O1: bl __atomic_compare_exchange
%r = atomicrmw min ptr %ptr, i64 %value monotonic, align 1
ret i64 %r
}
define dso_local i64 @atomicrmw_min_i64_unaligned_acquire(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_min_i64_unaligned_acquire:
; -O0: subs x10, x9, x8
; -O0: csel x8, x9, x8, le
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_min_i64_unaligned_acquire:
; -O1: cmp x0, x20
; -O1: csel x8, x0, x20, le
; -O1: bl __atomic_compare_exchange
%r = atomicrmw min ptr %ptr, i64 %value acquire, align 1
ret i64 %r
}
define dso_local i64 @atomicrmw_min_i64_unaligned_release(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_min_i64_unaligned_release:
; -O0: subs x10, x9, x8
; -O0: csel x8, x9, x8, le
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_min_i64_unaligned_release:
; -O1: cmp x0, x20
; -O1: csel x8, x0, x20, le
; -O1: bl __atomic_compare_exchange
%r = atomicrmw min ptr %ptr, i64 %value release, align 1
ret i64 %r
}
define dso_local i64 @atomicrmw_min_i64_unaligned_acq_rel(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_min_i64_unaligned_acq_rel:
; -O0: subs x10, x9, x8
; -O0: csel x8, x9, x8, le
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_min_i64_unaligned_acq_rel:
; -O1: cmp x0, x20
; -O1: csel x8, x0, x20, le
; -O1: bl __atomic_compare_exchange
%r = atomicrmw min ptr %ptr, i64 %value acq_rel, align 1
ret i64 %r
}
define dso_local i64 @atomicrmw_min_i64_unaligned_seq_cst(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_min_i64_unaligned_seq_cst:
; -O0: subs x10, x9, x8
; -O0: csel x8, x9, x8, le
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_min_i64_unaligned_seq_cst:
; -O1: cmp x0, x20
; -O1: csel x8, x0, x20, le
; -O1: bl __atomic_compare_exchange
%r = atomicrmw min ptr %ptr, i64 %value seq_cst, align 1
ret i64 %r
}
define dso_local i128 @atomicrmw_min_i128_unaligned_monotonic(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_min_i128_unaligned_monotonic:
; -O0: subs x8, x8, x10
; -O0: subs x8, x8, x9
; -O0: subs x12, x12, x10
; -O0: csel w11, w8, w11, eq
; -O0: ands w12, w11, #0x1
; -O0: csel x9, x8, x9, ne
; -O0: ands w11, w11, #0x1
; -O0: csel x8, x8, x10, ne
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_min_i128_unaligned_monotonic:
; -O1: ldp x0, x1, [x0]
; -O1: cmp x21, x0
; -O1: csel x8, x1, x19, ge
; -O1: csel x9, x0, x21, ge
; -O1: bl __atomic_compare_exchange
%r = atomicrmw min ptr %ptr, i128 %value monotonic, align 1
ret i128 %r
}
define dso_local i128 @atomicrmw_min_i128_unaligned_acquire(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_min_i128_unaligned_acquire:
; -O0: subs x8, x8, x10
; -O0: subs x8, x8, x9
; -O0: subs x12, x12, x10
; -O0: csel w11, w8, w11, eq
; -O0: ands w12, w11, #0x1
; -O0: csel x9, x8, x9, ne
; -O0: ands w11, w11, #0x1
; -O0: csel x8, x8, x10, ne
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_min_i128_unaligned_acquire:
; -O1: ldp x0, x1, [x0]
; -O1: cmp x21, x0
; -O1: csel x8, x1, x19, ge
; -O1: csel x9, x0, x21, ge
; -O1: bl __atomic_compare_exchange
%r = atomicrmw min ptr %ptr, i128 %value acquire, align 1
ret i128 %r
}
define dso_local i128 @atomicrmw_min_i128_unaligned_release(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_min_i128_unaligned_release:
; -O0: subs x8, x8, x10
; -O0: subs x8, x8, x9
; -O0: subs x12, x12, x10
; -O0: csel w11, w8, w11, eq
; -O0: ands w12, w11, #0x1
; -O0: csel x9, x8, x9, ne
; -O0: ands w11, w11, #0x1
; -O0: csel x8, x8, x10, ne
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_min_i128_unaligned_release:
; -O1: ldp x0, x1, [x0]
; -O1: cmp x21, x0
; -O1: csel x8, x1, x19, ge
; -O1: csel x9, x0, x21, ge
; -O1: bl __atomic_compare_exchange
%r = atomicrmw min ptr %ptr, i128 %value release, align 1
ret i128 %r
}
define dso_local i128 @atomicrmw_min_i128_unaligned_acq_rel(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_min_i128_unaligned_acq_rel:
; -O0: subs x8, x8, x10
; -O0: subs x8, x8, x9
; -O0: subs x12, x12, x10
; -O0: csel w11, w8, w11, eq
; -O0: ands w12, w11, #0x1
; -O0: csel x9, x8, x9, ne
; -O0: ands w11, w11, #0x1
; -O0: csel x8, x8, x10, ne
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_min_i128_unaligned_acq_rel:
; -O1: ldp x0, x1, [x0]
; -O1: cmp x21, x0
; -O1: csel x8, x1, x19, ge
; -O1: csel x9, x0, x21, ge
; -O1: bl __atomic_compare_exchange
%r = atomicrmw min ptr %ptr, i128 %value acq_rel, align 1
ret i128 %r
}
define dso_local i128 @atomicrmw_min_i128_unaligned_seq_cst(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_min_i128_unaligned_seq_cst:
; -O0: subs x8, x8, x10
; -O0: subs x8, x8, x9
; -O0: subs x12, x12, x10
; -O0: csel w11, w8, w11, eq
; -O0: ands w12, w11, #0x1
; -O0: csel x9, x8, x9, ne
; -O0: ands w11, w11, #0x1
; -O0: csel x8, x8, x10, ne
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_min_i128_unaligned_seq_cst:
; -O1: ldp x0, x1, [x0]
; -O1: cmp x21, x0
; -O1: csel x8, x1, x19, ge
; -O1: csel x9, x0, x21, ge
; -O1: bl __atomic_compare_exchange
%r = atomicrmw min ptr %ptr, i128 %value seq_cst, align 1
ret i128 %r
}
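; Unsigned-max (umax) tests. The outline-atomics helpers cover swap, cas and the
; bitwise/add operations but not min/max, so aligned umax is expanded to a
; compare-and-swap loop: at -O0 the loop calls the __aarch64_casN_<order> outline
; helpers (the seq_cst cases reuse the acq_rel variant), while at -O1 an inline
; exclusive-load/store loop (ld(a)xr*/st(l)xr*) with cmp+csel on the "hi"
; condition is emitted instead.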
define dso_local i8 @atomicrmw_umax_i8_aligned_monotonic(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_umax_i8_aligned_monotonic:
; -O0: and w9, w0, #0xff
; -O0: subs w9, w9, w8, uxtb
; -O0: csel w1, w0, w8, hi
; -O0: bl __aarch64_cas1_relax
; -O0: and w8, w0, #0xff
; -O0: subs w8, w8, w9, uxtb
;
; -O1-LABEL: atomicrmw_umax_i8_aligned_monotonic:
; -O1: and w9, w1, #0xff
; -O1: ldxrb w8, [x0]
; -O1: cmp w8, w9
; -O1: csel w10, w8, w9, hi
; -O1: stxrb w11, w10, [x0]
%r = atomicrmw umax ptr %ptr, i8 %value monotonic, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_umax_i8_aligned_acquire(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_umax_i8_aligned_acquire:
; -O0: and w9, w0, #0xff
; -O0: subs w9, w9, w8, uxtb
; -O0: csel w1, w0, w8, hi
; -O0: bl __aarch64_cas1_acq
; -O0: and w8, w0, #0xff
; -O0: subs w8, w8, w9, uxtb
;
; -O1-LABEL: atomicrmw_umax_i8_aligned_acquire:
; -O1: and w9, w1, #0xff
; -O1: ldaxrb w8, [x0]
; -O1: cmp w8, w9
; -O1: csel w10, w8, w9, hi
; -O1: stxrb w11, w10, [x0]
%r = atomicrmw umax ptr %ptr, i8 %value acquire, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_umax_i8_aligned_release(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_umax_i8_aligned_release:
; -O0: and w9, w0, #0xff
; -O0: subs w9, w9, w8, uxtb
; -O0: csel w1, w0, w8, hi
; -O0: bl __aarch64_cas1_rel
; -O0: and w8, w0, #0xff
; -O0: subs w8, w8, w9, uxtb
;
; -O1-LABEL: atomicrmw_umax_i8_aligned_release:
; -O1: and w9, w1, #0xff
; -O1: ldxrb w8, [x0]
; -O1: cmp w8, w9
; -O1: csel w10, w8, w9, hi
; -O1: stlxrb w11, w10, [x0]
%r = atomicrmw umax ptr %ptr, i8 %value release, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_umax_i8_aligned_acq_rel(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_umax_i8_aligned_acq_rel:
; -O0: and w9, w0, #0xff
; -O0: subs w9, w9, w8, uxtb
; -O0: csel w1, w0, w8, hi
; -O0: bl __aarch64_cas1_acq_rel
; -O0: and w8, w0, #0xff
; -O0: subs w8, w8, w9, uxtb
;
; -O1-LABEL: atomicrmw_umax_i8_aligned_acq_rel:
; -O1: and w9, w1, #0xff
; -O1: ldaxrb w8, [x0]
; -O1: cmp w8, w9
; -O1: csel w10, w8, w9, hi
; -O1: stlxrb w11, w10, [x0]
%r = atomicrmw umax ptr %ptr, i8 %value acq_rel, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_umax_i8_aligned_seq_cst(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_umax_i8_aligned_seq_cst:
; -O0: and w9, w0, #0xff
; -O0: subs w9, w9, w8, uxtb
; -O0: csel w1, w0, w8, hi
; -O0: bl __aarch64_cas1_acq_rel
; -O0: and w8, w0, #0xff
; -O0: subs w8, w8, w9, uxtb
;
; -O1-LABEL: atomicrmw_umax_i8_aligned_seq_cst:
; -O1: and w9, w1, #0xff
; -O1: ldaxrb w8, [x0]
; -O1: cmp w8, w9
; -O1: csel w10, w8, w9, hi
; -O1: stlxrb w11, w10, [x0]
%r = atomicrmw umax ptr %ptr, i8 %value seq_cst, align 1
ret i8 %r
}
define dso_local i16 @atomicrmw_umax_i16_aligned_monotonic(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_umax_i16_aligned_monotonic:
; -O0: subs w9, w9, w8, uxth
; -O0: csel w1, w0, w8, hi
; -O0: bl __aarch64_cas2_relax
; -O0: subs w8, w8, w0, uxth
;
; -O1-LABEL: atomicrmw_umax_i16_aligned_monotonic:
; -O1: and w9, w1, #0xffff
; -O1: ldxrh w8, [x0]
; -O1: cmp w8, w9
; -O1: csel w10, w8, w9, hi
; -O1: stxrh w11, w10, [x0]
%r = atomicrmw umax ptr %ptr, i16 %value monotonic, align 2
ret i16 %r
}
define dso_local i16 @atomicrmw_umax_i16_aligned_acquire(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_umax_i16_aligned_acquire:
; -O0: subs w9, w9, w8, uxth
; -O0: csel w1, w0, w8, hi
; -O0: bl __aarch64_cas2_acq
; -O0: subs w8, w8, w0, uxth
;
; -O1-LABEL: atomicrmw_umax_i16_aligned_acquire:
; -O1: and w9, w1, #0xffff
; -O1: ldaxrh w8, [x0]
; -O1: cmp w8, w9
; -O1: csel w10, w8, w9, hi
; -O1: stxrh w11, w10, [x0]
%r = atomicrmw umax ptr %ptr, i16 %value acquire, align 2
ret i16 %r
}
define dso_local i16 @atomicrmw_umax_i16_aligned_release(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_umax_i16_aligned_release:
; -O0: subs w9, w9, w8, uxth
; -O0: csel w1, w0, w8, hi
; -O0: bl __aarch64_cas2_rel
; -O0: subs w8, w8, w0, uxth
;
; -O1-LABEL: atomicrmw_umax_i16_aligned_release:
; -O1: and w9, w1, #0xffff
; -O1: ldxrh w8, [x0]
; -O1: cmp w8, w9
; -O1: csel w10, w8, w9, hi
; -O1: stlxrh w11, w10, [x0]
%r = atomicrmw umax ptr %ptr, i16 %value release, align 2
ret i16 %r
}
define dso_local i16 @atomicrmw_umax_i16_aligned_acq_rel(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_umax_i16_aligned_acq_rel:
; -O0: subs w9, w9, w8, uxth
; -O0: csel w1, w0, w8, hi
; -O0: bl __aarch64_cas2_acq_rel
; -O0: subs w8, w8, w0, uxth
;
; -O1-LABEL: atomicrmw_umax_i16_aligned_acq_rel:
; -O1: and w9, w1, #0xffff
; -O1: ldaxrh w8, [x0]
; -O1: cmp w8, w9
; -O1: csel w10, w8, w9, hi
; -O1: stlxrh w11, w10, [x0]
%r = atomicrmw umax ptr %ptr, i16 %value acq_rel, align 2
ret i16 %r
}
define dso_local i16 @atomicrmw_umax_i16_aligned_seq_cst(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_umax_i16_aligned_seq_cst:
; -O0: subs w9, w9, w8, uxth
; -O0: csel w1, w0, w8, hi
; -O0: bl __aarch64_cas2_acq_rel
; -O0: subs w8, w8, w0, uxth
;
; -O1-LABEL: atomicrmw_umax_i16_aligned_seq_cst:
; -O1: and w9, w1, #0xffff
; -O1: ldaxrh w8, [x0]
; -O1: cmp w8, w9
; -O1: csel w10, w8, w9, hi
; -O1: stlxrh w11, w10, [x0]
%r = atomicrmw umax ptr %ptr, i16 %value seq_cst, align 2
ret i16 %r
}
define dso_local i32 @atomicrmw_umax_i32_aligned_monotonic(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_umax_i32_aligned_monotonic:
; -O0: subs w9, w0, w8
; -O0: csel w1, w0, w8, hi
; -O0: bl __aarch64_cas4_relax
; -O0: subs w8, w0, w8
;
; -O1-LABEL: atomicrmw_umax_i32_aligned_monotonic:
; -O1: ldxr w8, [x0]
; -O1: cmp w8, w1
; -O1: csel w9, w8, w1, hi
; -O1: stxr w10, w9, [x0]
%r = atomicrmw umax ptr %ptr, i32 %value monotonic, align 4
ret i32 %r
}
define dso_local i32 @atomicrmw_umax_i32_aligned_acquire(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_umax_i32_aligned_acquire:
; -O0: subs w9, w0, w8
; -O0: csel w1, w0, w8, hi
; -O0: bl __aarch64_cas4_acq
; -O0: subs w8, w0, w8
;
; -O1-LABEL: atomicrmw_umax_i32_aligned_acquire:
; -O1: ldaxr w8, [x0]
; -O1: cmp w8, w1
; -O1: csel w9, w8, w1, hi
; -O1: stxr w10, w9, [x0]
%r = atomicrmw umax ptr %ptr, i32 %value acquire, align 4
ret i32 %r
}
define dso_local i32 @atomicrmw_umax_i32_aligned_release(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_umax_i32_aligned_release:
; -O0: subs w9, w0, w8
; -O0: csel w1, w0, w8, hi
; -O0: bl __aarch64_cas4_rel
; -O0: subs w8, w0, w8
;
; -O1-LABEL: atomicrmw_umax_i32_aligned_release:
; -O1: ldxr w8, [x0]
; -O1: cmp w8, w1
; -O1: csel w9, w8, w1, hi
; -O1: stlxr w10, w9, [x0]
%r = atomicrmw umax ptr %ptr, i32 %value release, align 4
ret i32 %r
}
define dso_local i32 @atomicrmw_umax_i32_aligned_acq_rel(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_umax_i32_aligned_acq_rel:
; -O0: subs w9, w0, w8
; -O0: csel w1, w0, w8, hi
; -O0: bl __aarch64_cas4_acq_rel
; -O0: subs w8, w0, w8
;
; -O1-LABEL: atomicrmw_umax_i32_aligned_acq_rel:
; -O1: ldaxr w8, [x0]
; -O1: cmp w8, w1
; -O1: csel w9, w8, w1, hi
; -O1: stlxr w10, w9, [x0]
%r = atomicrmw umax ptr %ptr, i32 %value acq_rel, align 4
ret i32 %r
}
define dso_local i32 @atomicrmw_umax_i32_aligned_seq_cst(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_umax_i32_aligned_seq_cst:
; -O0: subs w9, w0, w8
; -O0: csel w1, w0, w8, hi
; -O0: bl __aarch64_cas4_acq_rel
; -O0: subs w8, w0, w8
;
; -O1-LABEL: atomicrmw_umax_i32_aligned_seq_cst:
; -O1: ldaxr w8, [x0]
; -O1: cmp w8, w1
; -O1: csel w9, w8, w1, hi
; -O1: stlxr w10, w9, [x0]
%r = atomicrmw umax ptr %ptr, i32 %value seq_cst, align 4
ret i32 %r
}
define dso_local i64 @atomicrmw_umax_i64_aligned_monotonic(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_umax_i64_aligned_monotonic:
; -O0: subs x9, x0, x8
; -O0: csel x1, x0, x8, hi
; -O0: bl __aarch64_cas8_relax
; -O0: subs x8, x0, x8
;
; -O1-LABEL: atomicrmw_umax_i64_aligned_monotonic:
; -O1: ldxr x0, [x8]
; -O1: cmp x0, x1
; -O1: csel x9, x0, x1, hi
; -O1: stxr w10, x9, [x8]
%r = atomicrmw umax ptr %ptr, i64 %value monotonic, align 8
ret i64 %r
}
define dso_local i64 @atomicrmw_umax_i64_aligned_acquire(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_umax_i64_aligned_acquire:
; -O0: subs x9, x0, x8
; -O0: csel x1, x0, x8, hi
; -O0: bl __aarch64_cas8_acq
; -O0: subs x8, x0, x8
;
; -O1-LABEL: atomicrmw_umax_i64_aligned_acquire:
; -O1: ldaxr x0, [x8]
; -O1: cmp x0, x1
; -O1: csel x9, x0, x1, hi
; -O1: stxr w10, x9, [x8]
%r = atomicrmw umax ptr %ptr, i64 %value acquire, align 8
ret i64 %r
}
define dso_local i64 @atomicrmw_umax_i64_aligned_release(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_umax_i64_aligned_release:
; -O0: subs x9, x0, x8
; -O0: csel x1, x0, x8, hi
; -O0: bl __aarch64_cas8_rel
; -O0: subs x8, x0, x8
;
; -O1-LABEL: atomicrmw_umax_i64_aligned_release:
; -O1: ldxr x0, [x8]
; -O1: cmp x0, x1
; -O1: csel x9, x0, x1, hi
; -O1: stlxr w10, x9, [x8]
%r = atomicrmw umax ptr %ptr, i64 %value release, align 8
ret i64 %r
}
define dso_local i64 @atomicrmw_umax_i64_aligned_acq_rel(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_umax_i64_aligned_acq_rel:
; -O0: subs x9, x0, x8
; -O0: csel x1, x0, x8, hi
; -O0: bl __aarch64_cas8_acq_rel
; -O0: subs x8, x0, x8
;
; -O1-LABEL: atomicrmw_umax_i64_aligned_acq_rel:
; -O1: ldaxr x0, [x8]
; -O1: cmp x0, x1
; -O1: csel x9, x0, x1, hi
; -O1: stlxr w10, x9, [x8]
%r = atomicrmw umax ptr %ptr, i64 %value acq_rel, align 8
ret i64 %r
}
define dso_local i64 @atomicrmw_umax_i64_aligned_seq_cst(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_umax_i64_aligned_seq_cst:
; -O0: subs x9, x0, x8
; -O0: csel x1, x0, x8, hi
; -O0: bl __aarch64_cas8_acq_rel
; -O0: subs x8, x0, x8
;
; -O1-LABEL: atomicrmw_umax_i64_aligned_seq_cst:
; -O1: ldaxr x0, [x8]
; -O1: cmp x0, x1
; -O1: csel x9, x0, x1, hi
; -O1: stlxr w10, x9, [x8]
%r = atomicrmw umax ptr %ptr, i64 %value seq_cst, align 8
ret i64 %r
}
define dso_local i128 @atomicrmw_umax_i128_aligned_monotonic(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_umax_i128_aligned_monotonic:
; -O0: subs x8, x8, x9
; -O0: subs x8, x8, x11
; -O0: subs x12, x12, x9
; -O0: csel w10, w8, w10, eq
; -O0: ands w12, w10, #0x1
; -O0: csel x2, x8, x11, ne
; -O0: ands w10, w10, #0x1
; -O0: csel x3, x8, x9, ne
; -O0: bl __aarch64_cas16_relax
; -O0: eor x8, x0, x8
; -O0: eor x9, x1, x9
; -O0: orr x8, x8, x9
; -O0: subs x8, x8, #0
;
; -O1-LABEL: atomicrmw_umax_i128_aligned_monotonic:
; -O1: ldxp x0, x1, [x8]
; -O1: cmp x2, x0
; -O1: csel x9, x1, x3, lo
; -O1: csel x10, x0, x2, lo
; -O1: stxp w11, x10, x9, [x8]
%r = atomicrmw umax ptr %ptr, i128 %value monotonic, align 16
ret i128 %r
}
define dso_local i128 @atomicrmw_umax_i128_aligned_acquire(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_umax_i128_aligned_acquire:
; -O0: subs x8, x8, x9
; -O0: subs x8, x8, x11
; -O0: subs x12, x12, x9
; -O0: csel w10, w8, w10, eq
; -O0: ands w12, w10, #0x1
; -O0: csel x2, x8, x11, ne
; -O0: ands w10, w10, #0x1
; -O0: csel x3, x8, x9, ne
; -O0: bl __aarch64_cas16_acq
; -O0: eor x8, x0, x8
; -O0: eor x9, x1, x9
; -O0: orr x8, x8, x9
; -O0: subs x8, x8, #0
;
; -O1-LABEL: atomicrmw_umax_i128_aligned_acquire:
; -O1: ldaxp x0, x1, [x8]
; -O1: cmp x2, x0
; -O1: csel x9, x1, x3, lo
; -O1: csel x10, x0, x2, lo
; -O1: stxp w11, x10, x9, [x8]
%r = atomicrmw umax ptr %ptr, i128 %value acquire, align 16
ret i128 %r
}
define dso_local i128 @atomicrmw_umax_i128_aligned_release(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_umax_i128_aligned_release:
; -O0: subs x8, x8, x9
; -O0: subs x8, x8, x11
; -O0: subs x12, x12, x9
; -O0: csel w10, w8, w10, eq
; -O0: ands w12, w10, #0x1
; -O0: csel x2, x8, x11, ne
; -O0: ands w10, w10, #0x1
; -O0: csel x3, x8, x9, ne
; -O0: bl __aarch64_cas16_rel
; -O0: eor x8, x0, x8
; -O0: eor x9, x1, x9
; -O0: orr x8, x8, x9
; -O0: subs x8, x8, #0
;
; -O1-LABEL: atomicrmw_umax_i128_aligned_release:
; -O1: ldxp x0, x1, [x8]
; -O1: cmp x2, x0
; -O1: csel x9, x1, x3, lo
; -O1: csel x10, x0, x2, lo
; -O1: stlxp w11, x10, x9, [x8]
%r = atomicrmw umax ptr %ptr, i128 %value release, align 16
ret i128 %r
}
define dso_local i128 @atomicrmw_umax_i128_aligned_acq_rel(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_umax_i128_aligned_acq_rel:
; -O0: subs x8, x8, x9
; -O0: subs x8, x8, x11
; -O0: subs x12, x12, x9
; -O0: csel w10, w8, w10, eq
; -O0: ands w12, w10, #0x1
; -O0: csel x2, x8, x11, ne
; -O0: ands w10, w10, #0x1
; -O0: csel x3, x8, x9, ne
; -O0: bl __aarch64_cas16_acq_rel
; -O0: eor x8, x0, x8
; -O0: eor x9, x1, x9
; -O0: orr x8, x8, x9
; -O0: subs x8, x8, #0
;
; -O1-LABEL: atomicrmw_umax_i128_aligned_acq_rel:
; -O1: ldaxp x0, x1, [x8]
; -O1: cmp x2, x0
; -O1: csel x9, x1, x3, lo
; -O1: csel x10, x0, x2, lo
; -O1: stlxp w11, x10, x9, [x8]
%r = atomicrmw umax ptr %ptr, i128 %value acq_rel, align 16
ret i128 %r
}
define dso_local i128 @atomicrmw_umax_i128_aligned_seq_cst(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_umax_i128_aligned_seq_cst:
; -O0: subs x8, x8, x9
; -O0: subs x8, x8, x11
; -O0: subs x12, x12, x9
; -O0: csel w10, w8, w10, eq
; -O0: ands w12, w10, #0x1
; -O0: csel x2, x8, x11, ne
; -O0: ands w10, w10, #0x1
; -O0: csel x3, x8, x9, ne
; -O0: bl __aarch64_cas16_acq_rel
; -O0: eor x8, x0, x8
; -O0: eor x9, x1, x9
; -O0: orr x8, x8, x9
; -O0: subs x8, x8, #0
;
; -O1-LABEL: atomicrmw_umax_i128_aligned_seq_cst:
; -O1: ldaxp x0, x1, [x8]
; -O1: cmp x2, x0
; -O1: csel x9, x1, x3, lo
; -O1: csel x10, x0, x2, lo
; -O1: stlxp w11, x10, x9, [x8]
%r = atomicrmw umax ptr %ptr, i128 %value seq_cst, align 16
ret i128 %r
}
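; Unaligned umax tests. An i8 access with align 1 is still naturally aligned, so it
; lowers exactly like the aligned case; for the wider types the whole RMW falls back
; to the __atomic_compare_exchange libcall, with the unsigned-max select (cmp/csel on
; "hi") computed inline around the call.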
define dso_local i8 @atomicrmw_umax_i8_unaligned_monotonic(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_umax_i8_unaligned_monotonic:
; -O0: and w9, w0, #0xff
; -O0: subs w9, w9, w8, uxtb
; -O0: csel w1, w0, w8, hi
; -O0: bl __aarch64_cas1_relax
; -O0: and w8, w0, #0xff
; -O0: subs w8, w8, w9, uxtb
;
; -O1-LABEL: atomicrmw_umax_i8_unaligned_monotonic:
; -O1: and w9, w1, #0xff
; -O1: ldxrb w8, [x0]
; -O1: cmp w8, w9
; -O1: csel w10, w8, w9, hi
; -O1: stxrb w11, w10, [x0]
%r = atomicrmw umax ptr %ptr, i8 %value monotonic, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_umax_i8_unaligned_acquire(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_umax_i8_unaligned_acquire:
; -O0: and w9, w0, #0xff
; -O0: subs w9, w9, w8, uxtb
; -O0: csel w1, w0, w8, hi
; -O0: bl __aarch64_cas1_acq
; -O0: and w8, w0, #0xff
; -O0: subs w8, w8, w9, uxtb
;
; -O1-LABEL: atomicrmw_umax_i8_unaligned_acquire:
; -O1: and w9, w1, #0xff
; -O1: ldaxrb w8, [x0]
; -O1: cmp w8, w9
; -O1: csel w10, w8, w9, hi
; -O1: stxrb w11, w10, [x0]
%r = atomicrmw umax ptr %ptr, i8 %value acquire, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_umax_i8_unaligned_release(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_umax_i8_unaligned_release:
; -O0: and w9, w0, #0xff
; -O0: subs w9, w9, w8, uxtb
; -O0: csel w1, w0, w8, hi
; -O0: bl __aarch64_cas1_rel
; -O0: and w8, w0, #0xff
; -O0: subs w8, w8, w9, uxtb
;
; -O1-LABEL: atomicrmw_umax_i8_unaligned_release:
; -O1: and w9, w1, #0xff
; -O1: ldxrb w8, [x0]
; -O1: cmp w8, w9
; -O1: csel w10, w8, w9, hi
; -O1: stlxrb w11, w10, [x0]
%r = atomicrmw umax ptr %ptr, i8 %value release, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_umax_i8_unaligned_acq_rel(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_umax_i8_unaligned_acq_rel:
; -O0: and w9, w0, #0xff
; -O0: subs w9, w9, w8, uxtb
; -O0: csel w1, w0, w8, hi
; -O0: bl __aarch64_cas1_acq_rel
; -O0: and w8, w0, #0xff
; -O0: subs w8, w8, w9, uxtb
;
; -O1-LABEL: atomicrmw_umax_i8_unaligned_acq_rel:
; -O1: and w9, w1, #0xff
; -O1: ldaxrb w8, [x0]
; -O1: cmp w8, w9
; -O1: csel w10, w8, w9, hi
; -O1: stlxrb w11, w10, [x0]
%r = atomicrmw umax ptr %ptr, i8 %value acq_rel, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_umax_i8_unaligned_seq_cst(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_umax_i8_unaligned_seq_cst:
; -O0: and w9, w0, #0xff
; -O0: subs w9, w9, w8, uxtb
; -O0: csel w1, w0, w8, hi
; -O0: bl __aarch64_cas1_acq_rel
; -O0: and w8, w0, #0xff
; -O0: subs w8, w8, w9, uxtb
;
; -O1-LABEL: atomicrmw_umax_i8_unaligned_seq_cst:
; -O1: and w9, w1, #0xff
; -O1: ldaxrb w8, [x0]
; -O1: cmp w8, w9
; -O1: csel w10, w8, w9, hi
; -O1: stlxrb w11, w10, [x0]
%r = atomicrmw umax ptr %ptr, i8 %value seq_cst, align 1
ret i8 %r
}
define dso_local i16 @atomicrmw_umax_i16_unaligned_monotonic(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_umax_i16_unaligned_monotonic:
; -O0: subs w10, w10, w8, uxth
; -O0: csel w8, w9, w8, hi
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_umax_i16_unaligned_monotonic:
; -O1: and w8, w0, #0xffff
; -O1: cmp w8, w20, uxth
; -O1: csel w8, w0, w20, hi
; -O1: bl __atomic_compare_exchange
%r = atomicrmw umax ptr %ptr, i16 %value monotonic, align 1
ret i16 %r
}
define dso_local i16 @atomicrmw_umax_i16_unaligned_acquire(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_umax_i16_unaligned_acquire:
; -O0: subs w10, w10, w8, uxth
; -O0: csel w8, w9, w8, hi
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_umax_i16_unaligned_acquire:
; -O1: and w8, w0, #0xffff
; -O1: cmp w8, w20, uxth
; -O1: csel w8, w0, w20, hi
; -O1: bl __atomic_compare_exchange
%r = atomicrmw umax ptr %ptr, i16 %value acquire, align 1
ret i16 %r
}
define dso_local i16 @atomicrmw_umax_i16_unaligned_release(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_umax_i16_unaligned_release:
; -O0: subs w10, w10, w8, uxth
; -O0: csel w8, w9, w8, hi
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_umax_i16_unaligned_release:
; -O1: and w8, w0, #0xffff
; -O1: cmp w8, w20, uxth
; -O1: csel w8, w0, w20, hi
; -O1: bl __atomic_compare_exchange
%r = atomicrmw umax ptr %ptr, i16 %value release, align 1
ret i16 %r
}
define dso_local i16 @atomicrmw_umax_i16_unaligned_acq_rel(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_umax_i16_unaligned_acq_rel:
; -O0: subs w10, w10, w8, uxth
; -O0: csel w8, w9, w8, hi
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_umax_i16_unaligned_acq_rel:
; -O1: and w8, w0, #0xffff
; -O1: cmp w8, w20, uxth
; -O1: csel w8, w0, w20, hi
; -O1: bl __atomic_compare_exchange
%r = atomicrmw umax ptr %ptr, i16 %value acq_rel, align 1
ret i16 %r
}
define dso_local i16 @atomicrmw_umax_i16_unaligned_seq_cst(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_umax_i16_unaligned_seq_cst:
; -O0: subs w10, w10, w8, uxth
; -O0: csel w8, w9, w8, hi
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_umax_i16_unaligned_seq_cst:
; -O1: and w8, w0, #0xffff
; -O1: cmp w8, w20, uxth
; -O1: csel w8, w0, w20, hi
; -O1: bl __atomic_compare_exchange
%r = atomicrmw umax ptr %ptr, i16 %value seq_cst, align 1
ret i16 %r
}
define dso_local i32 @atomicrmw_umax_i32_unaligned_monotonic(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_umax_i32_unaligned_monotonic:
; -O0: subs w10, w9, w8
; -O0: csel w8, w9, w8, hi
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_umax_i32_unaligned_monotonic:
; -O1: cmp w0, w20
; -O1: csel w8, w0, w20, hi
; -O1: bl __atomic_compare_exchange
%r = atomicrmw umax ptr %ptr, i32 %value monotonic, align 1
ret i32 %r
}
define dso_local i32 @atomicrmw_umax_i32_unaligned_acquire(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_umax_i32_unaligned_acquire:
; -O0: subs w10, w9, w8
; -O0: csel w8, w9, w8, hi
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_umax_i32_unaligned_acquire:
; -O1: cmp w0, w20
; -O1: csel w8, w0, w20, hi
; -O1: bl __atomic_compare_exchange
%r = atomicrmw umax ptr %ptr, i32 %value acquire, align 1
ret i32 %r
}
define dso_local i32 @atomicrmw_umax_i32_unaligned_release(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_umax_i32_unaligned_release:
; -O0: subs w10, w9, w8
; -O0: csel w8, w9, w8, hi
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_umax_i32_unaligned_release:
; -O1: cmp w0, w20
; -O1: csel w8, w0, w20, hi
; -O1: bl __atomic_compare_exchange
%r = atomicrmw umax ptr %ptr, i32 %value release, align 1
ret i32 %r
}
define dso_local i32 @atomicrmw_umax_i32_unaligned_acq_rel(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_umax_i32_unaligned_acq_rel:
; -O0: subs w10, w9, w8
; -O0: csel w8, w9, w8, hi
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_umax_i32_unaligned_acq_rel:
; -O1: cmp w0, w20
; -O1: csel w8, w0, w20, hi
; -O1: bl __atomic_compare_exchange
%r = atomicrmw umax ptr %ptr, i32 %value acq_rel, align 1
ret i32 %r
}
define dso_local i32 @atomicrmw_umax_i32_unaligned_seq_cst(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_umax_i32_unaligned_seq_cst:
; -O0: subs w10, w9, w8
; -O0: csel w8, w9, w8, hi
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_umax_i32_unaligned_seq_cst:
; -O1: cmp w0, w20
; -O1: csel w8, w0, w20, hi
; -O1: bl __atomic_compare_exchange
%r = atomicrmw umax ptr %ptr, i32 %value seq_cst, align 1
ret i32 %r
}
define dso_local i64 @atomicrmw_umax_i64_unaligned_monotonic(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_umax_i64_unaligned_monotonic:
; -O0: subs x10, x9, x8
; -O0: csel x8, x9, x8, hi
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_umax_i64_unaligned_monotonic:
; -O1: cmp x0, x20
; -O1: csel x8, x0, x20, hi
; -O1: bl __atomic_compare_exchange
%r = atomicrmw umax ptr %ptr, i64 %value monotonic, align 1
ret i64 %r
}
define dso_local i64 @atomicrmw_umax_i64_unaligned_acquire(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_umax_i64_unaligned_acquire:
; -O0: subs x10, x9, x8
; -O0: csel x8, x9, x8, hi
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_umax_i64_unaligned_acquire:
; -O1: cmp x0, x20
; -O1: csel x8, x0, x20, hi
; -O1: bl __atomic_compare_exchange
%r = atomicrmw umax ptr %ptr, i64 %value acquire, align 1
ret i64 %r
}
define dso_local i64 @atomicrmw_umax_i64_unaligned_release(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_umax_i64_unaligned_release:
; -O0: subs x10, x9, x8
; -O0: csel x8, x9, x8, hi
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_umax_i64_unaligned_release:
; -O1: cmp x0, x20
; -O1: csel x8, x0, x20, hi
; -O1: bl __atomic_compare_exchange
%r = atomicrmw umax ptr %ptr, i64 %value release, align 1
ret i64 %r
}
define dso_local i64 @atomicrmw_umax_i64_unaligned_acq_rel(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_umax_i64_unaligned_acq_rel:
; -O0: subs x10, x9, x8
; -O0: csel x8, x9, x8, hi
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_umax_i64_unaligned_acq_rel:
; -O1: cmp x0, x20
; -O1: csel x8, x0, x20, hi
; -O1: bl __atomic_compare_exchange
%r = atomicrmw umax ptr %ptr, i64 %value acq_rel, align 1
ret i64 %r
}
define dso_local i64 @atomicrmw_umax_i64_unaligned_seq_cst(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_umax_i64_unaligned_seq_cst:
; -O0: subs x10, x9, x8
; -O0: csel x8, x9, x8, hi
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_umax_i64_unaligned_seq_cst:
; -O1: cmp x0, x20
; -O1: csel x8, x0, x20, hi
; -O1: bl __atomic_compare_exchange
%r = atomicrmw umax ptr %ptr, i64 %value seq_cst, align 1
ret i64 %r
}
define dso_local i128 @atomicrmw_umax_i128_unaligned_monotonic(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_umax_i128_unaligned_monotonic:
; -O0: subs x8, x8, x10
; -O0: subs x8, x8, x9
; -O0: subs x12, x12, x10
; -O0: csel w11, w8, w11, eq
; -O0: ands w12, w11, #0x1
; -O0: csel x9, x8, x9, ne
; -O0: ands w11, w11, #0x1
; -O0: csel x8, x8, x10, ne
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_umax_i128_unaligned_monotonic:
; -O1: ldp x0, x1, [x0]
; -O1: cmp x21, x0
; -O1: csel x8, x1, x19, lo
; -O1: csel x9, x0, x21, lo
; -O1: bl __atomic_compare_exchange
%r = atomicrmw umax ptr %ptr, i128 %value monotonic, align 1
ret i128 %r
}
define dso_local i128 @atomicrmw_umax_i128_unaligned_acquire(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_umax_i128_unaligned_acquire:
; -O0: subs x8, x8, x10
; -O0: subs x8, x8, x9
; -O0: subs x12, x12, x10
; -O0: csel w11, w8, w11, eq
; -O0: ands w12, w11, #0x1
; -O0: csel x9, x8, x9, ne
; -O0: ands w11, w11, #0x1
; -O0: csel x8, x8, x10, ne
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_umax_i128_unaligned_acquire:
; -O1: ldp x0, x1, [x0]
; -O1: cmp x21, x0
; -O1: csel x8, x1, x19, lo
; -O1: csel x9, x0, x21, lo
; -O1: bl __atomic_compare_exchange
%r = atomicrmw umax ptr %ptr, i128 %value acquire, align 1
ret i128 %r
}
define dso_local i128 @atomicrmw_umax_i128_unaligned_release(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_umax_i128_unaligned_release:
; -O0: subs x8, x8, x10
; -O0: subs x8, x8, x9
; -O0: subs x12, x12, x10
; -O0: csel w11, w8, w11, eq
; -O0: ands w12, w11, #0x1
; -O0: csel x9, x8, x9, ne
; -O0: ands w11, w11, #0x1
; -O0: csel x8, x8, x10, ne
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_umax_i128_unaligned_release:
; -O1: ldp x0, x1, [x0]
; -O1: cmp x21, x0
; -O1: csel x8, x1, x19, lo
; -O1: csel x9, x0, x21, lo
; -O1: bl __atomic_compare_exchange
%r = atomicrmw umax ptr %ptr, i128 %value release, align 1
ret i128 %r
}
define dso_local i128 @atomicrmw_umax_i128_unaligned_acq_rel(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_umax_i128_unaligned_acq_rel:
; -O0: subs x8, x8, x10
; -O0: subs x8, x8, x9
; -O0: subs x12, x12, x10
; -O0: csel w11, w8, w11, eq
; -O0: ands w12, w11, #0x1
; -O0: csel x9, x8, x9, ne
; -O0: ands w11, w11, #0x1
; -O0: csel x8, x8, x10, ne
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_umax_i128_unaligned_acq_rel:
; -O1: ldp x0, x1, [x0]
; -O1: cmp x21, x0
; -O1: csel x8, x1, x19, lo
; -O1: csel x9, x0, x21, lo
; -O1: bl __atomic_compare_exchange
%r = atomicrmw umax ptr %ptr, i128 %value acq_rel, align 1
ret i128 %r
}
define dso_local i128 @atomicrmw_umax_i128_unaligned_seq_cst(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_umax_i128_unaligned_seq_cst:
; -O0: subs x8, x8, x10
; -O0: subs x8, x8, x9
; -O0: subs x12, x12, x10
; -O0: csel w11, w8, w11, eq
; -O0: ands w12, w11, #0x1
; -O0: csel x9, x8, x9, ne
; -O0: ands w11, w11, #0x1
; -O0: csel x8, x8, x10, ne
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_umax_i128_unaligned_seq_cst:
; -O1: ldp x0, x1, [x0]
; -O1: cmp x21, x0
; -O1: csel x8, x1, x19, lo
; -O1: csel x9, x0, x21, lo
; -O1: bl __atomic_compare_exchange
%r = atomicrmw umax ptr %ptr, i128 %value seq_cst, align 1
ret i128 %r
}
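; Unsigned-min (umin) tests mirror the umax cases with the inverted unsigned
; condition: the csel uses "ls" instead of "hi" (and "hs" instead of "lo" for the
; swapped i128 compares).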
define dso_local i8 @atomicrmw_umin_i8_aligned_monotonic(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_umin_i8_aligned_monotonic:
; -O0: and w9, w0, #0xff
; -O0: subs w9, w9, w8, uxtb
; -O0: csel w1, w0, w8, ls
; -O0: bl __aarch64_cas1_relax
; -O0: and w8, w0, #0xff
; -O0: subs w8, w8, w9, uxtb
;
; -O1-LABEL: atomicrmw_umin_i8_aligned_monotonic:
; -O1: and w9, w1, #0xff
; -O1: ldxrb w8, [x0]
; -O1: cmp w8, w9
; -O1: csel w10, w8, w9, ls
; -O1: stxrb w11, w10, [x0]
%r = atomicrmw umin ptr %ptr, i8 %value monotonic, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_umin_i8_aligned_acquire(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_umin_i8_aligned_acquire:
; -O0: and w9, w0, #0xff
; -O0: subs w9, w9, w8, uxtb
; -O0: csel w1, w0, w8, ls
; -O0: bl __aarch64_cas1_acq
; -O0: and w8, w0, #0xff
; -O0: subs w8, w8, w9, uxtb
;
; -O1-LABEL: atomicrmw_umin_i8_aligned_acquire:
; -O1: and w9, w1, #0xff
; -O1: ldaxrb w8, [x0]
; -O1: cmp w8, w9
; -O1: csel w10, w8, w9, ls
; -O1: stxrb w11, w10, [x0]
%r = atomicrmw umin ptr %ptr, i8 %value acquire, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_umin_i8_aligned_release(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_umin_i8_aligned_release:
; -O0: and w9, w0, #0xff
; -O0: subs w9, w9, w8, uxtb
; -O0: csel w1, w0, w8, ls
; -O0: bl __aarch64_cas1_rel
; -O0: and w8, w0, #0xff
; -O0: subs w8, w8, w9, uxtb
;
; -O1-LABEL: atomicrmw_umin_i8_aligned_release:
; -O1: and w9, w1, #0xff
; -O1: ldxrb w8, [x0]
; -O1: cmp w8, w9
; -O1: csel w10, w8, w9, ls
; -O1: stlxrb w11, w10, [x0]
%r = atomicrmw umin ptr %ptr, i8 %value release, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_umin_i8_aligned_acq_rel(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_umin_i8_aligned_acq_rel:
; -O0: and w9, w0, #0xff
; -O0: subs w9, w9, w8, uxtb
; -O0: csel w1, w0, w8, ls
; -O0: bl __aarch64_cas1_acq_rel
; -O0: and w8, w0, #0xff
; -O0: subs w8, w8, w9, uxtb
;
; -O1-LABEL: atomicrmw_umin_i8_aligned_acq_rel:
; -O1: and w9, w1, #0xff
; -O1: ldaxrb w8, [x0]
; -O1: cmp w8, w9
; -O1: csel w10, w8, w9, ls
; -O1: stlxrb w11, w10, [x0]
%r = atomicrmw umin ptr %ptr, i8 %value acq_rel, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_umin_i8_aligned_seq_cst(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_umin_i8_aligned_seq_cst:
; -O0: and w9, w0, #0xff
; -O0: subs w9, w9, w8, uxtb
; -O0: csel w1, w0, w8, ls
; -O0: bl __aarch64_cas1_acq_rel
; -O0: and w8, w0, #0xff
; -O0: subs w8, w8, w9, uxtb
;
; -O1-LABEL: atomicrmw_umin_i8_aligned_seq_cst:
; -O1: and w9, w1, #0xff
; -O1: ldaxrb w8, [x0]
; -O1: cmp w8, w9
; -O1: csel w10, w8, w9, ls
; -O1: stlxrb w11, w10, [x0]
%r = atomicrmw umin ptr %ptr, i8 %value seq_cst, align 1
ret i8 %r
}
define dso_local i16 @atomicrmw_umin_i16_aligned_monotonic(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_umin_i16_aligned_monotonic:
; -O0: subs w9, w9, w8, uxth
; -O0: csel w1, w0, w8, ls
; -O0: bl __aarch64_cas2_relax
; -O0: subs w8, w8, w0, uxth
;
; -O1-LABEL: atomicrmw_umin_i16_aligned_monotonic:
; -O1: and w9, w1, #0xffff
; -O1: ldxrh w8, [x0]
; -O1: cmp w8, w9
; -O1: csel w10, w8, w9, ls
; -O1: stxrh w11, w10, [x0]
%r = atomicrmw umin ptr %ptr, i16 %value monotonic, align 2
ret i16 %r
}
define dso_local i16 @atomicrmw_umin_i16_aligned_acquire(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_umin_i16_aligned_acquire:
; -O0: subs w9, w9, w8, uxth
; -O0: csel w1, w0, w8, ls
; -O0: bl __aarch64_cas2_acq
; -O0: subs w8, w8, w0, uxth
;
; -O1-LABEL: atomicrmw_umin_i16_aligned_acquire:
; -O1: and w9, w1, #0xffff
; -O1: ldaxrh w8, [x0]
; -O1: cmp w8, w9
; -O1: csel w10, w8, w9, ls
; -O1: stxrh w11, w10, [x0]
%r = atomicrmw umin ptr %ptr, i16 %value acquire, align 2
ret i16 %r
}
define dso_local i16 @atomicrmw_umin_i16_aligned_release(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_umin_i16_aligned_release:
; -O0: subs w9, w9, w8, uxth
; -O0: csel w1, w0, w8, ls
; -O0: bl __aarch64_cas2_rel
; -O0: subs w8, w8, w0, uxth
;
; -O1-LABEL: atomicrmw_umin_i16_aligned_release:
; -O1: and w9, w1, #0xffff
; -O1: ldxrh w8, [x0]
; -O1: cmp w8, w9
; -O1: csel w10, w8, w9, ls
; -O1: stlxrh w11, w10, [x0]
%r = atomicrmw umin ptr %ptr, i16 %value release, align 2
ret i16 %r
}
define dso_local i16 @atomicrmw_umin_i16_aligned_acq_rel(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_umin_i16_aligned_acq_rel:
; -O0: subs w9, w9, w8, uxth
; -O0: csel w1, w0, w8, ls
; -O0: bl __aarch64_cas2_acq_rel
; -O0: subs w8, w8, w0, uxth
;
; -O1-LABEL: atomicrmw_umin_i16_aligned_acq_rel:
; -O1: and w9, w1, #0xffff
; -O1: ldaxrh w8, [x0]
; -O1: cmp w8, w9
; -O1: csel w10, w8, w9, ls
; -O1: stlxrh w11, w10, [x0]
%r = atomicrmw umin ptr %ptr, i16 %value acq_rel, align 2
ret i16 %r
}
define dso_local i16 @atomicrmw_umin_i16_aligned_seq_cst(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_umin_i16_aligned_seq_cst:
; -O0: subs w9, w9, w8, uxth
; -O0: csel w1, w0, w8, ls
; -O0: bl __aarch64_cas2_acq_rel
; -O0: subs w8, w8, w0, uxth
;
; -O1-LABEL: atomicrmw_umin_i16_aligned_seq_cst:
; -O1: and w9, w1, #0xffff
; -O1: ldaxrh w8, [x0]
; -O1: cmp w8, w9
; -O1: csel w10, w8, w9, ls
; -O1: stlxrh w11, w10, [x0]
%r = atomicrmw umin ptr %ptr, i16 %value seq_cst, align 2
ret i16 %r
}
define dso_local i32 @atomicrmw_umin_i32_aligned_monotonic(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_umin_i32_aligned_monotonic:
; -O0: subs w9, w0, w8
; -O0: csel w1, w0, w8, ls
; -O0: bl __aarch64_cas4_relax
; -O0: subs w8, w0, w8
;
; -O1-LABEL: atomicrmw_umin_i32_aligned_monotonic:
; -O1: ldxr w8, [x0]
; -O1: cmp w8, w1
; -O1: csel w9, w8, w1, ls
; -O1: stxr w10, w9, [x0]
%r = atomicrmw umin ptr %ptr, i32 %value monotonic, align 4
ret i32 %r
}
define dso_local i32 @atomicrmw_umin_i32_aligned_acquire(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_umin_i32_aligned_acquire:
; -O0: subs w9, w0, w8
; -O0: csel w1, w0, w8, ls
; -O0: bl __aarch64_cas4_acq
; -O0: subs w8, w0, w8
;
; -O1-LABEL: atomicrmw_umin_i32_aligned_acquire:
; -O1: ldaxr w8, [x0]
; -O1: cmp w8, w1
; -O1: csel w9, w8, w1, ls
; -O1: stxr w10, w9, [x0]
%r = atomicrmw umin ptr %ptr, i32 %value acquire, align 4
ret i32 %r
}
define dso_local i32 @atomicrmw_umin_i32_aligned_release(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_umin_i32_aligned_release:
; -O0: subs w9, w0, w8
; -O0: csel w1, w0, w8, ls
; -O0: bl __aarch64_cas4_rel
; -O0: subs w8, w0, w8
;
; -O1-LABEL: atomicrmw_umin_i32_aligned_release:
; -O1: ldxr w8, [x0]
; -O1: cmp w8, w1
; -O1: csel w9, w8, w1, ls
; -O1: stlxr w10, w9, [x0]
%r = atomicrmw umin ptr %ptr, i32 %value release, align 4
ret i32 %r
}
define dso_local i32 @atomicrmw_umin_i32_aligned_acq_rel(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_umin_i32_aligned_acq_rel:
; -O0: subs w9, w0, w8
; -O0: csel w1, w0, w8, ls
; -O0: bl __aarch64_cas4_acq_rel
; -O0: subs w8, w0, w8
;
; -O1-LABEL: atomicrmw_umin_i32_aligned_acq_rel:
; -O1: ldaxr w8, [x0]
; -O1: cmp w8, w1
; -O1: csel w9, w8, w1, ls
; -O1: stlxr w10, w9, [x0]
%r = atomicrmw umin ptr %ptr, i32 %value acq_rel, align 4
ret i32 %r
}
define dso_local i32 @atomicrmw_umin_i32_aligned_seq_cst(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_umin_i32_aligned_seq_cst:
; -O0: subs w9, w0, w8
; -O0: csel w1, w0, w8, ls
; -O0: bl __aarch64_cas4_acq_rel
; -O0: subs w8, w0, w8
;
; -O1-LABEL: atomicrmw_umin_i32_aligned_seq_cst:
; -O1: ldaxr w8, [x0]
; -O1: cmp w8, w1
; -O1: csel w9, w8, w1, ls
; -O1: stlxr w10, w9, [x0]
%r = atomicrmw umin ptr %ptr, i32 %value seq_cst, align 4
ret i32 %r
}
define dso_local i64 @atomicrmw_umin_i64_aligned_monotonic(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_umin_i64_aligned_monotonic:
; -O0: subs x9, x0, x8
; -O0: csel x1, x0, x8, ls
; -O0: bl __aarch64_cas8_relax
; -O0: subs x8, x0, x8
;
; -O1-LABEL: atomicrmw_umin_i64_aligned_monotonic:
; -O1: ldxr x0, [x8]
; -O1: cmp x0, x1
; -O1: csel x9, x0, x1, ls
; -O1: stxr w10, x9, [x8]
%r = atomicrmw umin ptr %ptr, i64 %value monotonic, align 8
ret i64 %r
}
define dso_local i64 @atomicrmw_umin_i64_aligned_acquire(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_umin_i64_aligned_acquire:
; -O0: subs x9, x0, x8
; -O0: csel x1, x0, x8, ls
; -O0: bl __aarch64_cas8_acq
; -O0: subs x8, x0, x8
;
; -O1-LABEL: atomicrmw_umin_i64_aligned_acquire:
; -O1: ldaxr x0, [x8]
; -O1: cmp x0, x1
; -O1: csel x9, x0, x1, ls
; -O1: stxr w10, x9, [x8]
%r = atomicrmw umin ptr %ptr, i64 %value acquire, align 8
ret i64 %r
}
define dso_local i64 @atomicrmw_umin_i64_aligned_release(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_umin_i64_aligned_release:
; -O0: subs x9, x0, x8
; -O0: csel x1, x0, x8, ls
; -O0: bl __aarch64_cas8_rel
; -O0: subs x8, x0, x8
;
; -O1-LABEL: atomicrmw_umin_i64_aligned_release:
; -O1: ldxr x0, [x8]
; -O1: cmp x0, x1
; -O1: csel x9, x0, x1, ls
; -O1: stlxr w10, x9, [x8]
%r = atomicrmw umin ptr %ptr, i64 %value release, align 8
ret i64 %r
}
define dso_local i64 @atomicrmw_umin_i64_aligned_acq_rel(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_umin_i64_aligned_acq_rel:
; -O0: subs x9, x0, x8
; -O0: csel x1, x0, x8, ls
; -O0: bl __aarch64_cas8_acq_rel
; -O0: subs x8, x0, x8
;
; -O1-LABEL: atomicrmw_umin_i64_aligned_acq_rel:
; -O1: ldaxr x0, [x8]
; -O1: cmp x0, x1
; -O1: csel x9, x0, x1, ls
; -O1: stlxr w10, x9, [x8]
%r = atomicrmw umin ptr %ptr, i64 %value acq_rel, align 8
ret i64 %r
}
define dso_local i64 @atomicrmw_umin_i64_aligned_seq_cst(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_umin_i64_aligned_seq_cst:
; -O0: subs x9, x0, x8
; -O0: csel x1, x0, x8, ls
; -O0: bl __aarch64_cas8_acq_rel
; -O0: subs x8, x0, x8
;
; -O1-LABEL: atomicrmw_umin_i64_aligned_seq_cst:
; -O1: ldaxr x0, [x8]
; -O1: cmp x0, x1
; -O1: csel x9, x0, x1, ls
; -O1: stlxr w10, x9, [x8]
%r = atomicrmw umin ptr %ptr, i64 %value seq_cst, align 8
ret i64 %r
}
define dso_local i128 @atomicrmw_umin_i128_aligned_monotonic(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_umin_i128_aligned_monotonic:
; -O0: subs x8, x8, x9
; -O0: subs x8, x8, x11
; -O0: subs x12, x12, x9
; -O0: csel w10, w8, w10, eq
; -O0: ands w12, w10, #0x1
; -O0: csel x2, x8, x11, ne
; -O0: ands w10, w10, #0x1
; -O0: csel x3, x8, x9, ne
; -O0: bl __aarch64_cas16_relax
; -O0: eor x8, x0, x8
; -O0: eor x9, x1, x9
; -O0: orr x8, x8, x9
; -O0: subs x8, x8, #0
;
; -O1-LABEL: atomicrmw_umin_i128_aligned_monotonic:
; -O1: ldxp x0, x1, [x8]
; -O1: cmp x2, x0
; -O1: csel x9, x1, x3, hs
; -O1: csel x10, x0, x2, hs
; -O1: stxp w11, x10, x9, [x8]
%r = atomicrmw umin ptr %ptr, i128 %value monotonic, align 16
ret i128 %r
}
define dso_local i128 @atomicrmw_umin_i128_aligned_acquire(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_umin_i128_aligned_acquire:
; -O0: subs x8, x8, x9
; -O0: subs x8, x8, x11
; -O0: subs x12, x12, x9
; -O0: csel w10, w8, w10, eq
; -O0: ands w12, w10, #0x1
; -O0: csel x2, x8, x11, ne
; -O0: ands w10, w10, #0x1
; -O0: csel x3, x8, x9, ne
; -O0: bl __aarch64_cas16_acq
; -O0: eor x8, x0, x8
; -O0: eor x9, x1, x9
; -O0: orr x8, x8, x9
; -O0: subs x8, x8, #0
;
; -O1-LABEL: atomicrmw_umin_i128_aligned_acquire:
; -O1: ldaxp x0, x1, [x8]
; -O1: cmp x2, x0
; -O1: csel x9, x1, x3, hs
; -O1: csel x10, x0, x2, hs
; -O1: stxp w11, x10, x9, [x8]
%r = atomicrmw umin ptr %ptr, i128 %value acquire, align 16
ret i128 %r
}
define dso_local i128 @atomicrmw_umin_i128_aligned_release(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_umin_i128_aligned_release:
; -O0: subs x8, x8, x9
; -O0: subs x8, x8, x11
; -O0: subs x12, x12, x9
; -O0: csel w10, w8, w10, eq
; -O0: ands w12, w10, #0x1
; -O0: csel x2, x8, x11, ne
; -O0: ands w10, w10, #0x1
; -O0: csel x3, x8, x9, ne
; -O0: bl __aarch64_cas16_rel
; -O0: eor x8, x0, x8
; -O0: eor x9, x1, x9
; -O0: orr x8, x8, x9
; -O0: subs x8, x8, #0
;
; -O1-LABEL: atomicrmw_umin_i128_aligned_release:
; -O1: ldxp x0, x1, [x8]
; -O1: cmp x2, x0
; -O1: csel x9, x1, x3, hs
; -O1: csel x10, x0, x2, hs
; -O1: stlxp w11, x10, x9, [x8]
%r = atomicrmw umin ptr %ptr, i128 %value release, align 16
ret i128 %r
}
define dso_local i128 @atomicrmw_umin_i128_aligned_acq_rel(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_umin_i128_aligned_acq_rel:
; -O0: subs x8, x8, x9
; -O0: subs x8, x8, x11
; -O0: subs x12, x12, x9
; -O0: csel w10, w8, w10, eq
; -O0: ands w12, w10, #0x1
; -O0: csel x2, x8, x11, ne
; -O0: ands w10, w10, #0x1
; -O0: csel x3, x8, x9, ne
; -O0: bl __aarch64_cas16_acq_rel
; -O0: eor x8, x0, x8
; -O0: eor x9, x1, x9
; -O0: orr x8, x8, x9
; -O0: subs x8, x8, #0
;
; -O1-LABEL: atomicrmw_umin_i128_aligned_acq_rel:
; -O1: ldaxp x0, x1, [x8]
; -O1: cmp x2, x0
; -O1: csel x9, x1, x3, hs
; -O1: csel x10, x0, x2, hs
; -O1: stlxp w11, x10, x9, [x8]
%r = atomicrmw umin ptr %ptr, i128 %value acq_rel, align 16
ret i128 %r
}
define dso_local i128 @atomicrmw_umin_i128_aligned_seq_cst(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_umin_i128_aligned_seq_cst:
; -O0: subs x8, x8, x9
; -O0: subs x8, x8, x11
; -O0: subs x12, x12, x9
; -O0: csel w10, w8, w10, eq
; -O0: ands w12, w10, #0x1
; -O0: csel x2, x8, x11, ne
; -O0: ands w10, w10, #0x1
; -O0: csel x3, x8, x9, ne
; -O0: bl __aarch64_cas16_acq_rel
; -O0: eor x8, x0, x8
; -O0: eor x9, x1, x9
; -O0: orr x8, x8, x9
; -O0: subs x8, x8, #0
;
; -O1-LABEL: atomicrmw_umin_i128_aligned_seq_cst:
; -O1: ldaxp x0, x1, [x8]
; -O1: cmp x2, x0
; -O1: csel x9, x1, x3, hs
; -O1: csel x10, x0, x2, hs
; -O1: stlxp w11, x10, x9, [x8]
%r = atomicrmw umin ptr %ptr, i128 %value seq_cst, align 16
ret i128 %r
}
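; Unaligned umin tests: as with umax, i8 stays on the CAS/exclusive-loop path and the
; wider types fall back to the __atomic_compare_exchange libcall.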
define dso_local i8 @atomicrmw_umin_i8_unaligned_monotonic(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_umin_i8_unaligned_monotonic:
; -O0: and w9, w0, #0xff
; -O0: subs w9, w9, w8, uxtb
; -O0: csel w1, w0, w8, ls
; -O0: bl __aarch64_cas1_relax
; -O0: and w8, w0, #0xff
; -O0: subs w8, w8, w9, uxtb
;
; -O1-LABEL: atomicrmw_umin_i8_unaligned_monotonic:
; -O1: and w9, w1, #0xff
; -O1: ldxrb w8, [x0]
; -O1: cmp w8, w9
; -O1: csel w10, w8, w9, ls
; -O1: stxrb w11, w10, [x0]
%r = atomicrmw umin ptr %ptr, i8 %value monotonic, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_umin_i8_unaligned_acquire(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_umin_i8_unaligned_acquire:
; -O0: and w9, w0, #0xff
; -O0: subs w9, w9, w8, uxtb
; -O0: csel w1, w0, w8, ls
; -O0: bl __aarch64_cas1_acq
; -O0: and w8, w0, #0xff
; -O0: subs w8, w8, w9, uxtb
;
; -O1-LABEL: atomicrmw_umin_i8_unaligned_acquire:
; -O1: and w9, w1, #0xff
; -O1: ldaxrb w8, [x0]
; -O1: cmp w8, w9
; -O1: csel w10, w8, w9, ls
; -O1: stxrb w11, w10, [x0]
%r = atomicrmw umin ptr %ptr, i8 %value acquire, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_umin_i8_unaligned_release(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_umin_i8_unaligned_release:
; -O0: and w9, w0, #0xff
; -O0: subs w9, w9, w8, uxtb
; -O0: csel w1, w0, w8, ls
; -O0: bl __aarch64_cas1_rel
; -O0: and w8, w0, #0xff
; -O0: subs w8, w8, w9, uxtb
;
; -O1-LABEL: atomicrmw_umin_i8_unaligned_release:
; -O1: and w9, w1, #0xff
; -O1: ldxrb w8, [x0]
; -O1: cmp w8, w9
; -O1: csel w10, w8, w9, ls
; -O1: stlxrb w11, w10, [x0]
%r = atomicrmw umin ptr %ptr, i8 %value release, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_umin_i8_unaligned_acq_rel(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_umin_i8_unaligned_acq_rel:
; -O0: and w9, w0, #0xff
; -O0: subs w9, w9, w8, uxtb
; -O0: csel w1, w0, w8, ls
; -O0: bl __aarch64_cas1_acq_rel
; -O0: and w8, w0, #0xff
; -O0: subs w8, w8, w9, uxtb
;
; -O1-LABEL: atomicrmw_umin_i8_unaligned_acq_rel:
; -O1: and w9, w1, #0xff
; -O1: ldaxrb w8, [x0]
; -O1: cmp w8, w9
; -O1: csel w10, w8, w9, ls
; -O1: stlxrb w11, w10, [x0]
%r = atomicrmw umin ptr %ptr, i8 %value acq_rel, align 1
ret i8 %r
}
define dso_local i8 @atomicrmw_umin_i8_unaligned_seq_cst(ptr %ptr, i8 %value) {
; -O0-LABEL: atomicrmw_umin_i8_unaligned_seq_cst:
; -O0: and w9, w0, #0xff
; -O0: subs w9, w9, w8, uxtb
; -O0: csel w1, w0, w8, ls
; -O0: bl __aarch64_cas1_acq_rel
; -O0: and w8, w0, #0xff
; -O0: subs w8, w8, w9, uxtb
;
; -O1-LABEL: atomicrmw_umin_i8_unaligned_seq_cst:
; -O1: and w9, w1, #0xff
; -O1: ldaxrb w8, [x0]
; -O1: cmp w8, w9
; -O1: csel w10, w8, w9, ls
; -O1: stlxrb w11, w10, [x0]
%r = atomicrmw umin ptr %ptr, i8 %value seq_cst, align 1
ret i8 %r
}
define dso_local i16 @atomicrmw_umin_i16_unaligned_monotonic(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_umin_i16_unaligned_monotonic:
; -O0: subs w10, w10, w8, uxth
; -O0: csel w8, w9, w8, ls
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_umin_i16_unaligned_monotonic:
; -O1: and w8, w0, #0xffff
; -O1: cmp w8, w20, uxth
; -O1: csel w8, w0, w20, ls
; -O1: bl __atomic_compare_exchange
%r = atomicrmw umin ptr %ptr, i16 %value monotonic, align 1
ret i16 %r
}
define dso_local i16 @atomicrmw_umin_i16_unaligned_acquire(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_umin_i16_unaligned_acquire:
; -O0: subs w10, w10, w8, uxth
; -O0: csel w8, w9, w8, ls
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_umin_i16_unaligned_acquire:
; -O1: and w8, w0, #0xffff
; -O1: cmp w8, w20, uxth
; -O1: csel w8, w0, w20, ls
; -O1: bl __atomic_compare_exchange
%r = atomicrmw umin ptr %ptr, i16 %value acquire, align 1
ret i16 %r
}
define dso_local i16 @atomicrmw_umin_i16_unaligned_release(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_umin_i16_unaligned_release:
; -O0: subs w10, w10, w8, uxth
; -O0: csel w8, w9, w8, ls
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_umin_i16_unaligned_release:
; -O1: and w8, w0, #0xffff
; -O1: cmp w8, w20, uxth
; -O1: csel w8, w0, w20, ls
; -O1: bl __atomic_compare_exchange
%r = atomicrmw umin ptr %ptr, i16 %value release, align 1
ret i16 %r
}
define dso_local i16 @atomicrmw_umin_i16_unaligned_acq_rel(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_umin_i16_unaligned_acq_rel:
; -O0: subs w10, w10, w8, uxth
; -O0: csel w8, w9, w8, ls
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_umin_i16_unaligned_acq_rel:
; -O1: and w8, w0, #0xffff
; -O1: cmp w8, w20, uxth
; -O1: csel w8, w0, w20, ls
; -O1: bl __atomic_compare_exchange
%r = atomicrmw umin ptr %ptr, i16 %value acq_rel, align 1
ret i16 %r
}
define dso_local i16 @atomicrmw_umin_i16_unaligned_seq_cst(ptr %ptr, i16 %value) {
; -O0-LABEL: atomicrmw_umin_i16_unaligned_seq_cst:
; -O0: subs w10, w10, w8, uxth
; -O0: csel w8, w9, w8, ls
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_umin_i16_unaligned_seq_cst:
; -O1: and w8, w0, #0xffff
; -O1: cmp w8, w20, uxth
; -O1: csel w8, w0, w20, ls
; -O1: bl __atomic_compare_exchange
%r = atomicrmw umin ptr %ptr, i16 %value seq_cst, align 1
ret i16 %r
}
define dso_local i32 @atomicrmw_umin_i32_unaligned_monotonic(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_umin_i32_unaligned_monotonic:
; -O0: subs w10, w9, w8
; -O0: csel w8, w9, w8, ls
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_umin_i32_unaligned_monotonic:
; -O1: cmp w0, w20
; -O1: csel w8, w0, w20, ls
; -O1: bl __atomic_compare_exchange
%r = atomicrmw umin ptr %ptr, i32 %value monotonic, align 1
ret i32 %r
}
define dso_local i32 @atomicrmw_umin_i32_unaligned_acquire(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_umin_i32_unaligned_acquire:
; -O0: subs w10, w9, w8
; -O0: csel w8, w9, w8, ls
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_umin_i32_unaligned_acquire:
; -O1: cmp w0, w20
; -O1: csel w8, w0, w20, ls
; -O1: bl __atomic_compare_exchange
%r = atomicrmw umin ptr %ptr, i32 %value acquire, align 1
ret i32 %r
}
define dso_local i32 @atomicrmw_umin_i32_unaligned_release(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_umin_i32_unaligned_release:
; -O0: subs w10, w9, w8
; -O0: csel w8, w9, w8, ls
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_umin_i32_unaligned_release:
; -O1: cmp w0, w20
; -O1: csel w8, w0, w20, ls
; -O1: bl __atomic_compare_exchange
%r = atomicrmw umin ptr %ptr, i32 %value release, align 1
ret i32 %r
}
define dso_local i32 @atomicrmw_umin_i32_unaligned_acq_rel(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_umin_i32_unaligned_acq_rel:
; -O0: subs w10, w9, w8
; -O0: csel w8, w9, w8, ls
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_umin_i32_unaligned_acq_rel:
; -O1: cmp w0, w20
; -O1: csel w8, w0, w20, ls
; -O1: bl __atomic_compare_exchange
%r = atomicrmw umin ptr %ptr, i32 %value acq_rel, align 1
ret i32 %r
}
define dso_local i32 @atomicrmw_umin_i32_unaligned_seq_cst(ptr %ptr, i32 %value) {
; -O0-LABEL: atomicrmw_umin_i32_unaligned_seq_cst:
; -O0: subs w10, w9, w8
; -O0: csel w8, w9, w8, ls
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_umin_i32_unaligned_seq_cst:
; -O1: cmp w0, w20
; -O1: csel w8, w0, w20, ls
; -O1: bl __atomic_compare_exchange
%r = atomicrmw umin ptr %ptr, i32 %value seq_cst, align 1
ret i32 %r
}
define dso_local i64 @atomicrmw_umin_i64_unaligned_monotonic(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_umin_i64_unaligned_monotonic:
; -O0: subs x10, x9, x8
; -O0: csel x8, x9, x8, ls
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_umin_i64_unaligned_monotonic:
; -O1: cmp x0, x20
; -O1: csel x8, x0, x20, ls
; -O1: bl __atomic_compare_exchange
%r = atomicrmw umin ptr %ptr, i64 %value monotonic, align 1
ret i64 %r
}
define dso_local i64 @atomicrmw_umin_i64_unaligned_acquire(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_umin_i64_unaligned_acquire:
; -O0: subs x10, x9, x8
; -O0: csel x8, x9, x8, ls
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_umin_i64_unaligned_acquire:
; -O1: cmp x0, x20
; -O1: csel x8, x0, x20, ls
; -O1: bl __atomic_compare_exchange
%r = atomicrmw umin ptr %ptr, i64 %value acquire, align 1
ret i64 %r
}
define dso_local i64 @atomicrmw_umin_i64_unaligned_release(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_umin_i64_unaligned_release:
; -O0: subs x10, x9, x8
; -O0: csel x8, x9, x8, ls
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_umin_i64_unaligned_release:
; -O1: cmp x0, x20
; -O1: csel x8, x0, x20, ls
; -O1: bl __atomic_compare_exchange
%r = atomicrmw umin ptr %ptr, i64 %value release, align 1
ret i64 %r
}
define dso_local i64 @atomicrmw_umin_i64_unaligned_acq_rel(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_umin_i64_unaligned_acq_rel:
; -O0: subs x10, x9, x8
; -O0: csel x8, x9, x8, ls
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_umin_i64_unaligned_acq_rel:
; -O1: cmp x0, x20
; -O1: csel x8, x0, x20, ls
; -O1: bl __atomic_compare_exchange
%r = atomicrmw umin ptr %ptr, i64 %value acq_rel, align 1
ret i64 %r
}
define dso_local i64 @atomicrmw_umin_i64_unaligned_seq_cst(ptr %ptr, i64 %value) {
; -O0-LABEL: atomicrmw_umin_i64_unaligned_seq_cst:
; -O0: subs x10, x9, x8
; -O0: csel x8, x9, x8, ls
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_umin_i64_unaligned_seq_cst:
; -O1: cmp x0, x20
; -O1: csel x8, x0, x20, ls
; -O1: bl __atomic_compare_exchange
%r = atomicrmw umin ptr %ptr, i64 %value seq_cst, align 1
ret i64 %r
}
define dso_local i128 @atomicrmw_umin_i128_unaligned_monotonic(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_umin_i128_unaligned_monotonic:
; -O0: subs x8, x8, x10
; -O0: subs x8, x8, x9
; -O0: subs x12, x12, x10
; -O0: csel w11, w8, w11, eq
; -O0: ands w12, w11, #0x1
; -O0: csel x9, x8, x9, ne
; -O0: ands w11, w11, #0x1
; -O0: csel x8, x8, x10, ne
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_umin_i128_unaligned_monotonic:
; -O1: ldp x0, x1, [x0]
; -O1: cmp x21, x0
; -O1: csel x8, x1, x19, hs
; -O1: csel x9, x0, x21, hs
; -O1: bl __atomic_compare_exchange
%r = atomicrmw umin ptr %ptr, i128 %value monotonic, align 1
ret i128 %r
}
define dso_local i128 @atomicrmw_umin_i128_unaligned_acquire(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_umin_i128_unaligned_acquire:
; -O0: subs x8, x8, x10
; -O0: subs x8, x8, x9
; -O0: subs x12, x12, x10
; -O0: csel w11, w8, w11, eq
; -O0: ands w12, w11, #0x1
; -O0: csel x9, x8, x9, ne
; -O0: ands w11, w11, #0x1
; -O0: csel x8, x8, x10, ne
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_umin_i128_unaligned_acquire:
; -O1: ldp x0, x1, [x0]
; -O1: cmp x21, x0
; -O1: csel x8, x1, x19, hs
; -O1: csel x9, x0, x21, hs
; -O1: bl __atomic_compare_exchange
%r = atomicrmw umin ptr %ptr, i128 %value acquire, align 1
ret i128 %r
}
define dso_local i128 @atomicrmw_umin_i128_unaligned_release(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_umin_i128_unaligned_release:
; -O0: subs x8, x8, x10
; -O0: subs x8, x8, x9
; -O0: subs x12, x12, x10
; -O0: csel w11, w8, w11, eq
; -O0: ands w12, w11, #0x1
; -O0: csel x9, x8, x9, ne
; -O0: ands w11, w11, #0x1
; -O0: csel x8, x8, x10, ne
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_umin_i128_unaligned_release:
; -O1: ldp x0, x1, [x0]
; -O1: cmp x21, x0
; -O1: csel x8, x1, x19, hs
; -O1: csel x9, x0, x21, hs
; -O1: bl __atomic_compare_exchange
%r = atomicrmw umin ptr %ptr, i128 %value release, align 1
ret i128 %r
}
define dso_local i128 @atomicrmw_umin_i128_unaligned_acq_rel(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_umin_i128_unaligned_acq_rel:
; -O0: subs x8, x8, x10
; -O0: subs x8, x8, x9
; -O0: subs x12, x12, x10
; -O0: csel w11, w8, w11, eq
; -O0: ands w12, w11, #0x1
; -O0: csel x9, x8, x9, ne
; -O0: ands w11, w11, #0x1
; -O0: csel x8, x8, x10, ne
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_umin_i128_unaligned_acq_rel:
; -O1: ldp x0, x1, [x0]
; -O1: cmp x21, x0
; -O1: csel x8, x1, x19, hs
; -O1: csel x9, x0, x21, hs
; -O1: bl __atomic_compare_exchange
%r = atomicrmw umin ptr %ptr, i128 %value acq_rel, align 1
ret i128 %r
}
define dso_local i128 @atomicrmw_umin_i128_unaligned_seq_cst(ptr %ptr, i128 %value) {
; -O0-LABEL: atomicrmw_umin_i128_unaligned_seq_cst:
; -O0: subs x8, x8, x10
; -O0: subs x8, x8, x9
; -O0: subs x12, x12, x10
; -O0: csel w11, w8, w11, eq
; -O0: ands w12, w11, #0x1
; -O0: csel x9, x8, x9, ne
; -O0: ands w11, w11, #0x1
; -O0: csel x8, x8, x10, ne
; -O0: bl __atomic_compare_exchange
;
; -O1-LABEL: atomicrmw_umin_i128_unaligned_seq_cst:
; -O1: ldp x0, x1, [x0]
; -O1: cmp x21, x0
; -O1: csel x8, x1, x19, hs
; -O1: csel x9, x0, x21, hs
; -O1: bl __atomic_compare_exchange
%r = atomicrmw umin ptr %ptr, i128 %value seq_cst, align 1
ret i128 %r
}