; llvm/test/CodeGen/Hexagon/atomic-opaque-basic.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -march=hexagon < %s | FileCheck %s
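; Check that basic sub-word (i8) atomic operations through opaque pointers are
; expanded into loops over 32-bit locked loads and conditional stores
; (memw_locked), operating on the containing 4-byte-aligned word.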

%s.0 = type { i8 }
@g0 = internal global i8 0, align 1

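; f0: atomicrmw add on an i8 inside a stack-allocated struct. The byte address
; is split into an aligned word address and a bit offset; the mask and the add
; operand are shifted into position, and the updated byte is merged back and
; retried until the memw_locked store succeeds.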
define void @f0() #0 {
; CHECK-LABEL: f0:
; CHECK:         .cfi_startproc
; CHECK-NEXT:  // %bb.0:
; CHECK-NEXT:    {
; CHECK-NEXT:     r29 = add(r29,#-8)
; CHECK-NEXT:     r1 = #255
; CHECK-NEXT:    }
; CHECK-NEXT:    {
; CHECK-NEXT:     r0 = add(r29,#7)
; CHECK-NEXT:    }
; CHECK-NEXT:    {
; CHECK-NEXT:     r2 = and(r0,#3)
; CHECK-NEXT:     r0 = and(r0,#-4)
; CHECK-NEXT:    }
; CHECK-NEXT:    {
; CHECK-NEXT:     r2 = asl(r2,#3)
; CHECK-NEXT:    }
; CHECK-NEXT:    {
; CHECK-NEXT:     r1 = asl(r1,r2)
; CHECK-NEXT:     r2 = lsl(#2,r2)
; CHECK-NEXT:    }
; CHECK-NEXT:    {
; CHECK-NEXT:     r3 = sub(#-1,r1)
; CHECK-NEXT:    }
; CHECK-NEXT:    .p2align 4
; CHECK-NEXT:  .LBB0_1: // %atomicrmw.start
; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    {
; CHECK-NEXT:     r4 = memw_locked(r0)
; CHECK-NEXT:    }
; CHECK-NEXT:    {
; CHECK-NEXT:     r5 = and(r4,r3)
; CHECK-NEXT:     r4 = add(r4,r2)
; CHECK-NEXT:    }
; CHECK-NEXT:    {
; CHECK-NEXT:     r5 |= and(r4,r1)
; CHECK-NEXT:    }
; CHECK-NEXT:    {
; CHECK-NEXT:     memw_locked(r0,p0) = r5
; CHECK-NEXT:    }
; CHECK-NEXT:    {
; CHECK-NEXT:     if (!p0) jump:nt .LBB0_1
; CHECK-NEXT:    }
; CHECK-NEXT:  // %bb.2: // %atomicrmw.end
; CHECK-NEXT:    {
; CHECK-NEXT:     r29 = add(r29,#8)
; CHECK-NEXT:     jumpr r31
; CHECK-NEXT:    }
  %v0 = alloca %s.0
  atomicrmw add ptr %v0, i8 2 monotonic
  ret void
}

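; f1: volatile cmpxchg on an i8 global. The loaded word is shifted down and the
; extracted byte is compared against the expected value with bitsclr; on a
; match, the new value is inserted under the shifted mask and the store is
; retried via memw_locked until it succeeds.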
define void @f1() #0 {
; CHECK-LABEL: f1:
; CHECK:         .cfi_startproc
; CHECK-NEXT:  // %bb.0: // %entry
; CHECK-NEXT:    {
; CHECK-NEXT:     r3 = ##g0
; CHECK-NEXT:     r1:0 = combine(#1,##255)
; CHECK-NEXT:    }
; CHECK-NEXT:    {
; CHECK-NEXT:     r2 = and(r3,#3)
; CHECK-NEXT:     r3 = and(r3,#-4)
; CHECK-NEXT:    }
; CHECK-NEXT:    {
; CHECK-NEXT:     r2 = asl(r2,#3)
; CHECK-NEXT:    }
; CHECK-NEXT:    {
; CHECK-NEXT:     r4 = asl(r0,r2)
; CHECK-NEXT:    }
; CHECK-NEXT:    {
; CHECK-NEXT:     r4 = sub(#-1,r4)
; CHECK-NEXT:    }
; CHECK-NEXT:    .p2align 4
; CHECK-NEXT:  .LBB1_1: // %cmpxchg.start
; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    {
; CHECK-NEXT:     r5 = memw_locked(r3)
; CHECK-NEXT:    }
; CHECK-NEXT:    {
; CHECK-NEXT:     r6 = lsr(r5,r2)
; CHECK-NEXT:    }
; CHECK-NEXT:    {
; CHECK-NEXT:     p0 = !bitsclr(r6,r0)
; CHECK-NEXT:     if (p0.new) jumpr:nt r31
; CHECK-NEXT:    }
; CHECK-NEXT:  .LBB1_2: // %cmpxchg.trystore
; CHECK-NEXT:    // in Loop: Header=BB1_1 Depth=1
; CHECK-NEXT:    {
; CHECK-NEXT:     r5 = and(r5,r4)
; CHECK-NEXT:    }
; CHECK-NEXT:    {
; CHECK-NEXT:     r5 |= asl(r1,r2)
; CHECK-NEXT:    }
; CHECK-NEXT:    {
; CHECK-NEXT:     memw_locked(r3,p0) = r5
; CHECK-NEXT:    }
; CHECK-NEXT:    {
; CHECK-NEXT:     if (!p0) jump:nt .LBB1_1
; CHECK-NEXT:    }
; CHECK-NEXT:  // %bb.3: // %cmpxchg.end
; CHECK-NEXT:    {
; CHECK-NEXT:     jumpr r31
; CHECK-NEXT:    }
entry:
  %v0 = cmpxchg volatile ptr @g0, i8 0, i8 1 seq_cst seq_cst
  ret void
}


attributes #0 = { "target-cpu"="hexagonv66" }