; llvm/test/CodeGen/AMDGPU/GlobalISel/addo.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx700 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX7 %s
; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx801 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX8 %s
; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX9 %s

define i32 @v_uaddo_i32(i32 %a, i32 %b) {
; GFX7-LABEL: v_uaddo_i32:
; GFX7:       ; %bb.0:
; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
; GFX7-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
; GFX7-NEXT:    s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_uaddo_i32:
; GFX8:       ; %bb.0:
; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v0, v1
; GFX8-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v0, v1
; GFX8-NEXT:    s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_uaddo_i32:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, v0, v1
; GFX9-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
; GFX9-NEXT:    v_add_u32_e32 v0, v0, v1
; GFX9-NEXT:    s_setpc_b64 s[30:31]
; Adds the zero-extended overflow bit back into the sum so both result
; fields of llvm.uadd.with.overflow.i32 are exercised on the VALU path.
  %uaddo = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
  %add = extractvalue {i32, i1} %uaddo, 0
  %of = extractvalue {i32, i1} %uaddo, 1
  %of.zext = zext i1 %of to i32
  %ret = add i32 %add, %of.zext
  ret i32 %ret
}

define i64 @v_uaddo_i64(i64 %a, i64 %b) {
; GFX7-LABEL: v_uaddo_i64:
; GFX7:       ; %bb.0:
; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
; GFX7-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
; GFX7-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
; GFX7-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
; GFX7-NEXT:    s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_uaddo_i64:
; GFX8:       ; %bb.0:
; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v0, v2
; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
; GFX8-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v0, v2
; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
; GFX8-NEXT:    s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_uaddo_i64:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, v0, v2
; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, v1, v3, vcc
; GFX9-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, v0, v2
; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
; GFX9-NEXT:    s_setpc_b64 s[30:31]
; 64-bit uadd.with.overflow on the VALU: the sum is a two-instruction
; add/addc carry chain and the final carry bit is folded into the result.
  %uaddo = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
  %add = extractvalue {i64, i1} %uaddo, 0
  %of = extractvalue {i64, i1} %uaddo, 1
  %of.zext = zext i1 %of to i64
  %ret = add i64 %add, %of.zext
  ret i64 %ret
}

define i8 @v_uaddo_i8(i8 %a, i8 %b) {
; GFX7-LABEL: v_uaddo_i8:
; GFX7:       ; %bb.0:
; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT:    v_and_b32_e32 v0, 0xff, v0
; GFX7-NEXT:    v_and_b32_e32 v1, 0xff, v1
; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
; GFX7-NEXT:    v_and_b32_e32 v1, 0xff, v0
; GFX7-NEXT:    v_cmp_ne_u32_e32 vcc, v0, v1
; GFX7-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
; GFX7-NEXT:    s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_uaddo_i8:
; GFX8:       ; %bb.0:
; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT:    v_and_b32_e32 v0, 0xff, v0
; GFX8-NEXT:    v_and_b32_e32 v1, 0xff, v1
; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v0, v1
; GFX8-NEXT:    v_and_b32_e32 v1, 0xff, v0
; GFX8-NEXT:    v_cmp_ne_u32_e32 vcc, v0, v1
; GFX8-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
; GFX8-NEXT:    v_add_u16_e32 v0, v0, v1
; GFX8-NEXT:    s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_uaddo_i8:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT:    v_add_u32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
; GFX9-NEXT:    v_cmp_ne_u32_sdwa s[4:5], v0, v0 src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s[4:5]
; GFX9-NEXT:    v_add_u16_e32 v0, v0, v1
; GFX9-NEXT:    s_setpc_b64 s[30:31]
; Illegal narrow type: i8 overflow is legalized by masking to 8 bits and
; comparing the widened sum against its truncated form (SDWA on gfx9).
  %uaddo = call {i8, i1} @llvm.uadd.with.overflow.i8(i8 %a, i8 %b)
  %add = extractvalue {i8, i1} %uaddo, 0
  %of = extractvalue {i8, i1} %uaddo, 1
  %of.zext = zext i1 %of to i8
  %ret = add i8 %add, %of.zext
  ret i8 %ret
}

define i7 @v_uaddo_i7(i7 %a, i7 %b) {
; GFX7-LABEL: v_uaddo_i7:
; GFX7:       ; %bb.0:
; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT:    v_and_b32_e32 v0, 0x7f, v0
; GFX7-NEXT:    v_and_b32_e32 v1, 0x7f, v1
; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
; GFX7-NEXT:    v_and_b32_e32 v1, 0x7f, v0
; GFX7-NEXT:    v_cmp_ne_u32_e32 vcc, v0, v1
; GFX7-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
; GFX7-NEXT:    s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_uaddo_i7:
; GFX8:       ; %bb.0:
; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT:    v_and_b32_e32 v0, 0x7f, v0
; GFX8-NEXT:    v_and_b32_e32 v1, 0x7f, v1
; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v0, v1
; GFX8-NEXT:    v_and_b32_e32 v1, 0x7f, v0
; GFX8-NEXT:    v_cmp_ne_u32_e32 vcc, v0, v1
; GFX8-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
; GFX8-NEXT:    v_add_u16_e32 v0, v0, v1
; GFX8-NEXT:    s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_uaddo_i7:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT:    v_and_b32_e32 v0, 0x7f, v0
; GFX9-NEXT:    v_and_b32_e32 v1, 0x7f, v1
; GFX9-NEXT:    v_add_u32_e32 v0, v0, v1
; GFX9-NEXT:    v_and_b32_e32 v1, 0x7f, v0
; GFX9-NEXT:    v_cmp_ne_u32_e32 vcc, v0, v1
; GFX9-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
; GFX9-NEXT:    v_add_u16_e32 v0, v0, v1
; GFX9-NEXT:    s_setpc_b64 s[30:31]
; Non-power-of-two width: i7 overflow is legalized via 0x7f masking and a
; compare of the widened sum against its truncated form.
  %uaddo = call {i7, i1} @llvm.uadd.with.overflow.i7(i7 %a, i7 %b)
  %add = extractvalue {i7, i1} %uaddo, 0
  %of = extractvalue {i7, i1} %uaddo, 1
  %of.zext = zext i1 %of to i7
  %ret = add i7 %add, %of.zext
  ret i7 %ret
}

define <2 x i32> @v_uaddo_v2i32(<2 x i32> %a, <2 x i32> %b) {
; GFX7-LABEL: v_uaddo_v2i32:
; GFX7:       ; %bb.0:
; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
; GFX7-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
; GFX7-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
; GFX7-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc
; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
; GFX7-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
; GFX7-NEXT:    s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_uaddo_v2i32:
; GFX8:       ; %bb.0:
; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v0, v2
; GFX8-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v3
; GFX8-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc
; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v0, v2
; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v3
; GFX8-NEXT:    s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_uaddo_v2i32:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, v0, v2
; GFX9-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
; GFX9-NEXT:    v_add_co_u32_e32 v1, vcc, v1, v3
; GFX9-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc
; GFX9-NEXT:    v_add_u32_e32 v0, v0, v2
; GFX9-NEXT:    v_add_u32_e32 v1, v1, v3
; GFX9-NEXT:    s_setpc_b64 s[30:31]
; Vector form: uadd.with.overflow.v2i32 is scalarized into two independent
; per-lane add/carry sequences.
  %uaddo = call {<2 x i32>, <2 x i1>} @llvm.uadd.with.overflow.v2i32(<2 x i32> %a, <2 x i32> %b)
  %add = extractvalue {<2 x i32>, <2 x i1>} %uaddo, 0
  %of = extractvalue {<2 x i32>, <2 x i1>} %uaddo, 1
  %of.zext = zext <2 x i1> %of to <2 x i32>
  %ret = add <2 x i32> %add, %of.zext
  ret <2 x i32> %ret
}

define i32 @v_saddo_i32(i32 %a, i32 %b) {
; GFX7-LABEL: v_saddo_i32:
; GFX7:       ; %bb.0:
; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT:    v_add_i32_e32 v2, vcc, v0, v1
; GFX7-NEXT:    v_cmp_lt_i32_e32 vcc, v2, v0
; GFX7-NEXT:    v_cmp_gt_i32_e64 s[4:5], 0, v1
; GFX7-NEXT:    s_xor_b64 s[4:5], s[4:5], vcc
; GFX7-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v2, v0
; GFX7-NEXT:    s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_saddo_i32:
; GFX8:       ; %bb.0:
; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v0, v1
; GFX8-NEXT:    v_cmp_lt_i32_e32 vcc, v2, v0
; GFX8-NEXT:    v_cmp_gt_i32_e64 s[4:5], 0, v1
; GFX8-NEXT:    s_xor_b64 s[4:5], s[4:5], vcc
; GFX8-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v2, v0
; GFX8-NEXT:    s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_saddo_i32:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT:    v_add_u32_e32 v2, v0, v1
; GFX9-NEXT:    v_cmp_lt_i32_e32 vcc, v2, v0
; GFX9-NEXT:    v_cmp_gt_i32_e64 s[4:5], 0, v1
; GFX9-NEXT:    s_xor_b64 s[4:5], s[4:5], vcc
; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
; GFX9-NEXT:    v_add_u32_e32 v0, v2, v0
; GFX9-NEXT:    s_setpc_b64 s[30:31]
; Signed overflow is lowered as (sum < a) xor (b < 0), per the checks above;
; the resulting bit is added back into the sum.
  %saddo = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
  %add = extractvalue {i32, i1} %saddo, 0
  %of = extractvalue {i32, i1} %saddo, 1
  %of.zext = zext i1 %of to i32
  %ret = add i32 %add, %of.zext
  ret i32 %ret
}

define i64 @v_saddo_i64(i64 %a, i64 %b) {
; GFX7-LABEL: v_saddo_i64:
; GFX7:       ; %bb.0:
; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT:    v_add_i32_e32 v4, vcc, v0, v2
; GFX7-NEXT:    v_addc_u32_e32 v5, vcc, v1, v3, vcc
; GFX7-NEXT:    v_cmp_lt_i64_e32 vcc, v[4:5], v[0:1]
; GFX7-NEXT:    v_cmp_gt_i64_e64 s[4:5], 0, v[2:3]
; GFX7-NEXT:    s_xor_b64 s[4:5], s[4:5], vcc
; GFX7-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v4, v0
; GFX7-NEXT:    v_addc_u32_e32 v1, vcc, 0, v5, vcc
; GFX7-NEXT:    s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_saddo_i64:
; GFX8:       ; %bb.0:
; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT:    v_add_u32_e32 v4, vcc, v0, v2
; GFX8-NEXT:    v_addc_u32_e32 v5, vcc, v1, v3, vcc
; GFX8-NEXT:    v_cmp_lt_i64_e32 vcc, v[4:5], v[0:1]
; GFX8-NEXT:    v_cmp_gt_i64_e64 s[4:5], 0, v[2:3]
; GFX8-NEXT:    s_xor_b64 s[4:5], s[4:5], vcc
; GFX8-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v4, v0
; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, 0, v5, vcc
; GFX8-NEXT:    s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_saddo_i64:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT:    v_add_co_u32_e32 v4, vcc, v0, v2
; GFX9-NEXT:    v_addc_co_u32_e32 v5, vcc, v1, v3, vcc
; GFX9-NEXT:    v_cmp_lt_i64_e32 vcc, v[4:5], v[0:1]
; GFX9-NEXT:    v_cmp_gt_i64_e64 s[4:5], 0, v[2:3]
; GFX9-NEXT:    s_xor_b64 s[4:5], s[4:5], vcc
; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, v4, v0
; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v5, vcc
; GFX9-NEXT:    s_setpc_b64 s[30:31]
; 64-bit signed overflow: carry-chained add, then the (sum < a) xor (b < 0)
; test done with 64-bit compares on register pairs.
  %saddo = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 %b)
  %add = extractvalue {i64, i1} %saddo, 0
  %of = extractvalue {i64, i1} %saddo, 1
  %of.zext = zext i1 %of to i64
  %ret = add i64 %add, %of.zext
  ret i64 %ret
}

define <2 x i32> @v_saddo_v2i32(<2 x i32> %a, <2 x i32> %b) {
; GFX7-LABEL: v_saddo_v2i32:
; GFX7:       ; %bb.0:
; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT:    v_add_i32_e32 v4, vcc, v0, v2
; GFX7-NEXT:    v_add_i32_e32 v5, vcc, v1, v3
; GFX7-NEXT:    v_cmp_lt_i32_e32 vcc, v4, v0
; GFX7-NEXT:    v_cmp_lt_i32_e64 s[4:5], v5, v1
; GFX7-NEXT:    v_cmp_gt_i32_e64 s[6:7], 0, v2
; GFX7-NEXT:    v_cmp_gt_i32_e64 s[8:9], 0, v3
; GFX7-NEXT:    s_xor_b64 s[6:7], s[6:7], vcc
; GFX7-NEXT:    s_xor_b64 s[4:5], s[8:9], s[4:5]
; GFX7-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[6:7]
; GFX7-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s[4:5]
; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v4, v0
; GFX7-NEXT:    v_add_i32_e32 v1, vcc, v5, v1
; GFX7-NEXT:    s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_saddo_v2i32:
; GFX8:       ; %bb.0:
; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT:    v_add_u32_e32 v4, vcc, v0, v2
; GFX8-NEXT:    v_add_u32_e32 v5, vcc, v1, v3
; GFX8-NEXT:    v_cmp_lt_i32_e32 vcc, v4, v0
; GFX8-NEXT:    v_cmp_lt_i32_e64 s[4:5], v5, v1
; GFX8-NEXT:    v_cmp_gt_i32_e64 s[6:7], 0, v2
; GFX8-NEXT:    v_cmp_gt_i32_e64 s[8:9], 0, v3
; GFX8-NEXT:    s_xor_b64 s[6:7], s[6:7], vcc
; GFX8-NEXT:    s_xor_b64 s[4:5], s[8:9], s[4:5]
; GFX8-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[6:7]
; GFX8-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s[4:5]
; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v4, v0
; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v5, v1
; GFX8-NEXT:    s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_saddo_v2i32:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT:    v_add_u32_e32 v4, v0, v2
; GFX9-NEXT:    v_add_u32_e32 v5, v1, v3
; GFX9-NEXT:    v_cmp_lt_i32_e32 vcc, v4, v0
; GFX9-NEXT:    v_cmp_lt_i32_e64 s[4:5], v5, v1
; GFX9-NEXT:    v_cmp_gt_i32_e64 s[6:7], 0, v2
; GFX9-NEXT:    v_cmp_gt_i32_e64 s[8:9], 0, v3
; GFX9-NEXT:    s_xor_b64 s[6:7], s[6:7], vcc
; GFX9-NEXT:    s_xor_b64 s[4:5], s[8:9], s[4:5]
; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[6:7]
; GFX9-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s[4:5]
; GFX9-NEXT:    v_add_u32_e32 v0, v4, v0
; GFX9-NEXT:    v_add_u32_e32 v1, v5, v1
; GFX9-NEXT:    s_setpc_b64 s[30:31]
; Vector signed overflow: scalarized into two lanes, each using the
; (sum < a) xor (b < 0) pattern with separate condition registers.
  %saddo = call {<2 x i32>, <2 x i1>} @llvm.sadd.with.overflow.v2i32(<2 x i32> %a, <2 x i32> %b)
  %add = extractvalue {<2 x i32>, <2 x i1>} %saddo, 0
  %of = extractvalue {<2 x i32>, <2 x i1>} %saddo, 1
  %of.zext = zext <2 x i1> %of to <2 x i32>
  %ret = add <2 x i32> %add, %of.zext
  ret <2 x i32> %ret
}

define i8 @v_saddo_i8(i8 %a, i8 %b) {
; GFX7-LABEL: v_saddo_i8:
; GFX7:       ; %bb.0:
; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT:    v_add_i32_e32 v2, vcc, v0, v1
; GFX7-NEXT:    v_bfe_i32 v3, v2, 0, 8
; GFX7-NEXT:    v_bfe_i32 v0, v0, 0, 8
; GFX7-NEXT:    v_cmp_lt_i32_e32 vcc, v3, v0
; GFX7-NEXT:    v_bfe_i32 v0, v1, 0, 8
; GFX7-NEXT:    v_cmp_gt_i32_e64 s[4:5], 0, v0
; GFX7-NEXT:    s_xor_b64 s[4:5], s[4:5], vcc
; GFX7-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v2, v0
; GFX7-NEXT:    s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_saddo_i8:
; GFX8:       ; %bb.0:
; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT:    v_add_u16_e32 v2, v0, v1
; GFX8-NEXT:    v_bfe_i32 v3, v2, 0, 8
; GFX8-NEXT:    v_bfe_i32 v0, v0, 0, 8
; GFX8-NEXT:    v_cmp_lt_i32_e32 vcc, v3, v0
; GFX8-NEXT:    v_bfe_i32 v0, v1, 0, 8
; GFX8-NEXT:    v_cmp_gt_i32_e64 s[4:5], 0, v0
; GFX8-NEXT:    s_xor_b64 s[4:5], s[4:5], vcc
; GFX8-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
; GFX8-NEXT:    v_add_u16_e32 v0, v2, v0
; GFX8-NEXT:    s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_saddo_i8:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT:    v_add_u16_e32 v2, v0, v1
; GFX9-NEXT:    v_cmp_lt_i32_sdwa s[4:5], sext(v2), sext(v0) src0_sel:BYTE_0 src1_sel:BYTE_0
; GFX9-NEXT:    v_mov_b32_e32 v0, 0
; GFX9-NEXT:    v_cmp_lt_i32_sdwa s[6:7], sext(v1), v0 src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT:    s_xor_b64 s[4:5], s[6:7], s[4:5]
; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
; GFX9-NEXT:    v_add_u16_e32 v0, v2, v0
; GFX9-NEXT:    s_setpc_b64 s[30:31]
; Narrow signed overflow: i8 operands are sign-extended (bfe / SDWA sext)
; before the (sum < a) xor (b < 0) comparison.
  %saddo = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a, i8 %b)
  %add = extractvalue {i8, i1} %saddo, 0
  %of = extractvalue {i8, i1} %saddo, 1
  %of.zext = zext i1 %of to i8
  %ret = add i8 %add, %of.zext
  ret i8 %ret
}

define i7 @v_saddo_i7(i7 %a, i7 %b) {
; GFX7-LABEL: v_saddo_i7:
; GFX7:       ; %bb.0:
; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT:    v_add_i32_e32 v2, vcc, v0, v1
; GFX7-NEXT:    v_bfe_i32 v3, v2, 0, 7
; GFX7-NEXT:    v_bfe_i32 v0, v0, 0, 7
; GFX7-NEXT:    v_cmp_lt_i32_e32 vcc, v3, v0
; GFX7-NEXT:    v_bfe_i32 v0, v1, 0, 7
; GFX7-NEXT:    v_cmp_gt_i32_e64 s[4:5], 0, v0
; GFX7-NEXT:    s_xor_b64 s[4:5], s[4:5], vcc
; GFX7-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v2, v0
; GFX7-NEXT:    s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_saddo_i7:
; GFX8:       ; %bb.0:
; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT:    v_add_u16_e32 v2, v0, v1
; GFX8-NEXT:    v_bfe_i32 v3, v2, 0, 7
; GFX8-NEXT:    v_bfe_i32 v0, v0, 0, 7
; GFX8-NEXT:    v_cmp_lt_i32_e32 vcc, v3, v0
; GFX8-NEXT:    v_bfe_i32 v0, v1, 0, 7
; GFX8-NEXT:    v_cmp_gt_i32_e64 s[4:5], 0, v0
; GFX8-NEXT:    s_xor_b64 s[4:5], s[4:5], vcc
; GFX8-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
; GFX8-NEXT:    v_add_u16_e32 v0, v2, v0
; GFX8-NEXT:    s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_saddo_i7:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT:    v_add_u16_e32 v2, v0, v1
; GFX9-NEXT:    v_bfe_i32 v3, v2, 0, 7
; GFX9-NEXT:    v_bfe_i32 v0, v0, 0, 7
; GFX9-NEXT:    v_cmp_lt_i32_e32 vcc, v3, v0
; GFX9-NEXT:    v_bfe_i32 v0, v1, 0, 7
; GFX9-NEXT:    v_cmp_gt_i32_e64 s[4:5], 0, v0
; GFX9-NEXT:    s_xor_b64 s[4:5], s[4:5], vcc
; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
; GFX9-NEXT:    v_add_u16_e32 v0, v2, v0
; GFX9-NEXT:    s_setpc_b64 s[30:31]
; Non-power-of-two signed case: v_bfe_i32 sign-extends the 7-bit values
; before the (sum < a) xor (b < 0) comparison on all subtargets.
  %saddo = call {i7, i1} @llvm.sadd.with.overflow.i7(i7 %a, i7 %b)
  %add = extractvalue {i7, i1} %saddo, 0
  %of = extractvalue {i7, i1} %saddo, 1
  %of.zext = zext i1 %of to i7
  %ret = add i7 %add, %of.zext
  ret i7 %ret
}

define amdgpu_ps i32 @s_uaddo_i32(i32 inreg %a, i32 inreg %b) {
; GFX7-LABEL: s_uaddo_i32:
; GFX7:       ; %bb.0:
; GFX7-NEXT:    s_add_u32 s0, s0, s1
; GFX7-NEXT:    s_cselect_b32 s1, 1, 0
; GFX7-NEXT:    s_add_i32 s0, s0, s1
; GFX7-NEXT:    ; return to shader part epilog
;
; GFX8-LABEL: s_uaddo_i32:
; GFX8:       ; %bb.0:
; GFX8-NEXT:    s_add_u32 s0, s0, s1
; GFX8-NEXT:    s_cselect_b32 s1, 1, 0
; GFX8-NEXT:    s_add_i32 s0, s0, s1
; GFX8-NEXT:    ; return to shader part epilog
;
; GFX9-LABEL: s_uaddo_i32:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_add_u32 s0, s0, s1
; GFX9-NEXT:    s_cselect_b32 s1, 1, 0
; GFX9-NEXT:    s_add_i32 s0, s0, s1
; GFX9-NEXT:    ; return to shader part epilog
; SGPR (inreg) operands: uadd.with.overflow maps to s_add_u32 with the
; carry recovered through s_cselect_b32 on SCC.
  %uaddo = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
  %add = extractvalue {i32, i1} %uaddo, 0
  %of = extractvalue {i32, i1} %uaddo, 1
  %of.zext = zext i1 %of to i32
  %ret = add i32 %add, %of.zext
  ret i32 %ret
}

define amdgpu_ps i64 @s_uaddo_i64(i64 inreg %a, i64 inreg %b) {
; GFX7-LABEL: s_uaddo_i64:
; GFX7:       ; %bb.0:
; GFX7-NEXT:    s_add_u32 s0, s0, s2
; GFX7-NEXT:    s_addc_u32 s1, s1, s3
; GFX7-NEXT:    s_cselect_b32 s2, 1, 0
; GFX7-NEXT:    s_add_u32 s0, s0, s2
; GFX7-NEXT:    s_addc_u32 s1, s1, 0
; GFX7-NEXT:    ; return to shader part epilog
;
; GFX8-LABEL: s_uaddo_i64:
; GFX8:       ; %bb.0:
; GFX8-NEXT:    s_add_u32 s0, s0, s2
; GFX8-NEXT:    s_addc_u32 s1, s1, s3
; GFX8-NEXT:    s_cselect_b32 s2, 1, 0
; GFX8-NEXT:    s_add_u32 s0, s0, s2
; GFX8-NEXT:    s_addc_u32 s1, s1, 0
; GFX8-NEXT:    ; return to shader part epilog
;
; GFX9-LABEL: s_uaddo_i64:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_add_u32 s0, s0, s2
; GFX9-NEXT:    s_addc_u32 s1, s1, s3
; GFX9-NEXT:    s_cselect_b32 s2, 1, 0
; GFX9-NEXT:    s_add_u32 s0, s0, s2
; GFX9-NEXT:    s_addc_u32 s1, s1, 0
; GFX9-NEXT:    ; return to shader part epilog
; Scalar 64-bit: s_add_u32/s_addc_u32 carry chain, final carry read back
; via s_cselect_b32 and folded into the sum.
  %uaddo = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
  %add = extractvalue {i64, i1} %uaddo, 0
  %of = extractvalue {i64, i1} %uaddo, 1
  %of.zext = zext i1 %of to i64
  %ret = add i64 %add, %of.zext
  ret i64 %ret
}

define amdgpu_ps <2 x i32> @s_uaddo_v2i32(<2 x i32> inreg %a, <2 x i32> inreg %b) {
; GFX7-LABEL: s_uaddo_v2i32:
; GFX7:       ; %bb.0:
; GFX7-NEXT:    s_add_u32 s0, s0, s2
; GFX7-NEXT:    s_cselect_b32 s2, 1, 0
; GFX7-NEXT:    s_add_u32 s1, s1, s3
; GFX7-NEXT:    s_cselect_b32 s3, 1, 0
; GFX7-NEXT:    s_add_i32 s0, s0, s2
; GFX7-NEXT:    s_add_i32 s1, s1, s3
; GFX7-NEXT:    ; return to shader part epilog
;
; GFX8-LABEL: s_uaddo_v2i32:
; GFX8:       ; %bb.0:
; GFX8-NEXT:    s_add_u32 s0, s0, s2
; GFX8-NEXT:    s_cselect_b32 s2, 1, 0
; GFX8-NEXT:    s_add_u32 s1, s1, s3
; GFX8-NEXT:    s_cselect_b32 s3, 1, 0
; GFX8-NEXT:    s_add_i32 s0, s0, s2
; GFX8-NEXT:    s_add_i32 s1, s1, s3
; GFX8-NEXT:    ; return to shader part epilog
;
; GFX9-LABEL: s_uaddo_v2i32:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_add_u32 s0, s0, s2
; GFX9-NEXT:    s_cselect_b32 s2, 1, 0
; GFX9-NEXT:    s_add_u32 s1, s1, s3
; GFX9-NEXT:    s_cselect_b32 s3, 1, 0
; GFX9-NEXT:    s_add_i32 s0, s0, s2
; GFX9-NEXT:    s_add_i32 s1, s1, s3
; GFX9-NEXT:    ; return to shader part epilog
; Scalar vector case: each lane becomes an independent s_add_u32 +
; s_cselect_b32 pair; identical codegen across all three subtargets.
  %uaddo = call {<2 x i32>, <2 x i1>} @llvm.uadd.with.overflow.v2i32(<2 x i32> %a, <2 x i32> %b)
  %add = extractvalue {<2 x i32>, <2 x i1>} %uaddo, 0
  %of = extractvalue {<2 x i32>, <2 x i1>} %uaddo, 1
  %of.zext = zext <2 x i1> %of to <2 x i32>
  %ret = add <2 x i32> %add, %of.zext
  ret <2 x i32> %ret
}

define i8 @s_uaddo_i8(i8 %a, i8 %b) {
; GFX7-LABEL: s_uaddo_i8:
; GFX7:       ; %bb.0:
; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT:    v_and_b32_e32 v0, 0xff, v0
; GFX7-NEXT:    v_and_b32_e32 v1, 0xff, v1
; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
; GFX7-NEXT:    v_and_b32_e32 v1, 0xff, v0
; GFX7-NEXT:    v_cmp_ne_u32_e32 vcc, v0, v1
; GFX7-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
; GFX7-NEXT:    s_setpc_b64 s[30:31]
;
; GFX8-LABEL: s_uaddo_i8:
; GFX8:       ; %bb.0:
; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT:    v_and_b32_e32 v0, 0xff, v0
; GFX8-NEXT:    v_and_b32_e32 v1, 0xff, v1
; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v0, v1
; GFX8-NEXT:    v_and_b32_e32 v1, 0xff, v0
; GFX8-NEXT:    v_cmp_ne_u32_e32 vcc, v0, v1
; GFX8-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
; GFX8-NEXT:    v_add_u16_e32 v0, v0, v1
; GFX8-NEXT:    s_setpc_b64 s[30:31]
;
; GFX9-LABEL: s_uaddo_i8:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT:    v_add_u32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
; GFX9-NEXT:    v_cmp_ne_u32_sdwa s[4:5], v0, v0 src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s[4:5]
; GFX9-NEXT:    v_add_u16_e32 v0, v0, v1
; GFX9-NEXT:    s_setpc_b64 s[30:31]
; NOTE(review): despite the s_ name, this takes non-inreg (VGPR) arguments
; and lowers identically to v_uaddo_i8 — confirm the name is intentional.
  %uaddo = call {i8, i1} @llvm.uadd.with.overflow.i8(i8 %a, i8 %b)
  %add = extractvalue {i8, i1} %uaddo, 0
  %of = extractvalue {i8, i1} %uaddo, 1
  %of.zext = zext i1 %of to i8
  %ret = add i8 %add, %of.zext
  ret i8 %ret
}

define i7 @s_uaddo_i7(i7 %a, i7 %b) {
; GFX7-LABEL: s_uaddo_i7:
; GFX7:       ; %bb.0:
; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT:    v_and_b32_e32 v0, 0x7f, v0
; GFX7-NEXT:    v_and_b32_e32 v1, 0x7f, v1
; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
; GFX7-NEXT:    v_and_b32_e32 v1, 0x7f, v0
; GFX7-NEXT:    v_cmp_ne_u32_e32 vcc, v0, v1
; GFX7-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
; GFX7-NEXT:    s_setpc_b64 s[30:31]
;
; GFX8-LABEL: s_uaddo_i7:
; GFX8:       ; %bb.0:
; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT:    v_and_b32_e32 v0, 0x7f, v0
; GFX8-NEXT:    v_and_b32_e32 v1, 0x7f, v1
; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v0, v1
; GFX8-NEXT:    v_and_b32_e32 v1, 0x7f, v0
; GFX8-NEXT:    v_cmp_ne_u32_e32 vcc, v0, v1
; GFX8-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
; GFX8-NEXT:    v_add_u16_e32 v0, v0, v1
; GFX8-NEXT:    s_setpc_b64 s[30:31]
;
; GFX9-LABEL: s_uaddo_i7:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT:    v_and_b32_e32 v0, 0x7f, v0
; GFX9-NEXT:    v_and_b32_e32 v1, 0x7f, v1
; GFX9-NEXT:    v_add_u32_e32 v0, v0, v1
; GFX9-NEXT:    v_and_b32_e32 v1, 0x7f, v0
; GFX9-NEXT:    v_cmp_ne_u32_e32 vcc, v0, v1
; GFX9-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
; GFX9-NEXT:    v_add_u16_e32 v0, v0, v1
; GFX9-NEXT:    s_setpc_b64 s[30:31]
; NOTE(review): s_-named but takes non-inreg (VGPR) arguments, so codegen
; matches v_uaddo_i7 — confirm the name is intentional.
  %uaddo = call {i7, i1} @llvm.uadd.with.overflow.i7(i7 %a, i7 %b)
  %add = extractvalue {i7, i1} %uaddo, 0
  %of = extractvalue {i7, i1} %uaddo, 1
  %of.zext = zext i1 %of to i7
  %ret = add i7 %add, %of.zext
  ret i7 %ret
}

define amdgpu_ps i32 @s_saddo_i32(i32 inreg %a, i32 inreg %b) {
; GFX7-LABEL: s_saddo_i32:
; GFX7:       ; %bb.0:
; GFX7-NEXT:    s_add_i32 s2, s0, s1
; GFX7-NEXT:    s_cmp_lt_i32 s2, s0
; GFX7-NEXT:    s_cselect_b32 s0, 1, 0
; GFX7-NEXT:    s_cmp_lt_i32 s1, 0
; GFX7-NEXT:    s_cselect_b32 s1, 1, 0
; GFX7-NEXT:    s_xor_b32 s0, s1, s0
; GFX7-NEXT:    s_and_b32 s0, s0, 1
; GFX7-NEXT:    s_add_i32 s0, s2, s0
; GFX7-NEXT:    ; return to shader part epilog
;
; GFX8-LABEL: s_saddo_i32:
; GFX8:       ; %bb.0:
; GFX8-NEXT:    s_add_i32 s2, s0, s1
; GFX8-NEXT:    s_cmp_lt_i32 s2, s0
; GFX8-NEXT:    s_cselect_b32 s0, 1, 0
; GFX8-NEXT:    s_cmp_lt_i32 s1, 0
; GFX8-NEXT:    s_cselect_b32 s1, 1, 0
; GFX8-NEXT:    s_xor_b32 s0, s1, s0
; GFX8-NEXT:    s_and_b32 s0, s0, 1
; GFX8-NEXT:    s_add_i32 s0, s2, s0
; GFX8-NEXT:    ; return to shader part epilog
;
; GFX9-LABEL: s_saddo_i32:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_add_i32 s2, s0, s1
; GFX9-NEXT:    s_cmp_lt_i32 s2, s0
; GFX9-NEXT:    s_cselect_b32 s0, 1, 0
; GFX9-NEXT:    s_cmp_lt_i32 s1, 0
; GFX9-NEXT:    s_cselect_b32 s1, 1, 0
; GFX9-NEXT:    s_xor_b32 s0, s1, s0
; GFX9-NEXT:    s_and_b32 s0, s0, 1
; GFX9-NEXT:    s_add_i32 s0, s2, s0
; GFX9-NEXT:    ; return to shader part epilog
; Scalar signed overflow: the same (sum < a) xor (b < 0) test done entirely
; with SALU compares and selects on SCC.
  %saddo = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
  %add = extractvalue {i32, i1} %saddo, 0
  %of = extractvalue {i32, i1} %saddo, 1
  %of.zext = zext i1 %of to i32
  %ret = add i32 %add, %of.zext
  ret i32 %ret
}

define amdgpu_ps i64 @s_saddo_i64(i64 inreg %a, i64 inreg %b) {
; GFX7-LABEL: s_saddo_i64:
; GFX7:       ; %bb.0:
; GFX7-NEXT:    s_add_u32 s4, s0, s2
; GFX7-NEXT:    v_mov_b32_e32 v0, s0
; GFX7-NEXT:    s_addc_u32 s5, s1, s3
; GFX7-NEXT:    v_mov_b32_e32 v1, s1
; GFX7-NEXT:    v_cmp_lt_i64_e32 vcc, s[4:5], v[0:1]
; GFX7-NEXT:    v_cmp_lt_i64_e64 s[0:1], s[2:3], 0
; GFX7-NEXT:    v_mov_b32_e32 v1, s5
; GFX7-NEXT:    s_xor_b64 s[0:1], s[0:1], vcc
; GFX7-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; GFX7-NEXT:    v_add_i32_e32 v0, vcc, s4, v0
; GFX7-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
; GFX7-NEXT:    v_readfirstlane_b32 s0, v0
; GFX7-NEXT:    v_readfirstlane_b32 s1, v1
; GFX7-NEXT:    ; return to shader part epilog
;
; GFX8-LABEL: s_saddo_i64:
; GFX8:       ; %bb.0:
; GFX8-NEXT:    s_add_u32 s4, s0, s2
; GFX8-NEXT:    v_mov_b32_e32 v0, s0
; GFX8-NEXT:    s_addc_u32 s5, s1, s3
; GFX8-NEXT:    v_mov_b32_e32 v1, s1
; GFX8-NEXT:    v_cmp_lt_i64_e32 vcc, s[4:5], v[0:1]
; GFX8-NEXT:    v_cmp_lt_i64_e64 s[0:1], s[2:3], 0
; GFX8-NEXT:    v_mov_b32_e32 v1, s5
; GFX8-NEXT:    s_xor_b64 s[0:1], s[0:1], vcc
; GFX8-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; GFX8-NEXT:    v_add_u32_e32 v0, vcc, s4, v0
; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
; GFX8-NEXT:    v_readfirstlane_b32 s0, v0
; GFX8-NEXT:    v_readfirstlane_b32 s1, v1
; GFX8-NEXT:    ; return to shader part epilog
;
; GFX9-LABEL: s_saddo_i64:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_add_u32 s4, s0, s2
; GFX9-NEXT:    v_mov_b32_e32 v0, s0
; GFX9-NEXT:    s_addc_u32 s5, s1, s3
; GFX9-NEXT:    v_mov_b32_e32 v1, s1
; GFX9-NEXT:    v_cmp_lt_i64_e32 vcc, s[4:5], v[0:1]
; GFX9-NEXT:    v_cmp_lt_i64_e64 s[0:1], s[2:3], 0
; GFX9-NEXT:    v_mov_b32_e32 v1, s5
; GFX9-NEXT:    s_xor_b64 s[0:1], s[0:1], vcc
; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, s4, v0
; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
; GFX9-NEXT:    v_readfirstlane_b32 s0, v0
; GFX9-NEXT:    v_readfirstlane_b32 s1, v1
; GFX9-NEXT:    ; return to shader part epilog
; Mixed SALU/VALU: the scalar add stays on the SALU but the 64-bit signed
; compares go through VALU, with v_readfirstlane moving the result back.
  %saddo = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 %b)
  %add = extractvalue {i64, i1} %saddo, 0
  %of = extractvalue {i64, i1} %saddo, 1
  %of.zext = zext i1 %of to i64
  %ret = add i64 %add, %of.zext
  ret i64 %ret
}

define amdgpu_ps <2 x i32> @s_saddo_v2i32(<2 x i32> inreg %a, <2 x i32> inreg %b) {
; GFX7-LABEL: s_saddo_v2i32:
; GFX7:       ; %bb.0:
; GFX7-NEXT:    s_add_i32 s4, s0, s2
; GFX7-NEXT:    s_add_i32 s5, s1, s3
; GFX7-NEXT:    s_cmp_lt_i32 s4, s0
; GFX7-NEXT:    s_cselect_b32 s0, 1, 0
; GFX7-NEXT:    s_cmp_lt_i32 s5, s1
; GFX7-NEXT:    s_cselect_b32 s1, 1, 0
; GFX7-NEXT:    s_cmp_lt_i32 s2, 0
; GFX7-NEXT:    s_cselect_b32 s2, 1, 0
; GFX7-NEXT:    s_cmp_lt_i32 s3, 0
; GFX7-NEXT:    s_cselect_b32 s3, 1, 0
; GFX7-NEXT:    s_xor_b32 s0, s2, s0
; GFX7-NEXT:    s_xor_b32 s1, s3, s1
; GFX7-NEXT:    s_and_b32 s0, s0, 1
; GFX7-NEXT:    s_and_b32 s1, s1, 1
; GFX7-NEXT:    s_add_i32 s0, s4, s0
; GFX7-NEXT:    s_add_i32 s1, s5, s1
; GFX7-NEXT:    ; return to shader part epilog
;
; GFX8-LABEL: s_saddo_v2i32:
; GFX8:       ; %bb.0:
; GFX8-NEXT:    s_add_i32 s4, s0, s2
; GFX8-NEXT:    s_add_i32 s5, s1, s3
; GFX8-NEXT:    s_cmp_lt_i32 s4, s0
; GFX8-NEXT:    s_cselect_b32 s0, 1, 0
; GFX8-NEXT:    s_cmp_lt_i32 s5, s1
; GFX8-NEXT:    s_cselect_b32 s1, 1, 0
; GFX8-NEXT:    s_cmp_lt_i32 s2, 0
; GFX8-NEXT:    s_cselect_b32 s2, 1, 0
; GFX8-NEXT:    s_cmp_lt_i32 s3, 0
; GFX8-NEXT:    s_cselect_b32 s3, 1, 0
; GFX8-NEXT:    s_xor_b32 s0, s2, s0
; GFX8-NEXT:    s_xor_b32 s1, s3, s1
; GFX8-NEXT:    s_and_b32 s0, s0, 1
; GFX8-NEXT:    s_and_b32 s1, s1, 1
; GFX8-NEXT:    s_add_i32 s0, s4, s0
; GFX8-NEXT:    s_add_i32 s1, s5, s1
; GFX8-NEXT:    ; return to shader part epilog
;
; GFX9-LABEL: s_saddo_v2i32:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_add_i32 s4, s0, s2
; GFX9-NEXT:    s_add_i32 s5, s1, s3
; GFX9-NEXT:    s_cmp_lt_i32 s4, s0
; GFX9-NEXT:    s_cselect_b32 s0, 1, 0
; GFX9-NEXT:    s_cmp_lt_i32 s5, s1
; GFX9-NEXT:    s_cselect_b32 s1, 1, 0
; GFX9-NEXT:    s_cmp_lt_i32 s2, 0
; GFX9-NEXT:    s_cselect_b32 s2, 1, 0
; GFX9-NEXT:    s_cmp_lt_i32 s3, 0
; GFX9-NEXT:    s_cselect_b32 s3, 1, 0
; GFX9-NEXT:    s_xor_b32 s0, s2, s0
; GFX9-NEXT:    s_xor_b32 s1, s3, s1
; GFX9-NEXT:    s_and_b32 s0, s0, 1
; GFX9-NEXT:    s_and_b32 s1, s1, 1
; GFX9-NEXT:    s_add_i32 s0, s4, s0
; GFX9-NEXT:    s_add_i32 s1, s5, s1
; GFX9-NEXT:    ; return to shader part epilog
; Scalar vector signed overflow: scalarized per lane, fully on the SALU
; using s_cmp_lt_i32/s_cselect_b32 and s_xor_b32.
  %saddo = call {<2 x i32>, <2 x i1>} @llvm.sadd.with.overflow.v2i32(<2 x i32> %a, <2 x i32> %b)
  %add = extractvalue {<2 x i32>, <2 x i1>} %saddo, 0
  %of = extractvalue {<2 x i32>, <2 x i1>} %saddo, 1
  %of.zext = zext <2 x i1> %of to <2 x i32>
  %ret = add <2 x i32> %add, %of.zext
  ret <2 x i32> %ret
}

define i8 @s_saddo_i8(i8 %a, i8 %b) {
; GFX7-LABEL: s_saddo_i8:
; GFX7:       ; %bb.0:
; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT:    v_add_i32_e32 v2, vcc, v0, v1
; GFX7-NEXT:    v_bfe_i32 v3, v2, 0, 8
; GFX7-NEXT:    v_bfe_i32 v0, v0, 0, 8
; GFX7-NEXT:    v_cmp_lt_i32_e32 vcc, v3, v0
; GFX7-NEXT:    v_bfe_i32 v0, v1, 0, 8
; GFX7-NEXT:    v_cmp_gt_i32_e64 s[4:5], 0, v0
; GFX7-NEXT:    s_xor_b64 s[4:5], s[4:5], vcc
; GFX7-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v2, v0
; GFX7-NEXT:    s_setpc_b64 s[30:31]
;
; GFX8-LABEL: s_saddo_i8:
; GFX8:       ; %bb.0:
; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT:    v_add_u16_e32 v2, v0, v1
; GFX8-NEXT:    v_bfe_i32 v3, v2, 0, 8
; GFX8-NEXT:    v_bfe_i32 v0, v0, 0, 8
; GFX8-NEXT:    v_cmp_lt_i32_e32 vcc, v3, v0
; GFX8-NEXT:    v_bfe_i32 v0, v1, 0, 8
; GFX8-NEXT:    v_cmp_gt_i32_e64 s[4:5], 0, v0
; GFX8-NEXT:    s_xor_b64 s[4:5], s[4:5], vcc
; GFX8-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
; GFX8-NEXT:    v_add_u16_e32 v0, v2, v0
; GFX8-NEXT:    s_setpc_b64 s[30:31]
;
; GFX9-LABEL: s_saddo_i8:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT:    v_add_u16_e32 v2, v0, v1
; GFX9-NEXT:    v_cmp_lt_i32_sdwa s[4:5], sext(v2), sext(v0) src0_sel:BYTE_0 src1_sel:BYTE_0
; GFX9-NEXT:    v_mov_b32_e32 v0, 0
; GFX9-NEXT:    v_cmp_lt_i32_sdwa s[6:7], sext(v1), v0 src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT:    s_xor_b64 s[4:5], s[6:7], s[4:5]
; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
; GFX9-NEXT:    v_add_u16_e32 v0, v2, v0
; GFX9-NEXT:    s_setpc_b64 s[30:31]
; NOTE(review): s_-named but takes non-inreg (VGPR) arguments, so codegen
; matches v_saddo_i8 — confirm the name is intentional.
  %saddo = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a, i8 %b)
  %add = extractvalue {i8, i1} %saddo, 0
  %of = extractvalue {i8, i1} %saddo, 1
  %of.zext = zext i1 %of to i8
  %ret = add i8 %add, %of.zext
  ret i8 %ret
}

; Non-power-of-two width variant: llvm.sadd.with.overflow.i7.  All three
; targets sign-extend through v_bfe_i32 with a 7-bit width for the overflow
; compares (GFX9 has no BYTE-granular SDWA selector for 7 bits, so it matches
; GFX8's v_bfe_i32 sequence apart from the 16-bit add).
define i7 @s_saddo_i7(i7 %a, i7 %b) {
; GFX7-LABEL: s_saddo_i7:
; GFX7:       ; %bb.0:
; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT:    v_add_i32_e32 v2, vcc, v0, v1
; GFX7-NEXT:    v_bfe_i32 v3, v2, 0, 7
; GFX7-NEXT:    v_bfe_i32 v0, v0, 0, 7
; GFX7-NEXT:    v_cmp_lt_i32_e32 vcc, v3, v0
; GFX7-NEXT:    v_bfe_i32 v0, v1, 0, 7
; GFX7-NEXT:    v_cmp_gt_i32_e64 s[4:5], 0, v0
; GFX7-NEXT:    s_xor_b64 s[4:5], s[4:5], vcc
; GFX7-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v2, v0
; GFX7-NEXT:    s_setpc_b64 s[30:31]
;
; GFX8-LABEL: s_saddo_i7:
; GFX8:       ; %bb.0:
; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT:    v_add_u16_e32 v2, v0, v1
; GFX8-NEXT:    v_bfe_i32 v3, v2, 0, 7
; GFX8-NEXT:    v_bfe_i32 v0, v0, 0, 7
; GFX8-NEXT:    v_cmp_lt_i32_e32 vcc, v3, v0
; GFX8-NEXT:    v_bfe_i32 v0, v1, 0, 7
; GFX8-NEXT:    v_cmp_gt_i32_e64 s[4:5], 0, v0
; GFX8-NEXT:    s_xor_b64 s[4:5], s[4:5], vcc
; GFX8-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
; GFX8-NEXT:    v_add_u16_e32 v0, v2, v0
; GFX8-NEXT:    s_setpc_b64 s[30:31]
;
; GFX9-LABEL: s_saddo_i7:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT:    v_add_u16_e32 v2, v0, v1
; GFX9-NEXT:    v_bfe_i32 v3, v2, 0, 7
; GFX9-NEXT:    v_bfe_i32 v0, v0, 0, 7
; GFX9-NEXT:    v_cmp_lt_i32_e32 vcc, v3, v0
; GFX9-NEXT:    v_bfe_i32 v0, v1, 0, 7
; GFX9-NEXT:    v_cmp_gt_i32_e64 s[4:5], 0, v0
; GFX9-NEXT:    s_xor_b64 s[4:5], s[4:5], vcc
; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
; GFX9-NEXT:    v_add_u16_e32 v0, v2, v0
; GFX9-NEXT:    s_setpc_b64 s[30:31]
  %saddo = call {i7, i1} @llvm.sadd.with.overflow.i7(i7 %a, i7 %b)
  %add = extractvalue {i7, i1} %saddo, 0
  %of = extractvalue {i7, i1} %saddo, 1
  %of.zext = zext i1 %of to i7
  %ret = add i7 %add, %of.zext
  ret i7 %ret
}

; Mixed SGPR/VGPR operands ("sv"): %a is uniform (inreg), %b divergent, in an
; amdgpu_ps shader.  Checks the carry-out of the s+v add is consumed via
; v_cndmask, and the i32 return is moved back to an SGPR with
; v_readfirstlane_b32 for the shader epilog.
define amdgpu_ps i32 @uaddo_i32_sv(i32 inreg %a, i32 %b) {
; GFX7-LABEL: uaddo_i32_sv:
; GFX7:       ; %bb.0:
; GFX7-NEXT:    v_add_i32_e32 v0, vcc, s0, v0
; GFX7-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
; GFX7-NEXT:    v_readfirstlane_b32 s0, v0
; GFX7-NEXT:    ; return to shader part epilog
;
; GFX8-LABEL: uaddo_i32_sv:
; GFX8:       ; %bb.0:
; GFX8-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
; GFX8-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v0, v1
; GFX8-NEXT:    v_readfirstlane_b32 s0, v0
; GFX8-NEXT:    ; return to shader part epilog
;
; GFX9-LABEL: uaddo_i32_sv:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, s0, v0
; GFX9-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
; GFX9-NEXT:    v_add_u32_e32 v0, v0, v1
; GFX9-NEXT:    v_readfirstlane_b32 s0, v0
; GFX9-NEXT:    ; return to shader part epilog
  %uaddo = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
  %add = extractvalue {i32, i1} %uaddo, 0
  %of = extractvalue {i32, i1} %uaddo, 1
  %of.zext = zext i1 %of to i32
  %ret = add i32 %add, %of.zext
  ret i32 %ret
}

; i16 unsigned overflow with mixed SGPR/VGPR operands: there is no 16-bit
; carry-out, so both operands are masked to 0xffff, added in 32-bit, and
; overflow is detected by comparing the raw sum against its low 16 bits
; (v_cmp_ne_u32).  GFX9 folds the VGPR mask and the compare's truncation into
; SDWA operand selectors (src1_sel:WORD_0).
define amdgpu_ps i16 @uaddo_i16_sv(i16 inreg %a, i16 %b) {
; GFX7-LABEL: uaddo_i16_sv:
; GFX7:       ; %bb.0:
; GFX7-NEXT:    s_and_b32 s0, s0, 0xffff
; GFX7-NEXT:    v_and_b32_e32 v0, 0xffff, v0
; GFX7-NEXT:    v_add_i32_e32 v0, vcc, s0, v0
; GFX7-NEXT:    v_and_b32_e32 v1, 0xffff, v0
; GFX7-NEXT:    v_cmp_ne_u32_e32 vcc, v0, v1
; GFX7-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
; GFX7-NEXT:    v_readfirstlane_b32 s0, v0
; GFX7-NEXT:    ; return to shader part epilog
;
; GFX8-LABEL: uaddo_i16_sv:
; GFX8:       ; %bb.0:
; GFX8-NEXT:    s_and_b32 s0, s0, 0xffff
; GFX8-NEXT:    v_and_b32_e32 v0, 0xffff, v0
; GFX8-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
; GFX8-NEXT:    v_and_b32_e32 v1, 0xffff, v0
; GFX8-NEXT:    v_cmp_ne_u32_e32 vcc, v0, v1
; GFX8-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
; GFX8-NEXT:    v_add_u16_e32 v0, v0, v1
; GFX8-NEXT:    v_readfirstlane_b32 s0, v0
; GFX8-NEXT:    ; return to shader part epilog
;
; GFX9-LABEL: uaddo_i16_sv:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_and_b32 s0, s0, 0xffff
; GFX9-NEXT:    v_add_u32_sdwa v0, s0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; GFX9-NEXT:    v_cmp_ne_u32_sdwa s[0:1], v0, v0 src0_sel:DWORD src1_sel:WORD_0
; GFX9-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s[0:1]
; GFX9-NEXT:    v_add_u16_e32 v0, v0, v1
; GFX9-NEXT:    v_readfirstlane_b32 s0, v0
; GFX9-NEXT:    ; return to shader part epilog
  %uaddo = call {i16, i1} @llvm.uadd.with.overflow.i16(i16 %a, i16 %b)
  %add = extractvalue {i16, i1} %uaddo, 0
  %of = extractvalue {i16, i1} %uaddo, 1
  %of.zext = zext i1 %of to i16
  %ret = add i16 %add, %of.zext
  ret i16 %ret
}

; Signed overflow, mixed SGPR/VGPR i32 operands: expanded as
; of = (b < 0) ^ (a + b < a), built from two compares XORed together, with the
; flag selected to 0/1 and added to the sum.  All three targets emit the same
; shape; only the add mnemonic differs (v_add_i32 / v_add_u32 / carryless
; GFX9 v_add_u32).
define amdgpu_ps i32 @saddo_i32_sv(i32 inreg %a, i32 %b) {
; GFX7-LABEL: saddo_i32_sv:
; GFX7:       ; %bb.0:
; GFX7-NEXT:    v_add_i32_e32 v1, vcc, s0, v0
; GFX7-NEXT:    v_cmp_gt_i32_e32 vcc, s0, v1
; GFX7-NEXT:    v_cmp_gt_i32_e64 s[0:1], 0, v0
; GFX7-NEXT:    s_xor_b64 s[0:1], s[0:1], vcc
; GFX7-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v1, v0
; GFX7-NEXT:    v_readfirstlane_b32 s0, v0
; GFX7-NEXT:    ; return to shader part epilog
;
; GFX8-LABEL: saddo_i32_sv:
; GFX8:       ; %bb.0:
; GFX8-NEXT:    v_add_u32_e32 v1, vcc, s0, v0
; GFX8-NEXT:    v_cmp_gt_i32_e32 vcc, s0, v1
; GFX8-NEXT:    v_cmp_gt_i32_e64 s[0:1], 0, v0
; GFX8-NEXT:    s_xor_b64 s[0:1], s[0:1], vcc
; GFX8-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v1, v0
; GFX8-NEXT:    v_readfirstlane_b32 s0, v0
; GFX8-NEXT:    ; return to shader part epilog
;
; GFX9-LABEL: saddo_i32_sv:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    v_add_u32_e32 v1, s0, v0
; GFX9-NEXT:    v_cmp_gt_i32_e32 vcc, s0, v1
; GFX9-NEXT:    v_cmp_gt_i32_e64 s[0:1], 0, v0
; GFX9-NEXT:    s_xor_b64 s[0:1], s[0:1], vcc
; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; GFX9-NEXT:    v_add_u32_e32 v0, v1, v0
; GFX9-NEXT:    v_readfirstlane_b32 s0, v0
; GFX9-NEXT:    ; return to shader part epilog
  %saddo = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
  %add = extractvalue {i32, i1} %saddo, 0
  %of = extractvalue {i32, i1} %saddo, 1
  %of.zext = zext i1 %of to i32
  %ret = add i32 %add, %of.zext
  ret i32 %ret
}

; Signed i16 overflow, mixed SGPR/VGPR operands.  GFX7 lacks 16-bit VALU ops,
; so it adds in 32-bit and sign-extends (s_sext_i32_i16 / v_bfe_i32) before
; 32-bit compares; GFX8/9 use native 16-bit add and v_cmp_gt_i16 directly.
define amdgpu_ps i16 @saddo_i16_sv(i16 inreg %a, i16 %b) {
; GFX7-LABEL: saddo_i16_sv:
; GFX7:       ; %bb.0:
; GFX7-NEXT:    v_add_i32_e32 v1, vcc, s0, v0
; GFX7-NEXT:    v_bfe_i32 v2, v1, 0, 16
; GFX7-NEXT:    s_sext_i32_i16 s0, s0
; GFX7-NEXT:    v_bfe_i32 v0, v0, 0, 16
; GFX7-NEXT:    v_cmp_gt_i32_e32 vcc, s0, v2
; GFX7-NEXT:    v_cmp_gt_i32_e64 s[0:1], 0, v0
; GFX7-NEXT:    s_xor_b64 s[0:1], s[0:1], vcc
; GFX7-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v1, v0
; GFX7-NEXT:    v_readfirstlane_b32 s0, v0
; GFX7-NEXT:    ; return to shader part epilog
;
; GFX8-LABEL: saddo_i16_sv:
; GFX8:       ; %bb.0:
; GFX8-NEXT:    v_add_u16_e32 v1, s0, v0
; GFX8-NEXT:    v_cmp_gt_i16_e32 vcc, s0, v1
; GFX8-NEXT:    v_cmp_gt_i16_e64 s[0:1], 0, v0
; GFX8-NEXT:    s_xor_b64 s[0:1], s[0:1], vcc
; GFX8-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; GFX8-NEXT:    v_add_u16_e32 v0, v1, v0
; GFX8-NEXT:    v_readfirstlane_b32 s0, v0
; GFX8-NEXT:    ; return to shader part epilog
;
; GFX9-LABEL: saddo_i16_sv:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    v_add_u16_e32 v1, s0, v0
; GFX9-NEXT:    v_cmp_gt_i16_e32 vcc, s0, v1
; GFX9-NEXT:    v_cmp_gt_i16_e64 s[0:1], 0, v0
; GFX9-NEXT:    s_xor_b64 s[0:1], s[0:1], vcc
; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; GFX9-NEXT:    v_add_u16_e32 v0, v1, v0
; GFX9-NEXT:    v_readfirstlane_b32 s0, v0
; GFX9-NEXT:    ; return to shader part epilog
  %saddo = call {i16, i1} @llvm.sadd.with.overflow.i16(i16 %a, i16 %b)
  %add = extractvalue {i16, i1} %saddo, 0
  %of = extractvalue {i16, i1} %saddo, 1
  %of.zext = zext i1 %of to i16
  %ret = add i16 %add, %of.zext
  ret i16 %ret
}

; Declarations of the overflow intrinsics exercised by the tests above; each
; returns {sum, overflow-flag}.
declare {i7, i1} @llvm.uadd.with.overflow.i7(i7 %a, i7 %b)
declare {i8, i1} @llvm.uadd.with.overflow.i8(i8 %a, i8 %b)
declare {i16, i1} @llvm.uadd.with.overflow.i16(i16 %a, i16 %b)
declare {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
declare {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
declare {<2 x i32>, <2 x i1>} @llvm.uadd.with.overflow.v2i32(<2 x i32> %a, <2 x i32> %b)

declare {i7, i1} @llvm.sadd.with.overflow.i7(i7 %a, i7 %b)
declare {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a, i8 %b)
declare {i16, i1} @llvm.sadd.with.overflow.i16(i16 %a, i16 %b)
declare {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
declare {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 %b)
declare {<2 x i32>, <2 x i1>} @llvm.sadd.with.overflow.v2i32(<2 x i32> %a, <2 x i32> %b)