Skip to content

[NFC][AMDGPU] Pre-commit test for setcc removal by using add/sub carryout #155118

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 1 commit into
base: main
Choose a base branch
from

Conversation

LU-JOHN
Copy link
Contributor

@LU-JOHN LU-JOHN commented Aug 23, 2025

Pre-commit test for setcc removal by using add/sub carryout.

Signed-off-by: John Lu <John.Lu@amd.com>
@llvmbot
Copy link
Member

llvmbot commented Aug 23, 2025

@llvm/pr-subscribers-backend-amdgpu

Author: None (LU-JOHN)

Changes

Pre-commit test for setcc removal by using add/sub carryout.


Full diff: https://github.com/llvm/llvm-project/pull/155118.diff

1 File Affected:

  • (added) llvm/test/CodeGen/AMDGPU/addsub64_carry.ll (+186)
diff --git a/llvm/test/CodeGen/AMDGPU/addsub64_carry.ll b/llvm/test/CodeGen/AMDGPU/addsub64_carry.ll
new file mode 100644
index 0000000000000..5cdb0b2407bbc
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/addsub64_carry.ll
@@ -0,0 +1,186 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+;; Test that carryout from 64-bit add/sub (synthesized from two 32-bit adds/subs) is utilized
+;; (i.e. no additional compare is generated).
+
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck %s
+
+%0 = type { i64, i64, i32, i32 }
+%1 = type { [64 x [8 x i64]] }
+%struct.uint96 = type { i64, i32 }
+%struct.uint64pair = type { i64, i64 }
+
+declare {i64, i1} @llvm.uadd.with.overflow.i64(i64, i64)
+declare {i64, i1} @llvm.usub.with.overflow.i64(i64, i64)
+
+declare {<2 x i64>, <2 x i1>} @llvm.uadd.with.overflow.v2i64(<2 x i64>, <2 x i64>)
+declare {<2 x i64>, <2 x i1>} @llvm.usub.with.overflow.v2i64(<2 x i64>, <2 x i64>)
+
+define hidden %struct.uint96 @add64_32(i64 %val64A, i64 %val64B, i32 %val32) {
+; CHECK-LABEL: add64_32:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_add_co_u32_e32 v5, vcc, v0, v2
+; CHECK-NEXT:    v_addc_co_u32_e32 v6, vcc, v1, v3, vcc
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, v[5:6], v[0:1]
+; CHECK-NEXT:    v_mov_b32_e32 v0, v5
+; CHECK-NEXT:    v_addc_co_u32_e32 v2, vcc, 0, v4, vcc
+; CHECK-NEXT:    v_mov_b32_e32 v1, v6
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %sum64 = add i64 %val64A, %val64B
+  %obit = icmp ult i64 %sum64, %val64A
+  %obit32 = zext i1 %obit to i32
+  %sum32 = add i32 %val32, %obit32
+  %.fca.0.insert = insertvalue %struct.uint96 poison, i64 %sum64, 0
+  %.fca.1.insert = insertvalue %struct.uint96 %.fca.0.insert, i32 %sum32, 1
+  ret %struct.uint96 %.fca.1.insert
+}
+
+define <2 x i64> @uadd_v2i64(<2 x i64> %val0, <2 x i64> %val1, ptr %ptrval) {
+; CHECK-LABEL: uadd_v2i64:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_add_co_u32_e32 v6, vcc, v2, v6
+; CHECK-NEXT:    v_addc_co_u32_e32 v7, vcc, v3, v7, vcc
+; CHECK-NEXT:    v_add_co_u32_e32 v4, vcc, v0, v4
+; CHECK-NEXT:    v_addc_co_u32_e32 v5, vcc, v1, v5, vcc
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, v[4:5], v[0:1]
+; CHECK-NEXT:    flat_store_dwordx4 v[8:9], v[4:7]
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, v[6:7], v[2:3]
+; CHECK-NEXT:    v_mov_b32_e32 v1, v0
+; CHECK-NEXT:    v_cndmask_b32_e64 v2, 0, -1, vcc
+; CHECK-NEXT:    v_mov_b32_e32 v3, v2
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %pair = call {<2 x i64>, <2 x i1>} @llvm.uadd.with.overflow.v2i64(<2 x i64> %val0, <2 x i64> %val1)
+  %val = extractvalue {<2 x i64>, <2 x i1>} %pair, 0
+  %obit = extractvalue {<2 x i64>, <2 x i1>} %pair, 1
+  %res = sext <2 x i1> %obit to <2 x i64>
+  store <2 x i64> %val, ptr %ptrval
+  ret <2 x i64> %res
+}
+
+define <2 x i64> @usub_v2i64(<2 x i64> %val0, <2 x i64> %val1, ptr %ptrval) {
+; CHECK-LABEL: usub_v2i64:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_sub_co_u32_e32 v6, vcc, v2, v6
+; CHECK-NEXT:    v_subb_co_u32_e32 v7, vcc, v3, v7, vcc
+; CHECK-NEXT:    v_sub_co_u32_e32 v4, vcc, v0, v4
+; CHECK-NEXT:    v_subb_co_u32_e32 v5, vcc, v1, v5, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, v[4:5], v[0:1]
+; CHECK-NEXT:    flat_store_dwordx4 v[8:9], v[4:7]
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, v[6:7], v[2:3]
+; CHECK-NEXT:    v_mov_b32_e32 v1, v0
+; CHECK-NEXT:    v_cndmask_b32_e64 v2, 0, -1, vcc
+; CHECK-NEXT:    v_mov_b32_e32 v3, v2
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %pair = call {<2 x i64>, <2 x i1>} @llvm.usub.with.overflow.v2i64(<2 x i64> %val0, <2 x i64> %val1)
+  %val = extractvalue {<2 x i64>, <2 x i1>} %pair, 0
+  %obit = extractvalue {<2 x i64>, <2 x i1>} %pair, 1
+  %res = sext <2 x i1> %obit to <2 x i64>
+  store <2 x i64> %val, ptr %ptrval
+  ret <2 x i64> %res
+}
+
+define i64 @uadd_i64(i64 %val0, i64 %val1, ptr %ptrval) {
+; CHECK-LABEL: uadd_i64:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_add_co_u32_e32 v2, vcc, v0, v2
+; CHECK-NEXT:    v_addc_co_u32_e32 v3, vcc, v1, v3, vcc
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, v[2:3], v[0:1]
+; CHECK-NEXT:    flat_store_dwordx2 v[4:5], v[2:3]
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
+; CHECK-NEXT:    v_mov_b32_e32 v1, v0
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %pair = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %val0, i64 %val1)
+  %val = extractvalue {i64, i1} %pair, 0
+  %obit = extractvalue {i64, i1} %pair, 1
+  %res = sext i1 %obit to i64
+  store i64 %val, ptr %ptrval
+  ret i64 %res
+}
+
+define i64 @uadd_p1(i64 %val0, i64 %val1, ptr %ptrval) {
+; CHECK-LABEL: uadd_p1:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_add_co_u32_e32 v0, vcc, 1, v0
+; CHECK-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; CHECK-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; CHECK-NEXT:    flat_store_dwordx2 v[4:5], v[0:1]
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
+; CHECK-NEXT:    v_mov_b32_e32 v1, v0
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %pair = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %val0, i64 1)
+  %val = extractvalue {i64, i1} %pair, 0
+  %obit = extractvalue {i64, i1} %pair, 1
+  %res = sext i1 %obit to i64
+  store i64 %val, ptr %ptrval
+  ret i64 %res
+}
+
+define i64 @uadd_n1(i64 %val0, i64 %val1, ptr %ptrval) {
+; CHECK-LABEL: uadd_n1:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_add_co_u32_e32 v2, vcc, -1, v0
+; CHECK-NEXT:    v_addc_co_u32_e32 v3, vcc, -1, v1, vcc
+; CHECK-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[0:1]
+; CHECK-NEXT:    flat_store_dwordx2 v[4:5], v[2:3]
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
+; CHECK-NEXT:    v_mov_b32_e32 v1, v0
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %pair = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %val0, i64 -1)
+  %val = extractvalue {i64, i1} %pair, 0
+  %obit = extractvalue {i64, i1} %pair, 1
+  %res = sext i1 %obit to i64
+  store i64 %val, ptr %ptrval
+  ret i64 %res
+}
+
+define i64 @usub_p1(i64 %val0, i64 %val1, ptr %ptrval) {
+; CHECK-LABEL: usub_p1:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_add_co_u32_e32 v2, vcc, -1, v0
+; CHECK-NEXT:    v_addc_co_u32_e32 v3, vcc, -1, v1, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
+; CHECK-NEXT:    flat_store_dwordx2 v[4:5], v[2:3]
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
+; CHECK-NEXT:    v_mov_b32_e32 v1, v0
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %pair = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %val0, i64 1)
+  %val = extractvalue {i64, i1} %pair, 0
+  %obit = extractvalue {i64, i1} %pair, 1
+  %res = sext i1 %obit to i64
+  store i64 %val, ptr %ptrval
+  ret i64 %res
+}
+
+define i64 @usub_n1(i64 %val0, i64 %val1, ptr %ptrval) {
+; CHECK-LABEL: usub_n1:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_add_co_u32_e32 v2, vcc, 1, v0
+; CHECK-NEXT:    v_addc_co_u32_e32 v3, vcc, 0, v1, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
+; CHECK-NEXT:    flat_store_dwordx2 v[4:5], v[2:3]
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
+; CHECK-NEXT:    v_mov_b32_e32 v1, v0
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %pair = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %val0, i64 -1)
+  %val = extractvalue {i64, i1} %pair, 0
+  %obit = extractvalue {i64, i1} %pair, 1
+  %res = sext i1 %obit to i64
+  store i64 %val, ptr %ptrval
+  ret i64 %res
+}

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
Projects
None yet
Development

Successfully merging this pull request may close these issues.

2 participants