diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h
index 1487747899d6..f9a500bb7c64 100644
--- a/clang/include/clang/CIR/MissingFeatures.h
+++ b/clang/include/clang/CIR/MissingFeatures.h
@@ -247,7 +247,6 @@ struct MissingFeatures {
   static bool emitConstrainedFPCall() { return false; }
   static bool emitEmptyRecordCheck() { return false; }
   static bool isPPC_FP128Ty() { return false; }
-  static bool emitBinaryAtomicPostHasInvert() { return false; }
   static bool createLaunderInvariantGroup() { return false; }
 
   // Inline assembly
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
index 572ef5d6022c..24e929ea4b83 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
@@ -293,8 +293,8 @@ static RValue emitBinaryAtomic(CIRGenFunction &CGF, cir::AtomicFetchKind kind,
 
 static RValue emitBinaryAtomicPost(CIRGenFunction &cgf,
                                    cir::AtomicFetchKind atomicOpkind,
-                                   const CallExpr *e,
-                                   cir::BinOpKind binopKind) {
+                                   const CallExpr *e, cir::BinOpKind binopKind,
+                                   bool invert = false) {
   mlir::Value val;
   mlir::Type valueType;
   clang::QualType typ = e->getType();
@@ -302,12 +302,10 @@ static RValue emitBinaryAtomicPost(CIRGenFunction &cgf,
       makeBinaryAtomicValue(cgf, atomicOpkind, e, &val, &valueType);
   clang::CIRGen::CIRGenBuilderTy &builder = cgf.getBuilder();
   result = builder.create<cir::BinOp>(result.getLoc(), binopKind, result, val);
+  if (invert)
+    result = builder.create<cir::UnaryOp>(result.getLoc(),
+                                          cir::UnaryOpKind::Not, result);
   result = emitFromInt(cgf, result, typ, valueType);
-  // FIXME: Some callers of this function expect the result to be inverted,
-  // which would need invert flag passed in and do the inversion here like
-  // traditional clang code gen does. When we implment those caller builtins
-  // we should implement the inversion here.
-  assert(!MissingFeatures::emitBinaryAtomicPostHasInvert());
   return RValue::get(result);
 }
 
@@ -1841,7 +1839,8 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__sync_nand_and_fetch_4:
   case Builtin::BI__sync_nand_and_fetch_8:
   case Builtin::BI__sync_nand_and_fetch_16:
-    llvm_unreachable("BI__sync_nand_and_fetch like NYI");
+    return emitBinaryAtomicPost(*this, cir::AtomicFetchKind::Nand, E,
+                                cir::BinOpKind::And, true);
 
   case Builtin::BI__sync_val_compare_and_swap_1:
   case Builtin::BI__sync_val_compare_and_swap_2:
diff --git a/clang/test/CIR/CodeGen/atomic.cpp b/clang/test/CIR/CodeGen/atomic.cpp
index fb1f792a4027..54244c6d4f74 100644
--- a/clang/test/CIR/CodeGen/atomic.cpp
+++ b/clang/test/CIR/CodeGen/atomic.cpp
@@ -676,7 +676,7 @@ void cmp_val_ulong(unsigned long* p, long x, long u) { // CHECK-LABEL: @test_op_and_fetch // LLVM-LABEL: @test_op_and_fetch -extern "C" void test_op_and_fetch (void) +extern "C" void test_op_and_fetch(void) { // CHECK: [[VAL0:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s8i // CHECK: [[RES0:%.*]] = cir.atomic.fetch(add, {{%.*}} : !cir.ptr, [[VAL0]] : !s8i, seq_cst) fetch_first : !s8i @@ -685,7 +685,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[VAL0:%.*]] = load i8, ptr @uc, align 1 // LLVM: [[RES0:%.*]] = atomicrmw add ptr @sc, i8 [[VAL0]] seq_cst, align 1 // LLVM: [[RET0:%.*]] = add i8 [[RES0]], [[VAL0]] // LLVM: store i8 [[RET0]], ptr @sc, align 1 - sc = __sync_add_and_fetch (&sc, uc); + sc = __sync_add_and_fetch(&sc, uc); // CHECK: [[RES1:%.*]] = cir.atomic.fetch(add, {{%.*}} : !cir.ptr, [[VAL1:%.*]] : !u8i, seq_cst) fetch_first : !u8i // CHECK: [[RET1:%.*]] = cir.binop(add, [[RES1]], [[VAL1]]) : !u8i @@ -693,7 +693,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[VAL1:%.*]] = load i8, ptr @uc, align 1 // LLVM: [[RES1:%.*]] = atomicrmw add ptr @uc, i8 [[VAL1]] seq_cst, align 1 // LLVM: [[RET1:%.*]] = add i8 [[RES1]], [[VAL1]] // LLVM: store i8 [[RET1]], ptr @uc, align 1 - uc = __sync_add_and_fetch (&uc, uc); + uc = __sync_add_and_fetch(&uc, uc); // CHECK: [[VAL2:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s16i // CHECK: [[RES2:%.*]] = cir.atomic.fetch(add, {{%.*}} : !cir.ptr, [[VAL2]] : !s16i, seq_cst) fetch_first : !s16i @@ -703,7 +703,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[VAL2:%.*]] = load i8, ptr @uc, align 1 // LLVM: [[CONV2:%.*]] = zext i8 [[VAL2]] to i16 // LLVM: [[RES2:%.*]] = atomicrmw add ptr @ss, i16 [[CONV2]] seq_cst, align 2 // LLVM: [[RET2:%.*]] = add i16 [[RES2]], [[CONV2]] // LLVM: store i16 [[RET2]], ptr @ss, align 2 - ss = __sync_add_and_fetch (&ss, uc); + ss = __sync_add_and_fetch(&ss, uc); // CHECK: [[VAL3:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u16i // CHECK: [[RES3:%.*]] = cir.atomic.fetch(add, {{%.*}} : !cir.ptr, [[VAL3]] : !u16i, seq_cst) fetch_first : !u16i @@ -713,7 +713,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[VAL3:%.*]] = load i8, ptr @uc, align 1 // LLVM: [[CONV3:%.*]] = zext i8 [[VAL3]] to i16 // LLVM: [[RES3:%.*]] = atomicrmw add ptr @us, i16 [[CONV3]] seq_cst, align 2 // LLVM: [[RET3:%.*]] = add i16 [[RES3]], [[CONV3]] // LLVM: store i16 [[RET3]], ptr @us - us = __sync_add_and_fetch (&us, uc); + us = __sync_add_and_fetch(&us, uc); // CHECK: [[VAL4:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s32i // CHECK: [[RES4:%.*]] = cir.atomic.fetch(add, {{%.*}} : !cir.ptr, [[VAL4]] : !s32i, seq_cst) fetch_first : !s32i @@ -723,7 +723,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[VAL4:%.*]] = load i8, ptr @uc, align 1 // LLVM: [[CONV4:%.*]] = zext i8 [[VAL4]] to i32 // LLVM: [[RES4:%.*]] = atomicrmw add ptr @si, i32 [[CONV4]] seq_cst, align 4 // LLVM: [[RET4:%.*]] = add i32 [[RES4]], [[CONV4]] // LLVM: store i32 [[RET4]], ptr @si, align 4 - si = __sync_add_and_fetch (&si, uc); + si = __sync_add_and_fetch(&si, uc); // CHECK: [[VAL5:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u32i // CHECK: [[RES5:%.*]] = 
cir.atomic.fetch(add, {{%.*}} : !cir.ptr, [[VAL5]] : !u32i, seq_cst) fetch_first : !u32i @@ -733,7 +733,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES5:%.*]] = atomicrmw add ptr @ui, i32 [[CONV5]] seq_cst, align 4 // LLVM: [[RET5:%.*]] = add i32 [[RES5]], [[CONV5]] // LLVM: store i32 [[RET5]], ptr @ui, align 4 - ui = __sync_add_and_fetch (&ui, uc); + ui = __sync_add_and_fetch(&ui, uc); // CHECK: [[VAL6:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s64i // CHECK: [[RES6:%.*]] = cir.atomic.fetch(add, {{%.*}} : !cir.ptr, [[VAL6]] : !s64i, seq_cst) fetch_first : !s64i @@ -743,7 +743,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES6:%.*]] = atomicrmw add ptr @sll, i64 [[CONV6]] seq_cst, align 8 // LLVM: [[RET6:%.*]] = add i64 [[RES6]], [[CONV6]] // LLVM: store i64 [[RET6]], ptr @sll, align 8 - sll = __sync_add_and_fetch (&sll, uc); + sll = __sync_add_and_fetch(&sll, uc); // CHECK: [[VAL7:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u64i // CHECK: [[RES7:%.*]] = cir.atomic.fetch(add, {{%.*}} : !cir.ptr, [[VAL7]] : !u64i, seq_cst) fetch_first : !u64i @@ -753,7 +753,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES7:%.*]] = atomicrmw add ptr @ull, i64 [[CONV7]] seq_cst, align 8 // LLVM: [[RET7:%.*]] = add i64 [[RES7]], [[CONV7]] // LLVM: store i64 [[RET7]], ptr @ull, align 8 - ull = __sync_add_and_fetch (&ull, uc); + ull = __sync_add_and_fetch(&ull, uc); // CHECK: [[VAL0:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s8i // CHECK: [[RES0:%.*]] = cir.atomic.fetch(sub, {{%.*}} : !cir.ptr, [[VAL0]] : !s8i, seq_cst) fetch_first : !s8i @@ -762,7 +762,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES0:%.*]] = atomicrmw sub ptr @sc, i8 [[VAL0]] seq_cst, align 1 // LLVM: [[RET0:%.*]] = sub i8 [[RES0]], [[VAL0]] // LLVM: store i8 [[RET0]], ptr @sc, align 1 - sc = __sync_sub_and_fetch (&sc, uc); + sc = __sync_sub_and_fetch(&sc, uc); // CHECK: [[RES1:%.*]] = cir.atomic.fetch(sub, {{%.*}} : !cir.ptr, [[VAL1:%.*]] : !u8i, seq_cst) fetch_first : !u8i // CHECK: [[RET1:%.*]] = cir.binop(sub, [[RES1]], [[VAL1]]) : !u8i @@ -770,7 +770,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES1:%.*]] = atomicrmw sub ptr @uc, i8 [[VAL1]] seq_cst, align 1 // LLVM: [[RET1:%.*]] = sub i8 [[RES1]], [[VAL1]] // LLVM: store i8 [[RET1]], ptr @uc, align 1 - uc = __sync_sub_and_fetch (&uc, uc); + uc = __sync_sub_and_fetch(&uc, uc); // CHECK: [[VAL2:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s16i // CHECK: [[RES2:%.*]] = cir.atomic.fetch(sub, {{%.*}} : !cir.ptr, [[VAL2]] : !s16i, seq_cst) fetch_first : !s16i @@ -780,7 +780,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES2:%.*]] = atomicrmw sub ptr @ss, i16 [[CONV2]] seq_cst, align 2 // LLVM: [[RET2:%.*]] = sub i16 [[RES2]], [[CONV2]] // LLVM: store i16 [[RET2]], ptr @ss, align 2 - ss = __sync_sub_and_fetch (&ss, uc); + ss = __sync_sub_and_fetch(&ss, uc); // CHECK: [[VAL3:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u16i // CHECK: [[RES3:%.*]] = cir.atomic.fetch(sub, {{%.*}} : !cir.ptr, [[VAL3]] : !u16i, seq_cst) fetch_first : !u16i @@ -790,7 +790,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES3:%.*]] = atomicrmw sub ptr @us, i16 [[CONV3]] seq_cst, align 2 // LLVM: [[RET3:%.*]] = sub i16 [[RES3]], [[CONV3]] // LLVM: store i16 [[RET3]], ptr @us - us = __sync_sub_and_fetch (&us, uc); + us = __sync_sub_and_fetch(&us, uc); // CHECK: [[VAL4:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s32i // CHECK: [[RES4:%.*]] = cir.atomic.fetch(sub, {{%.*}} : !cir.ptr, [[VAL4]] : !s32i, seq_cst) fetch_first : !s32i @@ 
-800,7 +800,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES4:%.*]] = atomicrmw sub ptr @si, i32 [[CONV4]] seq_cst, align 4 // LLVM: [[RET4:%.*]] = sub i32 [[RES4]], [[CONV4]] // LLVM: store i32 [[RET4]], ptr @si, align 4 - si = __sync_sub_and_fetch (&si, uc); + si = __sync_sub_and_fetch(&si, uc); // CHECK: [[VAL5:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u32i // CHECK: [[RES5:%.*]] = cir.atomic.fetch(sub, {{%.*}} : !cir.ptr, [[VAL5]] : !u32i, seq_cst) fetch_first : !u32i @@ -810,7 +810,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES5:%.*]] = atomicrmw sub ptr @ui, i32 [[CONV5]] seq_cst, align 4 // LLVM: [[RET5:%.*]] = sub i32 [[RES5]], [[CONV5]] // LLVM: store i32 [[RET5]], ptr @ui, align 4 - ui = __sync_sub_and_fetch (&ui, uc); + ui = __sync_sub_and_fetch(&ui, uc); // CHECK: [[VAL6:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s64i // CHECK: [[RES6:%.*]] = cir.atomic.fetch(sub, {{%.*}} : !cir.ptr, [[VAL6]] : !s64i, seq_cst) fetch_first : !s64i @@ -820,7 +820,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES6:%.*]] = atomicrmw sub ptr @sll, i64 [[CONV6]] seq_cst, align 8 // LLVM: [[RET6:%.*]] = sub i64 [[RES6]], [[CONV6]] // LLVM: store i64 [[RET6]], ptr @sll, align 8 - sll = __sync_sub_and_fetch (&sll, uc); + sll = __sync_sub_and_fetch(&sll, uc); // CHECK: [[VAL7:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u64i // CHECK: [[RES7:%.*]] = cir.atomic.fetch(sub, {{%.*}} : !cir.ptr, [[VAL7]] : !u64i, seq_cst) fetch_first : !u64i @@ -830,7 +830,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES7:%.*]] = atomicrmw sub ptr @ull, i64 [[CONV7]] seq_cst, align 8 // LLVM: [[RET7:%.*]] = sub i64 [[RES7]], [[CONV7]] // LLVM: store i64 [[RET7]], ptr @ull, align 8 - ull = __sync_sub_and_fetch (&ull, uc); + ull = __sync_sub_and_fetch(&ull, uc); // CHECK: [[VAL0:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s8i // CHECK: [[RES0:%.*]] = cir.atomic.fetch(and, {{%.*}} : !cir.ptr, [[VAL0]] : !s8i, seq_cst) fetch_first : !s8i @@ -839,7 +839,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES0:%.*]] = atomicrmw and ptr @sc, i8 [[VAL0]] seq_cst, align 1 // LLVM: [[RET0:%.*]] = and i8 [[RES0]], [[VAL0]] // LLVM: store i8 [[RET0]], ptr @sc, align 1 - sc = __sync_and_and_fetch (&sc, uc); + sc = __sync_and_and_fetch(&sc, uc); // CHECK: [[RES1:%.*]] = cir.atomic.fetch(and, {{%.*}} : !cir.ptr, [[VAL1:%.*]] : !u8i, seq_cst) fetch_first : !u8i // CHECK: [[RET1:%.*]] = cir.binop(and, [[RES1]], [[VAL1]]) : !u8i @@ -847,7 +847,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES1:%.*]] = atomicrmw and ptr @uc, i8 [[VAL1]] seq_cst, align 1 // LLVM: [[RET1:%.*]] = and i8 [[RES1]], [[VAL1]] // LLVM: store i8 [[RET1]], ptr @uc, align 1 - uc = __sync_and_and_fetch (&uc, uc); + uc = __sync_and_and_fetch(&uc, uc); // CHECK: [[VAL2:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s16i // CHECK: [[RES2:%.*]] = cir.atomic.fetch(and, {{%.*}} : !cir.ptr, [[VAL2]] : !s16i, seq_cst) fetch_first : !s16i @@ -857,7 +857,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES2:%.*]] = atomicrmw and ptr @ss, i16 [[CONV2]] seq_cst, align 2 // LLVM: [[RET2:%.*]] = and i16 [[RES2]], [[CONV2]] // LLVM: store i16 [[RET2]], ptr @ss, align 2 - ss = __sync_and_and_fetch (&ss, uc); + ss = __sync_and_and_fetch(&ss, uc); // CHECK: [[VAL3:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u16i // CHECK: [[RES3:%.*]] = cir.atomic.fetch(and, {{%.*}} : !cir.ptr, [[VAL3]] : !u16i, seq_cst) fetch_first : !u16i @@ -867,7 +867,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES3:%.*]] = 
atomicrmw and ptr @us, i16 [[CONV3]] seq_cst, align 2 // LLVM: [[RET3:%.*]] = and i16 [[RES3]], [[CONV3]] // LLVM: store i16 [[RET3]], ptr @us - us = __sync_and_and_fetch (&us, uc); + us = __sync_and_and_fetch(&us, uc); // CHECK: [[VAL4:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s32i // CHECK: [[RES4:%.*]] = cir.atomic.fetch(and, {{%.*}} : !cir.ptr, [[VAL4]] : !s32i, seq_cst) fetch_first : !s32i @@ -877,7 +877,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES4:%.*]] = atomicrmw and ptr @si, i32 [[CONV4]] seq_cst, align 4 // LLVM: [[RET4:%.*]] = and i32 [[RES4]], [[CONV4]] // LLVM: store i32 [[RET4]], ptr @si, align 4 - si = __sync_and_and_fetch (&si, uc); + si = __sync_and_and_fetch(&si, uc); // CHECK: [[VAL5:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u32i // CHECK: [[RES5:%.*]] = cir.atomic.fetch(and, {{%.*}} : !cir.ptr, [[VAL5]] : !u32i, seq_cst) fetch_first : !u32i @@ -887,7 +887,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES5:%.*]] = atomicrmw and ptr @ui, i32 [[CONV5]] seq_cst, align 4 // LLVM: [[RET5:%.*]] = and i32 [[RES5]], [[CONV5]] // LLVM: store i32 [[RET5]], ptr @ui, align 4 - ui = __sync_and_and_fetch (&ui, uc); + ui = __sync_and_and_fetch(&ui, uc); // CHECK: [[VAL6:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s64i // CHECK: [[RES6:%.*]] = cir.atomic.fetch(and, {{%.*}} : !cir.ptr, [[VAL6]] : !s64i, seq_cst) fetch_first : !s64i @@ -897,7 +897,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES6:%.*]] = atomicrmw and ptr @sll, i64 [[CONV6]] seq_cst, align 8 // LLVM: [[RET6:%.*]] = and i64 [[RES6]], [[CONV6]] // LLVM: store i64 [[RET6]], ptr @sll, align 8 - sll = __sync_and_and_fetch (&sll, uc); + sll = __sync_and_and_fetch(&sll, uc); // CHECK: [[VAL7:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u64i // CHECK: [[RES7:%.*]] = cir.atomic.fetch(and, {{%.*}} : !cir.ptr, [[VAL7]] : !u64i, seq_cst) fetch_first : !u64i @@ -907,7 +907,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES7:%.*]] = atomicrmw and ptr @ull, i64 [[CONV7]] seq_cst, align 8 // LLVM: [[RET7:%.*]] = and i64 [[RES7]], [[CONV7]] // LLVM: store i64 [[RET7]], ptr @ull, align 8 - ull = __sync_and_and_fetch (&ull, uc); + ull = __sync_and_and_fetch(&ull, uc); // CHECK: [[VAL0:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s8i // CHECK: [[RES0:%.*]] = cir.atomic.fetch(or, {{%.*}} : !cir.ptr, [[VAL0]] : !s8i, seq_cst) fetch_first : !s8i @@ -916,7 +916,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES0:%.*]] = atomicrmw or ptr @sc, i8 [[VAL0]] seq_cst, align 1 // LLVM: [[RET0:%.*]] = or i8 [[RES0]], [[VAL0]] // LLVM: store i8 [[RET0]], ptr @sc, align 1 - sc = __sync_or_and_fetch (&sc, uc); + sc = __sync_or_and_fetch(&sc, uc); // CHECK: [[RES1:%.*]] = cir.atomic.fetch(or, {{%.*}} : !cir.ptr, [[VAL1:%.*]] : !u8i, seq_cst) fetch_first : !u8i // CHECK: [[RET1:%.*]] = cir.binop(or, [[RES1]], [[VAL1]]) : !u8i @@ -924,7 +924,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES1:%.*]] = atomicrmw or ptr @uc, i8 [[VAL1]] seq_cst, align 1 // LLVM: [[RET1:%.*]] = or i8 [[RES1]], [[VAL1]] // LLVM: store i8 [[RET1]], ptr @uc, align 1 - uc = __sync_or_and_fetch (&uc, uc); + uc = __sync_or_and_fetch(&uc, uc); // CHECK: [[VAL2:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s16i // CHECK: [[RES2:%.*]] = cir.atomic.fetch(or, {{%.*}} : !cir.ptr, [[VAL2]] : !s16i, seq_cst) fetch_first : !s16i @@ -934,7 +934,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES2:%.*]] = atomicrmw or ptr @ss, i16 [[CONV2]] seq_cst, align 2 // LLVM: [[RET2:%.*]] = or i16 [[RES2]], [[CONV2]] // 
LLVM: store i16 [[RET2]], ptr @ss, align 2 - ss = __sync_or_and_fetch (&ss, uc); + ss = __sync_or_and_fetch(&ss, uc); // CHECK: [[VAL3:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u16i // CHECK: [[RES3:%.*]] = cir.atomic.fetch(or, {{%.*}} : !cir.ptr, [[VAL3]] : !u16i, seq_cst) fetch_first : !u16i @@ -944,7 +944,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES3:%.*]] = atomicrmw or ptr @us, i16 [[CONV3]] seq_cst, align 2 // LLVM: [[RET3:%.*]] = or i16 [[RES3]], [[CONV3]] // LLVM: store i16 [[RET3]], ptr @us - us = __sync_or_and_fetch (&us, uc); + us = __sync_or_and_fetch(&us, uc); // CHECK: [[VAL4:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s32i // CHECK: [[RES4:%.*]] = cir.atomic.fetch(or, {{%.*}} : !cir.ptr, [[VAL4]] : !s32i, seq_cst) fetch_first : !s32i @@ -954,7 +954,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES4:%.*]] = atomicrmw or ptr @si, i32 [[CONV4]] seq_cst, align 4 // LLVM: [[RET4:%.*]] = or i32 [[RES4]], [[CONV4]] // LLVM: store i32 [[RET4]], ptr @si, align 4 - si = __sync_or_and_fetch (&si, uc); + si = __sync_or_and_fetch(&si, uc); // CHECK: [[VAL5:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u32i // CHECK: [[RES5:%.*]] = cir.atomic.fetch(or, {{%.*}} : !cir.ptr, [[VAL5]] : !u32i, seq_cst) fetch_first : !u32i @@ -964,7 +964,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES5:%.*]] = atomicrmw or ptr @ui, i32 [[CONV5]] seq_cst, align 4 // LLVM: [[RET5:%.*]] = or i32 [[RES5]], [[CONV5]] // LLVM: store i32 [[RET5]], ptr @ui, align 4 - ui = __sync_or_and_fetch (&ui, uc); + ui = __sync_or_and_fetch(&ui, uc); // CHECK: [[VAL6:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s64i // CHECK: [[RES6:%.*]] = cir.atomic.fetch(or, {{%.*}} : !cir.ptr, [[VAL6]] : !s64i, seq_cst) fetch_first : !s64i @@ -974,7 +974,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES6:%.*]] = atomicrmw or ptr @sll, i64 [[CONV6]] seq_cst, align 8 // LLVM: [[RET6:%.*]] = or i64 [[RES6]], [[CONV6]] // LLVM: store i64 [[RET6]], ptr @sll, align 8 - sll = __sync_or_and_fetch (&sll, uc); + sll = __sync_or_and_fetch(&sll, uc); // CHECK: [[VAL7:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u64i // CHECK: [[RES7:%.*]] = cir.atomic.fetch(or, {{%.*}} : !cir.ptr, [[VAL7]] : !u64i, seq_cst) fetch_first : !u64i @@ -984,7 +984,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES7:%.*]] = atomicrmw or ptr @ull, i64 [[CONV7]] seq_cst, align 8 // LLVM: [[RET7:%.*]] = or i64 [[RES7]], [[CONV7]] // LLVM: store i64 [[RET7]], ptr @ull, align 8 - ull = __sync_or_and_fetch (&ull, uc); + ull = __sync_or_and_fetch(&ull, uc); // CHECK: [[VAL0:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s8i // CHECK: [[RES0:%.*]] = cir.atomic.fetch(xor, {{%.*}} : !cir.ptr, [[VAL0]] : !s8i, seq_cst) fetch_first : !s8i @@ -993,7 +993,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES0:%.*]] = atomicrmw xor ptr @sc, i8 [[VAL0]] seq_cst, align 1 // LLVM: [[RET0:%.*]] = xor i8 [[RES0]], [[VAL0]] // LLVM: store i8 [[RET0]], ptr @sc, align 1 - sc = __sync_xor_and_fetch (&sc, uc); + sc = __sync_xor_and_fetch(&sc, uc); // CHECK: [[RES1:%.*]] = cir.atomic.fetch(xor, {{%.*}} : !cir.ptr, [[VAL1:%.*]] : !u8i, seq_cst) fetch_first : !u8i // CHECK: [[RET1:%.*]] = cir.binop(xor, [[RES1]], [[VAL1]]) : !u8i @@ -1001,7 +1001,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES1:%.*]] = atomicrmw xor ptr @uc, i8 [[VAL1]] seq_cst, align 1 // LLVM: [[RET1:%.*]] = xor i8 [[RES1]], [[VAL1]] // LLVM: store i8 [[RET1]], ptr @uc, align 1 - uc = __sync_xor_and_fetch (&uc, uc); + uc = __sync_xor_and_fetch(&uc, uc); // 
CHECK: [[VAL2:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s16i // CHECK: [[RES2:%.*]] = cir.atomic.fetch(xor, {{%.*}} : !cir.ptr, [[VAL2]] : !s16i, seq_cst) fetch_first : !s16i @@ -1011,7 +1011,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES2:%.*]] = atomicrmw xor ptr @ss, i16 [[CONV2]] seq_cst, align 2 // LLVM: [[RET2:%.*]] = xor i16 [[RES2]], [[CONV2]] // LLVM: store i16 [[RET2]], ptr @ss, align 2 - ss = __sync_xor_and_fetch (&ss, uc); + ss = __sync_xor_and_fetch(&ss, uc); // CHECK: [[VAL3:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u16i // CHECK: [[RES3:%.*]] = cir.atomic.fetch(xor, {{%.*}} : !cir.ptr, [[VAL3]] : !u16i, seq_cst) fetch_first : !u16i @@ -1021,7 +1021,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES3:%.*]] = atomicrmw xor ptr @us, i16 [[CONV3]] seq_cst, align 2 // LLVM: [[RET3:%.*]] = xor i16 [[RES3]], [[CONV3]] // LLVM: store i16 [[RET3]], ptr @us - us = __sync_xor_and_fetch (&us, uc); + us = __sync_xor_and_fetch(&us, uc); // CHECK: [[VAL4:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s32i // CHECK: [[RES4:%.*]] = cir.atomic.fetch(xor, {{%.*}} : !cir.ptr, [[VAL4]] : !s32i, seq_cst) fetch_first : !s32i @@ -1031,7 +1031,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES4:%.*]] = atomicrmw xor ptr @si, i32 [[CONV4]] seq_cst, align 4 // LLVM: [[RET4:%.*]] = xor i32 [[RES4]], [[CONV4]] // LLVM: store i32 [[RET4]], ptr @si, align 4 - si = __sync_xor_and_fetch (&si, uc); + si = __sync_xor_and_fetch(&si, uc); // CHECK: [[VAL5:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u32i // CHECK: [[RES5:%.*]] = cir.atomic.fetch(xor, {{%.*}} : !cir.ptr, [[VAL5]] : !u32i, seq_cst) fetch_first : !u32i @@ -1041,7 +1041,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES5:%.*]] = atomicrmw xor ptr @ui, i32 [[CONV5]] seq_cst, align 4 // LLVM: [[RET5:%.*]] = xor i32 [[RES5]], [[CONV5]] // LLVM: store i32 [[RET5]], ptr @ui, align 4 - ui = __sync_xor_and_fetch (&ui, uc); + ui = __sync_xor_and_fetch(&ui, uc); // CHECK: [[VAL6:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s64i // CHECK: [[RES6:%.*]] = cir.atomic.fetch(xor, {{%.*}} : !cir.ptr, [[VAL6]] : !s64i, seq_cst) fetch_first : !s64i @@ -1051,7 +1051,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES6:%.*]] = atomicrmw xor ptr @sll, i64 [[CONV6]] seq_cst, align 8 // LLVM: [[RET6:%.*]] = xor i64 [[RES6]], [[CONV6]] // LLVM: store i64 [[RET6]], ptr @sll, align 8 - sll = __sync_xor_and_fetch (&sll, uc); + sll = __sync_xor_and_fetch(&sll, uc); // CHECK: [[VAL7:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u64i // CHECK: [[RES7:%.*]] = cir.atomic.fetch(xor, {{%.*}} : !cir.ptr, [[VAL7]] : !u64i, seq_cst) fetch_first : !u64i @@ -1061,6 +1061,98 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES7:%.*]] = atomicrmw xor ptr @ull, i64 [[CONV7]] seq_cst, align 8 // LLVM: [[RET7:%.*]] = xor i64 [[RES7]], [[CONV7]] // LLVM: store i64 [[RET7]], ptr @ull, align 8 - ull = __sync_xor_and_fetch (&ull, uc); + ull = __sync_xor_and_fetch(&ull, uc); + // CHECK: [[VAL0:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s8i + // CHECK: [[RES0:%.*]] = cir.atomic.fetch(nand, {{%.*}} : !cir.ptr, [[VAL0]] : !s8i, seq_cst) fetch_first : !s8i + // CHECK: [[INTERM0:%.*]] = cir.binop(and, [[RES0]], [[VAL0]]) : !s8i + // CHECK: [[RET0:%.*]] = cir.unary(not, [[INTERM0]]) : !s8i, !s8i + // LLVM: [[VAL0:%.*]] = load i8, ptr @uc, align 1 + // LLVM: [[RES0:%.*]] = atomicrmw nand ptr @sc, i8 [[VAL0]] seq_cst, align 1 + // LLVM: [[INTERM0:%.*]] = and i8 [[RES0]], [[VAL0]] + // LLVM: [[RET0:%.*]] = xor i8 [[INTERM0]], -1 + // LLVM: 
store i8 [[RET0]], ptr @sc, align 1 + sc = __sync_nand_and_fetch(&sc, uc); + + // CHECK: [[RES1:%.*]] = cir.atomic.fetch(nand, {{%.*}} : !cir.ptr, [[VAL1:%.*]] : !u8i, seq_cst) fetch_first : !u8i + // CHECK: [[INTERM1:%.*]] = cir.binop(and, [[RES1]], [[VAL1]]) : !u8i + // CHECK: [[RET1:%.*]] = cir.unary(not, [[INTERM1]]) : !u8i, !u8i + // LLVM: [[VAL1:%.*]] = load i8, ptr @uc, align 1 + // LLVM: [[RES1:%.*]] = atomicrmw nand ptr @uc, i8 [[VAL1]] seq_cst, align 1 + // LLVM: [[INTERM1:%.*]] = and i8 [[RES1]], [[VAL1]] + // LLVM: [[RET1:%.*]] = xor i8 [[INTERM1]], -1 + // LLVM: store i8 [[RET1]], ptr @uc, align 1 + uc = __sync_nand_and_fetch(&uc, uc); + + // CHECK: [[VAL2:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s16i + // CHECK: [[RES2:%.*]] = cir.atomic.fetch(nand, {{%.*}} : !cir.ptr, [[VAL2]] : !s16i, seq_cst) fetch_first : !s16i + // CHECK: [[INTERM2:%.*]] = cir.binop(and, [[RES2]], [[VAL2]]) : !s16i + // CHECK: [[RET2:%.*]] = cir.unary(not, [[INTERM2]]) : !s16i, !s16i + // LLVM: [[VAL2:%.*]] = load i8, ptr @uc, align 1 + // LLVM: [[CONV2:%.*]] = zext i8 [[VAL2]] to i16 + // LLVM: [[RES2:%.*]] = atomicrmw nand ptr @ss, i16 [[CONV2]] seq_cst, align 2 + // LLVM: [[INTERM2:%.*]] = and i16 [[RES2]], [[CONV2]] + // LLVM: [[RET2:%.*]] = xor i16 [[INTERM2]], -1 + // LLVM: store i16 [[RET2]], ptr @ss, align 2 + ss = __sync_nand_and_fetch(&ss, uc); + + // CHECK: [[VAL3:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u16i + // CHECK: [[RES3:%.*]] = cir.atomic.fetch(nand, {{%.*}} : !cir.ptr, [[VAL3]] : !u16i, seq_cst) fetch_first : !u16i + // CHECK: [[INTERM3:%.*]] = cir.binop(and, [[RES3]], [[VAL3]]) : !u16i + // CHECK: [[RET3:%.*]] = cir.unary(not, [[INTERM3]]) : !u16i, !u16i + // LLVM: [[VAL3:%.*]] = load i8, ptr @uc, align 1 + // LLVM: [[CONV3:%.*]] = zext i8 [[VAL3]] to i16 + // LLVM: [[RES3:%.*]] = atomicrmw nand ptr @us, i16 [[CONV3]] seq_cst, align 2 + // LLVM: [[INTERM3:%.*]] = and i16 [[RES3]], [[CONV3]] + // LLVM: [[RET3:%.*]] = xor i16 [[INTERM3]], -1 + // LLVM: store i16 [[RET3]], ptr @us, align 2 + us = __sync_nand_and_fetch(&us, uc); + + // CHECK: [[VAL4:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s32i + // CHECK: [[RES4:%.*]] = cir.atomic.fetch(nand, {{%.*}} : !cir.ptr, [[VAL4]] : !s32i, seq_cst) fetch_first : !s32i + // CHECK: [[INTERM4:%.*]] = cir.binop(and, [[RES4]], [[VAL4]]) : !s32i + // CHECK: [[RET4:%.*]] = cir.unary(not, [[INTERM4]]) : !s32i, !s32i + // LLVM: [[VAL4:%.*]] = load i8, ptr @uc, align 1 + // LLVM: [[CONV4:%.*]] = zext i8 [[VAL4]] to i32 + // LLVM: [[RES4:%.*]] = atomicrmw nand ptr @si, i32 [[CONV4]] seq_cst, align 4 + // LLVM: [[INTERM4:%.*]] = and i32 [[RES4]], [[CONV4]] + // LLVM: [[RET4:%.*]] = xor i32 [[INTERM4]], -1 + // LLVM: store i32 [[RET4]], ptr @si, align 4 + si = __sync_nand_and_fetch(&si, uc); + + // CHECK: [[VAL5:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u32i + // CHECK: [[RES5:%.*]] = cir.atomic.fetch(nand, {{%.*}} : !cir.ptr, [[VAL5]] : !u32i, seq_cst) fetch_first : !u32i + // CHECK: [[INTERM5:%.*]] = cir.binop(and, [[RES5]], [[VAL5]]) : !u32i + // CHECK: [[RET5:%.*]] = cir.unary(not, [[INTERM5]]) : !u32i, !u32i + // LLVM: [[VAL5:%.*]] = load i8, ptr @uc, align 1 + // LLVM: [[CONV5:%.*]] = zext i8 [[VAL5]] to i32 + // LLVM: [[RES5:%.*]] = atomicrmw nand ptr @ui, i32 [[CONV5]] seq_cst, align 4 + // LLVM: [[INTERM5:%.*]] = and i32 [[RES5]], [[CONV5]] + // LLVM: [[RET5:%.*]] = xor i32 [[INTERM5]], -1 + // LLVM: store i32 [[RET5]], ptr @ui, align 4 + ui = __sync_nand_and_fetch(&ui, uc); + + // CHECK: [[VAL6:%.*]] = cir.cast(integral, {{%.*}} : 
!u8i), !s64i + // CHECK: [[RES6:%.*]] = cir.atomic.fetch(nand, {{%.*}} : !cir.ptr, [[VAL6]] : !s64i, seq_cst) fetch_first : !s64i + // CHECK: [[INTERM6:%.*]] = cir.binop(and, [[RES6]], [[VAL6]]) : !s64i + // CHECK: [[RET6:%.*]] = cir.unary(not, [[INTERM6]]) : !s64i, !s64i + // LLVM: [[VAL6:%.*]] = load i8, ptr @uc, align 1 + // LLVM: [[CONV6:%.*]] = zext i8 [[VAL6]] to i64 + // LLVM: [[RES6:%.*]] = atomicrmw nand ptr @sll, i64 [[CONV6]] seq_cst, align 8 + // LLVM: [[INTERM6:%.*]] = and i64 [[RES6]], [[CONV6]] + // LLVM: [[RET6:%.*]] = xor i64 [[INTERM6]], -1 + // LLVM: store i64 [[RET6]], ptr @sll, align 8 + sll = __sync_nand_and_fetch(&sll, uc); + + // CHECK: [[VAL7:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u64i + // CHECK: [[RES7:%.*]] = cir.atomic.fetch(nand, {{%.*}} : !cir.ptr, [[VAL7]] : !u64i, seq_cst) fetch_first : !u64i + // CHECK: [[INTERM7:%.*]] = cir.binop(and, [[RES7]], [[VAL7]]) : !u64i + // CHECK: [[RET7:%.*]] = cir.unary(not, [[INTERM7]]) : !u64i, !u64i + // LLVM: [[VAL7:%.*]] = load i8, ptr @uc, align 1 + // LLVM: [[CONV7:%.*]] = zext i8 [[VAL7]] to i64 + // LLVM: [[RES7:%.*]] = atomicrmw nand ptr @ull, i64 [[CONV7]] seq_cst, align 8 + // LLVM: [[INTERM7:%.*]] = and i64 [[RES7]], [[CONV7]] + // LLVM: [[RET7:%.*]] = xor i64 [[INTERM7]], -1 + // LLVM: store i64 [[RET7]], ptr @ull, align 8 + ull = __sync_nand_and_fetch(&ull, uc); }
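
Note (reviewer aid, not part of the patch): __sync_nand_and_fetch is the only __sync_*_and_fetch builtin whose result is not simply the binop of the fetched value and the operand; GCC and Clang define it as atomically storing and returning ~(*ptr & value). That is why emitBinaryAtomicPost now takes an invert flag: the post-op value is rebuilt from the fetch_first result with a binop(and) followed by a unary not, matching the CHECK and LLVM lines above. Below is a minimal stand-alone sketch of that semantics; the helper name model_nand_and_fetch is hypothetical and only for illustration.

  // Reference model of the value __sync_nand_and_fetch must return.
  #include <cassert>

  static unsigned char model_nand_and_fetch(unsigned char *p, unsigned char v) {
    unsigned char fetched = *p;                  // what the atomic fetch_first yields
    unsigned char updated =
        static_cast<unsigned char>(~(fetched & v)); // and + not on the fetched value
    *p = updated;                                // store performed by atomicrmw nand
    return updated;                              // post-op value returned to the caller
  }

  int main() {
    unsigned char a = 0xF0, b = 0xF0;
    unsigned char expected = model_nand_and_fetch(&a, 0x3C);
    unsigned char actual = __sync_nand_and_fetch(&b, 0x3C);
    assert(expected == actual && a == b); // both are ~(0xF0 & 0x3C) == 0xCF
    return 0;
  }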