@@ -1495,3 +1495,52 @@ void calling_function_that_return_complex() {
 // OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1
 // OGCG: store float %[[RESULT_REAL]], ptr %[[A_REAL_PTR]], align 4
 // OGCG: store float %[[RESULT_IMAG]], ptr %[[A_IMAG_PTR]], align 4
+
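+// Check that assignment between volatile _Complex lvalues is lowered to
+// volatile loads and stores (element-wise in OGCG).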
+void load_store_volatile() {
+  volatile double _Complex a;
+  volatile double _Complex b;
+  a = b;
+
+  volatile int _Complex c;
+  volatile int _Complex d;
+  c = d;
+}
+
+// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.double>, !cir.ptr<!cir.complex<!cir.double>>, ["a"]
+// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.double>, !cir.ptr<!cir.complex<!cir.double>>, ["b"]
+// CIR: %[[C_ADDR:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["c"]
+// CIR: %[[D_ADDR:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["d"]
+// CIR: %[[TMP_B:.*]] = cir.load volatile {{.*}} %[[B_ADDR]] : !cir.ptr<!cir.complex<!cir.double>>, !cir.complex<!cir.double>
+// CIR: cir.store volatile {{.*}} %[[TMP_B]], %[[A_ADDR]] : !cir.complex<!cir.double>, !cir.ptr<!cir.complex<!cir.double>>
+// CIR: %[[TMP_D:.*]] = cir.load volatile {{.*}} %[[D_ADDR]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
+// CIR: cir.store volatile {{.*}} %[[TMP_D]], %[[C_ADDR]] : !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>
+
+// LLVM: %[[A_ADDR:.*]] = alloca { double, double }, i64 1, align 8
+// LLVM: %[[B_ADDR:.*]] = alloca { double, double }, i64 1, align 8
+// LLVM: %[[C_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4
+// LLVM: %[[D_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4
+// LLVM: %[[TMP_B:.*]] = load volatile { double, double }, ptr %[[B_ADDR]], align 8
+// LLVM: store volatile { double, double } %[[TMP_B]], ptr %[[A_ADDR]], align 8
+// LLVM: %[[TMP_D:.*]] = load volatile { i32, i32 }, ptr %[[D_ADDR]], align 4
+// LLVM: store volatile { i32, i32 } %[[TMP_D]], ptr %[[C_ADDR]], align 4
+
+// OGCG: %[[A_ADDR:.*]] = alloca { double, double }, align 8
+// OGCG: %[[B_ADDR:.*]] = alloca { double, double }, align 8
+// OGCG: %[[C_ADDR:.*]] = alloca { i32, i32 }, align 4
+// OGCG: %[[D_ADDR:.*]] = alloca { i32, i32 }, align 4
+// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { double, double }, ptr %[[B_ADDR]], i32 0, i32 0
+// OGCG: %[[B_REAL:.*]] = load volatile double, ptr %[[B_REAL_PTR]], align 8
+// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { double, double }, ptr %[[B_ADDR]], i32 0, i32 1
+// OGCG: %[[B_IMAG:.*]] = load volatile double, ptr %[[B_IMAG_PTR]], align 8
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { double, double }, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { double, double }, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG: store volatile double %[[B_REAL]], ptr %[[A_REAL_PTR]], align 8
+// OGCG: store volatile double %[[B_IMAG]], ptr %[[A_IMAG_PTR]], align 8
+// OGCG: %[[D_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[D_ADDR]], i32 0, i32 0
+// OGCG: %[[D_REAL:.*]] = load volatile i32, ptr %[[D_REAL_PTR]], align 4
+// OGCG: %[[D_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[D_ADDR]], i32 0, i32 1
+// OGCG: %[[D_IMAG:.*]] = load volatile i32, ptr %[[D_IMAG_PTR]], align 4
+// OGCG: %[[C_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[C_ADDR]], i32 0, i32 0
+// OGCG: %[[C_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[C_ADDR]], i32 0, i32 1
+// OGCG: store volatile i32 %[[D_REAL]], ptr %[[C_REAL_PTR]], align 4
+// OGCG: store volatile i32 %[[D_IMAG]], ptr %[[C_IMAG_PTR]], align 4