@@ -1534,3 +1534,52 @@ void imag_literal_gnu_extension() {
 // OGCG: %[[C_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[C_ADDR]], i32 0, i32 1
 // OGCG: store i32 0, ptr %[[C_REAL_PTR]], align 4
 // OGCG: store i32 3, ptr %[[C_IMAG_PTR]], align 4
+
+void load_store_volatile() {
+  volatile double _Complex a;
+  volatile double _Complex b;
+  a = b;
+
+  volatile int _Complex c;
+  volatile int _Complex d;
+  c = d;
+}
+
+// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.double>, !cir.ptr<!cir.complex<!cir.double>>, ["a"]
+// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.double>, !cir.ptr<!cir.complex<!cir.double>>, ["b"]
+// CIR: %[[C_ADDR:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["c"]
+// CIR: %[[D_ADDR:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["d"]
+// CIR: %[[TMP_B:.*]] = cir.load volatile {{.*}} %[[B_ADDR]] : !cir.ptr<!cir.complex<!cir.double>>, !cir.complex<!cir.double>
+// CIR: cir.store volatile {{.*}} %[[TMP_B]], %[[A_ADDR]] : !cir.complex<!cir.double>, !cir.ptr<!cir.complex<!cir.double>>
+// CIR: %[[TMP_D:.*]] = cir.load volatile {{.*}} %[[D_ADDR]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
+// CIR: cir.store volatile {{.*}} %[[TMP_D]], %[[C_ADDR]] : !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>
+
+// LLVM: %[[A_ADDR:.*]] = alloca { double, double }, i64 1, align 8
+// LLVM: %[[B_ADDR:.*]] = alloca { double, double }, i64 1, align 8
+// LLVM: %[[C_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4
+// LLVM: %[[D_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4
+// LLVM: %[[TMP_B:.*]] = load volatile { double, double }, ptr %[[B_ADDR]], align 8
+// LLVM: store volatile { double, double } %[[TMP_B]], ptr %[[A_ADDR]], align 8
+// LLVM: %[[TMP_D:.*]] = load volatile { i32, i32 }, ptr %[[D_ADDR]], align 4
+// LLVM: store volatile { i32, i32 } %[[TMP_D]], ptr %[[C_ADDR]], align 4
+
+// OGCG: %[[A_ADDR:.*]] = alloca { double, double }, align 8
+// OGCG: %[[B_ADDR:.*]] = alloca { double, double }, align 8
+// OGCG: %[[C_ADDR:.*]] = alloca { i32, i32 }, align 4
+// OGCG: %[[D_ADDR:.*]] = alloca { i32, i32 }, align 4
+// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { double, double }, ptr %[[B_ADDR]], i32 0, i32 0
+// OGCG: %[[B_REAL:.*]] = load volatile double, ptr %[[B_REAL_PTR]], align 8
+// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { double, double }, ptr %[[B_ADDR]], i32 0, i32 1
+// OGCG: %[[B_IMAG:.*]] = load volatile double, ptr %[[B_IMAG_PTR]], align 8
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { double, double }, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { double, double }, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG: store volatile double %[[B_REAL]], ptr %[[A_REAL_PTR]], align 8
+// OGCG: store volatile double %[[B_IMAG]], ptr %[[A_IMAG_PTR]], align 8
+// OGCG: %[[D_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[D_ADDR]], i32 0, i32 0
+// OGCG: %[[D_REAL:.*]] = load volatile i32, ptr %[[D_REAL_PTR]], align 4
+// OGCG: %[[D_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[D_ADDR]], i32 0, i32 1
+// OGCG: %[[D_IMAG:.*]] = load volatile i32, ptr %[[D_IMAG_PTR]], align 4
+// OGCG: %[[C_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[C_ADDR]], i32 0, i32 0
+// OGCG: %[[C_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[C_ADDR]], i32 0, i32 1
+// OGCG: store volatile i32 %[[D_REAL]], ptr %[[C_REAL_PTR]], align 4
+// OGCG: store volatile i32 %[[D_IMAG]], ptr %[[C_IMAG_PTR]], align 4