@@ -1583,3 +1583,97 @@ void load_store_volatile() {
// OGCG: %[[C_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[C_ADDR]], i32 0, i32 1
// OGCG: store volatile i32 %[[D_REAL]], ptr %[[C_REAL_PTR]], align 4
// OGCG: store volatile i32 %[[D_IMAG]], ptr %[[C_IMAG_PTR]], align 4
1586+
1587+
// Exercise volatile-qualified complex lvalues in all four load/store
// combinations; the CHECK lines below verify that the volatile qualifier
// is carried through CIR, the CIR->LLVM lowering, and classic codegen.
void load_store_volatile_2() {
  // Plain load, volatile store (floating-point complex).
  volatile double _Complex av;
  double _Complex a;
  av = a;

  // Volatile load, plain store (floating-point complex).
  double _Complex b;
  volatile double _Complex bv;
  b = bv;

  // Volatile load, plain store (integer complex).
  int _Complex c;
  volatile int _Complex cv;
  c = cv;

  // Plain load, volatile store (integer complex).
  volatile int _Complex dv;
  int _Complex d;
  dv = d;
}

// CIR: %[[AV_ADDR:.*]] = cir.alloca !cir.complex<!cir.double>, !cir.ptr<!cir.complex<!cir.double>>, ["av"]
// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.double>, !cir.ptr<!cir.complex<!cir.double>>, ["a"]
// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.double>, !cir.ptr<!cir.complex<!cir.double>>, ["b"]
// CIR: %[[BV_ADDR:.*]] = cir.alloca !cir.complex<!cir.double>, !cir.ptr<!cir.complex<!cir.double>>, ["bv"]
// CIR: %[[C_ADDR:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["c"]
// CIR: %[[CV_ADDR:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["cv"]
// CIR: %[[DV_ADDR:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["dv"]
// CIR: %[[D_ADDR:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["d"]
// CIR: %[[TMP_A:.*]] = cir.load {{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.double>>, !cir.complex<!cir.double>
// CIR: cir.store volatile {{.*}} %[[TMP_A]], %[[AV_ADDR]] : !cir.complex<!cir.double>, !cir.ptr<!cir.complex<!cir.double>>
// CIR: %[[TMP_BV:.*]] = cir.load volatile {{.*}} %[[BV_ADDR]] : !cir.ptr<!cir.complex<!cir.double>>, !cir.complex<!cir.double>
// CIR: cir.store {{.*}} %[[TMP_BV]], %[[B_ADDR]] : !cir.complex<!cir.double>, !cir.ptr<!cir.complex<!cir.double>>
// CIR: %[[TMP_CV:.*]] = cir.load volatile {{.*}} %[[CV_ADDR]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
// CIR: cir.store {{.*}} %[[TMP_CV]], %[[C_ADDR]] : !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>
// CIR: %[[TMP_D:.*]] = cir.load {{.*}} %[[D_ADDR]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
// CIR: cir.store volatile {{.*}} %[[TMP_D]], %[[DV_ADDR]] : !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>

// LLVM: %[[AV_ADDR:.*]] = alloca { double, double }, i64 1, align 8
// LLVM: %[[A_ADDR:.*]] = alloca { double, double }, i64 1, align 8
// LLVM: %[[B_ADDR:.*]] = alloca { double, double }, i64 1, align 8
// LLVM: %[[BV_ADDR:.*]] = alloca { double, double }, i64 1, align 8
// LLVM: %[[C_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4
// LLVM: %[[CV_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4
// LLVM: %[[DV_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4
// LLVM: %[[D_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4
// LLVM: %[[TMP_A:.*]] = load { double, double }, ptr %[[A_ADDR]], align 8
// LLVM: store volatile { double, double } %[[TMP_A]], ptr %[[AV_ADDR]], align 8
// LLVM: %[[TMP_BV:.*]] = load volatile { double, double }, ptr %[[BV_ADDR]], align 8
// LLVM: store { double, double } %[[TMP_BV]], ptr %[[B_ADDR]], align 8
// LLVM: %[[TMP_CV:.*]] = load volatile { i32, i32 }, ptr %[[CV_ADDR]], align 4
// LLVM: store { i32, i32 } %[[TMP_CV]], ptr %[[C_ADDR]], align 4
// LLVM: %[[TMP_D:.*]] = load { i32, i32 }, ptr %[[D_ADDR]], align 4
// LLVM: store volatile { i32, i32 } %[[TMP_D]], ptr %[[DV_ADDR]], align 4

// OGCG: %[[AV_ADDR:.*]] = alloca { double, double }, align 8
// OGCG: %[[A_ADDR:.*]] = alloca { double, double }, align 8
// OGCG: %[[B_ADDR:.*]] = alloca { double, double }, align 8
// OGCG: %[[BV_ADDR:.*]] = alloca { double, double }, align 8
// OGCG: %[[C_ADDR:.*]] = alloca { i32, i32 }, align 4
// OGCG: %[[CV_ADDR:.*]] = alloca { i32, i32 }, align 4
// OGCG: %[[DV_ADDR:.*]] = alloca { i32, i32 }, align 4
// OGCG: %[[D_ADDR:.*]] = alloca { i32, i32 }, align 4
// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { double, double }, ptr %[[A_ADDR]], i32 0, i32 0
// OGCG: %[[A_REAL:.*]] = load double, ptr %[[A_REAL_PTR]], align 8
// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { double, double }, ptr %[[A_ADDR]], i32 0, i32 1
// OGCG: %[[A_IMAG:.*]] = load double, ptr %[[A_IMAG_PTR]], align 8
// OGCG: %[[AV_REAL_PTR:.*]] = getelementptr inbounds nuw { double, double }, ptr %[[AV_ADDR]], i32 0, i32 0
// OGCG: %[[AV_IMAG_PTR:.*]] = getelementptr inbounds nuw { double, double }, ptr %[[AV_ADDR]], i32 0, i32 1
// OGCG: store volatile double %[[A_REAL]], ptr %[[AV_REAL_PTR]], align 8
// OGCG: store volatile double %[[A_IMAG]], ptr %[[AV_IMAG_PTR]], align 8
// OGCG: %[[BV_REAL_PTR:.*]] = getelementptr inbounds nuw { double, double }, ptr %[[BV_ADDR]], i32 0, i32 0
// OGCG: %[[BV_REAL:.*]] = load volatile double, ptr %[[BV_REAL_PTR]], align 8
// OGCG: %[[BV_IMAG_PTR:.*]] = getelementptr inbounds nuw { double, double }, ptr %[[BV_ADDR]], i32 0, i32 1
// OGCG: %[[BV_IMAG:.*]] = load volatile double, ptr %[[BV_IMAG_PTR]], align 8
// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { double, double }, ptr %[[B_ADDR]], i32 0, i32 0
// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { double, double }, ptr %[[B_ADDR]], i32 0, i32 1
// OGCG: store double %[[BV_REAL]], ptr %[[B_REAL_PTR]], align 8
// OGCG: store double %[[BV_IMAG]], ptr %[[B_IMAG_PTR]], align 8
// OGCG: %[[CV_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[CV_ADDR]], i32 0, i32 0
// OGCG: %[[CV_REAL:.*]] = load volatile i32, ptr %[[CV_REAL_PTR]], align 4
// OGCG: %[[CV_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[CV_ADDR]], i32 0, i32 1
// OGCG: %[[CV_IMAG:.*]] = load volatile i32, ptr %[[CV_IMAG_PTR]], align 4
// OGCG: %[[C_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[C_ADDR]], i32 0, i32 0
// OGCG: %[[C_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[C_ADDR]], i32 0, i32 1
// OGCG: store i32 %[[CV_REAL]], ptr %[[C_REAL_PTR]], align 4
// OGCG: store i32 %[[CV_IMAG]], ptr %[[C_IMAG_PTR]], align 4
// OGCG: %[[D_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[D_ADDR]], i32 0, i32 0
// OGCG: %[[D_REAL:.*]] = load i32, ptr %[[D_REAL_PTR]], align 4
// OGCG: %[[D_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[D_ADDR]], i32 0, i32 1
// OGCG: %[[D_IMAG:.*]] = load i32, ptr %[[D_IMAG_PTR]], align 4
// OGCG: %[[DV_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[DV_ADDR]], i32 0, i32 0
// OGCG: %[[DV_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[DV_ADDR]], i32 0, i32 1
// OGCG: store volatile i32 %[[D_REAL]], ptr %[[DV_REAL_PTR]], align 4
// OGCG: store volatile i32 %[[D_IMAG]], ptr %[[DV_IMAG_PTR]], align 4
0 commit comments