diff --git a/.cargo/config.toml b/.cargo/config.toml
new file mode 100644
index 00000000..b438243a
--- /dev/null
+++ b/.cargo/config.toml
@@ -0,0 +1,5 @@
+[alias]
+testall = ["test", "--all", "--release"]
+
+[target.'cfg(all())']
+rustflags = ["-C", "target-cpu=native"]
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index beebda35..5996f514 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,4 @@
/target
.vscode
/docs/benchmark_graphs/.venv
+minimal_zkVM.synctex.gz
\ No newline at end of file
diff --git a/Cargo.lock b/Cargo.lock
index f00002a1..5a4ccc2a 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -16,7 +16,6 @@ name = "air"
version = "0.1.0"
dependencies = [
"multilinear-toolkit",
- "p3-air",
"p3-koala-bear",
"p3-util",
"rand",
@@ -24,6 +23,14 @@ dependencies = [
"utils",
]
+[[package]]
+name = "air"
+version = "0.3.0"
+source = "git+https://github.com/leanEthereum/multilinear-toolkit.git#eeb6404188e4b4c9207b9e8e8a2120156602d92e"
+dependencies = [
+ "p3-field",
+]
+
[[package]]
name = "ansi_term"
version = "0.12.1"
@@ -69,7 +76,7 @@ version = "1.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "40c48f72fd53cd289104fc64099abca73db4166ad86ea0b4341abe65af83dadc"
dependencies = [
- "windows-sys 0.61.2",
+ "windows-sys",
]
[[package]]
@@ -80,7 +87,7 @@ checksum = "291e6a250ff86cd4a820112fb8898808a366d8f9f58ce16d1f538353ad55747d"
dependencies = [
"anstyle",
"once_cell_polyfill",
- "windows-sys 0.61.2",
+ "windows-sys",
]
[[package]]
@@ -92,15 +99,13 @@ checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8"
[[package]]
name = "backend"
version = "0.3.0"
-source = "git+https://github.com/leanEthereum/multilinear-toolkit.git#62766141561550c3540f9f644085fec53d721f16"
+source = "git+https://github.com/leanEthereum/multilinear-toolkit.git#eeb6404188e4b4c9207b9e8e8a2120156602d92e"
dependencies = [
- "fiat-shamir",
"itertools",
"p3-field",
"p3-util",
"rand",
"rayon",
- "tracing",
]
[[package]]
@@ -129,9 +134,9 @@ checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801"
[[package]]
name = "clap"
-version = "4.5.53"
+version = "4.5.54"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c9e340e012a1bf4935f5282ed1436d1489548e8f72308207ea5df0e23d2d03f8"
+checksum = "c6e6ff9dcd79cff5cd969a17a545d79e84ab086e444102a591e288a8aa3ce394"
dependencies = [
"clap_builder",
"clap_derive",
@@ -139,9 +144,9 @@ dependencies = [
[[package]]
name = "clap_builder"
-version = "4.5.53"
+version = "4.5.54"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d76b5d13eaa18c901fd2f7fca939fefe3a0727a953561fefdf3b2922b8569d00"
+checksum = "fa42cf4d2b7a41bc8f663a7cab4031ebafa1bf3875705bfaf8466dc60ab52c00"
dependencies = [
"anstream",
"anstyle",
@@ -163,9 +168,9 @@ dependencies = [
[[package]]
name = "clap_lex"
-version = "0.7.6"
+version = "0.7.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d"
+checksum = "c3e64b0cc0439b12df2fa678eae89a1c56a529fd067a9115f7827f1fffd22b32"
[[package]]
name = "colorchoice"
@@ -175,32 +180,23 @@ checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75"
[[package]]
name = "colored"
-version = "3.0.0"
+version = "3.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fde0e0ec90c9dfb3b4b1a0891a7dcd0e2bffde2f7efed5fe7c9bb00e5bfb915e"
+checksum = "faf9468729b8cbcea668e36183cb69d317348c2e08e994829fb56ebfdfbaac34"
dependencies = [
- "windows-sys 0.59.0",
+ "windows-sys",
]
[[package]]
name = "constraints-folder"
version = "0.3.0"
-source = "git+https://github.com/leanEthereum/multilinear-toolkit.git#62766141561550c3540f9f644085fec53d721f16"
+source = "git+https://github.com/leanEthereum/multilinear-toolkit.git#eeb6404188e4b4c9207b9e8e8a2120156602d92e"
dependencies = [
- "fiat-shamir",
- "p3-air",
+ "air 0.3.0",
+ "backend",
"p3-field",
]
-[[package]]
-name = "convert_case"
-version = "0.10.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "633458d4ef8c78b72454de2d54fd6ab2e60f9e02be22f3c6104cdc8a4e0fceb9"
-dependencies = [
- "unicode-segmentation",
-]
-
[[package]]
name = "cpufeatures"
version = "0.2.17"
@@ -245,29 +241,6 @@ dependencies = [
"typenum",
]
-[[package]]
-name = "derive_more"
-version = "2.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d751e9e49156b02b44f9c1815bcb94b984cdcc4396ecc32521c739452808b134"
-dependencies = [
- "derive_more-impl",
-]
-
-[[package]]
-name = "derive_more-impl"
-version = "2.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "799a97264921d8623a957f6c3b9011f3b5492f557bbb7a5a19b7fa6d06ba8dcb"
-dependencies = [
- "convert_case",
- "proc-macro2",
- "quote",
- "rustc_version",
- "syn",
- "unicode-xid",
-]
-
[[package]]
name = "digest"
version = "0.10.7"
@@ -293,11 +266,11 @@ checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f"
[[package]]
name = "fiat-shamir"
version = "0.1.0"
-source = "git+https://github.com/leanEthereum/fiat-shamir.git#bcf23c766f2e930acf11e68777449483a55af077"
+source = "git+https://github.com/leanEthereum/fiat-shamir.git#493be07bdd8669a816d28c3befc08bc3e68e590a"
dependencies = [
- "p3-challenger",
"p3-field",
- "p3-koala-bear",
+ "p3-symmetric",
+ "rayon",
"serde",
]
@@ -337,9 +310,9 @@ checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea"
[[package]]
name = "indexmap"
-version = "2.12.1"
+version = "2.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0ad4bb2b565bca0645f4d68c5c9af97fba094e9791da685bf83cb5f3ce74acf2"
+checksum = "7714e70437a7dc3ac8eb7e6f8df75fd8eb422675fc7678aff7364301092b1017"
dependencies = [
"equivalent",
"hashbrown",
@@ -362,9 +335,9 @@ dependencies = [
[[package]]
name = "itoa"
-version = "1.0.16"
+version = "1.0.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7ee5b5339afb4c41626dde77b7a611bd4f2c202b897852b4bcf5d03eddc61010"
+checksum = "92ecc6618181def0457392ccd0ee51198e065e016d1d527a7ac1b6dc7c1f09d2"
[[package]]
name = "keccak"
@@ -385,11 +358,15 @@ checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe"
name = "lean-multisig"
version = "0.1.0"
dependencies = [
+ "air 0.1.0",
"clap",
+ "lean_vm",
"multilinear-toolkit",
"p3-koala-bear",
- "poseidon_circuit",
+ "rand",
"rec_aggregation",
+ "sub_protocols",
+ "utils",
"whir-p3",
"xmss",
]
@@ -398,12 +375,9 @@ dependencies = [
name = "lean_compiler"
version = "0.1.0"
dependencies = [
- "air",
+ "air 0.1.0",
"lean_vm",
- "lookup",
"multilinear-toolkit",
- "p3-air",
- "p3-challenger",
"p3-koala-bear",
"p3-poseidon2",
"p3-symmetric",
@@ -422,21 +396,17 @@ dependencies = [
name = "lean_prover"
version = "0.1.0"
dependencies = [
- "air",
+ "air 0.1.0",
"itertools",
"lean_compiler",
"lean_vm",
- "lookup",
"multilinear-toolkit",
- "p3-air",
- "p3-challenger",
"p3-koala-bear",
"p3-poseidon2",
"p3-symmetric",
"p3-util",
"pest",
"pest_derive",
- "poseidon_circuit",
"rand",
"sub_protocols",
"tracing",
@@ -450,15 +420,11 @@ dependencies = [
name = "lean_vm"
version = "0.1.0"
dependencies = [
- "air",
+ "air 0.1.0",
"colored",
- "derive_more",
"itertools",
- "lookup",
"multilinear-toolkit",
"num_enum",
- "p3-air",
- "p3-challenger",
"p3-koala-bear",
"p3-poseidon2",
"p3-symmetric",
@@ -467,7 +433,6 @@ dependencies = [
"pest_derive",
"rand",
"strum",
- "sub_protocols",
"thiserror",
"tracing",
"utils",
@@ -477,9 +442,9 @@ dependencies = [
[[package]]
name = "libc"
-version = "0.2.178"
+version = "0.2.180"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "37c93d8daa9d8a012fd8ab92f088405fb202ea0b6ab73ee2482ae66af4f42091"
+checksum = "bcc35a38544a891a5f7c865aca548a982ccb3b8650a5b06d0fd33a10283c56fc"
[[package]]
name = "log"
@@ -487,20 +452,6 @@ version = "0.4.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897"
-[[package]]
-name = "lookup"
-version = "0.1.0"
-dependencies = [
- "multilinear-toolkit",
- "p3-challenger",
- "p3-koala-bear",
- "p3-util",
- "rand",
- "tracing",
- "utils",
- "whir-p3",
-]
-
[[package]]
name = "matchers"
version = "0.2.0"
@@ -519,8 +470,9 @@ checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273"
[[package]]
name = "multilinear-toolkit"
version = "0.3.0"
-source = "git+https://github.com/leanEthereum/multilinear-toolkit.git#62766141561550c3540f9f644085fec53d721f16"
+source = "git+https://github.com/leanEthereum/multilinear-toolkit.git#eeb6404188e4b4c9207b9e8e8a2120156602d92e"
dependencies = [
+ "air 0.3.0",
"backend",
"constraints-folder",
"fiat-shamir",
@@ -528,6 +480,7 @@ dependencies = [
"p3-util",
"rayon",
"sumcheck",
+ "tracing",
]
[[package]]
@@ -536,7 +489,7 @@ version = "0.50.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5"
dependencies = [
- "windows-sys 0.61.2",
+ "windows-sys",
]
[[package]]
@@ -601,19 +554,10 @@ version = "1.70.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "384b8ab6d37215f3c5301a95a4accb5d64aa607f1fcb26a11b5303878451b4fe"
-[[package]]
-name = "p3-air"
-version = "0.3.0"
-source = "git+https://github.com/TomWambsgans/Plonky3.git?branch=lean-multisig#1db9df28abd6db586eaa891af2416d94d1b026ae"
-dependencies = [
- "p3-field",
- "p3-matrix",
-]
-
[[package]]
name = "p3-baby-bear"
version = "0.3.0"
-source = "git+https://github.com/TomWambsgans/Plonky3.git?branch=lean-multisig#1db9df28abd6db586eaa891af2416d94d1b026ae"
+source = "git+https://github.com/TomWambsgans/Plonky3.git?branch=lean-multisig#87bad263fb05630a27ba2d72ac1509d4b9fe91b1"
dependencies = [
"p3-field",
"p3-mds",
@@ -626,7 +570,7 @@ dependencies = [
[[package]]
name = "p3-challenger"
version = "0.3.0"
-source = "git+https://github.com/TomWambsgans/Plonky3.git?branch=lean-multisig#1db9df28abd6db586eaa891af2416d94d1b026ae"
+source = "git+https://github.com/TomWambsgans/Plonky3.git?branch=lean-multisig#87bad263fb05630a27ba2d72ac1509d4b9fe91b1"
dependencies = [
"p3-field",
"p3-maybe-rayon",
@@ -638,7 +582,7 @@ dependencies = [
[[package]]
name = "p3-commit"
version = "0.3.0"
-source = "git+https://github.com/TomWambsgans/Plonky3.git?branch=lean-multisig#1db9df28abd6db586eaa891af2416d94d1b026ae"
+source = "git+https://github.com/TomWambsgans/Plonky3.git?branch=lean-multisig#87bad263fb05630a27ba2d72ac1509d4b9fe91b1"
dependencies = [
"itertools",
"p3-challenger",
@@ -652,7 +596,7 @@ dependencies = [
[[package]]
name = "p3-dft"
version = "0.3.0"
-source = "git+https://github.com/TomWambsgans/Plonky3.git?branch=lean-multisig#1db9df28abd6db586eaa891af2416d94d1b026ae"
+source = "git+https://github.com/TomWambsgans/Plonky3.git?branch=lean-multisig#87bad263fb05630a27ba2d72ac1509d4b9fe91b1"
dependencies = [
"itertools",
"p3-field",
@@ -665,7 +609,7 @@ dependencies = [
[[package]]
name = "p3-field"
version = "0.3.0"
-source = "git+https://github.com/TomWambsgans/Plonky3.git?branch=lean-multisig#1db9df28abd6db586eaa891af2416d94d1b026ae"
+source = "git+https://github.com/TomWambsgans/Plonky3.git?branch=lean-multisig#87bad263fb05630a27ba2d72ac1509d4b9fe91b1"
dependencies = [
"itertools",
"num-bigint",
@@ -680,7 +624,7 @@ dependencies = [
[[package]]
name = "p3-interpolation"
version = "0.3.0"
-source = "git+https://github.com/TomWambsgans/Plonky3.git?branch=lean-multisig#1db9df28abd6db586eaa891af2416d94d1b026ae"
+source = "git+https://github.com/TomWambsgans/Plonky3.git?branch=lean-multisig#87bad263fb05630a27ba2d72ac1509d4b9fe91b1"
dependencies = [
"p3-field",
"p3-matrix",
@@ -691,7 +635,7 @@ dependencies = [
[[package]]
name = "p3-koala-bear"
version = "0.3.0"
-source = "git+https://github.com/TomWambsgans/Plonky3.git?branch=lean-multisig#1db9df28abd6db586eaa891af2416d94d1b026ae"
+source = "git+https://github.com/TomWambsgans/Plonky3.git?branch=lean-multisig#87bad263fb05630a27ba2d72ac1509d4b9fe91b1"
dependencies = [
"itertools",
"num-bigint",
@@ -707,7 +651,7 @@ dependencies = [
[[package]]
name = "p3-matrix"
version = "0.3.0"
-source = "git+https://github.com/TomWambsgans/Plonky3.git?branch=lean-multisig#1db9df28abd6db586eaa891af2416d94d1b026ae"
+source = "git+https://github.com/TomWambsgans/Plonky3.git?branch=lean-multisig#87bad263fb05630a27ba2d72ac1509d4b9fe91b1"
dependencies = [
"itertools",
"p3-field",
@@ -722,7 +666,7 @@ dependencies = [
[[package]]
name = "p3-maybe-rayon"
version = "0.3.0"
-source = "git+https://github.com/TomWambsgans/Plonky3.git?branch=lean-multisig#1db9df28abd6db586eaa891af2416d94d1b026ae"
+source = "git+https://github.com/TomWambsgans/Plonky3.git?branch=lean-multisig#87bad263fb05630a27ba2d72ac1509d4b9fe91b1"
dependencies = [
"rayon",
]
@@ -730,7 +674,7 @@ dependencies = [
[[package]]
name = "p3-mds"
version = "0.3.0"
-source = "git+https://github.com/TomWambsgans/Plonky3.git?branch=lean-multisig#1db9df28abd6db586eaa891af2416d94d1b026ae"
+source = "git+https://github.com/TomWambsgans/Plonky3.git?branch=lean-multisig#87bad263fb05630a27ba2d72ac1509d4b9fe91b1"
dependencies = [
"p3-dft",
"p3-field",
@@ -742,7 +686,7 @@ dependencies = [
[[package]]
name = "p3-merkle-tree"
version = "0.3.0"
-source = "git+https://github.com/TomWambsgans/Plonky3.git?branch=lean-multisig#1db9df28abd6db586eaa891af2416d94d1b026ae"
+source = "git+https://github.com/TomWambsgans/Plonky3.git?branch=lean-multisig#87bad263fb05630a27ba2d72ac1509d4b9fe91b1"
dependencies = [
"itertools",
"p3-commit",
@@ -759,7 +703,7 @@ dependencies = [
[[package]]
name = "p3-monty-31"
version = "0.3.0"
-source = "git+https://github.com/TomWambsgans/Plonky3.git?branch=lean-multisig#1db9df28abd6db586eaa891af2416d94d1b026ae"
+source = "git+https://github.com/TomWambsgans/Plonky3.git?branch=lean-multisig#87bad263fb05630a27ba2d72ac1509d4b9fe91b1"
dependencies = [
"itertools",
"num-bigint",
@@ -781,7 +725,7 @@ dependencies = [
[[package]]
name = "p3-poseidon2"
version = "0.3.0"
-source = "git+https://github.com/TomWambsgans/Plonky3.git?branch=lean-multisig#1db9df28abd6db586eaa891af2416d94d1b026ae"
+source = "git+https://github.com/TomWambsgans/Plonky3.git?branch=lean-multisig#87bad263fb05630a27ba2d72ac1509d4b9fe91b1"
dependencies = [
"p3-field",
"p3-mds",
@@ -793,7 +737,7 @@ dependencies = [
[[package]]
name = "p3-symmetric"
version = "0.3.0"
-source = "git+https://github.com/TomWambsgans/Plonky3.git?branch=lean-multisig#1db9df28abd6db586eaa891af2416d94d1b026ae"
+source = "git+https://github.com/TomWambsgans/Plonky3.git?branch=lean-multisig#87bad263fb05630a27ba2d72ac1509d4b9fe91b1"
dependencies = [
"itertools",
"p3-field",
@@ -803,7 +747,7 @@ dependencies = [
[[package]]
name = "p3-util"
version = "0.3.0"
-source = "git+https://github.com/TomWambsgans/Plonky3.git?branch=lean-multisig#1db9df28abd6db586eaa891af2416d94d1b026ae"
+source = "git+https://github.com/TomWambsgans/Plonky3.git?branch=lean-multisig#87bad263fb05630a27ba2d72ac1509d4b9fe91b1"
dependencies = [
"rayon",
"serde",
@@ -817,9 +761,9 @@ checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a"
[[package]]
name = "pest"
-version = "2.8.4"
+version = "2.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cbcfd20a6d4eeba40179f05735784ad32bdaef05ce8e8af05f180d45bb3e7e22"
+checksum = "2c9eb05c21a464ea704b53158d358a31e6425db2f63a1a7312268b05fe2b75f7"
dependencies = [
"memchr",
"ucd-trie",
@@ -827,9 +771,9 @@ dependencies = [
[[package]]
name = "pest_derive"
-version = "2.8.4"
+version = "2.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "51f72981ade67b1ca6adc26ec221be9f463f2b5839c7508998daa17c23d94d7f"
+checksum = "68f9dbced329c441fa79d80472764b1a2c7e57123553b8519b36663a2fb234ed"
dependencies = [
"pest",
"pest_generator",
@@ -837,9 +781,9 @@ dependencies = [
[[package]]
name = "pest_generator"
-version = "2.8.4"
+version = "2.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dee9efd8cdb50d719a80088b76f81aec7c41ed6d522ee750178f83883d271625"
+checksum = "3bb96d5051a78f44f43c8f712d8e810adb0ebf923fc9ed2655a7f66f63ba8ee5"
dependencies = [
"pest",
"pest_meta",
@@ -850,9 +794,9 @@ dependencies = [
[[package]]
name = "pest_meta"
-version = "2.8.4"
+version = "2.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bf1d70880e76bdc13ba52eafa6239ce793d85c8e43896507e43dd8984ff05b82"
+checksum = "602113b5b5e8621770cfd490cfd90b9f84ab29bd2b0e49ad83eb6d186cef2365"
dependencies = [
"pest",
"sha2",
@@ -864,21 +808,6 @@ version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b"
-[[package]]
-name = "poseidon_circuit"
-version = "0.1.0"
-dependencies = [
- "multilinear-toolkit",
- "p3-koala-bear",
- "p3-monty-31",
- "p3-poseidon2",
- "rand",
- "sub_protocols",
- "tracing",
- "utils",
- "whir-p3",
-]
-
[[package]]
name = "ppv-lite86"
version = "0.2.21"
@@ -899,18 +828,18 @@ dependencies = [
[[package]]
name = "proc-macro2"
-version = "1.0.103"
+version = "1.0.106"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5ee95bc4ef87b8d5ba32e8b7714ccc834865276eab0aed5c9958d00ec45f49e8"
+checksum = "8fd00f0bb2e90d81d1044c2b32617f68fcb9fa3bb7640c23e9c748e53fb30934"
dependencies = [
"unicode-ident",
]
[[package]]
name = "quote"
-version = "1.0.42"
+version = "1.0.44"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a338cc41d27e6cc6dce6cefc13a0729dfbb81c262b1f519331575dd80ef3067f"
+checksum = "21b2ebcf727b7760c461f091f9f0f539b77b8e87f2fd88131e7f1b433b3cece4"
dependencies = [
"proc-macro2",
]
@@ -943,9 +872,9 @@ dependencies = [
[[package]]
name = "rand_core"
-version = "0.9.3"
+version = "0.9.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38"
+checksum = "76afc826de14238e6e8c374ddcc1fa19e374fd8dd986b0d2af0d02377261d83c"
dependencies = [
"getrandom",
]
@@ -974,15 +903,12 @@ dependencies = [
name = "rec_aggregation"
version = "0.1.0"
dependencies = [
- "air",
+ "air 0.1.0",
"bincode",
"lean_compiler",
"lean_prover",
"lean_vm",
- "lookup",
"multilinear-toolkit",
- "p3-air",
- "p3-challenger",
"p3-koala-bear",
"p3-poseidon2",
"p3-symmetric",
@@ -1014,27 +940,12 @@ version = "0.8.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58"
-[[package]]
-name = "rustc_version"
-version = "0.4.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92"
-dependencies = [
- "semver",
-]
-
[[package]]
name = "rustversion"
version = "1.0.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d"
-[[package]]
-name = "semver"
-version = "1.0.27"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d767eb0aabc880b29956c35734170f26ed551a859dbd361d140cdbeca61ab1e2"
-
[[package]]
name = "serde"
version = "1.0.228"
@@ -1067,9 +978,9 @@ dependencies = [
[[package]]
name = "serde_json"
-version = "1.0.147"
+version = "1.0.149"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6af14725505314343e673e9ecb7cd7e8a36aa9791eb936235a3567cc31447ae4"
+checksum = "83fc039473c5595ace860d8c4fafa220ff474b3fc6bfdb4293327f1a37e94d86"
dependencies = [
"itoa",
"memchr",
@@ -1151,8 +1062,7 @@ dependencies = [
name = "sub_protocols"
version = "0.1.0"
dependencies = [
- "derive_more",
- "lookup",
+ "lean_vm",
"multilinear-toolkit",
"p3-koala-bear",
"p3-util",
@@ -1165,12 +1075,12 @@ dependencies = [
[[package]]
name = "sumcheck"
version = "0.3.0"
-source = "git+https://github.com/leanEthereum/multilinear-toolkit.git#62766141561550c3540f9f644085fec53d721f16"
+source = "git+https://github.com/leanEthereum/multilinear-toolkit.git#eeb6404188e4b4c9207b9e8e8a2120156602d92e"
dependencies = [
+ "air 0.3.0",
"backend",
"constraints-folder",
"fiat-shamir",
- "p3-air",
"p3-field",
"p3-util",
"rayon",
@@ -1178,9 +1088,9 @@ dependencies = [
[[package]]
name = "syn"
-version = "2.0.111"
+version = "2.0.114"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "390cc9a294ab71bdb1aa2e99d13be9c753cd2d7bd6560c77118597410c4d2e87"
+checksum = "d4d107df263a3013ef9b1879b0df87d706ff80f65a86ea879bd9c31f9b307c2a"
dependencies = [
"proc-macro2",
"quote",
@@ -1189,18 +1099,18 @@ dependencies = [
[[package]]
name = "thiserror"
-version = "2.0.17"
+version = "2.0.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f63587ca0f12b72a0600bcba1d40081f830876000bb46dd2337a3051618f4fc8"
+checksum = "4288b5bcbc7920c07a1149a35cf9590a2aa808e0bc1eafaade0b80947865fbc4"
dependencies = [
"thiserror-impl",
]
[[package]]
name = "thiserror-impl"
-version = "2.0.17"
+version = "2.0.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913"
+checksum = "ebc4ee7f67670e9b64d05fa4253e753e016c6c95ff35b89b7941d6b856dec1d5"
dependencies = [
"proc-macro2",
"quote",
@@ -1280,9 +1190,9 @@ dependencies = [
[[package]]
name = "tracing-forest"
-version = "0.3.0"
+version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "92bdb3c949c9e81b71f78ba782f956b896019d82cc2f31025d21e04adab4d695"
+checksum = "f09cb459317a3811f76644334473239d696cd8efc606963ae7d1c308cead3b74"
dependencies = [
"ansi_term",
"smallvec",
@@ -1348,18 +1258,6 @@ version = "1.0.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5"
-[[package]]
-name = "unicode-segmentation"
-version = "1.12.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493"
-
-[[package]]
-name = "unicode-xid"
-version = "0.2.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853"
-
[[package]]
name = "utf8parse"
version = "0.2.2"
@@ -1371,8 +1269,6 @@ name = "utils"
version = "0.1.0"
dependencies = [
"multilinear-toolkit",
- "p3-air",
- "p3-challenger",
"p3-koala-bear",
"p3-poseidon2",
"p3-symmetric",
@@ -1397,9 +1293,9 @@ checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a"
[[package]]
name = "wasip2"
-version = "1.0.1+wasi-0.2.4"
+version = "1.0.2+wasi-0.2.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0562428422c63773dad2c345a1882263bbf4d65cf3f42e90921f787ef5ad58e7"
+checksum = "9517f9239f02c069db75e65f174b3da828fe5f5b945c4dd26bd25d89c03ebcf5"
dependencies = [
"wit-bindgen",
]
@@ -1407,12 +1303,11 @@ dependencies = [
[[package]]
name = "whir-p3"
version = "0.1.0"
-source = "git+https://github.com/TomWambsgans/whir-p3?branch=lean-multisig#04fb1c1f2e3bbd14e6e4aee32621656eb3f3949f"
+source = "git+https://github.com/TomWambsgans/whir-p3?branch=lean-multisig#bc7bf99c224d63582945f065f555b9824d843d3c"
dependencies = [
"itertools",
"multilinear-toolkit",
"p3-baby-bear",
- "p3-challenger",
"p3-commit",
"p3-dft",
"p3-field",
@@ -1425,7 +1320,6 @@ dependencies = [
"p3-util",
"rand",
"rayon",
- "thiserror",
"tracing",
"tracing-forest",
"tracing-subscriber",
@@ -1459,15 +1353,6 @@ version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5"
-[[package]]
-name = "windows-sys"
-version = "0.59.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b"
-dependencies = [
- "windows-targets",
-]
-
[[package]]
name = "windows-sys"
version = "0.61.2"
@@ -1477,70 +1362,6 @@ dependencies = [
"windows-link",
]
-[[package]]
-name = "windows-targets"
-version = "0.52.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973"
-dependencies = [
- "windows_aarch64_gnullvm",
- "windows_aarch64_msvc",
- "windows_i686_gnu",
- "windows_i686_gnullvm",
- "windows_i686_msvc",
- "windows_x86_64_gnu",
- "windows_x86_64_gnullvm",
- "windows_x86_64_msvc",
-]
-
-[[package]]
-name = "windows_aarch64_gnullvm"
-version = "0.52.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3"
-
-[[package]]
-name = "windows_aarch64_msvc"
-version = "0.52.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469"
-
-[[package]]
-name = "windows_i686_gnu"
-version = "0.52.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b"
-
-[[package]]
-name = "windows_i686_gnullvm"
-version = "0.52.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66"
-
-[[package]]
-name = "windows_i686_msvc"
-version = "0.52.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66"
-
-[[package]]
-name = "windows_x86_64_gnu"
-version = "0.52.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78"
-
-[[package]]
-name = "windows_x86_64_gnullvm"
-version = "0.52.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d"
-
-[[package]]
-name = "windows_x86_64_msvc"
-version = "0.52.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"
-
[[package]]
name = "winnow"
version = "0.7.14"
@@ -1552,22 +1373,18 @@ dependencies = [
[[package]]
name = "wit-bindgen"
-version = "0.46.0"
+version = "0.51.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59"
+checksum = "d7249219f66ced02969388cf2bb044a09756a083d0fab1e566056b04d9fbcaa5"
[[package]]
name = "witness_generation"
version = "0.1.0"
dependencies = [
- "air",
- "derive_more",
+ "air 0.1.0",
"lean_compiler",
"lean_vm",
- "lookup",
"multilinear-toolkit",
- "p3-air",
- "p3-challenger",
"p3-koala-bear",
"p3-monty-31",
"p3-poseidon2",
@@ -1575,7 +1392,6 @@ dependencies = [
"p3-util",
"pest",
"pest_derive",
- "poseidon_circuit",
"rand",
"sub_protocols",
"tracing",
@@ -1598,18 +1414,18 @@ dependencies = [
[[package]]
name = "zerocopy"
-version = "0.8.31"
+version = "0.8.33"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fd74ec98b9250adb3ca554bdde269adf631549f51d8a8f8f0a10b50f1cb298c3"
+checksum = "668f5168d10b9ee831de31933dc111a459c97ec93225beb307aed970d1372dfd"
dependencies = [
"zerocopy-derive",
]
[[package]]
name = "zerocopy-derive"
-version = "0.8.31"
+version = "0.8.33"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d8a8d209fdf45cf5138cbb5a506f6b52522a25afccc534d1475dad8e31105c6a"
+checksum = "2c7962b26b0a8685668b671ee4b54d007a67d4eaf05fda79ac0ecf41e32270f1"
dependencies = [
"proc-macro2",
"quote",
@@ -1618,6 +1434,6 @@ dependencies = [
[[package]]
name = "zmij"
-version = "0.1.9"
+version = "1.0.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d0095ecd462946aa3927d9297b63ef82fb9a5316d7a37d134eeb36e58228615a"
+checksum = "dfcd145825aace48cff44a8844de64bf75feec3080e0aa5cdbde72961ae51a65"
diff --git a/Cargo.toml b/Cargo.toml
index 15369a4b..bc37a467 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -48,19 +48,16 @@ utils = { path = "crates/utils" }
lean_vm = { path = "crates/lean_vm" }
xmss = { path = "crates/xmss" }
sub_protocols = { path = "crates/sub_protocols" }
-lookup = { path = "crates/lookup" }
lean_compiler = { path = "crates/lean_compiler" }
lean_prover = { path = "crates/lean_prover" }
rec_aggregation = { path = "crates/rec_aggregation" }
witness_generation = { path = "crates/lean_prover/witness_generation" }
-poseidon_circuit = { path = "crates/poseidon_circuit" }
# External
thiserror = "2.0"
clap = { version = "4.5.52", features = ["derive"] }
rand = "0.9.2"
sha3 = "0.10.8"
-derive_more = { version = "2.0.1", features = ["full"] }
pest = "2.7"
pest_derive = "2.7"
itertools = "0.14.0"
@@ -77,47 +74,24 @@ p3-koala-bear = { git = "https://github.com/TomWambsgans/Plonky3.git", branch =
p3-baby-bear = { git = "https://github.com/TomWambsgans/Plonky3.git", branch = "lean-multisig" }
p3-poseidon2 = { git = "https://github.com/TomWambsgans/Plonky3.git", branch = "lean-multisig" }
p3-symmetric = { git = "https://github.com/TomWambsgans/Plonky3.git", branch = "lean-multisig" }
-p3-air = { git = "https://github.com/TomWambsgans/Plonky3.git", branch = "lean-multisig" }
p3-goldilocks = { git = "https://github.com/TomWambsgans/Plonky3.git", branch = "lean-multisig" }
-p3-challenger = { git = "https://github.com/TomWambsgans/Plonky3.git", branch = "lean-multisig" }
p3-util = { git = "https://github.com/TomWambsgans/Plonky3.git", branch = "lean-multisig" }
p3-monty-31 = { git = "https://github.com/TomWambsgans/Plonky3.git", branch = "lean-multisig" }
whir-p3 = { git = "https://github.com/TomWambsgans/whir-p3", branch = "lean-multisig" }
multilinear-toolkit = { git = "https://github.com/leanEthereum/multilinear-toolkit.git" }
-
[dependencies]
clap.workspace = true
rec_aggregation.workspace = true
xmss.workspace = true
-poseidon_circuit.workspace = true
+air.workspace = true
+rand.workspace = true
+sub_protocols.workspace = true
+utils.workspace = true
p3-koala-bear.workspace = true
+lean_vm.workspace = true
multilinear-toolkit.workspace = true
whir-p3.workspace = true
-# [patch."https://github.com/TomWambsgans/Plonky3.git"]
-# p3-koala-bear = { path = "../Plonky3/koala-bear" }
-# p3-field = { path = "../Plonky3/field" }
-# p3-poseidon2 = { path = "../Plonky3/poseidon2" }
-# p3-symmetric = { path = "../Plonky3/symmetric" }
-# p3-air = { path = "../Plonky3/air" }
-# p3-merkle-tree = { path = "../Plonky3/merkle-tree" }
-# p3-commit = { path = "../Plonky3/commit" }
-# p3-matrix = { path = "../Plonky3/matrix" }
-# p3-dft = { path = "../Plonky3/dft" }
-# p3-challenger = { path = "../Plonky3/challenger" }
-# p3-monty-31 = { path = "../Plonky3/monty-31" }
-# p3-maybe-rayon = { path = "../Plonky3/maybe-rayon" }
-# p3-util = { path = "../Plonky3/util" }
-
-# [patch."https://github.com/TomWambsgans/whir-p3.git"]
-# whir-p3 = { path = "../whir-p3" }
-
-# [patch."https://github.com/leanEthereum/multilinear-toolkit.git"]
-# multilinear-toolkit = { path = "../multilinear-toolkit" }
-
-# [profile.release]
-# opt-level = 1
-
[profile.release]
lto = "thin"
diff --git a/README.md b/README.md
index b317b721..2e171dcd 100644
--- a/README.md
+++ b/README.md
@@ -1,95 +1,53 @@
-
♦ leanMultisig ♦
+leanMultisig
-XMSS + minimal [zkVM](minimal_zkVM.pdf) = lightweight PQ signatures, with unbounded aggregation
+
+
+
-## Status
+Minimal hash-based zkVM, targeting recursion and aggregation of hash-based signatures, for a Post-Quantum Ethereum.
-- branch [main](https://github.com/leanEthereum/leanMultisig): optimized for **prover efficiency**
-- branch [lean-vm-simple](https://github.com/leanEthereum/leanMultisig/tree/lean-vm-simple): optimized for **simplicity**
-
-Both versions will eventually merge into one.
+Documentation: [PDF](minimal_zkVM.pdf)
## Proving System
-- [WHIR](https://eprint.iacr.org/2024/1586.pdf)
-- [SuperSpartan](https://eprint.iacr.org/2023/552.pdf), with AIR-specific optimizations developed by W. Borgeaud in [A simple multivariate AIR argument inspired by SuperSpartan](https://solvable.group/posts/super-air/#fnref:1)
-- [Univariate Skip](https://eprint.iacr.org/2024/108.pdf)
-- [Logup*](https://eprint.iacr.org/2025/946.pdf)
-- ...
+- multilinear with [WHIR](https://eprint.iacr.org/2024/1586.pdf)
+- [SuperSpartan](https://eprint.iacr.org/2023/552.pdf), with [AIR-specific optimizations](https://solvable.group/posts/super-air/#fnref:1)
+- [Logup](https://eprint.iacr.org/2023/1284.pdf) / [Logup*](https://eprint.iacr.org/2025/946.pdf)
The VM design is inspired by the famous [Cairo paper](https://eprint.iacr.org/2021/1063.pdf).
-## Benchmarks
-
-Benchmarks are performed on 2 laptops:
-- i9-12900H, 32 gb of RAM
-- mac m4 max
-
-target ≈ 128 bits of security, currently using conjecture: 4.12 of [WHIR](https://eprint.iacr.org/2024/1586.pdf), "up to capacity" (TODO: provable security)
-
-### Poseidon2
-
-Poseidon2 over 16 KoalaBear field elements.
-
-```console
-RUSTFLAGS='-C target-cpu=native' cargo run --release -- poseidon --log-n-perms 20
-```
-
-
-
-### Recursion
-
-The full recursion program is not finished yet. Instead, we prove validity of a WHIR opening, with 25 variables, and rate = 1/4.
+### Security
-- 1-to-1: Recursive proof of a single WHIR opening
-- n-to-1: Recursive proof of many WHIR openings (≈ 8) (we report prover time per WHIR)
+123 bits of security. Johnson bound + degree 5 extension of koala-bear -> **no proximity gaps conjecture**. (TODO 128 bits, which requires hash digests bigger than 8 koala-bears).
-```console
-RUSTFLAGS='-C target-cpu=native' cargo run --release -- recursion --count 8
-```
-
-
-
-
-### XMSS aggregation
-
-```console
-RUSTFLAGS='-C target-cpu=native' cargo run --release -- xmss --n-signatures 1775
-```
-
-[Trivial encoding](docs/XMSS_trivial_encoding.pdf) (for now).
-
-
-
-
-
+## Benchmarks
-### Fibonacci:
+Machine: M4 Max 48GB (CPU only)
-n = 2,000,000
+| Benchmark | Current | Target |
+| -------------------------- | -------------------- | --------------- |
+| Poseidon2 (16 koala-bears) | `560K Poseidon2 / s` | n/a |
+| 2 -> 1 Recursion | `1.35 s` | `0.25 s ` |
+| XMSS aggregation | `554 XMSS / s` | `1000 XMSS / s` |
-```
-FIB_N=2000000 RUSTFLAGS='-C target-cpu=native' cargo test --release --package lean_prover --test test_zkvm -- --nocapture -- test_prove_fibonacci --exact --nocapture
-```
+*Expect incoming perf improvements.*
-Proving time:
+To reproduce:
+- `cargo run --release -- poseidon --log-n-perms 20`
+- `cargo run --release -- recursion --n 2`
+- `cargo run --release -- xmss --n-signatures 1350`
-- i9-12900H: 2.0 s (1.0 MHz)
-- mac m4 max: 1.2 s (1.7 MHz)
+(Small detail remaining in recursion: final (multilinear) evaluation of the guest program bytecode; there are multiple ways of handling it... TBD soon)
### Proof size
-With conjecture "up to capacity", and rate = 1/2, current proofs are about ≈ 400 - 500 KiB. On the [lean-vm-simple](https://github.com/leanEthereum/leanMultisig/tree/lean-vm-simple) branch, proofs are ≈ 300 KiB. This part has not (at all) been optimized (no Merkle pruning...): big gains are expected.
-
-Target:
-- 256 KiB for fast proof (rate = 1/2)
-- close to 128 KiB for slower proofs (rate = 1/4 or 1/8)
+WHIR initial rate = 1/4. Proof size ≈ 380 KiB. TODO: Merkle pruning + WHIR batch opening -> 256 KiB. (To go below 256 KiB -> rate 1/8 or 1/16 in the final recursion).
## Credits
-- [Plonky3](https://github.com/Plonky3/Plonky3) for its various performant crates (Finite fields, poseidon2 AIR etc)
+- [Plonky3](https://github.com/Plonky3/Plonky3) for its various performant crates
- [whir-p3](https://github.com/tcoratger/whir-p3): a Plonky3-compatible WHIR implementation
- [Whirlaway](https://github.com/TomWambsgans/Whirlaway): Multilinear snark for AIR + minimal zkVM
diff --git a/TODO.md b/TODO.md
index 715e4e3a..83747478 100644
--- a/TODO.md
+++ b/TODO.md
@@ -2,40 +2,24 @@
## Perf
-- WHIR univariate skip?
-- Opti recursion bytecode
-- packing (SIMD) everywhere
-- one can "move out" the variable of the eq(.) polynomials out of the sumcheck computation in WHIR (as done in the PIOP)
-- Structured AIR: often no all the columns use both up/down -> only handle the used ones to speed up the PIOP zerocheck
-- Use Univariate Skip to commit to tables with k.2^n rows (k small)
-- opti logup* GKR when the indexes are not a power of 2 (which is the case in the execution table)
-- incremental merkle paths in whir-p3
-- Avoid embedding overhead on the flag, len, and index columns in the AIR table for dot products
-- Lev's trick to skip some low-level modular reduction
-- Sumcheck, case z = 0, no need to fold, only keep first half of the values (done in PR 33 by Lambda)
-- Custom AVX2 / AVX512 / Neon implem in Plonky3 for all of the finite field operations (done for degree 4 extension, but not degree 5)
-- Many times, we evaluate different multilinear polynomials (different columns of the same table etc) at a common point. OPTI = compute the eq(.) once, and then dot_product with everything
-- To commit to multiple AIR table using 1 single pcs, the most general form our "packed pcs" api should accept is:
- a list of n (n not a power of 2) columns, each ending with m repeated values (in this manner we can reduce proof size when they are a lot of columns (poseidons ...))
-- in the runner of leanISA program, if we call 2 times the same function with the same arguments, we can reuse the same memory frame
+- 128 bits security
+- Merkle pruning
- the interpreter of leanISA (+ witness generation) can be partially parallelized when there are some independent loops
-- (1 - x).r1 + x.r2 = x.(r2 - r1) + r1 TODO this opti is not everywhere currently + TODO generalize this with the univariate skip
-- opti compute_eval_eq when scalar = ONE
-- Dmitry's range check, bonus: we can spare 2 memory cells if the value being range check is small (using the zeros present by conventio on the public memory)
- Make everything "padding aware" (including WHIR, logup*, AIR, etc)
- Opti WHIR: in sumcheck we know more than f(0) + f(1), we know f(0) and f(1)
-- Opti WHIR https://github.com/tcoratger/whir-p3/issues/303 and https://github.com/tcoratger/whir-p3/issues/306
-- Avoid committing to the 3 index columns, and replace it by a sumcheck? Using this idea, we would only commit to PC and FP for the execution table. Idea by Georg (Powdr). Do we even need to commit to FP then?
-- Avoid the embedding overhead in logup, when denominators = "c - index", as it was previously done
-- SIMD (Packing) for PoW grinding in Fiat-Shamir (has been implemented in the lean-vm-simple branch by [x-senpai-x](https://github.com/x-senpai-x), see [here](https://github.com/leanEthereum/fiat-shamir/blob/d80da40a76c00aaa6d35fe5e51c3bf31eaf8fe17/src/prover.rs#L98))
-
+- Opti WHIR https://github.com/tcoratger/whir-p3/issues/303 and https://github.com/tcoratger/whir-p3/issues/306 ?
+- Avoid the embedding overhead in logup, when denominators = "c - index"
+- Proof size: replace all equality checks in the verifier algo by value deduction
+- Poseidon in 'Compression' mode everywhere (except in 'Sponge' mode? cf. eprint 2014/223)
+- XMSS: move from toy implem (useful for benchmark) to a secure implem
+- Recursion: Remove the few hardcoded constants that depend on the guest execution (cycles etc)
- About the ordering of the variables in sumchecks, currently we do as follows:
[a, b, c, d, e, f, g, h] (1st round of sumcheck)
[(a-r).a + r.e, (1-r).b + r.f, (1-r).c + r.g, (1-r).d + r.h] (2nd round of sumcheck)
... etc
-This is otpimal for packing (SIMD) but not optimal when to comes to padding.
+This is optimal for packing (SIMD) but not optimal when it comes to padding.
When there are a lot of "ending" zeros, the optimal way of folding is:
[a, b, c, d, e, 0, 0, 0] (1st round of sumcheck)
@@ -48,92 +32,15 @@ But we can get the bost of both worlds (suggested by Lev, TODO implement):
[(1-r).a + r.c, (1-r).b + r.d, (1-r).e + r.g, (1-r).f + r.h, (1-r).i, 0, 0, 0] (2nd round of sumcheck)
... etc
-About "the packed pcs" (similar to SP1 Jagged PCS, slightly less efficient, but simpler (no sumchecks)):
-- The best strategy is probably to pack as much as possible (the cost increasing the density = additional inner evaluations), if we can fit below a power of 2 - epsilon (epsilon = 20% for instance, tbd), if the sum of the non zero data is just above a power of 2, no packed technique, even the best, can help us, so we should spread aniway (to reduce the pressure of inner evaluations)
-- About those inner evaluations, there is a trick: we need to compute M1(a, b, c, d, ...) then M2(b, c, d, ...), then M3(c, d, ...) -> The trick = compute the "eq(.) for (b, c, d), then dot product with M3. Then expand to eq(b, c, d, ...), dot product with M2. Then expand to eq(a, b, c, d, ...), dot product with M1. The idea is that in this order, computing each "eq" is easier is we start from the previous one.
-- Currently the packed pcs works as follows:
-
-```
-┌─┐┌─┐┌─┐┌─┐┌─┐┌─┐┌─┐┌─┐┌─┐┌─┐┌─┐┌─┐┌─┐┌─┐
-| || || || || || || || || || || || || || |
-| || || || || || || || || || || || || || |
-| || || || || || || || || || || || || || |
-| || || || || || || || || || || || || || |
-| || || || || || || || || || || || || || |
-| || || || || || || || || || || || || || |
-└─┘└─┘└─┘└─┘└─┘└─┘└─┘└─┘└─┘└─┘└─┘└─┘└─┘└─┘
-┌─┐┌─┐┌─┐┌─┐┌─┐┌─┐┌─┐┌─┐┌─┐┌─┐┌─┐┌─┐┌─┐┌─┐
-| || || || || || || || || || || || || || |
-| || || || || || || || || || || || || || |
-└─┘└─┘└─┘└─┘└─┘└─┘└─┘└─┘└─┘└─┘└─┘└─┘└─┘└─┘
-┌─┐┌─┐┌─┐┌─┐┌─┐┌─┐┌─┐┌─┐┌─┐┌─┐┌─┐┌─┐┌─┐┌─┐
-└─┘└─┘└─┘└─┘└─┘└─┘└─┘└─┘└─┘└─┘└─┘└─┘└─┘└─┘
-```
-
-But we reduce proof size a lot using instead (TODO):
-
-```
-┌────────────────────────┐┌──────────┐┌─┐
-| || || |
-| || || |
-| || || |
-| || || |
-| || || |
-| || || |
-└────────────────────────┘└──────────┘└─┘
-┌────────────────────────┐┌──────────┐┌─┐
-| || || |
-| || || |
-└────────────────────────┘└──────────┘└─┘
-┌────────────────────────┐┌──────────┐┌─┐
-└────────────────────────┘└──────────┘└─┘
-```
-
## Security:
-Fiat Shamir: add a claim tracing feature, to ensure all the claims are indeed checked (Lev)
-
-## Not Perf
-
-- Whir batching: handle the case where the second polynomial is too small compared to the first one
-- bounddary condition on dot_product table: first flag = 1
-- verify correctness of the Grand Product check
-- Proof size: replace all equality checks in the verifier algo by value deduction
+- Fiat Shamir: add a claim tracing feature, to ensure all the claims are indeed checked (Lev)
+- Double Check AIR constraints, logup overflows etc
+- Formal Verification
-- KoalaBear extension of degree 5: the current implem (in a fork of Plonky3) has not been been optimized
-- KoalaBear extension of degree 6: in order to use the (proven) Johnson bound in WHIR
-- current "packed PCS" is not optimal in the end: can lead to [16][4][2][2] (instead of [16][8])
-- make test_packed_pcs pass again
-- Poseidon AIR: handle properly the compression mode ? (where output = poseidon(input) + input) (both in WHIR / XMSS)
-- XMSS: implem the hash tweak (almost no performance impact as long as we use 1 tweak / XMSS, but this requires further security analysis)
-- Grinding before GKR (https://eprint.iacr.org/2025/118)
-
-
-# Random ideas
+# Ideas
- About range checks, that can currently be done in 3 cycles (see 2.5.3 of the zkVM pdf) + 3 memory cells used. For small ranges we can save 2 memory cells.
-
-## Known leanISA compiler bugs:
-
-### Non exhaustive conditions in inlined functions
-
-Currently, to inline functions we simply replace the "return" keyword by some variable assignment, i.e.
-we do not properly handle conditions, we would need to add some JUMPs ...
-
-```
-fn works(x) inline -> 1 {
- if x == 0 {
- return 0;
- } else {
- return 1;
- }
-}
-
-fn doesnt_work(x) inline -> 1 {
- if x == 0 {
- return 0; // will be compiled to `res = 0`;
- } // the bug: we do not JUMP here, when inlined
- return 1; // will be compiled to `res = 1`; -> invalid (res = 0 and = 1 at the same time)
-}
-```
-
+- Avoid committing to the 3 index columns, and replace it by a sumcheck? Idea by Georg (Powdr). Advantage: Less commitment surface. Drawback: increase the number of instances in the final WHIR batch opening -> proof size overhead
+- Lev's trick to skip some low-level modular reduction?
+
diff --git a/crates/air/Cargo.toml b/crates/air/Cargo.toml
index e8eb3f85..57afa0a7 100644
--- a/crates/air/Cargo.toml
+++ b/crates/air/Cargo.toml
@@ -9,7 +9,6 @@ workspace = true
[dependencies]
tracing.workspace = true
utils.workspace = true
-p3-air.workspace = true
p3-util.workspace = true
multilinear-toolkit.workspace = true
diff --git a/crates/air/src/lib.rs b/crates/air/src/lib.rs
index df7cb7ec..a3544550 100644
--- a/crates/air/src/lib.rs
+++ b/crates/air/src/lib.rs
@@ -6,6 +6,19 @@ mod utils;
mod validity_check;
mod verify;
+use multilinear_toolkit::prelude::{Field, MultilinearPoint};
pub use prove::*;
pub use validity_check::*;
pub use verify::*;
+
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct AirClaims {
+ pub point: MultilinearPoint,
+ pub evals_f: Vec,
+ pub evals_ef: Vec,
+
+ // only for columns with a "shift", in case univariate skip == 1
+ pub down_point: Option>,
+ pub evals_f_on_down_columns: Vec,
+ pub evals_ef_on_down_columns: Vec,
+}
diff --git a/crates/air/src/prove.rs b/crates/air/src/prove.rs
index ad6b8409..51cf62ca 100644
--- a/crates/air/src/prove.rs
+++ b/crates/air/src/prove.rs
@@ -1,10 +1,9 @@
use multilinear_toolkit::prelude::*;
-use p3_air::Air;
use p3_util::{log2_ceil_usize, log2_strict_usize};
use tracing::{info_span, instrument};
-use utils::{FSProver, fold_multilinear_chunks, multilinears_linear_combination};
+use utils::{fold_multilinear_chunks, multilinears_linear_combination};
-use crate::{uni_skip_utils::matrix_next_mle_folded, utils::column_shifted};
+use crate::{AirClaims, uni_skip_utils::matrix_next_mle_folded, utils::column_shifted};
/*
@@ -15,17 +14,15 @@ cf https://eprint.iacr.org/2023/552.pdf and https://solvable.group/posts/super-a
#[instrument(name = "prove air", skip_all)]
#[allow(clippy::too_many_arguments)]
pub fn prove_air>, A: Air>(
- prover_state: &mut FSProver>,
+ prover_state: &mut impl FSProver,
air: &A,
- mut extra_data: A::ExtraData,
+ extra_data: A::ExtraData,
univariate_skips: usize,
columns_f: &[impl AsRef<[PF]>],
columns_ef: &[impl AsRef<[EF]>],
- last_row_shifted_f: &[PF],
- last_row_shifted_ef: &[EF],
- virtual_column_statements: Option>, // point should be randomness generated after committing to the columns
+ virtual_column_statement: Option>, // point should be randomness generated after committing to the columns
store_intermediate_foldings: bool,
-) -> (MultilinearPoint, Vec, Vec)
+) -> AirClaims
where
A::ExtraData: AlphaPowersMut + AlphaPowers,
{
@@ -50,16 +47,11 @@ where
// )
// .unwrap();
- let alpha = prover_state.sample(); // random challenge for batching constraints
-
- *extra_data.alpha_powers_mut() = alpha
- .powers()
- .take(air.n_constraints() + virtual_column_statements.as_ref().map_or(0, |s| s.values.len()))
- .collect();
+ assert!(extra_data.alpha_powers().len() >= air.n_constraints() + virtual_column_statement.is_some() as usize);
let n_sc_rounds = log_n_rows + 1 - univariate_skips;
- let zerocheck_challenges = virtual_column_statements
+ let zerocheck_challenges = virtual_column_statement
.as_ref()
.map(|st| st.point.0.clone())
.unwrap_or_else(|| prover_state.sample_vec(n_sc_rounds));
@@ -68,14 +60,12 @@ where
let shifted_rows_f = air
.down_column_indexes_f()
.par_iter()
- .zip_eq(last_row_shifted_f)
- .map(|(&col_index, &final_value)| column_shifted(columns_f[col_index], final_value.as_base().unwrap()))
+ .map(|&col_index| column_shifted(columns_f[col_index]))
.collect::>();
let shifted_rows_ef = air
.down_column_indexes_ef()
.par_iter()
- .zip_eq(last_row_shifted_ef)
- .map(|(&col_index, &final_value)| column_shifted(columns_ef[col_index], final_value))
+ .map(|&col_index| column_shifted(columns_ef[col_index]))
.collect::>();
let mut columns_up_down_f = columns_f.to_vec(); // orginal columns, followed by shifted ones
@@ -98,11 +88,11 @@ where
air,
&extra_data,
Some((zerocheck_challenges, None)),
- virtual_column_statements.is_none(),
+ virtual_column_statement.is_none(),
prover_state,
- virtual_column_statements
+ virtual_column_statement
.as_ref()
- .map(|st| dot_product(st.values.iter().copied(), alpha.powers()))
+ .map(|st| st.value)
.unwrap_or_else(|| EF::ZERO),
store_intermediate_foldings,
)
@@ -110,29 +100,41 @@ where
prover_state.add_extension_scalars(&inner_sums);
- open_columns(
- prover_state,
- univariate_skips,
- &air.down_column_indexes_f(),
- &air.down_column_indexes_ef(),
- &columns_f,
- &columns_ef,
- &outer_sumcheck_challenge,
- )
+ if univariate_skips == 1 {
+ open_columns_no_skip(
+ prover_state,
+ &inner_sums,
+ &air.down_column_indexes_f(),
+ &air.down_column_indexes_ef(),
+ &columns_f,
+ &columns_ef,
+ &outer_sumcheck_challenge,
+ )
+ } else if shifted_rows_f.is_empty() && shifted_rows_ef.is_empty() {
+        // useful for poseidon2 benchmark
+ open_flat_columns(
+ prover_state,
+ univariate_skips,
+ &columns_f,
+ &columns_ef,
+ &outer_sumcheck_challenge,
+ )
+ } else {
+ panic!(
+ "Currently unsupported for simplicty (checkout c7944152a4325b1e1913446e6684112099db5d78 for a version that supported this case)"
+ );
+ }
}
#[instrument(skip_all)]
-fn open_columns>>(
- prover_state: &mut FSProver>,
+fn open_flat_columns>>(
+ prover_state: &mut impl FSProver,
univariate_skips: usize,
- columns_with_shift_f: &[usize],
- columns_with_shift_ef: &[usize],
columns_f: &[&[PF]],
columns_ef: &[&[EF]],
outer_sumcheck_challenge: &[EF],
-) -> (MultilinearPoint, Vec, Vec) {
- let n_up_down_columns =
- columns_f.len() + columns_ef.len() + columns_with_shift_f.len() + columns_with_shift_ef.len();
+) -> AirClaims {
+ let n_up_down_columns = columns_f.len() + columns_ef.len();
let batching_scalars = prover_state.sample_vec(log2_ceil_usize(n_up_down_columns));
let eval_eq_batching_scalars = eval_eq(&batching_scalars)[..n_up_down_columns].to_vec();
@@ -152,47 +154,10 @@ fn open_columns>>(
});
}
- let columns_shifted_f = &columns_with_shift_f.iter().map(|&i| columns_f[i]).collect::>();
- let columns_shifted_ef = &columns_with_shift_ef.iter().map(|&i| columns_ef[i]).collect::>();
-
- let mut batched_column_down = if columns_shifted_f.is_empty() {
- tracing::warn!("TODO optimize open_columns when no shifted F columns");
- vec![EF::ZERO; batched_column_up.len()]
- } else {
- multilinears_linear_combination(
- columns_shifted_f,
- &eval_eq_batching_scalars[columns_f.len() + columns_ef.len()..][..columns_shifted_f.len()],
- )
- };
-
- if !columns_shifted_ef.is_empty() {
- let batched_column_down_ef = multilinears_linear_combination(
- columns_shifted_ef,
- &eval_eq_batching_scalars[columns_f.len() + columns_ef.len() + columns_shifted_f.len()..],
- );
- batched_column_down
- .par_iter_mut()
- .zip(&batched_column_down_ef)
- .for_each(|(a, &b)| {
- *a += b;
- });
- }
-
- let sub_evals = info_span!("fold_multilinear_chunks").in_scope(|| {
- let sub_evals_up = fold_multilinear_chunks(
- &batched_column_up,
- &MultilinearPoint(outer_sumcheck_challenge[1..].to_vec()),
- );
- let sub_evals_down = fold_multilinear_chunks(
- &column_shifted(&batched_column_down, EF::ZERO),
- &MultilinearPoint(outer_sumcheck_challenge[1..].to_vec()),
- );
- sub_evals_up
- .iter()
- .zip(sub_evals_down.iter())
- .map(|(&up, &down)| up + down)
- .collect::>()
- });
+ let sub_evals = fold_multilinear_chunks(
+ &batched_column_up,
+ &MultilinearPoint(outer_sumcheck_challenge[1..].to_vec()),
+ );
prover_state.add_extension_scalars(&sub_evals);
let epsilons = prover_state.sample_vec(univariate_skips);
@@ -203,14 +168,8 @@ fn open_columns>>(
// TODO opti in case of flat AIR (no need of `matrix_next_mle_folded`)
let matrix_up = eval_eq(&point);
- let matrix_down = matrix_next_mle_folded(&point);
let inner_mle = info_span!("packing").in_scope(|| {
- MleGroupOwned::ExtensionPacked(vec![
- pack_extension(&matrix_up),
- pack_extension(&batched_column_up),
- pack_extension(&matrix_down),
- pack_extension(&batched_column_down),
- ])
+ MleGroupOwned::ExtensionPacked(vec![pack_extension(&matrix_up), pack_extension(&batched_column_up)])
});
let (inner_challenges, _, _) = info_span!("structured columns sumcheck").in_scope(|| {
@@ -218,7 +177,7 @@ fn open_columns>>(
1,
inner_mle,
None,
- &MySumcheck,
+ &ProductComputation {},
&vec![],
None,
false,
@@ -244,40 +203,115 @@ fn open_columns>>(
prover_state.add_extension_scalars(&evaluations_remaining_to_prove_f);
prover_state.add_extension_scalars(&evaluations_remaining_to_prove_ef);
- (
- inner_challenges,
- evaluations_remaining_to_prove_f,
- evaluations_remaining_to_prove_ef,
- )
+ AirClaims {
+ point: inner_challenges,
+ evals_f: evaluations_remaining_to_prove_f,
+ evals_ef: evaluations_remaining_to_prove_ef,
+ down_point: None,
+ evals_f_on_down_columns: vec![],
+ evals_ef_on_down_columns: vec![],
+ }
}
-struct MySumcheck;
+#[instrument(skip_all)]
+fn open_columns_no_skip>>(
+ prover_state: &mut impl FSProver,
+ inner_evals: &[EF],
+ columns_with_shift_f: &[usize],
+ columns_with_shift_ef: &[usize],
+ columns_f: &[&[PF]],
+ columns_ef: &[&[EF]],
+ outer_sumcheck_challenge: &[EF],
+) -> AirClaims {
+ let n_columns_f_up = columns_f.len();
+ let n_columns_ef_up = columns_ef.len();
+ let n_columns_f_down = columns_with_shift_f.len();
+ let n_columns_ef_down = columns_with_shift_ef.len();
+ let n_down_columns = n_columns_f_down + n_columns_ef_down;
+ assert_eq!(inner_evals.len(), n_columns_f_up + n_columns_ef_up + n_down_columns);
+
+ let evals_up_f = inner_evals[..n_columns_f_up].to_vec();
+ let evals_down_f = &inner_evals[n_columns_f_up..][..n_columns_f_down];
+ let evals_up_ef = inner_evals[n_columns_f_up + n_columns_f_down..][..n_columns_ef_up].to_vec();
+ let evals_down_ef = &inner_evals[n_columns_f_up + n_columns_f_down + n_columns_ef_up..];
+
+ if n_down_columns == 0 {
+ return AirClaims {
+ point: MultilinearPoint(outer_sumcheck_challenge.to_vec()),
+ evals_f: evals_up_f,
+ evals_ef: evals_up_ef,
+ down_point: None,
+ evals_f_on_down_columns: vec![],
+ evals_ef_on_down_columns: vec![],
+ };
+ }
-impl>> SumcheckComputation for MySumcheck {
- type ExtraData = Vec;
+ let batching_scalar = prover_state.sample();
+ let batching_scalar_powers = batching_scalar.powers().collect_n(n_down_columns);
- fn degree(&self) -> usize {
- 2
- }
- #[inline(always)]
- fn eval_base(&self, _: &[PF], _: &[EF], _: &Self::ExtraData) -> EF {
- unreachable!()
- }
- #[inline(always)]
- fn eval_extension(&self, point: &[EF], _: &[EF], _: &Self::ExtraData) -> EF {
- point[0] * point[1] + point[2] * point[3]
- }
- #[inline(always)]
- fn eval_packed_base(&self, _: &[PFPacking], _: &[EFPacking], _: &Self::ExtraData) -> EFPacking {
- unreachable!()
+ let columns_shifted_f = &columns_with_shift_f.iter().map(|&i| columns_f[i]).collect::>();
+ let columns_shifted_ef = &columns_with_shift_ef.iter().map(|&i| columns_ef[i]).collect::>();
+
+ let mut batched_column_down =
+ multilinears_linear_combination(columns_shifted_f, &batching_scalar_powers[..n_columns_f_down]);
+
+ if n_columns_ef_down > 0 {
+ let batched_column_down_ef =
+ multilinears_linear_combination(columns_shifted_ef, &batching_scalar_powers[n_columns_f_down..]);
+ batched_column_down
+ .par_iter_mut()
+ .zip(&batched_column_down_ef)
+ .for_each(|(a, &b)| {
+ *a += b;
+ });
}
- #[inline(always)]
- fn eval_packed_extension(
- &self,
- point: &[EFPacking],
- _: &[EFPacking],
- _: &Self::ExtraData,
- ) -> EFPacking {
- point[0] * point[1] + point[2] * point[3]
+
+ let matrix_down = matrix_next_mle_folded(outer_sumcheck_challenge);
+ let inner_mle = info_span!("packing").in_scope(|| {
+ MleGroupOwned::ExtensionPacked(vec![pack_extension(&matrix_down), pack_extension(&batched_column_down)])
+ });
+
+ let inner_sum = dot_product(
+ evals_down_f.iter().chain(evals_down_ef).copied(),
+ batching_scalar_powers.iter().copied(),
+ );
+
+ let (inner_challenges, _, _) = info_span!("structured columns sumcheck").in_scope(|| {
+ sumcheck_prove::(
+ 1,
+ inner_mle,
+ None,
+ &ProductComputation {},
+ &vec![],
+ None,
+ false,
+ prover_state,
+ inner_sum,
+ false,
+ )
+ });
+
+ let (evals_f_on_down_columns, evals_ef_on_down_columns) = info_span!("final evals").in_scope(|| {
+ (
+ columns_shifted_f
+ .par_iter()
+ .map(|col| col.evaluate(&inner_challenges))
+ .collect::>(),
+ columns_shifted_ef
+ .par_iter()
+ .map(|col| col.evaluate(&inner_challenges))
+ .collect::>(),
+ )
+ });
+ prover_state.add_extension_scalars(&evals_f_on_down_columns);
+ prover_state.add_extension_scalars(&evals_ef_on_down_columns);
+
+ AirClaims {
+ point: MultilinearPoint(outer_sumcheck_challenge.to_vec()),
+ evals_f: evals_up_f,
+ evals_ef: evals_up_ef,
+ down_point: Some(inner_challenges),
+ evals_f_on_down_columns,
+ evals_ef_on_down_columns,
}
}
diff --git a/crates/air/src/uni_skip_utils.rs b/crates/air/src/uni_skip_utils.rs
index b1b42a37..65cef1b3 100644
--- a/crates/air/src/uni_skip_utils.rs
+++ b/crates/air/src/uni_skip_utils.rs
@@ -15,6 +15,8 @@ pub fn matrix_next_mle_folded>>(outer_challenges: &[F])
res[i] += *v;
}
}
+ res[(1 << n) - 1] += outer_challenges.iter().copied().product::();
+
res
}
@@ -35,9 +37,13 @@ mod tests {
let matrix = matrix_next_mle_folded(&x_bools);
for y in 0..1 << n_vars {
let y_bools = to_big_endian_in_field::(y, n_vars);
- let expected = F::from_bool(x + 1 == y);
+ let expected = F::from_bool(if (x, y) == ((1 << n_vars) - 1, (1 << n_vars) - 1) {
+ true
+ } else {
+ x + 1 == y
+ });
assert_eq!(matrix.evaluate(&MultilinearPoint(y_bools.clone())), expected);
- assert_eq!(next_mle(&[x_bools.clone(), y_bools].concat()), expected);
+ assert_eq!(next_mle(&x_bools, &y_bools), expected);
}
}
}
diff --git a/crates/air/src/utils.rs b/crates/air/src/utils.rs
index 63ae1972..0614f515 100644
--- a/crates/air/src/utils.rs
+++ b/crates/air/src/utils.rs
@@ -11,7 +11,7 @@ use multilinear_toolkit::prelude::*;
/// ... ... ...
/// (0 0 0 0 ... 0 1 0)
/// (0 0 0 0 ... 0 0 1)
-/// (0 0 0 0 ... 0 0 0)
+/// (0 0 0 0 ... 0 0 1)
///
/// # Arguments
/// - `point`: A slice of 2n field elements representing two n-bit vectors concatenated.
@@ -34,49 +34,31 @@ use multilinear_toolkit::prelude::*;
///
/// # Returns
/// Field element: 1 if y = x + 1, 0 otherwise.
-pub(crate) fn next_mle(point: &[F]) -> F {
- // Check that the point length is even: we split into x and y of equal length.
- assert_eq!(point.len() % 2, 0, "Input point must have an even number of variables.");
- let n = point.len() / 2;
-
- // Split point into x (first n) and y (last n).
- let (x, y) = point.split_at(n);
-
- // Sum contributions for each possible carry position k = 0..n-1.
- (0..n)
- .map(|k| {
- // Term 1: bits to the left of k match
- //
- // For i > k, enforce x_i == y_i.
- // Using equality polynomial: x_i * y_i + (1 - x_i)*(1 - y_i).
- //
- // Indices are reversed because bits are big-endian.
- let eq_high_bits = (k + 1..n)
- .map(|i| x[n - 1 - i] * y[n - 1 - i] + (F::ONE - x[n - 1 - i]) * (F::ONE - y[n - 1 - i]))
- .product::();
-
- // Term 2: carry bit at position k
- //
- // Enforce x_k = 0 and y_k = 1.
- // Condition: (1 - x_k) * y_k.
- let carry_bit = (F::ONE - x[n - 1 - k]) * y[n - 1 - k];
-
- // Term 3: bits to the right of k are 1 in x and 0 in y
- //
- // For i < k, enforce x_i = 1 and y_i = 0.
- // Condition: x_i * (1 - y_i).
- let low_bits_are_one_zero = (0..k).map(|i| x[n - 1 - i] * (F::ONE - y[n - 1 - i])).product::();
+pub(crate) fn next_mle(x: &[F], y: &[F]) -> F {
+ assert_eq!(x.len(), y.len());
+ let n = x.len();
+ let mut eq_prefix = Vec::with_capacity(n + 1);
+ eq_prefix.push(F::ONE);
+ for i in 0..n {
+ let eq_i = x[i] * y[i] + (F::ONE - x[i]) * (F::ONE - y[i]);
+ eq_prefix.push(eq_prefix[i] * eq_i);
+ }
+ let mut low_suffix = vec![F::ONE; n + 1];
+ for i in (0..n).rev() {
+ low_suffix[i] = low_suffix[i + 1] * x[i] * (F::ONE - y[i]);
+ }
+ let mut sum = F::ZERO;
+ for arr in 0..n {
+ let carry = (F::ONE - x[arr]) * y[arr];
+ sum += eq_prefix[arr] * carry * low_suffix[arr + 1];
+ }
- // Multiply the three terms for this k, representing one "carry pattern".
- eq_high_bits * carry_bit * low_bits_are_one_zero
- })
- // Sum over all carry positions: any valid "k" gives contribution 1.
- .sum()
+ sum + x.iter().chain(y).copied().product::()
}
-pub(crate) fn column_shifted(column: &[F], final_value: F) -> Vec {
+pub(crate) fn column_shifted(column: &[F]) -> Vec {
let mut down = unsafe { uninitialized_vec(column.len()) };
parallel_clone(&column[1..], &mut down[..column.len() - 1]);
- down[column.len() - 1] = final_value;
+ down[column.len() - 1] = column[column.len() - 1];
down
}
diff --git a/crates/air/src/validity_check.rs b/crates/air/src/validity_check.rs
index f9cb2876..1afd6007 100644
--- a/crates/air/src/validity_check.rs
+++ b/crates/air/src/validity_check.rs
@@ -1,5 +1,4 @@
-use multilinear_toolkit::prelude::{ExtensionField, PF};
-use p3_air::Air;
+use multilinear_toolkit::prelude::*;
use tracing::instrument;
use utils::ConstraintChecker;
@@ -68,8 +67,8 @@ pub fn check_air_validity>>(
let up_ef = (0..air.n_columns_ef_air())
.map(|j| columns_ef[j][n_rows - 1])
.collect::>();
- assert_eq!(last_row_f.len(), air.down_column_indexes_f().len());
- assert_eq!(last_row_ef.len(), air.down_column_indexes_ef().len());
+ assert_eq!(last_row_f.len(), air.n_down_columns_f());
+ assert_eq!(last_row_ef.len(), air.n_down_columns_ef());
let mut constraints_checker = ConstraintChecker {
up_f,
up_ef,
diff --git a/crates/air/src/verify.rs b/crates/air/src/verify.rs
index cdbfec62..2e3d00ef 100644
--- a/crates/air/src/verify.rs
+++ b/crates/air/src/verify.rs
@@ -1,63 +1,49 @@
use multilinear_toolkit::prelude::*;
-use p3_air::Air;
use p3_util::log2_ceil_usize;
-use crate::utils::next_mle;
+use crate::{AirClaims, utils::next_mle};
#[allow(clippy::type_complexity)]
#[allow(clippy::too_many_arguments)]
pub fn verify_air>, A: Air>(
- verifier_state: &mut FSVerifier>,
+ verifier_state: &mut impl FSVerifier,
air: &A,
- mut extra_data: A::ExtraData,
+ extra_data: A::ExtraData,
univariate_skips: usize,
log_n_rows: usize,
- last_row_f: &[PF],
- last_row_ef: &[EF],
- virtual_column_statements: Option>, // point should be randomness generated after committing to the columns
-) -> Result<(MultilinearPoint, Vec, Vec), ProofError>
+ virtual_column_statement: Option>, // point should be randomness generated after committing to the columns
+) -> ProofResult>
where
A::ExtraData: AlphaPowersMut + AlphaPowers,
{
- let alpha = verifier_state.sample(); // random challenge for batching constraints
-
- *extra_data.alpha_powers_mut() = alpha
- .powers()
- .take(air.n_constraints() + virtual_column_statements.as_ref().map_or(0, |s| s.values.len()))
- .collect();
+ assert!(extra_data.alpha_powers().len() >= air.n_constraints() + virtual_column_statement.is_some() as usize);
let n_sc_rounds = log_n_rows + 1 - univariate_skips;
- let zerocheck_challenges = virtual_column_statements
+ let zerocheck_challenges = virtual_column_statement
.as_ref()
.map(|st| st.point.0.clone())
.unwrap_or_else(|| verifier_state.sample_vec(n_sc_rounds));
assert_eq!(zerocheck_challenges.len(), n_sc_rounds);
let (sc_sum, outer_statement) =
- sumcheck_verify_with_univariate_skip::(verifier_state, air.degree() + 1, log_n_rows, univariate_skips)?;
+ sumcheck_verify_with_univariate_skip::(verifier_state, air.degree_air() + 1, log_n_rows, univariate_skips)?;
if sc_sum
- != virtual_column_statements
+ != virtual_column_statement
.as_ref()
- .map(|st| dot_product(st.values.iter().copied(), alpha.powers()))
+ .map(|st| st.value)
.unwrap_or_else(|| EF::ZERO)
{
return Err(ProofError::InvalidProof);
}
- let outer_selector_evals = univariate_selectors::>(univariate_skips)
- .iter()
- .map(|s| s.evaluate(outer_statement.point[0]))
- .collect::>();
-
- let mut inner_sums = verifier_state.next_extension_scalars_vec(
- air.n_columns_air() + air.down_column_indexes_f().len() + air.down_column_indexes_ef().len(),
- )?;
+ let inner_evals = verifier_state
+ .next_extension_scalars_vec(air.n_columns_air() + air.n_down_columns_f() + air.n_down_columns_ef())?;
- let n_columns_down_f = air.down_column_indexes_f().len();
- let constraint_evals = SumcheckComputation::eval_extension(
- air,
- &inner_sums[..air.n_columns_f_air() + n_columns_down_f],
- &inner_sums[air.n_columns_f_air() + n_columns_down_f..],
+ let n_columns_down_f = air.n_down_columns_f();
+ let n_columns_down_ef = air.n_down_columns_ef();
+ let constraint_evals = air.eval_extension(
+ &inner_evals[..air.n_columns_f_air() + n_columns_down_f],
+ &inner_evals[air.n_columns_f_air() + n_columns_down_f..],
&extra_data,
);
@@ -67,76 +53,54 @@ where
return Err(ProofError::InvalidProof);
}
- inner_sums = [
- inner_sums[..air.n_columns_f_air()].to_vec(),
- inner_sums[air.n_columns_f_air() + n_columns_down_f..][..air.n_columns_ef_air()].to_vec(),
- inner_sums[air.n_columns_f_air()..][..n_columns_down_f].to_vec(),
- inner_sums[air.n_columns_f_air() + n_columns_down_f + air.n_columns_ef_air()..].to_vec(),
- ]
- .concat();
-
- open_columns(
- verifier_state,
- air.n_columns_f_air(),
- air.n_columns_ef_air(),
- univariate_skips,
- &air.down_column_indexes_f(),
- &air.down_column_indexes_ef(),
- inner_sums,
- &Evaluation::new(outer_statement.point[1..].to_vec(), outer_statement.value),
- &outer_selector_evals,
- log_n_rows,
- last_row_f,
- last_row_ef,
- )
+ if univariate_skips == 1 {
+ open_columns_no_skip(verifier_state, air, log_n_rows, &inner_evals, &outer_statement.point)
+ } else if n_columns_down_f == 0 && n_columns_down_ef == 0 {
+ // useful for the poseidon2 benchmark
+ let outer_selector_evals = univariate_selectors::>(univariate_skips)
+ .iter()
+ .map(|s| s.evaluate(outer_statement.point[0]))
+ .collect::>();
+ open_flat_columns(
+ verifier_state,
+ air.n_columns_f_air(),
+ air.n_columns_ef_air(),
+ univariate_skips,
+ inner_evals,
+ &Evaluation::new(outer_statement.point[1..].to_vec(), outer_statement.value),
+ &outer_selector_evals,
+ log_n_rows,
+ )
+ } else {
+ panic!(
+ "Currently unsupported for simplicity (check out c7944152a4325b1e1913446e6684112099db5d78 for a version that supported this case)"
+ );
+ }
}
#[allow(clippy::too_many_arguments)] // TODO
#[allow(clippy::type_complexity)]
-fn open_columns>>(
- verifier_state: &mut FSVerifier>,
+fn open_flat_columns>>(
+ verifier_state: &mut impl FSVerifier,
n_columns_f: usize,
n_columns_ef: usize,
univariate_skips: usize,
- columns_with_shift_f: &[usize],
- columns_with_shift_ef: &[usize],
- mut evals_up_and_down: Vec,
+ inner_evals: Vec,
outer_sumcheck_challenge: &Evaluation,
outer_selector_evals: &[EF],
log_n_rows: usize,
- last_row_f: &[PF],
- last_row_ef: &[EF],
-) -> Result<(MultilinearPoint, Vec, Vec), ProofError> {
+) -> ProofResult> {
let n_columns = n_columns_f + n_columns_ef;
- assert_eq!(
- n_columns + last_row_f.len() + last_row_ef.len(),
- evals_up_and_down.len()
- );
- let last_row_selector = outer_selector_evals[(1 << univariate_skips) - 1]
- * outer_sumcheck_challenge.point.iter().copied().product::();
- for (&last_row_value, down_col_eval) in last_row_f.iter().zip(&mut evals_up_and_down[n_columns..]) {
- *down_col_eval -= last_row_selector * last_row_value;
- }
- for (&last_row_value, down_col_eval) in last_row_ef
- .iter()
- .zip(&mut evals_up_and_down[n_columns + last_row_f.len()..])
- {
- *down_col_eval -= last_row_selector * last_row_value;
- }
- let batching_scalars = verifier_state.sample_vec(log2_ceil_usize(n_columns + last_row_f.len() + last_row_ef.len()));
+ let batching_scalars = verifier_state.sample_vec(log2_ceil_usize(n_columns));
let eval_eq_batching_scalars = eval_eq(&batching_scalars);
let batching_scalars_up = &eval_eq_batching_scalars[..n_columns];
- let batching_scalars_down = &eval_eq_batching_scalars[n_columns..];
let sub_evals = verifier_state.next_extension_scalars_vec(1 << univariate_skips)?;
if dot_product::(sub_evals.iter().copied(), outer_selector_evals.iter().copied())
- != dot_product::(
- evals_up_and_down.iter().copied(),
- eval_eq_batching_scalars.iter().copied(),
- )
+ != dot_product::(inner_evals.iter().copied(), eval_eq_batching_scalars.iter().copied())
{
return Err(ProofError::InvalidProof);
}
@@ -151,14 +115,6 @@ fn open_columns>>(
let matrix_up_sc_eval = MultilinearPoint([epsilons.0.clone(), outer_sumcheck_challenge.point.0.clone()].concat())
.eq_poly_outside(&inner_sumcheck_stement.point);
- let matrix_down_sc_eval = next_mle(
- &[
- epsilons.0,
- outer_sumcheck_challenge.point.to_vec(),
- inner_sumcheck_stement.point.0.clone(),
- ]
- .concat(),
- );
let evaluations_remaining_to_verify_f = verifier_state.next_extension_scalars_vec(n_columns_f)?;
let evaluations_remaining_to_verify_ef = verifier_state.next_extension_scalars_vec(n_columns_ef)?;
@@ -171,26 +127,84 @@ fn open_columns>>(
batching_scalars_up.iter().copied(),
evaluations_remaining_to_verify.iter().copied(),
);
- let mut columns_with_shift = columns_with_shift_f.to_vec();
- columns_with_shift.extend_from_slice(
- columns_with_shift_ef
- .iter()
- .map(|&x| x + n_columns_f)
- .collect::>()
- .as_slice(),
+
+ if inner_sumcheck_stement.value != matrix_up_sc_eval * batched_col_up_sc_eval {
+ return Err(ProofError::InvalidProof);
+ }
+ Ok(AirClaims {
+ point: inner_sumcheck_stement.point.clone(),
+ evals_f: evaluations_remaining_to_verify_f,
+ evals_ef: evaluations_remaining_to_verify_ef,
+ down_point: None,
+ evals_f_on_down_columns: vec![],
+ evals_ef_on_down_columns: vec![],
+ })
+}
+
+fn open_columns_no_skip>>(
+ verifier_state: &mut impl FSVerifier,
+ air: &A,
+ log_n_rows: usize,
+ inner_evals: &[EF],
+ outer_sumcheck_challenge: &[EF],
+) -> ProofResult> {
+ let n_columns_f_up = air.n_columns_f_air();
+ let n_columns_ef_up = air.n_columns_ef_air();
+ let n_columns_f_down = air.n_down_columns_f();
+ let n_columns_ef_down = air.n_down_columns_ef();
+ let n_down_columns = n_columns_f_down + n_columns_ef_down;
+ assert_eq!(inner_evals.len(), n_columns_f_up + n_columns_ef_up + n_down_columns);
+
+ let evals_up_f = inner_evals[..n_columns_f_up].to_vec();
+ let evals_down_f = inner_evals[n_columns_f_up..][..n_columns_f_down].to_vec();
+ let evals_up_ef = inner_evals[n_columns_f_up + n_columns_f_down..][..n_columns_ef_up].to_vec();
+ let evals_down_ef = inner_evals[n_columns_f_up + n_columns_f_down + n_columns_ef_up..].to_vec();
+
+ if n_down_columns == 0 {
+ return Ok(AirClaims {
+ point: MultilinearPoint(outer_sumcheck_challenge.to_vec()),
+ evals_f: evals_up_f,
+ evals_ef: evals_up_ef,
+ down_point: None,
+ evals_f_on_down_columns: vec![],
+ evals_ef_on_down_columns: vec![],
+ });
+ }
+
+ let batching_scalar = verifier_state.sample();
+ let batching_scalar_powers = batching_scalar.powers().collect_n(n_down_columns);
+
+ let inner_sum: EF = dot_product(
+ evals_down_f.into_iter().chain(evals_down_ef),
+ batching_scalar_powers.iter().copied(),
);
- let batched_col_down_sc_eval = (0..columns_with_shift.len())
- .map(|i| evaluations_remaining_to_verify[columns_with_shift[i]] * batching_scalars_down[i])
- .sum::();
- if inner_sumcheck_stement.value
- != matrix_up_sc_eval * batched_col_up_sc_eval + matrix_down_sc_eval * batched_col_down_sc_eval
- {
+ let (inner_sum_retrieved, inner_sumcheck_stement) = sumcheck_verify(verifier_state, log_n_rows, 2)?;
+
+ if inner_sum != inner_sum_retrieved {
return Err(ProofError::InvalidProof);
}
- Ok((
- inner_sumcheck_stement.point.clone(),
- evaluations_remaining_to_verify_f,
- evaluations_remaining_to_verify_ef,
- ))
+
+ let matrix_down_sc_eval = next_mle(outer_sumcheck_challenge, &inner_sumcheck_stement.point);
+
+ let evals_f_on_down_columns = verifier_state.next_extension_scalars_vec(n_columns_f_down)?;
+ let evals_ef_on_down_columns = verifier_state.next_extension_scalars_vec(n_columns_ef_down)?;
+ let evaluations_remaining_to_verify = [evals_f_on_down_columns.clone(), evals_ef_on_down_columns.clone()].concat();
+ let batched_col_down_sc_eval = dot_product::(
+ batching_scalar_powers.iter().copied(),
+ evaluations_remaining_to_verify.iter().copied(),
+ );
+
+ if inner_sumcheck_stement.value != matrix_down_sc_eval * batched_col_down_sc_eval {
+ return Err(ProofError::InvalidProof);
+ }
+
+ Ok(AirClaims {
+ point: MultilinearPoint(outer_sumcheck_challenge.to_vec()),
+ evals_f: evals_up_f,
+ evals_ef: evals_up_ef,
+ down_point: Some(inner_sumcheck_stement.point.clone()),
+ evals_f_on_down_columns,
+ evals_ef_on_down_columns,
+ })
}
diff --git a/crates/air/tests/complex_air.rs b/crates/air/tests/complex_air.rs
deleted file mode 100644
index d2cc7c76..00000000
--- a/crates/air/tests/complex_air.rs
+++ /dev/null
@@ -1,214 +0,0 @@
-use multilinear_toolkit::prelude::*;
-use p3_air::{Air, AirBuilder};
-use p3_koala_bear::{KoalaBear, QuinticExtensionFieldKB};
-use rand::{Rng, SeedableRng, rngs::StdRng};
-use utils::{build_prover_state, build_verifier_state};
-
-use air::{check_air_validity, prove_air, verify_air};
-
-const UNIVARIATE_SKIPS: usize = 3;
-
-const N_COLS_F: usize = 2;
-
-type F = KoalaBear;
-type EF = QuinticExtensionFieldKB;
-
-struct ExampleStructuredAir;
-
-impl Air
- for ExampleStructuredAir
-{
- type ExtraData = Vec;
-
- fn n_columns_f_air(&self) -> usize {
- N_COLS_F
- }
- fn n_columns_ef_air(&self) -> usize {
- N_COLUMNS - N_COLS_F
- }
- fn degree(&self) -> usize {
- N_PREPROCESSED_COLUMNS
- }
- fn n_constraints(&self) -> usize {
- 50 // too much, but ok for tests
- }
- fn down_column_indexes_f(&self) -> Vec {
- vec![]
- }
- fn down_column_indexes_ef(&self) -> Vec {
- (N_PREPROCESSED_COLUMNS - N_COLS_F..N_COLUMNS - N_COLS_F).collect::>()
- }
-
- #[inline]
- fn eval(&self, builder: &mut AB, _: &Self::ExtraData) {
- let up_f = builder.up_f().to_vec();
- let up_ef = builder.up_ef().to_vec();
- let down_ef = builder.down_ef().to_vec();
- assert_eq!(up_f.len(), N_COLS_F);
- assert_eq!(up_f.len() + up_ef.len(), N_COLUMNS);
- assert_eq!(down_ef.len(), N_COLUMNS - N_PREPROCESSED_COLUMNS);
-
- if VIRTUAL_COLUMN {
- // virtual column A = col_0 * col_1 + col_2
- // virtual column B = col_0 - col_1
- builder.eval_virtual_column(up_ef[0].clone() + up_f[0].clone() * up_f[1].clone());
- builder.eval_virtual_column(AB::EF::from(up_f[0].clone() - up_f[1].clone()));
- }
-
- for j in N_PREPROCESSED_COLUMNS..N_COLUMNS {
- builder.assert_eq_ef(
- down_ef[j - N_PREPROCESSED_COLUMNS].clone(),
- up_ef[j - N_COLS_F].clone()
- + AB::F::from_usize(j)
- + (0..N_PREPROCESSED_COLUMNS - N_COLS_F)
- .map(|k| up_ef[k].clone())
- .product::()
- * up_f[0].clone()
- * up_f[1].clone(),
- );
- }
- }
-}
-
-fn generate_trace(
- n_rows: usize,
-) -> (Vec>, Vec>) {
- let mut rng = StdRng::seed_from_u64(0);
- let mut trace_f = vec![];
- for _ in 0..N_COLS_F {
- trace_f.push((0..n_rows).map(|_| rng.random()).collect::>());
- }
- let mut trace_ef = vec![];
- for _ in N_COLS_F..N_PREPROCESSED_COLUMNS {
- trace_ef.push((0..n_rows).map(|_| rng.random()).collect::>());
- }
- let mut witness_cols = vec![vec![EF::ZERO]; N_COLUMNS - N_PREPROCESSED_COLUMNS];
- for i in 1..n_rows {
- for (j, witness_col) in witness_cols.iter_mut().enumerate() {
- let witness_cols_j_i_min_1 = witness_col[i - 1];
- witness_col.push(
- witness_cols_j_i_min_1
- + F::from_usize(j + N_PREPROCESSED_COLUMNS)
- + (0..3).map(|k| trace_ef[k][i - 1]).product::() * trace_f[0][i - 1] * trace_f[1][i - 1],
- );
- }
- }
- trace_ef.extend(witness_cols);
- (trace_f, trace_ef)
-}
-
-#[test]
-fn test_air() {
- test_air_helper::();
- test_air_helper::();
-}
-
-fn test_air_helper() {
- const N_COLUMNS: usize = 17;
- const N_PREPROCESSED_COLUMNS: usize = 5;
- let log_n_rows = 12;
- let n_rows = 1 << log_n_rows;
- let mut prover_state = build_prover_state::(false);
-
- let (columns_plus_one_f, columns_plus_one_ef) = generate_trace::(n_rows + 1);
- let columns_ref_f = columns_plus_one_f.iter().map(|col| &col[..n_rows]).collect::>();
- let columns_ref_ef = columns_plus_one_ef.iter().map(|col| &col[..n_rows]).collect::>();
- let mut last_row_ef = columns_plus_one_ef.iter().map(|col| col[n_rows]).collect::>();
- last_row_ef = last_row_ef[N_PREPROCESSED_COLUMNS - N_COLS_F..].to_vec();
-
- let virtual_column_statement_prover = if VIRTUAL_COLUMN {
- let virtual_column_a = columns_ref_f[0]
- .iter()
- .zip(columns_ref_f[1].iter())
- .zip(columns_ref_ef[0].iter())
- .map(|((&a, &b), &c)| c + a * b)
- .collect::>();
- let virtual_column_evaluation_point =
- MultilinearPoint(prover_state.sample_vec(log_n_rows + 1 - UNIVARIATE_SKIPS));
- let selectors = univariate_selectors::>(UNIVARIATE_SKIPS);
- let virtual_column_value_a = evaluate_univariate_multilinear::<_, _, _, true>(
- &virtual_column_a,
- &virtual_column_evaluation_point,
- &selectors,
- None,
- );
- let virtual_column_b = columns_ref_f[0]
- .iter()
- .zip(columns_ref_f[1].iter())
- .map(|(&a, &b)| EF::from(a - b))
- .collect::>();
- let virtual_column_value_b = evaluate_univariate_multilinear::<_, _, _, true>(
- &virtual_column_b,
- &virtual_column_evaluation_point,
- &selectors,
- None,
- );
- prover_state.add_extension_scalar(virtual_column_value_a);
- prover_state.add_extension_scalar(virtual_column_value_b);
-
- Some(MultiEvaluation::new(
- virtual_column_evaluation_point.0.clone(),
- vec![virtual_column_value_a, virtual_column_value_b],
- ))
- } else {
- None
- };
-
- let air = ExampleStructuredAir:: {};
-
- check_air_validity(&air, &vec![], &columns_ref_f, &columns_ref_ef, &[], &last_row_ef).unwrap();
-
- let (point_prover, evaluations_remaining_to_prove_f, evaluations_remaining_to_prove_ef) = prove_air(
- &mut prover_state,
- &air,
- vec![],
- UNIVARIATE_SKIPS,
- &columns_ref_f,
- &columns_ref_ef,
- &[],
- &last_row_ef,
- virtual_column_statement_prover,
- true,
- );
- let mut verifier_state = build_verifier_state(prover_state);
-
- let virtual_column_statement_verifier = if VIRTUAL_COLUMN {
- let virtual_column_evaluation_point =
- MultilinearPoint(verifier_state.sample_vec(log_n_rows + 1 - UNIVARIATE_SKIPS));
- let virtual_column_value_a = verifier_state.next_extension_scalar().unwrap();
- let virtual_column_value_b = verifier_state.next_extension_scalar().unwrap();
- Some(MultiEvaluation::new(
- virtual_column_evaluation_point.0.clone(),
- vec![virtual_column_value_a, virtual_column_value_b],
- ))
- } else {
- None
- };
-
- let (point_verifier, evaluations_remaining_to_verify_f, evaluations_remaining_to_verify_ef) = verify_air(
- &mut verifier_state,
- &air,
- vec![],
- UNIVARIATE_SKIPS,
- log_n_rows,
- &[],
- &last_row_ef,
- virtual_column_statement_verifier,
- )
- .unwrap();
- assert_eq!(point_prover, point_verifier);
- assert_eq!(&evaluations_remaining_to_prove_f, &evaluations_remaining_to_verify_f);
- assert_eq!(&evaluations_remaining_to_prove_ef, &evaluations_remaining_to_verify_ef);
- for i in 0..N_COLS_F {
- assert_eq!(
- columns_ref_f[i].evaluate(&point_prover),
- evaluations_remaining_to_verify_f[i]
- );
- }
- for i in 0..N_COLUMNS - N_COLS_F {
- assert_eq!(
- columns_ref_ef[i].evaluate(&point_prover),
- evaluations_remaining_to_verify_ef[i]
- );
- }
-}
diff --git a/crates/air/tests/fib_air.rs b/crates/air/tests/fib_air.rs
deleted file mode 100644
index 968a3fe4..00000000
--- a/crates/air/tests/fib_air.rs
+++ /dev/null
@@ -1,119 +0,0 @@
-use multilinear_toolkit::prelude::*;
-use p3_air::{Air, AirBuilder};
-use p3_koala_bear::{KoalaBear, QuinticExtensionFieldKB};
-use utils::{build_prover_state, build_verifier_state};
-
-use air::{check_air_validity, prove_air, verify_air};
-
-const UNIVARIATE_SKIPS: usize = 3;
-
-type F = KoalaBear;
-type EF = QuinticExtensionFieldKB;
-
-struct FibonacciAir;
-
-impl Air for FibonacciAir {
- type ExtraData = Vec;
-
- fn n_columns_f_air(&self) -> usize {
- 1
- }
- fn n_columns_ef_air(&self) -> usize {
- 1
- }
- fn degree(&self) -> usize {
- 1
- }
- fn n_constraints(&self) -> usize {
- 10 // too much, but ok for tests
- }
- fn down_column_indexes_f(&self) -> Vec {
- vec![0]
- }
- fn down_column_indexes_ef(&self) -> Vec {
- vec![0]
- }
- #[inline]
- fn eval(&self, builder: &mut AB, _: &Self::ExtraData) {
- let a_up = builder.up_f()[0].clone();
- let b_up = builder.up_ef()[0].clone();
- let a_down = builder.down_f()[0].clone();
- let b_down = builder.down_ef()[0].clone();
- builder.assert_eq_ef(b_down, b_up.clone() + a_up);
- builder.assert_eq_ef(AB::EF::from(a_down), b_up);
- }
-}
-
-fn generate_trace(n_rows: usize) -> (Vec, Vec) {
- let mut col_a = vec![F::ONE];
- let mut col_b = vec![EF::ONE];
- for i in 1..n_rows {
- let a_next = col_b[i - 1].as_base().unwrap();
- let b_next = col_b[i - 1] + col_a[i - 1];
- col_a.push(a_next);
- col_b.push(b_next);
- }
- (col_a, col_b)
-}
-
-#[test]
-fn test_air_fibonacci() {
- let log_n_rows = 14;
- let n_rows = 1 << log_n_rows;
- let mut prover_state = build_prover_state::(false);
-
- let (columns_plus_one_f, columns_plus_one_ef) = generate_trace(n_rows + 1);
- let columns_ref_f = vec![&columns_plus_one_f[..n_rows]];
- let columns_ref_ef = vec![&columns_plus_one_ef[..n_rows]];
- let last_row_f = vec![columns_plus_one_f[n_rows]];
- let last_row_ef = vec![columns_plus_one_ef[n_rows]];
-
- let air = FibonacciAir {};
-
- check_air_validity(
- &air,
- &vec![],
- &columns_ref_f,
- &columns_ref_ef,
- &last_row_f,
- &last_row_ef,
- )
- .unwrap();
-
- let (point_prover, evaluations_remaining_to_prove_f, evaluations_remaining_to_prove_ef) = prove_air(
- &mut prover_state,
- &air,
- vec![],
- UNIVARIATE_SKIPS,
- &columns_ref_f,
- &columns_ref_ef,
- &last_row_f,
- &last_row_ef,
- None,
- true,
- );
- let mut verifier_state = build_verifier_state(prover_state);
-
- let (point_verifier, evaluations_remaining_to_verify_f, evaluations_remaining_to_verify_ef) = verify_air(
- &mut verifier_state,
- &air,
- vec![],
- UNIVARIATE_SKIPS,
- log_n_rows,
- &last_row_f,
- &last_row_ef,
- None,
- )
- .unwrap();
- assert_eq!(point_prover, point_verifier);
- assert_eq!(&evaluations_remaining_to_prove_f, &evaluations_remaining_to_verify_f);
- assert_eq!(&evaluations_remaining_to_prove_ef, &evaluations_remaining_to_verify_ef);
- assert_eq!(
- columns_ref_f[0].evaluate(&point_prover),
- evaluations_remaining_to_verify_f[0]
- );
- assert_eq!(
- columns_ref_ef[0].evaluate(&point_prover),
- evaluations_remaining_to_verify_ef[0]
- );
-}
diff --git a/crates/lean_compiler/Cargo.toml b/crates/lean_compiler/Cargo.toml
index 8441e6dc..e33b21b2 100644
--- a/crates/lean_compiler/Cargo.toml
+++ b/crates/lean_compiler/Cargo.toml
@@ -14,14 +14,13 @@ xmss.workspace = true
rand.workspace = true
p3-poseidon2.workspace = true
p3-koala-bear.workspace = true
-p3-challenger.workspace = true
-p3-air.workspace = true
p3-symmetric.workspace = true
p3-util.workspace = true
whir-p3.workspace = true
tracing.workspace = true
air.workspace = true
sub_protocols.workspace = true
-lookup.workspace = true
lean_vm.workspace = true
-multilinear-toolkit.workspace = true
\ No newline at end of file
+multilinear-toolkit.workspace = true
+
+[dev-dependencies]
\ No newline at end of file
diff --git a/crates/lean_compiler/snark_lib.py b/crates/lean_compiler/snark_lib.py
new file mode 100644
index 00000000..dcda6560
--- /dev/null
+++ b/crates/lean_compiler/snark_lib.py
@@ -0,0 +1,98 @@
+# Import this in zkDSL .py files to make them executable as normal Python
+
+import math
+from typing import Any
+
+# Type annotations
+Mut = Any
+Const = Any
+Imu = Any
+
+
+# @inline decorator (does nothing in Python execution)
+def inline(fn):
+ return fn
+
+
+# unroll(a, b) returns range(a, b) for Python execution
+def unroll(a: int, b: int):
+ return range(a, b)
+
+
+# Array - simulates write-once memory with pointer arithmetic
+class Array:
+ def __init__(self, size: int):
+ # TODO
+ return
+
+ def __getitem__(self, idx):
+ # TODO
+ return
+
+ def __setitem__(self, idx, value):
+ # TODO
+ return
+
+ def __add__(self, offset: int):
+ # TODO
+ return
+
+ def __len__(self):
+ # TODO
+ return
+
+
+# DynArray - dynamic array with push/pop (compile-time construct)
+class DynArray:
+ def __init__(self, initial: list):
+ self._data = list(initial)
+
+ def __getitem__(self, idx):
+ return self._data[idx]
+
+ def __len__(self):
+ return len(self._data)
+
+ def push(self, value):
+ self._data.append(value)
+
+ def pop(self):
+ self._data.pop()
+
+
+# Built-in constants
+ZERO_VEC_PTR = 0
+ONE_VEC_PTR = 16
+NONRESERVED_PROGRAM_INPUT_START = 58
+
+
+def poseidon16(left, right, output, mode):
+ _ = left, right, output, mode
+
+
+def dot_product(a, b, result, length, mode):
+ _ = a, b, result, length, mode
+
+
+def hint_decompose_bits(value, bits, n_bits, endian):
+ _ = value, bits, n_bits, endian
+
+
+def log2_ceil(x: int) -> int:
+ assert x > 0
+ return math.ceil(math.log2(x))
+
+
+def next_multiple_of(x: int, n: int) -> int:
+ return x + (n - x % n) % n
+
+
+def saturating_sub(a: int, b: int) -> int:
+ return max(0, a - b)
+
+
+def debug_assert(cond, msg=None):
+ if not cond:
+ if msg:
+ raise AssertionError(msg)
+ raise AssertionError()
diff --git a/crates/lean_compiler/src/a_simplify_lang.rs b/crates/lean_compiler/src/a_simplify_lang.rs
index 72192c41..f2735f81 100644
--- a/crates/lean_compiler/src/a_simplify_lang.rs
+++ b/crates/lean_compiler/src/a_simplify_lang.rs
@@ -1,19 +1,10 @@
-use crate::{
- Counter, F,
- lang::{
- AssignmentTarget, Condition, ConstExpression, ConstMallocLabel, Context, Expression, Function, Line,
- MathOperation, Program, Scope, SimpleExpr, Var,
- },
- parser::ConstArrayValue,
-};
-use lean_vm::{
- Boolean, BooleanExpr, CustomHint, FileId, FunctionName, SourceLineNumber, SourceLocation, Table, TableT,
-};
+use crate::{F, lang::*, parser::ConstArrayValue};
+use lean_vm::{ALL_TABLES, Boolean, BooleanExpr, CustomHint, FunctionName, SourceLocation, Table, TableT};
use std::{
collections::{BTreeMap, BTreeSet},
fmt::{Display, Formatter},
};
-use utils::ToUsize;
+use utils::{Counter, ToUsize};
#[derive(Debug, Clone)]
pub struct SimpleProgram {
@@ -23,7 +14,6 @@ pub struct SimpleProgram {
#[derive(Debug, Clone)]
pub struct SimpleFunction {
pub name: String,
- pub file_id: FileId,
pub arguments: Vec,
pub n_returned_vars: usize,
pub instructions: Vec,
@@ -85,7 +75,7 @@ pub enum SimpleLine {
condition: SimpleExpr,
then_branch: Vec,
else_branch: Vec,
- line_number: SourceLineNumber,
+ location: SourceLocation,
},
AssertZero {
operation: MathOperation,
@@ -96,7 +86,7 @@ pub enum SimpleLine {
function_name: String,
args: Vec,
return_data: Vec,
- line_number: SourceLineNumber,
+ location: SourceLocation,
},
FunctionRet {
return_data: Vec,
@@ -105,16 +95,15 @@ pub enum SimpleLine {
table: Table,
args: Vec,
},
- Panic,
+ Panic {
+ message: Option,
+ },
// Hints
/// each field element x is decomposed to: (a0, a1, a2, ..., a11, b) where:
/// x = a0 + a1.4 + a2.4^2 + a3.4^3 + ... + a11.4^11 + b.2^24
/// and ai < 4, b < 2^7 - 1
/// The decomposition is unique, and always exists (except for x = -1)
CustomHint(CustomHint, Vec),
- PrivateInputStart {
- result: Var,
- },
Print {
line_info: String,
content: Vec,
@@ -122,8 +111,6 @@ pub enum SimpleLine {
HintMAlloc {
var: Var,
size: SimpleExpr,
- vectorized: bool,
- vectorized_len: SimpleExpr,
},
ConstMalloc {
// always not vectorized
@@ -135,7 +122,12 @@ pub enum SimpleLine {
LocationReport {
location: SourceLocation,
},
- DebugAssert(BooleanExpr, SourceLineNumber),
+ DebugAssert(BooleanExpr, SourceLocation),
+ /// Range check: assert val <= bound
+ RangeCheck {
+ val: SimpleExpr,
+ bound: SimpleExpr,
+ },
}
impl SimpleLine {
@@ -147,27 +139,83 @@ impl SimpleLine {
arg1: SimpleExpr::zero(),
}
}
+
+ /// Returns mutable references to all nested blocks (arms of match, branches of if).
+ pub fn nested_blocks_mut(&mut self) -> Vec<&mut Vec> {
+ match self {
+ Self::Match { arms, .. } => arms.iter_mut().collect(),
+ Self::IfNotZero {
+ then_branch,
+ else_branch,
+ ..
+ } => vec![then_branch, else_branch],
+ Self::ForwardDeclaration { .. }
+ | Self::Assignment { .. }
+ | Self::RawAccess { .. }
+ | Self::AssertZero { .. }
+ | Self::FunctionCall { .. }
+ | Self::FunctionRet { .. }
+ | Self::Precompile { .. }
+ | Self::Panic { .. }
+ | Self::CustomHint(..)
+ | Self::Print { .. }
+ | Self::HintMAlloc { .. }
+ | Self::ConstMalloc { .. }
+ | Self::LocationReport { .. }
+ | Self::DebugAssert(..)
+ | Self::RangeCheck { .. } => vec![],
+ }
+ }
+
+ pub fn nested_blocks(&self) -> Vec<&Vec> {
+ match self {
+ Self::Match { arms, .. } => arms.iter().collect(),
+ Self::IfNotZero {
+ then_branch,
+ else_branch,
+ ..
+ } => vec![then_branch, else_branch],
+ Self::ForwardDeclaration { .. }
+ | Self::Assignment { .. }
+ | Self::RawAccess { .. }
+ | Self::AssertZero { .. }
+ | Self::FunctionCall { .. }
+ | Self::FunctionRet { .. }
+ | Self::Precompile { .. }
+ | Self::Panic { .. }
+ | Self::CustomHint(..)
+ | Self::Print { .. }
+ | Self::HintMAlloc { .. }
+ | Self::ConstMalloc { .. }
+ | Self::LocationReport { .. }
+ | Self::DebugAssert(..)
+ | Self::RangeCheck { .. } => vec![],
+ }
+ }
}
-pub fn simplify_program(mut program: Program) -> SimpleProgram {
+fn ends_with_early_exit(block: &[SimpleLine]) -> bool {
+ match block.last() {
+ Some(SimpleLine::Panic { .. }) | Some(SimpleLine::FunctionRet { .. }) => true,
+ Some(last) => {
+ let nested = last.nested_blocks();
+ !nested.is_empty() && nested.iter().all(|b| ends_with_early_exit(b))
+ }
+ None => false,
+ }
+}
+
+pub fn simplify_program(mut program: Program) -> Result {
check_program_scoping(&program);
- handle_inlined_functions(&mut program);
- // Iterate between unrolling and const argument handling until fixed point
let mut unroll_counter = Counter::new();
- let mut max_iterations = 100;
- loop {
- let mut any_change = false;
+ let mut inline_counter = Counter::new();
+ compile_time_transform_in_program(&mut program, &mut unroll_counter, &mut inline_counter)?;
- any_change |= unroll_loops_in_program(&mut program, &mut unroll_counter);
- any_change |= handle_const_arguments(&mut program);
+ // Remove all inlined functions (they've been inlined)
+ program.functions.retain(|_, func| !func.inlined);
- max_iterations -= 1;
- assert!(max_iterations > 0, "Too many iterations while simplifying program");
- if !any_change {
- break;
- }
- }
+ validate_program_vectors(&program)?;
// Remove all const functions - they should all have been specialized by now
let const_func_names: Vec<_> = program
@@ -180,6 +228,9 @@ pub fn simplify_program(mut program: Program) -> SimpleProgram {
program.functions.remove(&name);
}
+ let mut mutable_loop_counter = MutableLoopTransformCounter::default();
+ transform_mutable_in_loops_in_program(&mut program, &mut mutable_loop_counter);
+
let mut new_functions = BTreeMap::new();
let mut counters = Counters::default();
let mut const_malloc = ConstMalloc::default();
@@ -189,2387 +240,3126 @@ pub fn simplify_program(mut program: Program) -> SimpleProgram {
};
for (name, func) in &program.functions {
let mut array_manager = ArrayManager::default();
+ let mut mut_tracker = MutableVarTracker::default();
+ let mut vec_tracker = VectorTracker::default();
+
+ // Register mutable arguments and capture their initial versioned names
+ // BEFORE simplifying the body
+ let arguments: Vec = func
+ .arguments
+ .iter()
+ .map(|arg| {
+ assert!(!arg.is_const);
+ if arg.is_mutable {
+ mut_tracker.register_mutable(&arg.name);
+ // Capture the initial versioned name (version 0)
+ mut_tracker.current_name(&arg.name)
+ } else {
+ mut_tracker.assigned.insert(arg.name.clone());
+ arg.name.clone()
+ }
+ })
+ .collect();
+
let mut state = SimplifyState {
counters: &mut counters,
array_manager: &mut array_manager,
+ mut_tracker: &mut mut_tracker,
+ vec_tracker: &mut vec_tracker,
};
let simplified_instructions = simplify_lines(
&ctx,
&mut state,
&mut const_malloc,
&mut new_functions,
- func.file_id,
func.n_returned_vars,
&func.body,
false,
- );
- let arguments = func
- .arguments
- .iter()
- .map(|(v, is_const)| {
- assert!(!is_const,);
- v.clone()
- })
- .collect::>();
+ )?;
let simplified_function = SimpleFunction {
name: name.clone(),
- file_id: func.file_id,
arguments,
n_returned_vars: func.n_returned_vars,
instructions: simplified_instructions,
};
- if !func.assume_always_returns {
- check_function_always_returns(&simplified_function);
- }
+ check_function_always_returns(&simplified_function)?;
new_functions.insert(name.clone(), simplified_function);
const_malloc.map.clear();
}
- SimpleProgram {
+ Ok(SimpleProgram {
functions: new_functions,
- }
+ })
}
-fn unroll_loops_in_program(program: &mut Program, unroll_counter: &mut Counter) -> bool {
- let mut changed = false;
- for func in program.functions.values_mut() {
- changed |= unroll_loops_in_lines(&mut func.body, &program.const_arrays, unroll_counter);
+#[derive(Debug, Clone, Default)]
+pub struct VectorLenTracker {
+ vectors: BTreeMap,
+}
+
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub enum VectorLenValue {
+ Scalar,
+ Vector(Vec),
+}
+
+impl VectorLenTracker {
+ fn register(&mut self, var: &Var, value: VectorLenValue) {
+ self.vectors.insert(var.clone(), value);
+ }
+
+ fn is_vector(&self, var: &Var) -> bool {
+ self.vectors.contains_key(var)
+ }
+
+ pub fn get(&self, var: &Var) -> Option<&VectorLenValue> {
+ self.vectors.get(var)
+ }
+
+ fn get_mut(&mut self, var: &Var) -> Option<&mut VectorLenValue> {
+ self.vectors.get_mut(var)
}
- changed
}
-fn unroll_loops_in_lines(
- lines: &mut Vec,
- const_arrays: &BTreeMap,
- unroll_counter: &mut Counter,
-) -> bool {
- let mut changed = false;
- let mut i = 0;
- while i < lines.len() {
- // First, recursively process nested structures
- match &mut lines[i] {
- Line::ForLoop { body, .. } => {
- changed |= unroll_loops_in_lines(body, const_arrays, unroll_counter);
- }
- Line::IfCondition {
- then_branch,
- else_branch,
- ..
- } => {
- changed |= unroll_loops_in_lines(then_branch, const_arrays, unroll_counter);
- changed |= unroll_loops_in_lines(else_branch, const_arrays, unroll_counter);
- }
- Line::Match { arms, .. } => {
- for (_, arm_body) in arms {
- changed |= unroll_loops_in_lines(arm_body, const_arrays, unroll_counter);
- }
- }
- _ => {}
+impl VectorLenValue {
+ pub fn push(&mut self, elem: Self) {
+ match self {
+ Self::Vector(v) => v.push(elem),
+ _ => panic!("push on scalar"),
}
+ }
- // Now try to unroll if it's an unrollable loop
- if let Line::ForLoop {
- iterator,
- start,
- end,
- body,
- rev,
- unroll: true,
- line_number: _,
- } = &lines[i]
- && let (Some(start_val), Some(end_val)) = (start.naive_eval(const_arrays), end.naive_eval(const_arrays))
- {
- let start_usize = start_val.to_usize();
- let end_usize = end_val.to_usize();
- let unroll_index = unroll_counter.next();
-
- let (internal_vars, _) = find_variable_usage(body, const_arrays);
+ pub fn pop(&mut self) -> Option {
+ match self {
+ Self::Vector(v) => v.pop(),
+ _ => panic!("pop on scalar"),
+ }
+ }
- let mut range: Vec<_> = (start_usize..end_usize).collect();
- if *rev {
- range.reverse();
- }
+ pub fn len(&self) -> usize {
+ match self {
+ Self::Vector(v) => v.len(),
+ _ => panic!("len on scalar"),
+ }
+ }
- let iterator = iterator.clone();
- let body = body.clone();
+ pub fn is_vector(&self) -> bool {
+ matches!(self, Self::Vector(_))
+ }
- let mut unrolled = Vec::new();
- for j in range {
- let mut body_copy = body.clone();
- replace_vars_for_unroll(&mut body_copy, &iterator, unroll_index, j, &internal_vars);
- unrolled.extend(body_copy);
- }
+ fn get(&self, i: usize) -> Option<&Self> {
+ match self {
+ Self::Vector(v) => v.get(i),
+ _ => None,
+ }
+ }
- let num_inserted = unrolled.len();
- lines.splice(i..=i, unrolled);
- changed = true;
- i += num_inserted;
- continue;
+ fn get_mut(&mut self, i: usize) -> Option<&mut Self> {
+ match self {
+ Self::Vector(v) => v.get_mut(i),
+ _ => None,
}
+ }
- i += 1;
+ pub fn navigate(&self, idx: &[F]) -> Option<&Self> {
+ idx.iter().try_fold(self, |v, &i| v.get(i.to_usize()))
}
- changed
+
+ pub fn navigate_mut(&mut self, idx: &[F]) -> Option<&mut Self> {
+ idx.iter().try_fold(self, |v, &i| v.get_mut(i.to_usize()))
+ }
+}
+
+fn build_vector_len_value(elements: &[VecLiteral]) -> VectorLenValue {
+ let mut vec_elements = Vec::new();
+
+ for elem in elements {
+ let elem_len_value = build_vector_len_value_from_element(elem);
+ vec_elements.push(elem_len_value);
+ }
+
+ VectorLenValue::Vector(vec_elements)
}
-/// Analyzes a simplified function to verify that it returns on each code path.
-fn check_function_always_returns(func: &SimpleFunction) {
- check_block_always_returns(&func.name, &func.instructions);
+fn build_vector_len_value_from_element(element: &VecLiteral) -> VectorLenValue {
+ match element {
+ VecLiteral::Vec(inner) => build_vector_len_value(inner),
+ VecLiteral::Expr(_) => VectorLenValue::Scalar,
+ }
}
-fn check_block_always_returns(function_name: &String, instructions: &[SimpleLine]) {
- match instructions.last() {
- Some(SimpleLine::Match { value: _, arms }) => {
- for arm in arms {
- check_block_always_returns(function_name, arm);
- }
- }
- Some(SimpleLine::IfNotZero {
- condition: _,
- then_branch,
- else_branch,
- line_number: _,
- }) => {
- check_block_always_returns(function_name, then_branch);
- check_block_always_returns(function_name, else_branch);
+fn compile_time_transform_in_program(
+ program: &mut Program,
+ unroll_counter: &mut Counter,
+ inline_counter: &mut Counter,
+) -> Result<(), String> {
+ let const_arrays = program.const_arrays.clone();
+
+ // Collect inlined functions
+ let inlined_functions: BTreeMap<_, _> = program
+ .functions
+ .iter()
+ .filter(|(_, func)| func.inlined)
+ .map(|(name, func)| (name.clone(), func.clone()))
+ .collect();
+
+ for func in inlined_functions.values() {
+ if func.has_mutable_arguments() {
+ return Err("Inlined functions with mutable arguments are not supported yet".to_string());
}
- Some(SimpleLine::FunctionRet { return_data: _ }) => {
- // good
+ if func.has_const_arguments() {
+ return Err("Inlined functions with constant arguments are not supported yet".to_string());
}
- Some(SimpleLine::Panic) => {
- // good
+ }
+
+ // Process all functions, including newly created specialized ones
+ let mut processed: BTreeSet = BTreeSet::new();
+ loop {
+ let to_process: Vec<_> = program
+ .functions
+ .iter()
+ .filter(|(name, func)| !func.inlined && !func.has_const_arguments() && !processed.contains(*name))
+ .map(|(name, _)| name.clone())
+ .collect();
+
+ if to_process.is_empty() {
+ break;
}
- _ => {
- panic!("Cannot prove that function always returns: {function_name}");
+
+ let existing_functions = program.functions.clone();
+ for func_name in to_process {
+ processed.insert(func_name.clone());
+ let func = program.functions.get_mut(&func_name).unwrap();
+ let mut new_functions = BTreeMap::new();
+ compile_time_transform_in_lines(
+ &mut func.body,
+ &const_arrays,
+ &existing_functions,
+ &inlined_functions,
+ &mut new_functions,
+ unroll_counter,
+ inline_counter,
+ )?;
+ // Add new specialized functions - they'll be processed in the next iteration of this loop
+ for (name, new_func) in new_functions {
+ program.functions.entry(name).or_insert(new_func);
+ }
}
}
+ Ok(())
}
-/// Analyzes the program to verify that each variable is defined in each context where it is used.
-fn check_program_scoping(program: &Program) {
- for (_, function) in program.functions.iter() {
- let mut scope = Scope { vars: BTreeSet::new() };
- for (arg, _) in function.arguments.iter() {
- scope.vars.insert(arg.clone());
+fn compile_time_transform_in_lines(
+ lines: &mut Vec,
+ const_arrays: &BTreeMap,
+ existing_functions: &BTreeMap,
+ inlined_functions: &BTreeMap,
+ new_functions: &mut BTreeMap,
+ unroll_counter: &mut Counter,
+ inline_counter: &mut Counter,
+) -> Result<(), String> {
+ let mut vector_len_tracker = VectorLenTracker::default();
+ let mut const_var_exprs: BTreeMap = BTreeMap::new(); // used to simplify expressions containing variables with known constant values
+
+ let mut i = 0;
+ while i < lines.len() {
+ let line = &mut lines[i];
+
+ for expr in line.expressions_mut() {
+ substitute_const_vars_in_expr(expr, &const_var_exprs);
+ compile_time_transform_in_expr(expr, const_arrays, &vector_len_tracker);
}
- let mut ctx = Context {
- scopes: vec![scope],
- const_arrays: program.const_arrays.clone(),
- };
- check_block_scoping(&function.body, &mut ctx);
- }
-}
+ // Extract nested inlined calls from expressions (e.g., `x = a + inlined_func(b)`)
+ if let Some(new_lines) = extract_inlined_calls(line, inlined_functions, inline_counter)? {
+ lines.splice(i..=i, new_lines);
+ continue;
+ }
-/// Analyzes the block to verify that each variable is defined in each context where it is used.
-fn check_block_scoping(block: &[Line], ctx: &mut Context) {
- for line in block.iter() {
match line {
- Line::ForwardDeclaration { var } => {
- ctx.add_var(var);
- }
- Line::Match { value, arms } => {
- check_expr_scoping(value, ctx);
- for (_, arm) in arms {
- ctx.scopes.push(Scope { vars: BTreeSet::new() });
- check_block_scoping(arm, ctx);
- ctx.scopes.pop();
- }
- }
Line::Statement { targets, value, .. } => {
- check_expr_scoping(value, ctx);
- // First: add new variables to scope
- for target in targets {
- if let AssignmentTarget::Var(var) = target
- && !ctx.defines(var)
+ if let Some(inlined) = try_inline_call(value, targets, inlined_functions, const_arrays, inline_counter)
+ {
+ lines.splice(i..=i, inlined);
+ continue;
+ }
+ if let Expression::FunctionCall {
+ function_name, args, ..
+ } = value
+ && let Some(func) = existing_functions.get(function_name.as_str())
+ && func.has_const_arguments()
+ {
+ let mut const_evals = Vec::new();
+ for (arg_expr, arg) in args.iter().zip(&func.arguments) {
+ if arg.is_const {
+ if let Some(const_eval) = arg_expr.as_scalar() {
+ const_evals.push((arg.name.clone(), const_eval));
+ } else {
+ return Err(format!(
+ "Cannot evaluate const argument '{}' for function '{}'",
+ arg.name, function_name
+ ));
+ }
+ }
+ }
+ let const_funct_name = format!(
+ "{function_name}_{}",
+ const_evals
+ .iter()
+ .map(|(v, c)| format!("{v}={c}"))
+ .collect::>()
+ .join("_")
+ );
+ *function_name = const_funct_name.clone();
+ *args = args
+ .iter()
+ .zip(&func.arguments)
+ .filter(|(_, arg)| !arg.is_const)
+ .map(|(e, _)| e.clone())
+ .collect();
+ if !new_functions.contains_key(&const_funct_name)
+ && !existing_functions.contains_key(&const_funct_name)
{
- ctx.add_var(var);
+ let mut new_body = func.body.clone();
+ replace_vars_by_const_in_lines(&mut new_body, &const_evals.iter().cloned().collect());
+ new_functions.insert(
+ const_funct_name.clone(),
+ Function {
+ name: const_funct_name,
+ arguments: func.arguments.iter().filter(|a| !a.is_const).cloned().collect(),
+ inlined: false,
+ body: new_body,
+ n_returned_vars: func.n_returned_vars,
+ },
+ );
}
}
- // Second pass: check array access targets
- for target in targets {
- if let AssignmentTarget::ArrayAccess { array: _, index } = target {
- check_expr_scoping(index, ctx);
+ if targets.len() == 1
+ && let AssignmentTarget::Var { var, is_mutable: false } = &targets[0]
+ && let Some(value_const) = value.as_scalar()
+ {
+ const_var_exprs.insert(var.clone(), value_const);
+ }
+ }
+
+ Line::VecDeclaration { var, elements, .. } => {
+ vector_len_tracker.register(var, build_vector_len_value(elements));
+ }
+
+ Line::Push {
+ vector,
+ indices,
+ element,
+ ..
+ } => {
+ let Some(const_indices) = indices.iter().map(|idx| idx.as_scalar()).collect::