From 87ce69412c67506c81ed1e8bee6d4057df8393a5 Mon Sep 17 00:00:00 2001 From: Colin Roberts Date: Fri, 28 Feb 2025 06:39:09 -0700 Subject: [PATCH 01/51] feat: bring in frontend --- .rustfmt.toml | 51 +- Cargo.lock | 2094 +++++++++++-- Cargo.toml | 141 +- frontend/Cargo.lock | 2716 +++++++++++++++++ frontend/Cargo.toml | 44 + .../noir_circuit_data/add_external.json | 1 + .../noir_circuit_data/square_zeroth.json | 1 + .../noir_circuit_data/swap_memory.json | 1 + frontend/mock/fold.json | 1 + frontend/mock/mock.json | 1 + frontend/src/circom/mod.rs | 172 ++ frontend/src/circom/r1cs.rs | 277 ++ frontend/src/circom/wasm_witness.rs | 36 + frontend/src/circom/witness.rs | 261 ++ frontend/src/errors.rs | 106 + frontend/src/lib.rs | 91 + frontend/src/noir/mod.rs | 212 ++ frontend/src/noir/tests.rs | 246 ++ frontend/src/program/data.rs | 636 ++++ frontend/src/program/mod.rs | 458 +++ frontend/src/program/utils.rs | 182 ++ frontend/src/proof.rs | 63 + frontend/src/setup.rs | 130 + .../examples/circuit_data/add_external.bin | Bin 0 -> 114 bytes .../examples/circuit_data/add_external.circom | 13 + .../examples/circuit_data/add_external.r1cs | Bin 0 -> 152 bytes .../examples/circuit_data/square_zeroth.bin | Bin 0 -> 75 bytes .../circuit_data/square_zeroth.circom | 12 + .../examples/circuit_data/square_zeroth.r1cs | Bin 0 -> 356 bytes .../examples/circuit_data/swap_memory.bin | Bin 0 -> 68 bytes .../examples/circuit_data/swap_memory.circom | 12 + .../examples/circuit_data/swap_memory.r1cs | Bin 0 -> 320 bytes frontend/src/tests/inputs.rs | 8 + frontend/src/tests/mod.rs | 13 + frontend/src/tests/witnesscalc.rs | 152 + prover/Cargo.toml | 84 + prover/src/bellpepper/mod.rs | 62 + prover/src/bellpepper/r1cs.rs | 147 + prover/src/bellpepper/shape_cs.rs | 82 + {src => prover/src}/bellpepper/solver.rs | 0 prover/src/bellpepper/test_shape_cs.rs | 297 ++ prover/src/circuit.rs | 496 +++ {src => prover/src}/constants.rs | 0 prover/src/cyclefold/circuit.rs | 256 ++ 
prover/src/cyclefold/gadgets.rs | 632 ++++ {src => prover/src}/cyclefold/mod.rs | 0 prover/src/cyclefold/nifs.rs | 141 + prover/src/cyclefold/nova_circuit.rs | 510 ++++ prover/src/cyclefold/snark.rs | 555 ++++ prover/src/cyclefold/util.rs | 87 + {src => prover/src}/digest.rs | 91 +- prover/src/errors.rs | 99 + prover/src/fast_serde.rs | 100 + prover/src/gadgets/ecc.rs | 1071 +++++++ prover/src/gadgets/mod.rs | 24 + prover/src/gadgets/nonnative/bignat.rs | 849 ++++++ prover/src/gadgets/nonnative/mod.rs | 35 + prover/src/gadgets/nonnative/util.rs | 254 ++ prover/src/gadgets/r1cs.rs | 395 +++ prover/src/gadgets/utils.rs | 385 +++ prover/src/lib.rs | 1625 ++++++++++ prover/src/nifs.rs | 370 +++ prover/src/provider/bn256_grumpkin.rs | 109 + prover/src/provider/hyperkzg.rs | 852 ++++++ prover/src/provider/ipa_pc.rs | 360 +++ prover/src/provider/keccak.rs | 219 ++ prover/src/provider/kzg_commitment.rs | 301 ++ prover/src/provider/mod.rs | 170 ++ prover/src/provider/pedersen.rs | 310 ++ prover/src/provider/poseidon.rs | 222 ++ prover/src/provider/tests/ipa_pc.rs | 128 + prover/src/provider/tests/mod.rs | 134 + prover/src/provider/traits.rs | 189 ++ prover/src/provider/util/fb_msm.rs | 126 + prover/src/provider/util/mod.rs | 213 ++ prover/src/r1cs/mod.rs | 833 +++++ prover/src/r1cs/sparse.rs | 333 ++ {src => prover/src}/r1cs/util.rs | 46 +- prover/src/spartan/batched.rs | 581 ++++ prover/src/spartan/batched_ppsnark.rs | 1261 ++++++++ {src => prover/src}/spartan/macros.rs | 0 prover/src/spartan/math.rs | 15 + {src => prover/src}/spartan/mod.rs | 283 +- prover/src/spartan/polys/eq.rs | 115 + prover/src/spartan/polys/identity.rs | 25 + prover/src/spartan/polys/masked_eq.rs | 131 + {src => prover/src}/spartan/polys/mod.rs | 0 prover/src/spartan/polys/multilinear.rs | 295 ++ prover/src/spartan/polys/power.rs | 63 + prover/src/spartan/polys/univariate.rs | 373 +++ prover/src/spartan/ppsnark.rs | 1036 +++++++ prover/src/spartan/snark.rs | 502 +++ 
prover/src/spartan/sumcheck/engine.rs | 571 ++++ prover/src/spartan/sumcheck/mod.rs | 542 ++++ {src => prover/src}/supernova/Readme.md | 0 prover/src/supernova/circuit.rs | 769 +++++ {src => prover/src}/supernova/error.rs | 18 +- prover/src/supernova/mod.rs | 1189 ++++++++ prover/src/supernova/snark.rs | 569 ++++ prover/src/supernova/test.rs | 770 +++++ {src => prover/src}/supernova/utils.rs | 159 +- prover/src/traits/commitment.rs | 87 + prover/src/traits/evaluation.rs | 60 + prover/src/traits/mod.rs | 182 ++ prover/src/traits/snark.rs | 132 + src/bellpepper/mod.rs | 64 - src/bellpepper/r1cs.rs | 162 - src/bellpepper/shape_cs.rs | 107 - src/bellpepper/test_shape_cs.rs | 320 -- src/circuit.rs | 528 ---- src/cyclefold/circuit.rs | 285 -- src/cyclefold/gadgets.rs | 706 ----- src/cyclefold/nifs.rs | 161 - src/cyclefold/nova_circuit.rs | 565 ---- src/cyclefold/snark.rs | 563 ---- src/cyclefold/util.rs | 90 - src/errors.rs | 101 - src/fast_serde.rs | 100 - src/gadgets/ecc.rs | 1161 ------- src/gadgets/mod.rs | 26 - src/gadgets/nonnative/bignat.rs | 908 ------ src/gadgets/nonnative/mod.rs | 35 - src/gadgets/nonnative/util.rs | 293 -- src/gadgets/r1cs.rs | 427 --- src/gadgets/utils.rs | 404 --- src/nifs.rs | 414 --- src/provider/bn256_grumpkin.rs | 113 - src/provider/hyperkzg.rs | 923 ------ src/provider/ipa_pc.rs | 394 --- src/provider/keccak.rs | 247 -- src/provider/kzg_commitment.rs | 349 --- src/provider/mod.rs | 176 -- src/provider/pedersen.rs | 369 --- src/provider/poseidon.rs | 244 -- src/provider/tests/ipa_pc.rs | 130 - src/provider/tests/mod.rs | 155 - src/provider/traits.rs | 208 -- src/provider/util/fb_msm.rs | 137 - src/provider/util/mod.rs | 234 -- src/r1cs/mod.rs | 921 ------ src/r1cs/sparse.rs | 380 --- src/spartan/batched.rs | 650 ---- src/spartan/batched_ppsnark.rs | 1413 --------- src/spartan/math.rs | 15 - src/spartan/polys/eq.rs | 124 - src/spartan/polys/identity.rs | 30 - src/spartan/polys/masked_eq.rs | 150 - src/spartan/polys/multilinear.rs | 336 -- 
src/spartan/polys/power.rs | 75 - src/spartan/polys/univariate.rs | 415 --- src/spartan/ppsnark.rs | 1097 ------- src/spartan/snark.rs | 560 ---- src/spartan/sumcheck/engine.rs | 630 ---- src/spartan/sumcheck/mod.rs | 632 ---- src/supernova/circuit.rs | 838 ----- src/supernova/mod.rs | 1296 -------- src/supernova/snark.rs | 663 ---- src/supernova/test.rs | 876 ------ src/traits/commitment.rs | 89 - src/traits/evaluation.rs | 60 - src/traits/mod.rs | 192 -- src/traits/snark.rs | 137 - taplo.toml | 20 + 163 files changed, 30449 insertions(+), 23300 deletions(-) create mode 100644 frontend/Cargo.lock create mode 100644 frontend/Cargo.toml create mode 100644 frontend/examples/noir_circuit_data/add_external.json create mode 100644 frontend/examples/noir_circuit_data/square_zeroth.json create mode 100644 frontend/examples/noir_circuit_data/swap_memory.json create mode 100644 frontend/mock/fold.json create mode 100644 frontend/mock/mock.json create mode 100644 frontend/src/circom/mod.rs create mode 100644 frontend/src/circom/r1cs.rs create mode 100644 frontend/src/circom/wasm_witness.rs create mode 100644 frontend/src/circom/witness.rs create mode 100644 frontend/src/errors.rs create mode 100644 frontend/src/lib.rs create mode 100644 frontend/src/noir/mod.rs create mode 100644 frontend/src/noir/tests.rs create mode 100644 frontend/src/program/data.rs create mode 100644 frontend/src/program/mod.rs create mode 100644 frontend/src/program/utils.rs create mode 100644 frontend/src/proof.rs create mode 100644 frontend/src/setup.rs create mode 100644 frontend/src/tests/examples/circuit_data/add_external.bin create mode 100644 frontend/src/tests/examples/circuit_data/add_external.circom create mode 100644 frontend/src/tests/examples/circuit_data/add_external.r1cs create mode 100644 frontend/src/tests/examples/circuit_data/square_zeroth.bin create mode 100644 frontend/src/tests/examples/circuit_data/square_zeroth.circom create mode 100644 
frontend/src/tests/examples/circuit_data/square_zeroth.r1cs create mode 100644 frontend/src/tests/examples/circuit_data/swap_memory.bin create mode 100644 frontend/src/tests/examples/circuit_data/swap_memory.circom create mode 100644 frontend/src/tests/examples/circuit_data/swap_memory.r1cs create mode 100644 frontend/src/tests/inputs.rs create mode 100644 frontend/src/tests/mod.rs create mode 100644 frontend/src/tests/witnesscalc.rs create mode 100644 prover/Cargo.toml create mode 100644 prover/src/bellpepper/mod.rs create mode 100644 prover/src/bellpepper/r1cs.rs create mode 100644 prover/src/bellpepper/shape_cs.rs rename {src => prover/src}/bellpepper/solver.rs (100%) create mode 100644 prover/src/bellpepper/test_shape_cs.rs create mode 100644 prover/src/circuit.rs rename {src => prover/src}/constants.rs (100%) create mode 100644 prover/src/cyclefold/circuit.rs create mode 100644 prover/src/cyclefold/gadgets.rs rename {src => prover/src}/cyclefold/mod.rs (100%) create mode 100644 prover/src/cyclefold/nifs.rs create mode 100644 prover/src/cyclefold/nova_circuit.rs create mode 100644 prover/src/cyclefold/snark.rs create mode 100644 prover/src/cyclefold/util.rs rename {src => prover/src}/digest.rs (66%) create mode 100644 prover/src/errors.rs create mode 100644 prover/src/fast_serde.rs create mode 100644 prover/src/gadgets/ecc.rs create mode 100644 prover/src/gadgets/mod.rs create mode 100644 prover/src/gadgets/nonnative/bignat.rs create mode 100644 prover/src/gadgets/nonnative/mod.rs create mode 100644 prover/src/gadgets/nonnative/util.rs create mode 100644 prover/src/gadgets/r1cs.rs create mode 100644 prover/src/gadgets/utils.rs create mode 100644 prover/src/lib.rs create mode 100644 prover/src/nifs.rs create mode 100644 prover/src/provider/bn256_grumpkin.rs create mode 100644 prover/src/provider/hyperkzg.rs create mode 100644 prover/src/provider/ipa_pc.rs create mode 100644 prover/src/provider/keccak.rs create mode 100644 prover/src/provider/kzg_commitment.rs 
create mode 100644 prover/src/provider/mod.rs create mode 100644 prover/src/provider/pedersen.rs create mode 100644 prover/src/provider/poseidon.rs create mode 100644 prover/src/provider/tests/ipa_pc.rs create mode 100644 prover/src/provider/tests/mod.rs create mode 100644 prover/src/provider/traits.rs create mode 100644 prover/src/provider/util/fb_msm.rs create mode 100644 prover/src/provider/util/mod.rs create mode 100644 prover/src/r1cs/mod.rs create mode 100644 prover/src/r1cs/sparse.rs rename {src => prover/src}/r1cs/util.rs (51%) create mode 100644 prover/src/spartan/batched.rs create mode 100644 prover/src/spartan/batched_ppsnark.rs rename {src => prover/src}/spartan/macros.rs (100%) create mode 100644 prover/src/spartan/math.rs rename {src => prover/src}/spartan/mod.rs (55%) create mode 100644 prover/src/spartan/polys/eq.rs create mode 100644 prover/src/spartan/polys/identity.rs create mode 100644 prover/src/spartan/polys/masked_eq.rs rename {src => prover/src}/spartan/polys/mod.rs (100%) create mode 100644 prover/src/spartan/polys/multilinear.rs create mode 100644 prover/src/spartan/polys/power.rs create mode 100644 prover/src/spartan/polys/univariate.rs create mode 100644 prover/src/spartan/ppsnark.rs create mode 100644 prover/src/spartan/snark.rs create mode 100644 prover/src/spartan/sumcheck/engine.rs create mode 100644 prover/src/spartan/sumcheck/mod.rs rename {src => prover/src}/supernova/Readme.md (100%) create mode 100644 prover/src/supernova/circuit.rs rename {src => prover/src}/supernova/error.rs (59%) create mode 100644 prover/src/supernova/mod.rs create mode 100644 prover/src/supernova/snark.rs create mode 100644 prover/src/supernova/test.rs rename {src => prover/src}/supernova/utils.rs (57%) create mode 100644 prover/src/traits/commitment.rs create mode 100644 prover/src/traits/evaluation.rs create mode 100644 prover/src/traits/mod.rs create mode 100644 prover/src/traits/snark.rs delete mode 100644 src/bellpepper/mod.rs delete mode 100644 
src/bellpepper/r1cs.rs delete mode 100644 src/bellpepper/shape_cs.rs delete mode 100644 src/bellpepper/test_shape_cs.rs delete mode 100644 src/circuit.rs delete mode 100644 src/cyclefold/circuit.rs delete mode 100644 src/cyclefold/gadgets.rs delete mode 100644 src/cyclefold/nifs.rs delete mode 100644 src/cyclefold/nova_circuit.rs delete mode 100644 src/cyclefold/snark.rs delete mode 100644 src/cyclefold/util.rs delete mode 100644 src/errors.rs delete mode 100644 src/fast_serde.rs delete mode 100644 src/gadgets/ecc.rs delete mode 100644 src/gadgets/mod.rs delete mode 100644 src/gadgets/nonnative/bignat.rs delete mode 100644 src/gadgets/nonnative/mod.rs delete mode 100644 src/gadgets/nonnative/util.rs delete mode 100644 src/gadgets/r1cs.rs delete mode 100644 src/gadgets/utils.rs delete mode 100644 src/nifs.rs delete mode 100644 src/provider/bn256_grumpkin.rs delete mode 100644 src/provider/hyperkzg.rs delete mode 100644 src/provider/ipa_pc.rs delete mode 100644 src/provider/keccak.rs delete mode 100644 src/provider/kzg_commitment.rs delete mode 100644 src/provider/mod.rs delete mode 100644 src/provider/pedersen.rs delete mode 100644 src/provider/poseidon.rs delete mode 100644 src/provider/tests/ipa_pc.rs delete mode 100644 src/provider/tests/mod.rs delete mode 100644 src/provider/traits.rs delete mode 100644 src/provider/util/fb_msm.rs delete mode 100644 src/provider/util/mod.rs delete mode 100644 src/r1cs/mod.rs delete mode 100644 src/r1cs/sparse.rs delete mode 100644 src/spartan/batched.rs delete mode 100644 src/spartan/batched_ppsnark.rs delete mode 100644 src/spartan/math.rs delete mode 100644 src/spartan/polys/eq.rs delete mode 100644 src/spartan/polys/identity.rs delete mode 100644 src/spartan/polys/masked_eq.rs delete mode 100644 src/spartan/polys/multilinear.rs delete mode 100644 src/spartan/polys/power.rs delete mode 100644 src/spartan/polys/univariate.rs delete mode 100644 src/spartan/ppsnark.rs delete mode 100644 src/spartan/snark.rs delete mode 100644 
src/spartan/sumcheck/engine.rs delete mode 100644 src/spartan/sumcheck/mod.rs delete mode 100644 src/supernova/circuit.rs delete mode 100644 src/supernova/mod.rs delete mode 100644 src/supernova/snark.rs delete mode 100644 src/supernova/test.rs delete mode 100644 src/traits/commitment.rs delete mode 100644 src/traits/evaluation.rs delete mode 100644 src/traits/mod.rs delete mode 100644 src/traits/snark.rs create mode 100644 taplo.toml diff --git a/.rustfmt.toml b/.rustfmt.toml index de018f8..452cbec 100644 --- a/.rustfmt.toml +++ b/.rustfmt.toml @@ -1,11 +1,46 @@ -imports_granularity = "Crate" -group_imports = "StdExternalCrate" +# Rustfmt configuration +# Opinionated whitespace and tabs. The most important of these are imports and width settings. +# Others may want to borrow or change these to their own liking. +# https://rust-lang.github.io/rustfmt -format_code_in_doc_comments = true +# version-related +unstable_features=true +use_try_shorthand=true # replace any `try!` (2015 Rust) with `?` -use_field_init_shorthand = true +# misc formatting +condense_wildcard_suffixes =true # replace: (a,b,_,_)=(1, 2, 3, 4); -> (a,b,..)=(1, 2, 3, 4); +format_code_in_doc_comments =true # format code blocks in doc comments +format_macro_matchers =true # $a: ident -> $a:ident +format_strings =true # break and insert newlines for long string literals +match_block_trailing_comma =true # include comma in match blocks after '}' +normalize_comments =true # convert /*..*/ to //.. where possible +reorder_impl_items =true # move `type` and `const` declarations to top of impl block +struct_field_align_threshold=20 # align struct arguments' types vertically +use_field_init_shorthand =true # struct initialization short {x: x} -> {x} -wrap_comments = true -normalize_comments = true -comment_width = 80 -edition = "2021" +# reduce whitespace +blank_lines_upper_bound=1 # default: 1. Sometimes useful to change to 0 to condense a file. 
+brace_style ="PreferSameLine" # prefer starting `{` without inserting extra \n +fn_single_line =true # if it's a short 1-liner, let it be a short 1-liner +match_arm_blocks =false # remove unnecessary {} in match arms +newline_style ="Unix" # not auto, we won the culture war. \n over \r\n +overflow_delimited_expr=true # prefer ]); to ]\n); +where_single_line =true # put where on a single line if possible + +# imports preferences +group_imports ="StdExternalCrate" # create import groupings for std, external libs, and internal deps +imports_granularity="Crate" # aggressively group imports + +# width settings: everything to 100 +comment_width =100 # default: 80 +inline_attribute_width=60 # inlines #[cfg(test)]\nmod test -> #[cfg(test)] mod test +max_width =100 # default: 100 +use_small_heuristics ="Max" # don't ever newline short of `max_width`. +wrap_comments =true # wrap comments at `comment_width` +# format_strings = true # wrap strings at `max_length` + +# tabs and spaces +hard_tabs =false # (def: false) use spaces over tabs +tab_spaces=2 # 2 > 4, it's just math. + +ignore=["tls"] diff --git a/Cargo.lock b/Cargo.lock index 37a15d1..4d454ac 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,6 +1,68 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
-version = 3 +version = 4 + +[[package]] +name = "acir" +version = "1.0.0-beta.2" +source = "git+https://github.com/noir-lang/noir?rev=v1.0.0-beta.2#1a2a08cbcb68646ff1aaef383cfc1798933c1355" +dependencies = [ + "acir_field", + "base64", + "bincode", + "brillig", + "flate2", + "serde", + "serde-big-array", + "strum", + "strum_macros", + "thiserror", +] + +[[package]] +name = "acir_field" +version = "1.0.0-beta.2" +source = "git+https://github.com/noir-lang/noir?rev=v1.0.0-beta.2#1a2a08cbcb68646ff1aaef383cfc1798933c1355" +dependencies = [ + "ark-bn254 0.5.0", + "ark-ff 0.5.0", + "cfg-if", + "hex", + "num-bigint 0.4.6", + "serde", +] + +[[package]] +name = "acvm" +version = "1.0.0-beta.2" +source = "git+https://github.com/noir-lang/noir?rev=v1.0.0-beta.2#1a2a08cbcb68646ff1aaef383cfc1798933c1355" +dependencies = [ + "acir", + "acvm_blackbox_solver", + "brillig_vm", + "fxhash", + "indexmap 1.9.3", + "serde", + "thiserror", + "tracing", +] + +[[package]] +name = "acvm_blackbox_solver" +version = "1.0.0-beta.2" +source = "git+https://github.com/noir-lang/noir?rev=v1.0.0-beta.2#1a2a08cbcb68646ff1aaef383cfc1798933c1355" +dependencies = [ + "acir", + "blake2", + "blake3", + "k256", + "keccak", + "libaes", + "num-bigint 0.4.6", + "p256", + "sha2", + "thiserror", +] [[package]] name = "addchain" @@ -28,6 +90,18 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" +[[package]] +name = "ahash" +version = "0.8.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" +dependencies = [ + "cfg-if", + "once_cell", + "version_check", + "zerocopy", +] + [[package]] name = "aho-corasick" version = "1.1.3" @@ -37,12 +111,37 @@ dependencies = [ "memchr", ] +[[package]] +name = "allocator-api2" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" + +[[package]] +name = "alloy-rlp" +version = "0.3.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d6c1d995bff8d011f7cd6c81820d51825e6e06d6db73914c1630ecf544d83d6" +dependencies = [ + "arrayvec", + "bytes", +] + [[package]] name = "anes" version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" +[[package]] +name = "ansi_term" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" +dependencies = [ + "winapi", +] + [[package]] name = "anstyle" version = "1.0.8" @@ -55,6 +154,307 @@ version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6" +[[package]] +name = "ark-bn254" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a22f4561524cd949590d78d7d4c5df8f592430d221f7f3c9497bbafd8972120f" +dependencies = [ + "ark-ec 0.4.2", + "ark-ff 0.4.2", + "ark-std 0.4.0", +] + +[[package]] +name = "ark-bn254" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d69eab57e8d2663efa5c63135b2af4f396d66424f88954c21104125ab6b3e6bc" +dependencies = [ + "ark-ec 0.5.0", + "ark-ff 0.5.0", + "ark-std 0.5.0", +] + +[[package]] +name = "ark-ec" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "defd9a439d56ac24968cca0571f598a61bc8c55f71d50a89cda591cb750670ba" +dependencies = [ + "ark-ff 0.4.2", + "ark-poly 0.4.2", + "ark-serialize 0.4.2", + "ark-std 0.4.0", + "derivative", + "hashbrown 0.13.2", + "itertools 0.10.5", + "num-traits", + "zeroize", +] + +[[package]] +name = "ark-ec" +version = "0.5.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "43d68f2d516162846c1238e755a7c4d131b892b70cc70c471a8e3ca3ed818fce" +dependencies = [ + "ahash", + "ark-ff 0.5.0", + "ark-poly 0.5.0", + "ark-serialize 0.5.0", + "ark-std 0.5.0", + "educe", + "fnv", + "hashbrown 0.15.2", + "itertools 0.13.0", + "num-bigint 0.4.6", + "num-integer", + "num-traits", + "zeroize", +] + +[[package]] +name = "ark-ff" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b3235cc41ee7a12aaaf2c575a2ad7b46713a8a50bda2fc3b003a04845c05dd6" +dependencies = [ + "ark-ff-asm 0.3.0", + "ark-ff-macros 0.3.0", + "ark-serialize 0.3.0", + "ark-std 0.3.0", + "derivative", + "num-bigint 0.4.6", + "num-traits", + "paste", + "rustc_version 0.3.3", + "zeroize", +] + +[[package]] +name = "ark-ff" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec847af850f44ad29048935519032c33da8aa03340876d351dfab5660d2966ba" +dependencies = [ + "ark-ff-asm 0.4.2", + "ark-ff-macros 0.4.2", + "ark-serialize 0.4.2", + "ark-std 0.4.0", + "derivative", + "digest 0.10.7", + "itertools 0.10.5", + "num-bigint 0.4.6", + "num-traits", + "paste", + "rustc_version 0.4.1", + "zeroize", +] + +[[package]] +name = "ark-ff" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a177aba0ed1e0fbb62aa9f6d0502e9b46dad8c2eab04c14258a1212d2557ea70" +dependencies = [ + "ark-ff-asm 0.5.0", + "ark-ff-macros 0.5.0", + "ark-serialize 0.5.0", + "ark-std 0.5.0", + "arrayvec", + "digest 0.10.7", + "educe", + "itertools 0.13.0", + "num-bigint 0.4.6", + "num-traits", + "paste", + "zeroize", +] + +[[package]] +name = "ark-ff-asm" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db02d390bf6643fb404d3d22d31aee1c4bc4459600aef9113833d17e786c6e44" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-asm" +version = "0.4.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ed4aa4fe255d0bc6d79373f7e31d2ea147bcf486cba1be5ba7ea85abdb92348" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-asm" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62945a2f7e6de02a31fe400aa489f0e0f5b2502e69f95f853adb82a96c7a6b60" +dependencies = [ + "quote", + "syn 2.0.98", +] + +[[package]] +name = "ark-ff-macros" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db2fd794a08ccb318058009eefdf15bcaaaaf6f8161eb3345f907222bac38b20" +dependencies = [ + "num-bigint 0.4.6", + "num-traits", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-macros" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7abe79b0e4288889c4574159ab790824d0033b9fdcb2a112a3182fac2e514565" +dependencies = [ + "num-bigint 0.4.6", + "num-traits", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-macros" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09be120733ee33f7693ceaa202ca41accd5653b779563608f1234f78ae07c4b3" +dependencies = [ + "num-bigint 0.4.6", + "num-traits", + "proc-macro2", + "quote", + "syn 2.0.98", +] + +[[package]] +name = "ark-poly" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d320bfc44ee185d899ccbadfa8bc31aab923ce1558716e1997a1e74057fe86bf" +dependencies = [ + "ark-ff 0.4.2", + "ark-serialize 0.4.2", + "ark-std 0.4.0", + "derivative", + "hashbrown 0.13.2", +] + +[[package]] +name = "ark-poly" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "579305839da207f02b89cd1679e50e67b4331e2f9294a57693e5051b7703fe27" +dependencies = [ + "ahash", + "ark-ff 0.5.0", + "ark-serialize 0.5.0", + "ark-std 0.5.0", + "educe", + "fnv", + "hashbrown 0.15.2", +] + 
+[[package]] +name = "ark-serialize" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d6c2b318ee6e10f8c2853e73a83adc0ccb88995aa978d8a3408d492ab2ee671" +dependencies = [ + "ark-std 0.3.0", + "digest 0.9.0", +] + +[[package]] +name = "ark-serialize" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adb7b85a02b83d2f22f89bd5cac66c9c89474240cb6207cb1efc16d098e822a5" +dependencies = [ + "ark-serialize-derive 0.4.2", + "ark-std 0.4.0", + "digest 0.10.7", + "num-bigint 0.4.6", +] + +[[package]] +name = "ark-serialize" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f4d068aaf107ebcd7dfb52bc748f8030e0fc930ac8e360146ca54c1203088f7" +dependencies = [ + "ark-serialize-derive 0.5.0", + "ark-std 0.5.0", + "arrayvec", + "digest 0.10.7", + "num-bigint 0.4.6", +] + +[[package]] +name = "ark-serialize-derive" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae3281bc6d0fd7e549af32b52511e1302185bd688fd3359fa36423346ff682ea" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-serialize-derive" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "213888f660fddcca0d257e88e54ac05bca01885f258ccdf695bafd77031bb69d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.98", +] + +[[package]] +name = "ark-std" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1df2c09229cbc5a028b1d70e00fdb2acee28b1055dfb5ca73eea49c5a25c4e7c" +dependencies = [ + "num-traits", + "rand 0.8.5", +] + +[[package]] +name = "ark-std" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94893f1e0c6eeab764ade8dc4c0db24caf4fe7cbbaafc0eba0a9030f447b5185" +dependencies = [ + "num-traits", + "rand 0.8.5", +] + +[[package]] +name = "ark-std" +version = 
"0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "246a225cc6131e9ee4f24619af0f19d67761fff15d7ccc22e42b80846e69449a" +dependencies = [ + "num-traits", + "rand 0.8.5", +] + [[package]] name = "arrayref" version = "0.3.9" @@ -67,6 +467,35 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" +[[package]] +name = "ascii-canvas" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8824ecca2e851cec16968d54a01dd372ef8f95b244fb84b84e70128be347c3c6" +dependencies = [ + "term", +] + +[[package]] +name = "auto_impl" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e12882f59de5360c748c4cbf569a042d5fb0eb515f7bea9c1f470b47f6ffbd73" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.98", +] + +[[package]] +name = "autocfg" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0dde43e75fd43e8a1bf86103336bc699aa8d17ad1be60c76c0bdfd4828e19b78" +dependencies = [ + "autocfg 1.4.0", +] + [[package]] name = "autocfg" version = "1.4.0" @@ -88,6 +517,24 @@ dependencies = [ "windows-targets", ] +[[package]] +name = "base16ct" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "349a06037c7bf932dd7e7d1f653678b2038b9ad46a74102f1fc7bd7872678cce" + +[[package]] +name = "base64" +version = "0.21.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" + +[[package]] +name = "base64ct" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" + [[package]] name = "bellpepper" version = "0.4.1" @@ -95,7 +542,7 @@ source = 
"git+https://github.com/argumentcomputer/bellpepper?branch=dev#d0225bf6 dependencies = [ "bellpepper-core", "byteorder", - "ff", + "ff 0.13.0", "itertools 0.12.1", ] @@ -107,7 +554,7 @@ checksum = "1d8abb418570756396d722841b19edfec21d4e89e1cf8990610663040ecb1aea" dependencies = [ "blake2s_simd", "byteorder", - "ff", + "ff 0.13.0", "serde", "thiserror", ] @@ -121,6 +568,26 @@ dependencies = [ "serde", ] +[[package]] +name = "bindgen" +version = "0.70.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f49d8fed880d473ea71efb9bf597651e77201bdd4893efe54c9e5d65ae04ce6f" +dependencies = [ + "bitflags", + "cexpr", + "clang-sys", + "itertools 0.13.0", + "log", + "prettyplease", + "proc-macro2", + "quote", + "regex", + "rustc-hash", + "shlex", + "syn 2.0.98", +] + [[package]] name = "bit-set" version = "0.5.3" @@ -136,12 +603,6 @@ version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" -[[package]] -name = "bitflags" -version = "1.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" - [[package]] name = "bitflags" version = "2.6.0" @@ -160,6 +621,15 @@ dependencies = [ "wyz", ] +[[package]] +name = "blake2" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" +dependencies = [ + "digest 0.10.7", +] + [[package]] name = "blake2b_simd" version = "1.0.2" @@ -182,6 +652,19 @@ dependencies = [ "constant_time_eq", ] +[[package]] +name = "blake3" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "675f87afced0413c9bb02843499dbbd3882a237645883f71a2b59644a6d2f753" +dependencies = [ + "arrayref", + "arrayvec", + "cc", + "cfg-if", + "constant_time_eq", +] + [[package]] name = "block-buffer" version = 
"0.10.4" @@ -211,14 +694,35 @@ checksum = "7a8a8ed6fefbeef4a8c7b460e4110e12c5e22a5b7cf32621aae6ad650c4dcf29" dependencies = [ "blst", "byte-slice-cast", - "ff", - "group", + "ff 0.13.0", + "group 0.13.0", "pairing", - "rand_core", + "rand_core 0.6.4", "serde", "subtle", ] +[[package]] +name = "brillig" +version = "1.0.0-beta.2" +source = "git+https://github.com/noir-lang/noir?rev=v1.0.0-beta.2#1a2a08cbcb68646ff1aaef383cfc1798933c1355" +dependencies = [ + "acir_field", + "serde", +] + +[[package]] +name = "brillig_vm" +version = "1.0.0-beta.2" +source = "git+https://github.com/noir-lang/noir?rev=v1.0.0-beta.2#1a2a08cbcb68646ff1aaef383cfc1798933c1355" +dependencies = [ + "acir", + "acvm_blackbox_solver", + "num-bigint 0.4.6", + "num-traits", + "thiserror", +] + [[package]] name = "bumpalo" version = "3.16.0" @@ -237,6 +741,12 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" +[[package]] +name = "bytes" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f61dac84819c6588b558454b194026eb1f09c293b9036ae9b159e74e73ab6cf9" + [[package]] name = "cast" version = "0.3.0" @@ -252,6 +762,15 @@ dependencies = [ "shlex", ] +[[package]] +name = "cexpr" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" +dependencies = [ + "nom", +] + [[package]] name = "cfg-if" version = "1.0.0" @@ -285,6 +804,56 @@ dependencies = [ "half", ] +[[package]] +name = "circom_algebra" +version = "2.1.4" +source = "git+https://github.com/olomix/circom.git?branch=master#22de6cb3200ed673c53201d9007ecdf21013783a" +dependencies = [ + "constant_tracking", + "num-bigint-dig", + "num-traits", +] + +[[package]] +name = "circom_witnesscalc" +version = "0.2.0" +source = 
"git+https://github.com/pluto/circom-witnesscalc#722a3936999a8dd894f53cd498b019b65c90ac10" +dependencies = [ + "ark-bn254 0.4.0", + "ark-ff 0.4.2", + "ark-serialize 0.4.2", + "bindgen", + "byteorder", + "code_producers", + "compiler", + "constraint_generation", + "hex", + "lazy_static", + "libc", + "num-bigint 0.4.6", + "parser", + "program_structure", + "prost", + "prost-build", + "rand 0.8.5", + "ruint", + "serde", + "serde_json", + "type_analysis", + "wtns-file", +] + +[[package]] +name = "clang-sys" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4" +dependencies = [ + "glob", + "libc", + "libloading", +] + [[package]] name = "clap" version = "4.5.19" @@ -322,16 +891,16 @@ dependencies = [ "byteorder", "cfg-if", "criterion", - "digest", + "digest 0.10.7", "expect-test", - "ff", + "ff 0.13.0", "flate2", "generic-array 1.1.0", - "getrandom", - "group", + "getrandom 0.2.15", + "group 0.13.0", "grumpkin-msm", "halo2curves", - "handlebars", + "handlebars 5.1.2", "hex", "itertools 0.13.0", "neptune", @@ -340,11 +909,10 @@ dependencies = [ "num-traits", "once_cell", "pairing", - "pprof", "proptest", - "rand", - "rand_chacha", - "rand_core", + "rand 0.8.5", + "rand_chacha 0.3.1", + "rand_core 0.6.4", "rayon", "rayon-scan", "ref-cast", @@ -360,9 +928,56 @@ dependencies = [ "tracing-subscriber", "tracing-test", "tracing-texray", - "vergen", ] +[[package]] +name = "code_producers" +version = "2.1.9" +source = "git+https://github.com/olomix/circom.git?branch=master#22de6cb3200ed673c53201d9007ecdf21013783a" +dependencies = [ + "handlebars 4.5.0", + "lz_fnv", + "num-bigint-dig", + "serde_json", +] + +[[package]] +name = "codespan" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ebaf6bb6a863ad6aa3a18729e9710c53d75df03306714d9cc1f7357a00cd789" +dependencies = [ + "codespan-reporting", +] + +[[package]] +name = 
"codespan-reporting" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e0762455306b1ed42bc651ef6a2197aabda5e1d4a43c34d5eab5c1a3634e81d" +dependencies = [ + "termcolor", + "unicode-width", +] + +[[package]] +name = "compiler" +version = "2.1.9" +source = "git+https://github.com/olomix/circom.git?branch=master#22de6cb3200ed673c53201d9007ecdf21013783a" +dependencies = [ + "code_producers", + "constant_tracking", + "num-bigint-dig", + "num-traits", + "program_structure", +] + +[[package]] +name = "const-oid" +version = "0.9.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" + [[package]] name = "constant_time_eq" version = "0.3.1" @@ -370,12 +985,46 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7c74b8349d32d297c9134b8c88677813a227df8f779daa29bfc29c183fe3dca6" [[package]] -name = "cpp_demangle" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96e58d342ad113c2b878f16d5d034c03be492ae460cdbc02b7f0f2284d310c7d" +name = "constant_tracking" +version = "2.0.0" +source = "git+https://github.com/olomix/circom.git?branch=master#22de6cb3200ed673c53201d9007ecdf21013783a" + +[[package]] +name = "constraint_generation" +version = "2.1.9" +source = "git+https://github.com/olomix/circom.git?branch=master#22de6cb3200ed673c53201d9007ecdf21013783a" dependencies = [ - "cfg-if", + "ansi_term", + "circom_algebra", + "compiler", + "constraint_list", + "constraint_writers", + "dag", + "num-bigint-dig", + "num-traits", + "program_structure", +] + +[[package]] +name = "constraint_list" +version = "2.1.8" +source = "git+https://github.com/olomix/circom.git?branch=master#22de6cb3200ed673c53201d9007ecdf21013783a" +dependencies = [ + "circom_algebra", + "constraint_writers", + "json", + "num_cpus", + "program_structure", + "threadpool", +] + +[[package]] +name = 
"constraint_writers" +version = "2.1.8" +source = "git+https://github.com/olomix/circom.git?branch=master#22de6cb3200ed673c53201d9007ecdf21013783a" +dependencies = [ + "circom_algebra", + "json", ] [[package]] @@ -463,6 +1112,18 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" +[[package]] +name = "crypto-bigint" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef2b4b23cddf68b89b8f8069890e8c270d54e2d5fe1b143820234805e4cb17ef" +dependencies = [ + "generic-array 0.14.7", + "rand_core 0.6.4", + "subtle", + "zeroize", +] + [[package]] name = "crypto-common" version = "0.1.6" @@ -474,21 +1135,51 @@ dependencies = [ ] [[package]] -name = "debugid" -version = "0.8.0" +name = "dag" +version = "2.1.8" +source = "git+https://github.com/olomix/circom.git?branch=master#22de6cb3200ed673c53201d9007ecdf21013783a" +dependencies = [ + "circom_algebra", + "constraint_list", + "constraint_writers", + "json", + "program_structure", +] + +[[package]] +name = "der" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef552e6f588e446098f6ba40d89ac146c8c7b64aade83c051ee00bb5d2bc18d" +checksum = "f1a467a65c5e759bce6e65eaf91cc29f466cdc57cb65777bd646872a8a1fd4de" dependencies = [ - "uuid", + "const-oid", + "zeroize", ] [[package]] -name = "deranged" -version = "0.3.11" +name = "derivative" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" +checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ - "powerfmt", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "diff" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8" + +[[package]] +name = "digest" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" +dependencies = [ + "generic-array 0.14.7", ] [[package]] @@ -499,6 +1190,28 @@ checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ "block-buffer", "crypto-common", + "subtle", +] + +[[package]] +name = "dirs-next" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b98cf8ebf19c3d1b223e151f99a4f9f0690dca41414773390fc824184ac833e1" +dependencies = [ + "cfg-if", + "dirs-sys-next", +] + +[[package]] +name = "dirs-sys-next" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ebda144c4fe02d1f7ea1a7d9641b6fc6b580adcfa024ae48797ecdeb6825b4d" +dependencies = [ + "libc", + "redox_users", + "winapi", ] [[package]] @@ -507,12 +1220,91 @@ version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59f8e79d1fbf76bdfbde321e902714bf6c49df88a7dda6fc682fc2979226962d" +[[package]] +name = "ecdsa" +version = "0.14.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413301934810f597c1d19ca71c8710e99a3f1ba28a0d2ebc01551a2daeea3c5c" +dependencies = [ + "der", + "elliptic-curve", + "rfc6979", + "signature", +] + +[[package]] +name = "educe" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d7bc049e1bd8cdeb31b68bbd586a9464ecf9f3944af3958a7a9d0f8b9799417" +dependencies = [ + "enum-ordinalize", + "proc-macro2", + "quote", + "syn 2.0.98", +] + [[package]] name = "either" version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" +[[package]] +name = "elliptic-curve" +version = "0.12.3" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7bb888ab5300a19b8e5bceef25ac745ad065f3c9f7efc6de1b91958110891d3" +dependencies = [ + "base16ct", + "crypto-bigint", + "der", + "digest 0.10.7", + "ff 0.12.1", + "generic-array 0.14.7", + "group 0.12.1", + "pkcs8", + "rand_core 0.6.4", + "sec1", + "subtle", + "zeroize", +] + +[[package]] +name = "ena" +version = "0.14.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d248bdd43ce613d87415282f69b9bb99d947d290b10962dd6c56233312c2ad5" +dependencies = [ + "log", +] + +[[package]] +name = "enum-ordinalize" +version = "4.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fea0dcfa4e54eeb516fe454635a95753ddd39acda650ce703031c6973e315dd5" +dependencies = [ + "enum-ordinalize-derive", +] + +[[package]] +name = "enum-ordinalize-derive" +version = "4.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d28318a75d4aead5c4db25382e8ef717932d0346600cacae6357eb5941bc5ff" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.98", +] + +[[package]] +name = "equivalent" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" + [[package]] name = "errno" version = "0.3.9" @@ -539,6 +1331,38 @@ version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6" +[[package]] +name = "fastrlp" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "139834ddba373bbdd213dffe02c8d110508dcf1726c2be27e8d1f7d7e1856418" +dependencies = [ + "arrayvec", + "auto_impl", + "bytes", +] + +[[package]] +name = "fastrlp" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce8dba4714ef14b8274c371879b175aa55b16b30f269663f19d576f380018dc4" 
+dependencies = [ + "arrayvec", + "auto_impl", + "bytes", +] + +[[package]] +name = "ff" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d013fc25338cc558c5c2cfbad646908fb23591e2404481826742b651c9af7160" +dependencies = [ + "rand_core 0.6.4", + "subtle", +] + [[package]] name = "ff" version = "0.13.0" @@ -548,7 +1372,7 @@ dependencies = [ "bitvec", "byteorder", "ff_derive", - "rand_core", + "rand_core 0.6.4", "subtle", ] @@ -569,17 +1393,29 @@ dependencies = [ ] [[package]] -name = "findshlibs" -version = "0.10.2" +name = "fixed-hash" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40b9e59cd0f7e0806cca4be089683ecb6434e602038df21fe6bf6711b2f07f64" +checksum = "835c052cb0c08c1acf6ffd71c022172e18723949c8282f2b9f27efbc51e64534" dependencies = [ - "cc", - "lazy_static", - "libc", - "winapi", + "byteorder", + "rand 0.8.5", + "rustc-hex", + "static_assertions", ] +[[package]] +name = "fixedbitset" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" + +[[package]] +name = "fixedbitset" +version = "0.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d674e81391d1e1ab681a28d99df07927c6d4aa5b027d7da16ba32d1d21ecd99" + [[package]] name = "flate2" version = "1.0.34" @@ -596,12 +1432,56 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "frontend" +version = "0.1.0" +dependencies = [ + "acvm", + "ark-bn254 0.5.0", + "bellpepper-core", + "bincode", + "byteorder", + "circom_witnesscalc", + "client-side-prover", + "ff 0.13.0", + "halo2curves", + "hex", + "itertools 0.13.0", + "js-sys", + "num-bigint 0.4.6", + "serde", + "serde-wasm-bindgen", + "serde_json", + "tempdir", + "thiserror", + "tokio", + "tracing", + 
"tracing-test", + "wasm-bindgen", + "wasm-bindgen-futures", +] + +[[package]] +name = "fuchsia-cprng" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" + [[package]] name = "funty" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" +[[package]] +name = "fxhash" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c" +dependencies = [ + "byteorder", +] + [[package]] name = "generic-array" version = "0.14.7" @@ -621,6 +1501,17 @@ dependencies = [ "typenum", ] +[[package]] +name = "getrandom" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" +dependencies = [ + "cfg-if", + "libc", + "wasi 0.9.0+wasi-snapshot-preview1", +] + [[package]] name = "getrandom" version = "0.2.15" @@ -630,7 +1521,7 @@ dependencies = [ "cfg-if", "js-sys", "libc", - "wasi", + "wasi 0.11.0+wasi-snapshot-preview1", "wasm-bindgen", ] @@ -646,15 +1537,26 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" +[[package]] +name = "group" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" +dependencies = [ + "ff 0.12.1", + "rand_core 0.6.4", + "subtle", +] + [[package]] name = "group" version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" dependencies = [ - "ff", - "rand", - "rand_core", + "ff 0.13.0", + "rand 0.8.5", + "rand_core 0.6.4", 
"rand_xorshift", "subtle", ] @@ -666,11 +1568,11 @@ source = "git+https://github.com/argumentcomputer/grumpkin-msm?branch=dev#414da3 dependencies = [ "blst", "cc", - "getrandom", + "getrandom 0.2.15", "halo2curves", "pasta_curves", - "rand", - "rand_chacha", + "rand 0.8.5", + "rand_chacha 0.3.1", "rayon", "semolina", "sppark", @@ -694,8 +1596,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "db81d01d0bbfec9f624d7590fc6929ee2537a64ec1e080d8f8c9e2d2da291405" dependencies = [ "blake2b_simd", - "ff", - "group", + "ff 0.13.0", + "group 0.13.0", "hex", "lazy_static", "num-bigint 0.4.6", @@ -703,8 +1605,8 @@ dependencies = [ "pairing", "pasta_curves", "paste", - "rand", - "rand_core", + "rand 0.8.5", + "rand_core 0.6.4", "rayon", "serde", "serde_arrays", @@ -712,6 +1614,20 @@ dependencies = [ "subtle", ] +[[package]] +name = "handlebars" +version = "4.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "faa67bab9ff362228eb3d00bd024a4965d8231bbb7921167f0cfa66c6626b225" +dependencies = [ + "log", + "pest", + "pest_derive", + "serde", + "serde_json", + "thiserror", +] + [[package]] name = "handlebars" version = "5.1.2" @@ -726,6 +1642,42 @@ dependencies = [ "thiserror", ] +[[package]] +name = "hashbrown" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" + +[[package]] +name = "hashbrown" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" +dependencies = [ + "ahash", +] + +[[package]] +name = "hashbrown" +version = "0.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" +dependencies = [ + "allocator-api2", +] + +[[package]] +name = "heck" +version = "0.4.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" + +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + [[package]] name = "hermit-abi" version = "0.3.9" @@ -747,6 +1699,15 @@ dependencies = [ "serde", ] +[[package]] +name = "hmac" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" +dependencies = [ + "digest 0.10.7", +] + [[package]] name = "home" version = "0.5.9" @@ -756,6 +1717,46 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "impl-codec" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" +dependencies = [ + "parity-scale-codec", +] + +[[package]] +name = "impl-trait-for-tuples" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.98", +] + +[[package]] +name = "indexmap" +version = "1.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" +dependencies = [ + "autocfg 1.4.0", + "hashbrown 0.12.3", +] + +[[package]] +name = "indexmap" +version = "2.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c9c992b02b5b4c94ea26e32fe5bccb7aa7d9f390ab5c1221ff895bc7ea8b652" +dependencies = [ + "equivalent", + "hashbrown 0.15.2", +] + [[package]] name = "is-terminal" version = "0.4.13" @@ -804,18 +1805,67 @@ checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" name = "js-sys" version = "0.3.70" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1868808506b929d7b0cfa8f75951347aa71bb21144b7791bae35d9bccfcfe37a" +checksum = "1868808506b929d7b0cfa8f75951347aa71bb21144b7791bae35d9bccfcfe37a" +dependencies = [ + "wasm-bindgen", +] + +[[package]] +name = "json" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "078e285eafdfb6c4b434e0d31e8cfcb5115b651496faca5749b88fafd4f23bfd" + +[[package]] +name = "k256" +version = "0.11.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72c1e0b51e7ec0a97369623508396067a486bd0cbed95a2659a4b863d28cfc8b" +dependencies = [ + "cfg-if", + "ecdsa", + "elliptic-curve", + "sha2", +] + +[[package]] +name = "keccak" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecc2af9a1119c51f12a14607e783cb977bde58bc069ff0c3da1095e635d70654" +dependencies = [ + "cpufeatures", +] + +[[package]] +name = "lalrpop" +version = "0.19.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a1cbf952127589f2851ab2046af368fd20645491bb4b376f04b7f94d7a9837b" dependencies = [ - "wasm-bindgen", + "ascii-canvas", + "bit-set", + "diff", + "ena", + "is-terminal", + "itertools 0.10.5", + "lalrpop-util", + "petgraph 0.6.5", + "regex", + "regex-syntax 0.6.29", + "string_cache", + "term", + "tiny-keccak", + "unicode-xid", ] [[package]] -name = "keccak" -version = "0.1.5" +name = "lalrpop-util" +version = "0.19.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecc2af9a1119c51f12a14607e783cb977bde58bc069ff0c3da1095e635d70654" +checksum = "d3c48237b9604c5a4702de6b824e02006c3214327564636aef27c1028a8fa0ed" dependencies = [ - "cpufeatures", + "regex", ] [[package]] @@ -827,11 +1877,27 @@ dependencies = [ "spin", ] +[[package]] +name = "libaes" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"82903360c009b816f5ab72a9b68158c27c301ee2c3f20655b55c5e589e7d3bb7" + [[package]] name = "libc" -version = "0.2.159" +version = "0.2.170" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "875b3680cb2f8f71bdcf9a30f38d48282f5d3c95cbf9b3fa57269bb5d5c06828" + +[[package]] +name = "libloading" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "561d97a539a36e26a9a5fad1ea11a3039a67714694aaa379433e580854bc3dc5" +checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" +dependencies = [ + "cfg-if", + "windows-targets", +] [[package]] name = "libm" @@ -839,6 +1905,16 @@ version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" +[[package]] +name = "libredox" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" +dependencies = [ + "bitflags", + "libc", +] + [[package]] name = "linux-raw-sys" version = "0.4.14" @@ -851,7 +1927,7 @@ version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" dependencies = [ - "autocfg", + "autocfg 1.4.0", "scopeguard", ] @@ -861,6 +1937,12 @@ version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" +[[package]] +name = "lz_fnv" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9bbb1b0dbe51f0976eaa466f4e0bdc11856fe8008aee26f30ccec8de15b28e38" + [[package]] name = "matchers" version = "0.1.0" @@ -877,13 +1959,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" [[package]] -name = "memmap2" 
-version = "0.9.5" +name = "minimal-lexical" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd3f7eed9d3848f8b98834af67102b720745c4ec028fcd0aa0239277e7de374f" -dependencies = [ - "libc", -] +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" @@ -894,6 +1973,23 @@ dependencies = [ "adler2", ] +[[package]] +name = "mio" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" +dependencies = [ + "libc", + "wasi 0.11.0+wasi-snapshot-preview1", + "windows-sys 0.52.0", +] + +[[package]] +name = "multimap" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "defc4c55412d89136f966bbb339008b474350e5e6e78d2714439c386b3137a03" + [[package]] name = "neptune" version = "13.0.0" @@ -904,7 +2000,7 @@ dependencies = [ "blake2s_simd", "blstrs", "byteorder", - "ff", + "ff 0.13.0", "generic-array 0.14.7", "pasta_curves", "serde", @@ -912,14 +2008,19 @@ dependencies = [ ] [[package]] -name = "nix" -version = "0.26.4" +name = "new_debug_unreachable" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "650eef8c711430f1a879fdd01d4745a7deea475becfb90269c06775983bbf086" + +[[package]] +name = "nom" +version = "7.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "598beaf3cc6fdd9a5dfb1630c2800c7acd31df7aaf0f565796fba2b53ca1af1b" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" dependencies = [ - "bitflags 1.3.2", - "cfg-if", - "libc", + "memchr", + "minimal-lexical", ] [[package]] @@ -938,7 +2039,7 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f6f7833f2cbf2360a6cfd58cd41a53aa7a90bd4c202f5b1c7dd2ed73c57b2c3" dependencies = [ - "autocfg", + "autocfg 1.4.0", "num-integer", 
"num-traits", ] @@ -951,15 +2052,27 @@ checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" dependencies = [ "num-integer", "num-traits", - "rand", + "rand 0.8.5", "serde", ] [[package]] -name = "num-conv" -version = "0.1.0" +name = "num-bigint-dig" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" +checksum = "5d51546d704f52ef14b3c962b5776e53d5b862e5790e40a350d366c209bd7f7a" +dependencies = [ + "autocfg 0.1.8", + "byteorder", + "lazy_static", + "libm", + "num-integer", + "num-iter", + "num-traits", + "rand 0.7.3", + "serde", + "smallvec", +] [[package]] name = "num-integer" @@ -970,13 +2083,24 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-iter" +version = "0.1.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf" +dependencies = [ + "autocfg 1.4.0", + "num-integer", + "num-traits", +] + [[package]] name = "num-traits" version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" dependencies = [ - "autocfg", + "autocfg 1.4.0", "libm", ] @@ -990,20 +2114,11 @@ dependencies = [ "libc", ] -[[package]] -name = "num_threads" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c7398b9c8b70908f6371f47ed36737907c87c52af34c268fed0bf0ceb92ead9" -dependencies = [ - "libc", -] - [[package]] name = "object" -version = "0.36.5" +version = "0.36.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aedf0a2d09c573ed1d8d85b30c119153926a2b36dce0ab28322c09a117a4683e" +checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" dependencies = [ "memchr", ] @@ -1026,13 +2141,50 @@ version = "0.1.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" +[[package]] +name = "p256" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51f44edd08f51e2ade572f141051021c5af22677e42b7dd28a88155151c33594" +dependencies = [ + "ecdsa", + "elliptic-curve", + "sha2", +] + [[package]] name = "pairing" version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81fec4625e73cf41ef4bb6846cafa6d44736525f442ba45e407c4a000a13996f" dependencies = [ - "group", + "group 0.13.0", +] + +[[package]] +name = "parity-scale-codec" +version = "3.6.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "306800abfa29c7f16596b5970a588435e3d5b3149683d00c12b699cc19f895ee" +dependencies = [ + "arrayvec", + "bitvec", + "byte-slice-cast", + "impl-trait-for-tuples", + "parity-scale-codec-derive", + "serde", +] + +[[package]] +name = "parity-scale-codec-derive" +version = "3.6.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d830939c76d294956402033aee57a6da7b438f2294eb94864c37b0569053a42c" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "syn 1.0.109", ] [[package]] @@ -1058,6 +2210,22 @@ dependencies = [ "windows-targets", ] +[[package]] +name = "parser" +version = "2.1.9" +source = "git+https://github.com/olomix/circom.git?branch=master#22de6cb3200ed673c53201d9007ecdf21013783a" +dependencies = [ + "lalrpop", + "lalrpop-util", + "num-bigint-dig", + "num-traits", + "program_structure", + "regex", + "rustc-hex", + "serde", + "serde_derive", +] + [[package]] name = "pasta_curves" version = "0.5.1" @@ -1065,11 +2233,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3e57598f73cc7e1b2ac63c79c517b31a0877cd7c402cdcaa311b5208de7a095" dependencies = [ "blake2b_simd", - "ff", - "group", + "ff 0.13.0", + "group 0.13.0", "hex", 
"lazy_static", - "rand", + "rand 0.8.5", "serde", "static_assertions", "subtle", @@ -1112,7 +2280,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.98", ] [[package]] @@ -1126,12 +2294,51 @@ dependencies = [ "sha2", ] +[[package]] +name = "petgraph" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" +dependencies = [ + "fixedbitset 0.4.2", + "indexmap 2.7.1", +] + +[[package]] +name = "petgraph" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3672b37090dbd86368a4145bc067582552b29c27377cad4e0a306c97f9bd7772" +dependencies = [ + "fixedbitset 0.5.7", + "indexmap 2.7.1", +] + +[[package]] +name = "phf_shared" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67eabc2ef2a60eb7faa00097bd1ffdb5bd28e62bf39990626a582201b7a754e5" +dependencies = [ + "siphasher", +] + [[package]] name = "pin-project-lite" version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" +[[package]] +name = "pkcs8" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9eca2c590a5f85da82668fa685c09ce2888b9430e83299debf1f34b65fd4a4ba" +dependencies = [ + "der", + "spki", +] + [[package]] name = "plotters" version = "0.3.7" @@ -1161,49 +2368,74 @@ dependencies = [ ] [[package]] -name = "powerfmt" -version = "0.2.0" +name = "ppv-lite86" +version = "0.2.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" +dependencies = [ + "zerocopy", +] + +[[package]] +name = "precomputed-hash" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" +checksum = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c" [[package]] -name = "pprof" -version = "0.13.0" +name = "prettyplease" +version = "0.2.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef5c97c51bd34c7e742402e216abdeb44d415fbe6ae41d56b114723e953711cb" +checksum = "6924ced06e1f7dfe3fa48d57b9f74f55d8915f5036121bef647ef4b204895fac" dependencies = [ - "backtrace", - "cfg-if", - "findshlibs", - "libc", - "log", - "nix", - "once_cell", - "parking_lot", - "smallvec", - "symbolic-demangle", - "tempfile", - "thiserror", + "proc-macro2", + "syn 2.0.98", ] [[package]] -name = "ppv-lite86" -version = "0.2.20" +name = "primitive-types" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" +checksum = "0b34d9fd68ae0b74a41b21c03c2f62847aa0ffea044eee893b4c140b37e244e2" dependencies = [ - "zerocopy", + "fixed-hash", + "impl-codec", + "uint", +] + +[[package]] +name = "proc-macro-crate" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecf48c7ca261d60b74ab1a7b20da18bede46776b2e55535cb958eb595c5fa7b" +dependencies = [ + "toml_edit", ] [[package]] name = "proc-macro2" -version = "1.0.86" +version = "1.0.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" +checksum = "60946a68e5f9d28b0dc1c21bb8a97ee7d018a8b322fa57838ba31cc878e22d99" dependencies = [ "unicode-ident", ] +[[package]] +name = "program_structure" +version = "2.1.9" +source = "git+https://github.com/olomix/circom.git?branch=master#22de6cb3200ed673c53201d9007ecdf21013783a" +dependencies = [ + "codespan", + "codespan-reporting", + "num-bigint-dig", + "num-traits", + "regex", + "rustc-hex", + "serde", + "serde_derive", +] + [[package]] name = 
"proptest" version = "1.5.0" @@ -1212,11 +2444,11 @@ checksum = "b4c2511913b88df1637da85cc8d96ec8e43a3f8bb8ccb71ee1ac240d6f3df58d" dependencies = [ "bit-set", "bit-vec", - "bitflags 2.6.0", + "bitflags", "lazy_static", "num-traits", - "rand", - "rand_chacha", + "rand 0.8.5", + "rand_chacha 0.3.1", "rand_xorshift", "regex-syntax 0.8.5", "rusty-fork", @@ -1224,6 +2456,58 @@ dependencies = [ "unarray", ] +[[package]] +name = "prost" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2796faa41db3ec313a31f7624d9286acf277b52de526150b7e69f3debf891ee5" +dependencies = [ + "bytes", + "prost-derive", +] + +[[package]] +name = "prost-build" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be769465445e8c1474e9c5dac2018218498557af32d9ed057325ec9a41ae81bf" +dependencies = [ + "heck 0.5.0", + "itertools 0.13.0", + "log", + "multimap", + "once_cell", + "petgraph 0.7.1", + "prettyplease", + "prost", + "prost-types", + "regex", + "syn 2.0.98", + "tempfile", +] + +[[package]] +name = "prost-derive" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d" +dependencies = [ + "anyhow", + "itertools 0.13.0", + "proc-macro2", + "quote", + "syn 2.0.98", +] + +[[package]] +name = "prost-types" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52c2c1bf36ddb1a1c396b3601a3cec27c2462e45f07c386894ec3ccf5332bd16" +dependencies = [ + "prost", +] + [[package]] name = "quick-error" version = "1.2.3" @@ -1245,6 +2529,32 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" +[[package]] +name = "rand" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"552840b97013b1a26992c11eac34bdd778e464601a4c2054b5f0bff7c6761293" +dependencies = [ + "fuchsia-cprng", + "libc", + "rand_core 0.3.1", + "rdrand", + "winapi", +] + +[[package]] +name = "rand" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" +dependencies = [ + "getrandom 0.1.16", + "libc", + "rand_chacha 0.2.2", + "rand_core 0.5.1", + "rand_hc", +] + [[package]] name = "rand" version = "0.8.5" @@ -1252,8 +2562,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", - "rand_chacha", - "rand_core", + "rand_chacha 0.3.1", + "rand_core 0.6.4", +] + +[[package]] +name = "rand_chacha" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" +dependencies = [ + "ppv-lite86", + "rand_core 0.5.1", ] [[package]] @@ -1263,7 +2583,31 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core", + "rand_core 0.6.4", +] + +[[package]] +name = "rand_core" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b" +dependencies = [ + "rand_core 0.4.2", +] + +[[package]] +name = "rand_core" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc" + +[[package]] +name = "rand_core" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" +dependencies = [ + "getrandom 0.1.16", ] [[package]] @@ -1272,7 
+2616,16 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom", + "getrandom 0.2.15", +] + +[[package]] +name = "rand_hc" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" +dependencies = [ + "rand_core 0.5.1", ] [[package]] @@ -1281,7 +2634,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" dependencies = [ - "rand_core", + "rand_core 0.6.4", ] [[package]] @@ -1308,9 +2661,18 @@ dependencies = [ name = "rayon-scan" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f87cc11a0140b4b0da0ffc889885760c61b13672d80a908920b2c0df078fa14" +checksum = "3f87cc11a0140b4b0da0ffc889885760c61b13672d80a908920b2c0df078fa14" +dependencies = [ + "rayon", +] + +[[package]] +name = "rdrand" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2" dependencies = [ - "rayon", + "rand_core 0.3.1", ] [[package]] @@ -1319,7 +2681,18 @@ version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f" dependencies = [ - "bitflags 2.6.0", + "bitflags", +] + +[[package]] +name = "redox_users" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" +dependencies = [ + "getrandom 0.2.15", + "libredox", + "thiserror", ] [[package]] @@ -1339,7 +2712,7 @@ checksum = "bcc303e793d3734489387d205e9b186fac9c6cfacedd98cbb2e8a5943595f3e6" dependencies = [ "proc-macro2", "quote", - "syn 
2.0.79", + "syn 2.0.98", ] [[package]] @@ -1386,19 +2759,111 @@ version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" +[[package]] +name = "remove_dir_all" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" +dependencies = [ + "winapi", +] + +[[package]] +name = "rfc6979" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7743f17af12fa0b03b803ba12cd6a8d9483a587e89c69445e3909655c0b9fabb" +dependencies = [ + "crypto-bigint", + "hmac", + "zeroize", +] + +[[package]] +name = "rlp" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb919243f34364b6bd2fc10ef797edbfa75f33c252e7998527479c6d6b47e1ec" +dependencies = [ + "bytes", + "rustc-hex", +] + +[[package]] +name = "ruint" +version = "1.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f5ef8fb1dd8de3870cb8400d51b4c2023854bbafd5431a3ac7e7317243e22d2f" +dependencies = [ + "alloy-rlp", + "ark-ff 0.3.0", + "ark-ff 0.4.2", + "bytes", + "fastrlp 0.3.1", + "fastrlp 0.4.0", + "num-bigint 0.4.6", + "num-integer", + "num-traits", + "parity-scale-codec", + "primitive-types", + "proptest", + "rand 0.8.5", + "rlp", + "ruint-macro", + "serde", + "valuable", + "zeroize", +] + +[[package]] +name = "ruint-macro" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48fd7bd8a6377e15ad9d42a8ec25371b94ddc67abe7c8b9127bec79bebaaae18" + [[package]] name = "rustc-demangle" version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" +[[package]] +name = "rustc-hash" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" + +[[package]] +name = "rustc-hex" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" + +[[package]] +name = "rustc_version" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0dfe2087c51c460008730de8b57e6a320782fbfb312e1f4d520e6c6fae155ee" +dependencies = [ + "semver 0.11.0", +] + +[[package]] +name = "rustc_version" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" +dependencies = [ + "semver 1.0.25", +] + [[package]] name = "rustix" version = "0.38.37" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8acb788b847c24f28525660c4d7758620a7210875711f79e7f663cc152726811" dependencies = [ - "bitflags 2.6.0", + "bitflags", "errno", "libc", "linux-raw-sys", @@ -1444,6 +2909,20 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" +[[package]] +name = "sec1" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3be24c1842290c45df0a7bf069e0c268a747ad05a192f2fd7dcfdbc1cba40928" +dependencies = [ + "base16ct", + "der", + "generic-array 0.14.7", + "pkcs8", + "subtle", + "zeroize", +] + [[package]] name = "semolina" version = "0.1.4" @@ -1454,6 +2933,30 @@ dependencies = [ "glob", ] +[[package]] +name = "semver" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6" +dependencies = [ + "semver-parser", +] + +[[package]] +name = "semver" +version = "1.0.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"f79dfe2d285b0488816f30e700a7438c5a73d816b5b7d3ac72fbc48b0d185e03" + +[[package]] +name = "semver-parser" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9900206b54a3527fdc7b8a938bffd94a568bac4f4aa8113b209df75a09c0dec2" +dependencies = [ + "pest", +] + [[package]] name = "serde" version = "1.0.210" @@ -1463,6 +2966,26 @@ dependencies = [ "serde_derive", ] +[[package]] +name = "serde-big-array" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11fc7cc2c76d73e0f27ee52abbd64eec84d46f370c88371120433196934e4b7f" +dependencies = [ + "serde", +] + +[[package]] +name = "serde-wasm-bindgen" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8302e169f0eddcc139c70f139d19d6467353af16f9fce27e8c30158036a1e16b" +dependencies = [ + "js-sys", + "serde", + "wasm-bindgen", +] + [[package]] name = "serde_arrays" version = "0.1.0" @@ -1480,7 +3003,7 @@ checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.98", ] [[package]] @@ -1503,7 +3026,7 @@ checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" dependencies = [ "cfg-if", "cpufeatures", - "digest", + "digest 0.10.7", ] [[package]] @@ -1512,7 +3035,7 @@ version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" dependencies = [ - "digest", + "digest 0.10.7", "keccak", ] @@ -1531,18 +3054,63 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" +[[package]] +name = "signal-hook-registry" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" +dependencies = [ + 
"libc", +] + +[[package]] +name = "signature" +version = "1.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" +dependencies = [ + "digest 0.10.7", + "rand_core 0.6.4", +] + +[[package]] +name = "siphasher" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d" + [[package]] name = "smallvec" version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" +[[package]] +name = "socket2" +version = "0.5.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c970269d99b64e60ec3bd6ad27270092a5394c4e309314b18ae3fe575695fbe8" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + [[package]] name = "spin" version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" +[[package]] +name = "spki" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67cf02bbac7a337dc36e4f5a693db6c21e7863f45070f7064577eb4367a3212b" +dependencies = [ + "base64ct", + "der", +] + [[package]] name = "sppark" version = "0.1.8" @@ -1553,12 +3121,6 @@ dependencies = [ "which", ] -[[package]] -name = "stable_deref_trait" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" - [[package]] name = "static_assertions" version = "1.1.0" @@ -1566,34 +3128,42 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" [[package]] -name = "subtle" -version = "2.6.1" +name = "string_cache" +version = "0.8.8" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" +checksum = "938d512196766101d333398efde81bc1f37b00cb42c2f8350e5df639f040bbbe" +dependencies = [ + "new_debug_unreachable", + "parking_lot", + "phf_shared", + "precomputed-hash", +] [[package]] -name = "symbolic-common" -version = "12.12.0" +name = "strum" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "366f1b4c6baf6cfefc234bbd4899535fca0b06c74443039a73f6dfb2fad88d77" -dependencies = [ - "debugid", - "memmap2", - "stable_deref_trait", - "uuid", -] +checksum = "063e6045c0e62079840579a7e47a355ae92f60eb74daaf156fb1e84ba164e63f" [[package]] -name = "symbolic-demangle" -version = "12.12.0" +name = "strum_macros" +version = "0.24.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aba05ba5b9962ea5617baf556293720a8b2d0a282aa14ee4bf10e22efc7da8c8" +checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" dependencies = [ - "cpp_demangle", - "rustc-demangle", - "symbolic-common", + "heck 0.4.1", + "proc-macro2", + "quote", + "rustversion", + "syn 1.0.109", ] +[[package]] +name = "subtle" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" + [[package]] name = "syn" version = "1.0.109" @@ -1607,9 +3177,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.79" +version = "2.0.98" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89132cd0bf050864e1d38dc3bbc07a0eb8e7530af26344d3d2bbbef83499f590" +checksum = "36147f1a48ae0ec2b5b3bc5b537d267457555a10dc06f3dbc8cb11ba3006d3b1" dependencies = [ "proc-macro2", "quote", @@ -1622,6 +3192,16 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" 
+[[package]] +name = "tempdir" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15f2b5fb00ccdf689e0149d1b1b3c03fead81c2b37735d812fa8bddbbf41b6d8" +dependencies = [ + "rand 0.4.6", + "remove_dir_all", +] + [[package]] name = "tempfile" version = "3.13.0" @@ -1635,6 +3215,17 @@ dependencies = [ "windows-sys 0.59.0", ] +[[package]] +name = "term" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c59df8ac95d96ff9bede18eb7300b0fda5e5d8d90960e76f8e14ae765eedbf1f" +dependencies = [ + "dirs-next", + "rustversion", + "winapi", +] + [[package]] name = "term_size" version = "0.3.2" @@ -1645,6 +3236,15 @@ dependencies = [ "winapi", ] +[[package]] +name = "termcolor" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" +dependencies = [ + "winapi-util", +] + [[package]] name = "thiserror" version = "1.0.64" @@ -1662,7 +3262,7 @@ checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.98", ] [[package]] @@ -1685,46 +3285,68 @@ dependencies = [ ] [[package]] -name = "time" -version = "0.3.36" +name = "tiny-keccak" +version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" +checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" +dependencies = [ + "crunchy", +] + +[[package]] +name = "tinytemplate" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" dependencies = [ - "deranged", - "itoa", - "libc", - "num-conv", - "num_threads", - "powerfmt", "serde", - "time-core", - "time-macros", + "serde_json", ] [[package]] -name = "time-core" -version = "0.1.2" 
+name = "tokio" +version = "1.43.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" +checksum = "3d61fa4ffa3de412bfea335c6ecff681de2b609ba3c77ef3e00e521813a9ed9e" +dependencies = [ + "backtrace", + "bytes", + "libc", + "mio", + "parking_lot", + "pin-project-lite", + "signal-hook-registry", + "socket2", + "tokio-macros", + "windows-sys 0.52.0", +] [[package]] -name = "time-macros" -version = "0.2.18" +name = "tokio-macros" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" +checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ - "num-conv", - "time-core", + "proc-macro2", + "quote", + "syn 2.0.98", ] [[package]] -name = "tinytemplate" -version = "1.2.1" +name = "toml_datetime" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" +checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" + +[[package]] +name = "toml_edit" +version = "0.22.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17b4795ff5edd201c7cd6dca065ae59972ce77d1b80fa0a84d94950ece7d1474" dependencies = [ - "serde", - "serde_json", + "indexmap 2.7.1", + "toml_datetime", + "winnow", ] [[package]] @@ -1746,7 +3368,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.98", ] [[package]] @@ -1806,7 +3428,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04659ddb06c87d233c566112c1c9c5b9e98256d9af50ec3bc9c8327f873a7568" dependencies = [ "quote", - "syn 2.0.79", + "syn 2.0.98", ] [[package]] @@ -1833,6 +3455,16 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = 
"type_analysis" +version = "2.1.9" +source = "git+https://github.com/olomix/circom.git?branch=master#22de6cb3200ed673c53201d9007ecdf21013783a" +dependencies = [ + "num-bigint-dig", + "num-traits", + "program_structure", +] + [[package]] name = "typenum" version = "1.17.0" @@ -1845,6 +3477,18 @@ version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971" +[[package]] +name = "uint" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76f64bba2c53b04fcab63c01a7d7427eadc821e3bc48c34dc9ba29c501164b52" +dependencies = [ + "byteorder", + "crunchy", + "hex", + "static_assertions", +] + [[package]] name = "unarray" version = "0.1.4" @@ -1858,28 +3502,22 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" [[package]] -name = "uuid" -version = "1.10.0" +name = "unicode-width" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81dfa00651efa65069b0b6b651f4aaa31ba9e3c3ce0137aaad053604ee7e0314" +checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" [[package]] -name = "valuable" -version = "0.1.0" +name = "unicode-xid" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" +checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" [[package]] -name = "vergen" -version = "8.3.2" +name = "valuable" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2990d9ea5967266ea0ccf413a4aa5c42a93dbcfda9cb49a97de6931726b12566" -dependencies = [ - "anyhow", - "cfg-if", - "rustversion", - "time", -] +checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" [[package]] name = "version_check" @@ 
-1906,6 +3544,12 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "wasi" +version = "0.9.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" + [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" @@ -1934,10 +3578,22 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.98", "wasm-bindgen-shared", ] +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.43" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61e9300f63a621e96ed275155c108eb6f843b6a26d053f122ab69724559dc8ed" +dependencies = [ + "cfg-if", + "js-sys", + "wasm-bindgen", + "web-sys", +] + [[package]] name = "wasm-bindgen-macro" version = "0.2.93" @@ -1956,7 +3612,7 @@ checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.98", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -2102,6 +3758,24 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" +[[package]] +name = "winnow" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e7f4ea97f6f78012141bcdb6a216b2609f0979ada50b20ca5b52dde2eac2bb1" +dependencies = [ + "memchr", +] + +[[package]] +name = "wtns-file" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d3b856452298f68a5879e3901918bac5d753ca9fa4be8a983a37a3d25dabf0a" +dependencies = [ + "byteorder", +] + [[package]] name = "wyz" version = "0.5.1" @@ -2129,7 +3803,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.98", ] [[package]] @@ -2149,5 +3823,5 @@ checksum = 
"ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.98", ] diff --git a/Cargo.toml b/Cargo.toml index b4c3098..cff1fa3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,86 +1,61 @@ -[package] -name = "client-side-prover" -version = "0.1.0" -authors = ["Pluto Engineering"] -edition = "2021" -description = "Client side proving" -readme = "README.md" -repository = "https://github.com/pluto/client-side-prover" -license-file = "LICENSE" -keywords = ["zkSNARKs", "cryptography", "proofs"] -# rust-version = "1.79" +[workspace] +members =["prover", "frontend"] +resolver="2" + +[workspace.dependencies] +bellpepper-core ={ version="0.4.0", default-features=false } +bellpepper ={ git="https://github.com/argumentcomputer/bellpepper", branch="dev", default-features=false } +ff ={ version="0.13.0", features=["derive"] } +digest ="0.10" +halo2curves ={ version="0.6.0", features=["bits", "derive_serde"] } +sha3 ="0.10" +rayon ="1.7" +rand_core ={ version="0.6", default-features=false } +rand_chacha ="0.3" +subtle ="2.5" +neptune ={ git="https://github.com/argumentcomputer/neptune", branch="dev", default-features=false } +generic-array ="1.0.0" +num-bigint ={ version="0.4", features=["serde", "rand"] } +num-traits ="0.2" +num-integer ="0.1" +serde ={ version="1.0", features=["derive", "rc"] } +bincode ="1.3" +bitvec ="1.0" +byteorder ="1.4.3" +thiserror ="1.0" +group ="0.13.0" +pairing ="0.23.0" +tracing ="0.1.37" +cfg-if ="1.0.0" +once_cell ="1.18.0" +itertools ="0.13.0" # zip_eq +rand ="0.8.5" +ref-cast ="1.0.20" # allocation-less conversion in multilinear polys # lightens impl macros for pasta +static_assertions="1.1.0" +rayon-scan ="0.1.0" +hex ="0.4.3" -[dependencies] -bellpepper-core = { version = "0.4.0", default-features = false } -bellpepper = { git = "https://github.com/argumentcomputer/bellpepper", branch = "dev", default-features = false } -ff = { version = "0.13.0", features = ["derive"] } 
-digest = "0.10" -halo2curves = { version = "0.6.0", features = ["bits", "derive_serde"] } -sha3 = "0.10" -rayon = "1.7" -rand_core = { version = "0.6", default-features = false } -rand_chacha = "0.3" -subtle = "2.5" -neptune = { git = "https://github.com/argumentcomputer/neptune", branch = "dev", default-features = false } -generic-array = "1.0.0" -num-bigint = { version = "0.4", features = ["serde", "rand"] } -num-traits = "0.2" -num-integer = "0.1" -serde = { version = "1.0", features = ["derive", "rc"] } -bincode = "1.3" -bitvec = "1.0" -byteorder = "1.4.3" -thiserror = "1.0" -group = "0.13.0" -pairing = "0.23.0" -tracing = "0.1.37" -cfg-if = "1.0.0" -once_cell = "1.18.0" -itertools = "0.13.0" # zip_eq -rand = "0.8.5" -ref-cast = "1.0.20" # allocation-less conversion in multilinear polys # lightens impl macros for pasta -static_assertions = "1.1.0" -rayon-scan = "0.1.0" - -[target.'cfg(any(target_arch = "x86_64", target_arch = "aarch64"))'.dependencies] # grumpkin-msm has been patched to support MSMs for the pasta curve cycle # see: https://github.com/argumentcomputer/grumpkin-msm/pull/3 -grumpkin-msm = { git = "https://github.com/argumentcomputer/grumpkin-msm", branch = "dev" } - -[target.'cfg(target_arch = "wasm32")'.dependencies] -getrandom = { version = "0.2.0", default-features = false, features = ["js"] } - -[target.'cfg(not(target_arch = "wasm32"))'.dependencies] -proptest = "1.2.0" -pprof = { version = "0.13", optional = true } # in benches under feature "flamegraph" - -[target.'cfg(not(target_arch = "wasm32"))'.dev-dependencies] -criterion = { version = "0.5", features = ["html_reports"] } - -[dev-dependencies] -flate2 = "1.0" -hex = "0.4.3" -sha2 = "0.10.7" -tracing-test = "0.2.4" -expect-test = "1.4.1" -anyhow = "1.0.72" -tap = "1.0.1" -tracing-texray = "0.2.0" -tracing-subscriber = { version = "0.3.17", features = ["env-filter"] } -handlebars = "5.1.0" -serde_json = "1.0.1" - -[build-dependencies] -vergen = { version = "8", features = ["build", 
"git", "gitcl"] } - -[features] -default = ["grumpkin-msm/portable"] -# asm = ["halo2curves/asm"] -# Compiles in portable mode, w/o ISA extensions => binary can be executed on all systems. -# portable = ["grumpkin-msm/portable"] -# cuda = ["grumpkin-msm/cuda"] - -[profile.release] -lto = true -codegen-units = 1 -panic = "abort" +grumpkin-msm={ git="https://github.com/argumentcomputer/grumpkin-msm", branch="dev" } + +# wasm32 dependencies +getrandom={ version="0.2.0", default-features=false, features=["js"] } + +# property testing +proptest="1.2.0" + +# benchmarks +criterion={ version="0.5", features=["html_reports"] } + +# dev dependencies +flate2 ="1.0" +sha2 ="0.10.7" +tracing-test ="0.2.4" +expect-test ="1.4.1" +anyhow ="1.0.72" +tap ="1.0.1" +tracing-texray ="0.2.0" +tracing-subscriber={ version="0.3.17", features=["env-filter"] } +handlebars ="5.1.0" +serde_json ="1.0.1" diff --git a/frontend/Cargo.lock b/frontend/Cargo.lock new file mode 100644 index 0000000..b693177 --- /dev/null +++ b/frontend/Cargo.lock @@ -0,0 +1,2716 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 3 + +[[package]] +name = "addchain" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b2e69442aa5628ea6951fa33e24efe8313f4321a91bd729fc2f75bdfc858570" +dependencies = [ + "num-bigint 0.3.3", + "num-integer", + "num-traits", +] + +[[package]] +name = "ahash" +version = "0.8.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" +dependencies = [ + "cfg-if", + "once_cell", + "version_check", + "zerocopy", +] + +[[package]] +name = "aho-corasick" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" +dependencies = [ + "memchr", +] + +[[package]] +name = "alloy-rlp" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26154390b1d205a4a7ac7352aa2eb4f81f391399d4e2f546fb81a2f8bb383f62" +dependencies = [ + "arrayvec", + "bytes", +] + +[[package]] +name = "ansi_term" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" +dependencies = [ + "winapi", +] + +[[package]] +name = "anstream" +version = "0.6.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64e15c1ab1f89faffbf04a634d5e1962e9074f2741eef6d97f3c4e322426d526" +dependencies = [ + "anstyle", + "anstyle-parse", + "anstyle-query", + "anstyle-wincon", + "colorchoice", + "is_terminal_polyfill", + "utf8parse", +] + +[[package]] +name = "anstyle" +version = "1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bec1de6f59aedf83baf9ff929c98f2ad654b97c9510f4e70cf6f661d49fd5b1" + +[[package]] +name = "anstyle-parse" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"eb47de1e80c2b463c735db5b217a0ddc39d612e7ac9e2e96a5aed1f57616c1cb" +dependencies = [ + "utf8parse", +] + +[[package]] +name = "anstyle-query" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d36fc52c7f6c869915e99412912f22093507da8d9e942ceaf66fe4b7c14422a" +dependencies = [ + "windows-sys 0.52.0", +] + +[[package]] +name = "anstyle-wincon" +version = "3.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5bf74e1b6e971609db8ca7a9ce79fd5768ab6ae46441c572e46cf596f59e57f8" +dependencies = [ + "anstyle", + "windows-sys 0.52.0", +] + +[[package]] +name = "anyhow" +version = "1.0.89" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6" + +[[package]] +name = "ark-bn254" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a22f4561524cd949590d78d7d4c5df8f592430d221f7f3c9497bbafd8972120f" +dependencies = [ + "ark-ec", + "ark-ff 0.4.2", + "ark-std 0.4.0", +] + +[[package]] +name = "ark-ec" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "defd9a439d56ac24968cca0571f598a61bc8c55f71d50a89cda591cb750670ba" +dependencies = [ + "ark-ff 0.4.2", + "ark-poly", + "ark-serialize 0.4.2", + "ark-std 0.4.0", + "derivative", + "hashbrown 0.13.2", + "itertools 0.10.5", + "num-traits", + "zeroize", +] + +[[package]] +name = "ark-ff" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b3235cc41ee7a12aaaf2c575a2ad7b46713a8a50bda2fc3b003a04845c05dd6" +dependencies = [ + "ark-ff-asm 0.3.0", + "ark-ff-macros 0.3.0", + "ark-serialize 0.3.0", + "ark-std 0.3.0", + "derivative", + "num-bigint 0.4.6", + "num-traits", + "paste", + "rustc_version 0.3.3", + "zeroize", +] + +[[package]] +name = "ark-ff" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum 
= "ec847af850f44ad29048935519032c33da8aa03340876d351dfab5660d2966ba" +dependencies = [ + "ark-ff-asm 0.4.2", + "ark-ff-macros 0.4.2", + "ark-serialize 0.4.2", + "ark-std 0.4.0", + "derivative", + "digest 0.10.7", + "itertools 0.10.5", + "num-bigint 0.4.6", + "num-traits", + "paste", + "rustc_version 0.4.1", + "zeroize", +] + +[[package]] +name = "ark-ff-asm" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db02d390bf6643fb404d3d22d31aee1c4bc4459600aef9113833d17e786c6e44" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-asm" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ed4aa4fe255d0bc6d79373f7e31d2ea147bcf486cba1be5ba7ea85abdb92348" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-macros" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db2fd794a08ccb318058009eefdf15bcaaaaf6f8161eb3345f907222bac38b20" +dependencies = [ + "num-bigint 0.4.6", + "num-traits", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-macros" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7abe79b0e4288889c4574159ab790824d0033b9fdcb2a112a3182fac2e514565" +dependencies = [ + "num-bigint 0.4.6", + "num-traits", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-poly" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d320bfc44ee185d899ccbadfa8bc31aab923ce1558716e1997a1e74057fe86bf" +dependencies = [ + "ark-ff 0.4.2", + "ark-serialize 0.4.2", + "ark-std 0.4.0", + "derivative", + "hashbrown 0.13.2", +] + +[[package]] +name = "ark-serialize" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d6c2b318ee6e10f8c2853e73a83adc0ccb88995aa978d8a3408d492ab2ee671" +dependencies = [ + "ark-std 0.3.0", + "digest 0.9.0", +] 
+ +[[package]] +name = "ark-serialize" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adb7b85a02b83d2f22f89bd5cac66c9c89474240cb6207cb1efc16d098e822a5" +dependencies = [ + "ark-serialize-derive", + "ark-std 0.4.0", + "digest 0.10.7", + "num-bigint 0.4.6", +] + +[[package]] +name = "ark-serialize-derive" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae3281bc6d0fd7e549af32b52511e1302185bd688fd3359fa36423346ff682ea" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-std" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1df2c09229cbc5a028b1d70e00fdb2acee28b1055dfb5ca73eea49c5a25c4e7c" +dependencies = [ + "num-traits", + "rand 0.8.5", +] + +[[package]] +name = "ark-std" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94893f1e0c6eeab764ade8dc4c0db24caf4fe7cbbaafc0eba0a9030f447b5185" +dependencies = [ + "num-traits", + "rand 0.8.5", +] + +[[package]] +name = "arrayref" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb" + +[[package]] +name = "arrayvec" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" + +[[package]] +name = "ascii-canvas" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8824ecca2e851cec16968d54a01dd372ef8f95b244fb84b84e70128be347c3c6" +dependencies = [ + "term", +] + +[[package]] +name = "auto_impl" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.77", +] + +[[package]] +name 
= "autocfg" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0dde43e75fd43e8a1bf86103336bc699aa8d17ad1be60c76c0bdfd4828e19b78" +dependencies = [ + "autocfg 1.3.0", +] + +[[package]] +name = "autocfg" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" + +[[package]] +name = "bellpepper" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ae286c2cb403324ab644c7cc68dceb25fe52ca9429908a726d7ed272c1edf7b" +dependencies = [ + "bellpepper-core", + "byteorder", + "ff", +] + +[[package]] +name = "bellpepper-core" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d8abb418570756396d722841b19edfec21d4e89e1cf8990610663040ecb1aea" +dependencies = [ + "blake2s_simd", + "byteorder", + "ff", + "serde", + "thiserror", +] + +[[package]] +name = "bincode" +version = "1.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" +dependencies = [ + "serde", +] + +[[package]] +name = "bindgen" +version = "0.69.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a00dc851838a2120612785d195287475a3ac45514741da670b735818822129a0" +dependencies = [ + "bitflags", + "cexpr", + "clang-sys", + "itertools 0.12.1", + "lazy_static", + "lazycell", + "log", + "prettyplease", + "proc-macro2", + "quote", + "regex", + "rustc-hash", + "shlex", + "syn 2.0.77", + "which", +] + +[[package]] +name = "bit-set" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0700ddab506f33b20a03b13996eccd309a48e5ff77d0d95926aa0210fb4e95f1" +dependencies = [ + "bit-vec", +] + +[[package]] +name = "bit-vec" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" + +[[package]] +name = "bitflags" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" + +[[package]] +name = "bitvec" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" +dependencies = [ + "funty", + "radium", + "tap", + "wyz", +] + +[[package]] +name = "blake2b_simd" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23285ad32269793932e830392f2fe2f83e26488fd3ec778883a93c8323735780" +dependencies = [ + "arrayref", + "arrayvec", + "constant_time_eq", +] + +[[package]] +name = "blake2s_simd" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94230421e395b9920d23df13ea5d77a20e1725331f90fbbf6df6040b33f756ae" +dependencies = [ + "arrayref", + "arrayvec", + "constant_time_eq", +] + +[[package]] +name = "block-buffer" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" +dependencies = [ + "generic-array 0.14.7", +] + +[[package]] +name = "blst" +version = "0.3.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4378725facc195f1a538864863f6de233b500a8862747e7f165078a419d5e874" +dependencies = [ + "cc", + "glob", + "threadpool", + "zeroize", +] + +[[package]] +name = "blstrs" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a8a8ed6fefbeef4a8c7b460e4110e12c5e22a5b7cf32621aae6ad650c4dcf29" +dependencies = [ + "blst", + "byte-slice-cast", + "ff", + "group", + "pairing", + "rand_core 0.6.4", + "serde", + "subtle", +] + +[[package]] +name = "bumpalo" +version = "3.16.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" + +[[package]] +name = "byte-slice-cast" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" + +[[package]] +name = "byteorder" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" + +[[package]] +name = "bytes" +version = "1.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" + +[[package]] +name = "cc" +version = "1.1.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45bcde016d64c21da4be18b655631e5ab6d3107607e71a73a9f53eb48aae23fb" +dependencies = [ + "shlex", +] + +[[package]] +name = "cexpr" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" +dependencies = [ + "nom", +] + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "circom_algebra" +version = "2.1.4" +source = "git+https://github.com/olomix/circom.git?branch=master#22de6cb3200ed673c53201d9007ecdf21013783a" +dependencies = [ + "constant_tracking", + "num-bigint-dig", + "num-traits", +] + +[[package]] +name = "circom_witnesscalc" +version = "0.2.0" +source = "git+https://github.com/iden3/circom-witnesscalc#ec597bb986883416fd6be312b4fa044b8d6bcc25" +dependencies = [ + "ark-bn254", + "ark-ff 0.4.2", + "ark-serialize 0.4.2", + "bindgen", + "byteorder", + "code_producers", + "compiler", + "constraint_generation", + "hex", + "lazy_static", + "libc", + 
"parser", + "postcard", + "program_structure", + "prost", + "prost-build", + "rand 0.8.5", + "ruint", + "serde", + "serde_json", + "type_analysis", + "wtns-file", +] + +[[package]] +name = "clang-sys" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4" +dependencies = [ + "glob", + "libc", + "libloading", +] + +[[package]] +name = "clap" +version = "4.5.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e5a21b8495e732f1b3c364c9949b201ca7bae518c502c80256c96ad79eaf6ac" +dependencies = [ + "clap_builder", + "clap_derive", +] + +[[package]] +name = "clap_builder" +version = "4.5.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8cf2dd12af7a047ad9d6da2b6b249759a22a7abc0f474c1dae1777afa4b21a73" +dependencies = [ + "anstream", + "anstyle", + "clap_lex", + "strsim", +] + +[[package]] +name = "clap_derive" +version = "4.5.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "501d359d5f3dcaf6ecdeee48833ae73ec6e42723a1e52419c79abf9507eec0a0" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn 2.0.77", +] + +[[package]] +name = "clap_lex" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97" + +[[package]] +name = "cobs" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67ba02a97a2bd10f4b59b25c7973101c79642302776489e030cd13cdab09ed15" + +[[package]] +name = "code_producers" +version = "2.1.9" +source = "git+https://github.com/olomix/circom.git?branch=master#22de6cb3200ed673c53201d9007ecdf21013783a" +dependencies = [ + "handlebars", + "lz_fnv", + "num-bigint-dig", + "serde_json", +] + +[[package]] +name = "codespan" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum 
= "8ebaf6bb6a863ad6aa3a18729e9710c53d75df03306714d9cc1f7357a00cd789" +dependencies = [ + "codespan-reporting", +] + +[[package]] +name = "codespan-reporting" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e0762455306b1ed42bc651ef6a2197aabda5e1d4a43c34d5eab5c1a3634e81d" +dependencies = [ + "termcolor", + "unicode-width", +] + +[[package]] +name = "colorchoice" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3fd119d74b830634cea2a0f58bbd0d54540518a14397557951e79340abc28c0" + +[[package]] +name = "compiler" +version = "2.1.9" +source = "git+https://github.com/olomix/circom.git?branch=master#22de6cb3200ed673c53201d9007ecdf21013783a" +dependencies = [ + "code_producers", + "constant_tracking", + "num-bigint-dig", + "num-traits", + "program_structure", +] + +[[package]] +name = "constant_time_eq" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c74b8349d32d297c9134b8c88677813a227df8f779daa29bfc29c183fe3dca6" + +[[package]] +name = "constant_tracking" +version = "2.0.0" +source = "git+https://github.com/olomix/circom.git?branch=master#22de6cb3200ed673c53201d9007ecdf21013783a" + +[[package]] +name = "constraint_generation" +version = "2.1.9" +source = "git+https://github.com/olomix/circom.git?branch=master#22de6cb3200ed673c53201d9007ecdf21013783a" +dependencies = [ + "ansi_term", + "circom_algebra", + "compiler", + "constraint_list", + "constraint_writers", + "dag", + "num-bigint-dig", + "num-traits", + "program_structure", +] + +[[package]] +name = "constraint_list" +version = "2.1.8" +source = "git+https://github.com/olomix/circom.git?branch=master#22de6cb3200ed673c53201d9007ecdf21013783a" +dependencies = [ + "circom_algebra", + "constraint_writers", + "json", + "num_cpus", + "program_structure", + "threadpool", +] + +[[package]] +name = "constraint_writers" +version = "2.1.8" +source = 
"git+https://github.com/olomix/circom.git?branch=master#22de6cb3200ed673c53201d9007ecdf21013783a" +dependencies = [ + "circom_algebra", + "json", +] + +[[package]] +name = "cpufeatures" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "608697df725056feaccfa42cffdaeeec3fccc4ffc38358ecd19b243e716a78e0" +dependencies = [ + "libc", +] + +[[package]] +name = "crossbeam-deque" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d" +dependencies = [ + "crossbeam-epoch", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" + +[[package]] +name = "crunchy" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" + +[[package]] +name = "crypto-common" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" +dependencies = [ + "generic-array 0.14.7", + "typenum", +] + +[[package]] +name = "dag" +version = "2.1.8" +source = "git+https://github.com/olomix/circom.git?branch=master#22de6cb3200ed673c53201d9007ecdf21013783a" +dependencies = [ + "circom_algebra", + "constraint_list", + "constraint_writers", + "json", + "program_structure", +] + +[[package]] +name = "derivative" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "diff" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8" + +[[package]] +name = "digest" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" +dependencies = [ + "generic-array 0.14.7", +] + +[[package]] +name = "digest" +version = "0.10.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" +dependencies = [ + "block-buffer", + "crypto-common", +] + +[[package]] +name = "dirs-next" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b98cf8ebf19c3d1b223e151f99a4f9f0690dca41414773390fc824184ac833e1" +dependencies = [ + "cfg-if", + "dirs-sys-next", +] + +[[package]] +name = "dirs-sys-next" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ebda144c4fe02d1f7ea1a7d9641b6fc6b580adcfa024ae48797ecdeb6825b4d" +dependencies = [ + "libc", + "redox_users", + "winapi", +] + +[[package]] +name = "either" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" + +[[package]] +name = "embedded-io" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef1a6892d9eef45c8fa6b9e0086428a2cca8491aca8f787c534a3d6d0bcb3ced" + +[[package]] +name = "embedded-io" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edd0f118536f44f5ccd48bcb8b111bdc3de888b58c74639dfb034a357d0f206d" + +[[package]] +name = "ena" +version = "0.14.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d248bdd43ce613d87415282f69b9bb99d947d290b10962dd6c56233312c2ad5" +dependencies = [ + "log", +] + +[[package]] +name = "equivalent" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" + +[[package]] +name = "errno" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "fastrand" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6" + +[[package]] +name = "fastrlp" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "139834ddba373bbdd213dffe02c8d110508dcf1726c2be27e8d1f7d7e1856418" +dependencies = [ + "arrayvec", + "auto_impl", + "bytes", +] + +[[package]] +name = "ff" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449" +dependencies = [ + "bitvec", + "byteorder", + "ff_derive", + "rand_core 0.6.4", + "subtle", +] + +[[package]] +name = "ff_derive" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e9f54704be45ed286151c5e11531316eaef5b8f5af7d597b806fdb8af108d84a" +dependencies = [ + "addchain", + "cfg-if", + "num-bigint 0.3.3", + "num-integer", + "num-traits", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "fixed-hash" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "835c052cb0c08c1acf6ffd71c022172e18723949c8282f2b9f27efbc51e64534" +dependencies = [ + "byteorder", + "rand 0.8.5", + "rustc-hex", + "static_assertions", +] + 
+[[package]] +name = "fixedbitset" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" + +[[package]] +name = "funty" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" + +[[package]] +name = "generic-array" +version = "0.14.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +dependencies = [ + "typenum", + "version_check", +] + +[[package]] +name = "generic-array" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96512db27971c2c3eece70a1e106fbe6c87760234e31e8f7e5634912fe52794a" +dependencies = [ + "typenum", +] + +[[package]] +name = "getrandom" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" +dependencies = [ + "cfg-if", + "libc", + "wasi 0.9.0+wasi-snapshot-preview1", +] + +[[package]] +name = "getrandom" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" +dependencies = [ + "cfg-if", + "js-sys", + "libc", + "wasi 0.11.0+wasi-snapshot-preview1", + "wasm-bindgen", +] + +[[package]] +name = "glob" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" + +[[package]] +name = "group" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" +dependencies = [ + "ff", + "rand 0.8.5", + "rand_core 0.6.4", + "rand_xorshift", + "subtle", +] + +[[package]] +name = 
"halo2curves" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db81d01d0bbfec9f624d7590fc6929ee2537a64ec1e080d8f8c9e2d2da291405" +dependencies = [ + "blake2b_simd", + "ff", + "group", + "hex", + "lazy_static", + "num-bigint 0.4.6", + "num-traits", + "pairing", + "pasta_curves", + "paste", + "rand 0.8.5", + "rand_core 0.6.4", + "rayon", + "serde", + "serde_arrays", + "static_assertions", + "subtle", +] + +[[package]] +name = "handlebars" +version = "4.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "faa67bab9ff362228eb3d00bd024a4965d8231bbb7921167f0cfa66c6626b225" +dependencies = [ + "log", + "pest", + "pest_derive", + "serde", + "serde_json", + "thiserror", +] + +[[package]] +name = "hashbrown" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" +dependencies = [ + "ahash", +] + +[[package]] +name = "hashbrown" +version = "0.14.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" + +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + +[[package]] +name = "hermit-abi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" + +[[package]] +name = "hermit-abi" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc" + +[[package]] +name = "hex" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +dependencies = [ + 
"serde", +] + +[[package]] +name = "home" +version = "0.5.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5" +dependencies = [ + "windows-sys 0.52.0", +] + +[[package]] +name = "impl-codec" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" +dependencies = [ + "parity-scale-codec", +] + +[[package]] +name = "impl-trait-for-tuples" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "indexmap" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68b900aa2f7301e21c36462b170ee99994de34dff39a4a6a528e80e7376d07e5" +dependencies = [ + "equivalent", + "hashbrown 0.14.5", +] + +[[package]] +name = "is-terminal" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "261f68e344040fbd0edea105bef17c66edf46f984ddb1115b775ce31be948f4b" +dependencies = [ + "hermit-abi 0.4.0", + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "is_terminal_polyfill" +version = "1.70.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" + +[[package]] +name = "itertools" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +dependencies = [ + "either", +] + +[[package]] +name = "itertools" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" +dependencies = [ + "either", +] + 
+[[package]] +name = "itoa" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" + +[[package]] +name = "js-sys" +version = "0.3.70" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1868808506b929d7b0cfa8f75951347aa71bb21144b7791bae35d9bccfcfe37a" +dependencies = [ + "wasm-bindgen", +] + +[[package]] +name = "json" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "078e285eafdfb6c4b434e0d31e8cfcb5115b651496faca5749b88fafd4f23bfd" + +[[package]] +name = "keccak" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecc2af9a1119c51f12a14607e783cb977bde58bc069ff0c3da1095e635d70654" +dependencies = [ + "cpufeatures", +] + +[[package]] +name = "lalrpop" +version = "0.19.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a1cbf952127589f2851ab2046af368fd20645491bb4b376f04b7f94d7a9837b" +dependencies = [ + "ascii-canvas", + "bit-set", + "diff", + "ena", + "is-terminal", + "itertools 0.10.5", + "lalrpop-util", + "petgraph", + "regex", + "regex-syntax 0.6.29", + "string_cache", + "term", + "tiny-keccak", + "unicode-xid", +] + +[[package]] +name = "lalrpop-util" +version = "0.19.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3c48237b9604c5a4702de6b824e02006c3214327564636aef27c1028a8fa0ed" +dependencies = [ + "regex", +] + +[[package]] +name = "lazy_static" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" +dependencies = [ + "spin", +] + +[[package]] +name = "lazycell" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" + +[[package]] +name = "libc" 
+version = "0.2.158" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439" + +[[package]] +name = "libloading" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" +dependencies = [ + "cfg-if", + "windows-targets", +] + +[[package]] +name = "libm" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" + +[[package]] +name = "libredox" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" +dependencies = [ + "bitflags", + "libc", +] + +[[package]] +name = "linux-raw-sys" +version = "0.4.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" + +[[package]] +name = "lock_api" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" +dependencies = [ + "autocfg 1.3.0", + "scopeguard", +] + +[[package]] +name = "log" +version = "0.4.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" + +[[package]] +name = "lz_fnv" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9bbb1b0dbe51f0976eaa466f4e0bdc11856fe8008aee26f30ccec8de15b28e38" + +[[package]] +name = "memchr" +version = "2.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" + +[[package]] +name = "minimal-lexical" +version = "0.2.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + +[[package]] +name = "multimap" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "defc4c55412d89136f966bbb339008b474350e5e6e78d2714439c386b3137a03" + +[[package]] +name = "neptune" +version = "13.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06626c9ac04c894e9a23d061ba1309f28506cdc5fe64156d28a15fb57fc8e438" +dependencies = [ + "bellpepper", + "bellpepper-core", + "blake2s_simd", + "blstrs", + "byteorder", + "ff", + "generic-array 0.14.7", + "log", + "pasta_curves", + "serde", + "trait-set", +] + +[[package]] +name = "new_debug_unreachable" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "650eef8c711430f1a879fdd01d4745a7deea475becfb90269c06775983bbf086" + +[[package]] +name = "nom" +version = "7.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" +dependencies = [ + "memchr", + "minimal-lexical", +] + +[[package]] +name = "nova-snark" +version = "0.37.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69b80bc67f3e77ad68dec356b5df15e8ce30d8855fc76e92782945a5fa74d6fc" +dependencies = [ + "bellpepper", + "bellpepper-core", + "bincode", + "bitvec", + "byteorder", + "digest 0.10.7", + "ff", + "generic-array 1.1.0", + "getrandom 0.2.15", + "group", + "halo2curves", + "itertools 0.12.1", + "neptune", + "num-bigint 0.4.6", + "num-integer", + "num-traits", + "once_cell", + "pasta-msm", + "pasta_curves", + "rand_chacha 0.3.1", + "rand_core 0.6.4", + "rayon", + "serde", + "sha3", + "subtle", + "thiserror", +] + +[[package]] +name = "num-bigint" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"5f6f7833f2cbf2360a6cfd58cd41a53aa7a90bd4c202f5b1c7dd2ed73c57b2c3" +dependencies = [ + "autocfg 1.3.0", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-bigint" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" +dependencies = [ + "num-integer", + "num-traits", + "rand 0.8.5", + "serde", +] + +[[package]] +name = "num-bigint-dig" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d51546d704f52ef14b3c962b5776e53d5b862e5790e40a350d366c209bd7f7a" +dependencies = [ + "autocfg 0.1.8", + "byteorder", + "lazy_static", + "libm", + "num-integer", + "num-iter", + "num-traits", + "rand 0.7.3", + "serde", + "smallvec", +] + +[[package]] +name = "num-integer" +version = "0.1.46" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" +dependencies = [ + "num-traits", +] + +[[package]] +name = "num-iter" +version = "0.1.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf" +dependencies = [ + "autocfg 1.3.0", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-traits" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +dependencies = [ + "autocfg 1.3.0", + "libm", +] + +[[package]] +name = "num_cpus" +version = "1.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" +dependencies = [ + "hermit-abi 0.3.9", + "libc", +] + +[[package]] +name = "once_cell" +version = "1.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" + +[[package]] +name = "pairing" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81fec4625e73cf41ef4bb6846cafa6d44736525f442ba45e407c4a000a13996f" +dependencies = [ + "group", +] + +[[package]] +name = "parity-scale-codec" +version = "3.6.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "306800abfa29c7f16596b5970a588435e3d5b3149683d00c12b699cc19f895ee" +dependencies = [ + "arrayvec", + "bitvec", + "byte-slice-cast", + "impl-trait-for-tuples", + "parity-scale-codec-derive", + "serde", +] + +[[package]] +name = "parity-scale-codec-derive" +version = "3.6.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d830939c76d294956402033aee57a6da7b438f2294eb94864c37b0569053a42c" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "parking_lot" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" +dependencies = [ + "lock_api", + "parking_lot_core", +] + +[[package]] +name = "parking_lot_core" +version = "0.9.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" +dependencies = [ + "cfg-if", + "libc", + "redox_syscall", + "smallvec", + "windows-targets", +] + +[[package]] +name = "parser" +version = "2.1.9" +source = "git+https://github.com/olomix/circom.git?branch=master#22de6cb3200ed673c53201d9007ecdf21013783a" +dependencies = [ + "lalrpop", + "lalrpop-util", + "num-bigint-dig", + "num-traits", + "program_structure", + "regex", + "rustc-hex", + "serde", + "serde_derive", +] + +[[package]] +name = "pasta-msm" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"9e85d75eba3e7e9ee3bd11342b669185e194dadda3557934bc1000d9b87159d3" +dependencies = [ + "cc", + "pasta_curves", + "semolina", + "sppark", + "which", +] + +[[package]] +name = "pasta_curves" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3e57598f73cc7e1b2ac63c79c517b31a0877cd7c402cdcaa311b5208de7a095" +dependencies = [ + "blake2b_simd", + "ff", + "group", + "hex", + "lazy_static", + "rand 0.8.5", + "serde", + "static_assertions", + "subtle", +] + +[[package]] +name = "paste" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" + +[[package]] +name = "pest" +version = "2.7.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c73c26c01b8c87956cea613c907c9d6ecffd8d18a2a5908e5de0adfaa185cea" +dependencies = [ + "memchr", + "thiserror", + "ucd-trie", +] + +[[package]] +name = "pest_derive" +version = "2.7.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "664d22978e2815783adbdd2c588b455b1bd625299ce36b2a99881ac9627e6d8d" +dependencies = [ + "pest", + "pest_generator", +] + +[[package]] +name = "pest_generator" +version = "2.7.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2d5487022d5d33f4c30d91c22afa240ce2a644e87fe08caad974d4eab6badbe" +dependencies = [ + "pest", + "pest_meta", + "proc-macro2", + "quote", + "syn 2.0.77", +] + +[[package]] +name = "pest_meta" +version = "2.7.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0091754bbd0ea592c4deb3a122ce8ecbb0753b738aa82bc055fcc2eccc8d8174" +dependencies = [ + "once_cell", + "pest", + "sha2", +] + +[[package]] +name = "petgraph" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" +dependencies = [ + "fixedbitset", + 
"indexmap", +] + +[[package]] +name = "phf_shared" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6796ad771acdc0123d2a88dc428b5e38ef24456743ddb1744ed628f9815c096" +dependencies = [ + "siphasher", +] + +[[package]] +name = "postcard" +version = "1.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f7f0a8d620d71c457dd1d47df76bb18960378da56af4527aaa10f515eee732e" +dependencies = [ + "cobs", + "embedded-io 0.4.0", + "embedded-io 0.6.1", + "serde", +] + +[[package]] +name = "ppv-lite86" +version = "0.2.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" +dependencies = [ + "zerocopy", +] + +[[package]] +name = "precomputed-hash" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c" + +[[package]] +name = "prettyplease" +version = "0.2.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "479cf940fbbb3426c32c5d5176f62ad57549a0bb84773423ba8be9d089f5faba" +dependencies = [ + "proc-macro2", + "syn 2.0.77", +] + +[[package]] +name = "primitive-types" +version = "0.12.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b34d9fd68ae0b74a41b21c03c2f62847aa0ffea044eee893b4c140b37e244e2" +dependencies = [ + "fixed-hash", + "impl-codec", + "uint", +] + +[[package]] +name = "proc-macro-crate" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecf48c7ca261d60b74ab1a7b20da18bede46776b2e55535cb958eb595c5fa7b" +dependencies = [ + "toml_edit", +] + +[[package]] +name = "proc-macro2" +version = "1.0.86" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" +dependencies = [ + "unicode-ident", +] + 
+[[package]] +name = "program_structure" +version = "2.1.9" +source = "git+https://github.com/olomix/circom.git?branch=master#22de6cb3200ed673c53201d9007ecdf21013783a" +dependencies = [ + "codespan", + "codespan-reporting", + "num-bigint-dig", + "num-traits", + "regex", + "rustc-hex", + "serde", + "serde_derive", +] + +[[package]] +name = "proofs" +version = "0.1.0" +dependencies = [ + "bellpepper-core", + "byteorder", + "circom_witnesscalc", + "clap", + "ff", + "nova-snark", + "num-bigint 0.4.6", + "num-traits", + "serde", + "serde_json", +] + +[[package]] +name = "proptest" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4c2511913b88df1637da85cc8d96ec8e43a3f8bb8ccb71ee1ac240d6f3df58d" +dependencies = [ + "bitflags", + "lazy_static", + "num-traits", + "rand 0.8.5", + "rand_chacha 0.3.1", + "rand_xorshift", + "regex-syntax 0.8.4", + "unarray", +] + +[[package]] +name = "prost" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b2ecbe40f08db5c006b5764a2645f7f3f141ce756412ac9e1dd6087e6d32995" +dependencies = [ + "bytes", + "prost-derive", +] + +[[package]] +name = "prost-build" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8650aabb6c35b860610e9cff5dc1af886c9e25073b7b1712a68972af4281302" +dependencies = [ + "bytes", + "heck", + "itertools 0.12.1", + "log", + "multimap", + "once_cell", + "petgraph", + "prettyplease", + "prost", + "prost-types", + "regex", + "syn 2.0.77", + "tempfile", +] + +[[package]] +name = "prost-derive" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "acf0c195eebb4af52c752bec4f52f645da98b6e92077a04110c7f349477ae5ac" +dependencies = [ + "anyhow", + "itertools 0.12.1", + "proc-macro2", + "quote", + "syn 2.0.77", +] + +[[package]] +name = "prost-types" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"60caa6738c7369b940c3d49246a8d1749323674c65cb13010134f5c9bad5b519" +dependencies = [ + "prost", +] + +[[package]] +name = "quote" +version = "1.0.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "radium" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" + +[[package]] +name = "rand" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" +dependencies = [ + "getrandom 0.1.16", + "libc", + "rand_chacha 0.2.2", + "rand_core 0.5.1", + "rand_hc", +] + +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha 0.3.1", + "rand_core 0.6.4", +] + +[[package]] +name = "rand_chacha" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" +dependencies = [ + "ppv-lite86", + "rand_core 0.5.1", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core 0.6.4", +] + +[[package]] +name = "rand_core" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" +dependencies = [ + "getrandom 0.1.16", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom 0.2.15", +] + +[[package]] +name = "rand_hc" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" +dependencies = [ + "rand_core 0.5.1", +] + +[[package]] +name = "rand_xorshift" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" +dependencies = [ + "rand_core 0.6.4", +] + +[[package]] +name = "rayon" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" +dependencies = [ + "either", + "rayon-core", +] + +[[package]] +name = "rayon-core" +version = "1.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" +dependencies = [ + "crossbeam-deque", + "crossbeam-utils", +] + +[[package]] +name = "redox_syscall" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0884ad60e090bf1345b93da0a5de8923c93884cd03f40dfcfddd3b4bee661853" +dependencies = [ + "bitflags", +] + +[[package]] +name = "redox_users" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" +dependencies = [ + "getrandom 0.2.15", + "libredox", + "thiserror", +] + +[[package]] +name = "regex" +version = "1.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata", + "regex-syntax 0.8.4", +] + +[[package]] +name = "regex-automata" +version = "0.4.7" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax 0.8.4", +] + +[[package]] +name = "regex-syntax" +version = "0.6.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" + +[[package]] +name = "regex-syntax" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b" + +[[package]] +name = "rlp" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb919243f34364b6bd2fc10ef797edbfa75f33c252e7998527479c6d6b47e1ec" +dependencies = [ + "bytes", + "rustc-hex", +] + +[[package]] +name = "ruint" +version = "1.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c3cc4c2511671f327125da14133d0c5c5d137f006a1017a16f557bc85b16286" +dependencies = [ + "alloy-rlp", + "ark-ff 0.3.0", + "ark-ff 0.4.2", + "bytes", + "fastrlp", + "num-bigint 0.4.6", + "num-traits", + "parity-scale-codec", + "primitive-types", + "proptest", + "rand 0.8.5", + "rlp", + "ruint-macro", + "serde", + "valuable", + "zeroize", +] + +[[package]] +name = "ruint-macro" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48fd7bd8a6377e15ad9d42a8ec25371b94ddc67abe7c8b9127bec79bebaaae18" + +[[package]] +name = "rustc-hash" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" + +[[package]] +name = "rustc-hex" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" + +[[package]] +name = "rustc_version" +version = "0.3.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0dfe2087c51c460008730de8b57e6a320782fbfb312e1f4d520e6c6fae155ee" +dependencies = [ + "semver 0.11.0", +] + +[[package]] +name = "rustc_version" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" +dependencies = [ + "semver 1.0.23", +] + +[[package]] +name = "rustix" +version = "0.38.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8acb788b847c24f28525660c4d7758620a7210875711f79e7f663cc152726811" +dependencies = [ + "bitflags", + "errno", + "libc", + "linux-raw-sys", + "windows-sys 0.52.0", +] + +[[package]] +name = "rustversion" +version = "1.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6" + +[[package]] +name = "ryu" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" + +[[package]] +name = "scopeguard" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" + +[[package]] +name = "semolina" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b0111fd4fa831becb0606b9a2285ef3bee3c6a70d690209b8ae9514e9befe23" +dependencies = [ + "cc", + "glob", +] + +[[package]] +name = "semver" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6" +dependencies = [ + "semver-parser", +] + +[[package]] +name = "semver" +version = "1.0.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" + +[[package]] 
+name = "semver-parser" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0bef5b7f9e0df16536d3961cfb6e84331c065b4066afb39768d0e319411f7" +dependencies = [ + "pest", +] + +[[package]] +name = "serde" +version = "1.0.210" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_arrays" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38636132857f68ec3d5f3eb121166d2af33cb55174c4d5ff645db6165cbef0fd" +dependencies = [ + "serde", +] + +[[package]] +name = "serde_derive" +version = "1.0.210" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.77", +] + +[[package]] +name = "serde_json" +version = "1.0.128" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" +dependencies = [ + "itoa", + "memchr", + "ryu", + "serde", +] + +[[package]] +name = "sha2" +version = "0.10.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest 0.10.7", +] + +[[package]] +name = "sha3" +version = "0.10.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" +dependencies = [ + "digest 0.10.7", + "keccak", +] + +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + +[[package]] +name = "siphasher" +version = "0.3.11" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "38b58827f4464d87d377d175e90bf58eb00fd8716ff0a62f80356b5e61555d0d" + +[[package]] +name = "smallvec" +version = "1.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" + +[[package]] +name = "spin" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" + +[[package]] +name = "sppark" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c55f3833d30846a26110dccb1d5366314c2c52516a9173b74238c16b24b1a9f9" +dependencies = [ + "cc", + "which", +] + +[[package]] +name = "static_assertions" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" + +[[package]] +name = "string_cache" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f91138e76242f575eb1d3b38b4f1362f10d3a43f47d182a5b359af488a02293b" +dependencies = [ + "new_debug_unreachable", + "once_cell", + "parking_lot", + "phf_shared", + "precomputed-hash", +] + +[[package]] +name = "strsim" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" + +[[package]] +name = "subtle" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" + +[[package]] +name = "syn" +version = "1.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.77" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f35bcdf61fd8e7be6caf75f429fdca8beb3ed76584befb503b1569faee373ed" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "tap" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" + +[[package]] +name = "tempfile" +version = "3.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04cbcdd0c794ebb0d4cf35e88edd2f7d2c4c3e9a5a6dab322839b321c6a87a64" +dependencies = [ + "cfg-if", + "fastrand", + "once_cell", + "rustix", + "windows-sys 0.59.0", +] + +[[package]] +name = "term" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c59df8ac95d96ff9bede18eb7300b0fda5e5d8d90960e76f8e14ae765eedbf1f" +dependencies = [ + "dirs-next", + "rustversion", + "winapi", +] + +[[package]] +name = "termcolor" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "thiserror" +version = "1.0.63" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0342370b38b6a11b6cc11d6a805569958d54cfa061a29969c3b5ce2ea405724" +dependencies = [ + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.63" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.77", +] + +[[package]] +name = "threadpool" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d050e60b33d41c19108b32cea32164033a9013fe3b46cbd4457559bfbf77afaa" +dependencies = [ + "num_cpus", +] + +[[package]] +name = "tiny-keccak" +version = 
"2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" +dependencies = [ + "crunchy", +] + +[[package]] +name = "toml_datetime" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" + +[[package]] +name = "toml_edit" +version = "0.22.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b072cee73c449a636ffd6f32bd8de3a9f7119139aff882f44943ce2986dc5cf" +dependencies = [ + "indexmap", + "toml_datetime", + "winnow", +] + +[[package]] +name = "trait-set" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b79e2e9c9ab44c6d7c20d5976961b47e8f49ac199154daa514b77cd1ab536625" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "type_analysis" +version = "2.1.9" +source = "git+https://github.com/olomix/circom.git?branch=master#22de6cb3200ed673c53201d9007ecdf21013783a" +dependencies = [ + "num-bigint-dig", + "num-traits", + "program_structure", +] + +[[package]] +name = "typenum" +version = "1.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" + +[[package]] +name = "ucd-trie" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" + +[[package]] +name = "uint" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76f64bba2c53b04fcab63c01a7d7427eadc821e3bc48c34dc9ba29c501164b52" +dependencies = [ + "byteorder", + "crunchy", + "hex", + "static_assertions", +] + +[[package]] +name = "unarray" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" + +[[package]] +name = "unicode-ident" +version = "1.0.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" + +[[package]] +name = "unicode-width" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0336d538f7abc86d282a4189614dfaa90810dfc2c6f6427eaf88e16311dd225d" + +[[package]] +name = "unicode-xid" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "229730647fbc343e3a80e463c1db7f78f3855d3f3739bee0dda773c9a037c90a" + +[[package]] +name = "utf8parse" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" + +[[package]] +name = "valuable" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" + +[[package]] +name = "version_check" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" + +[[package]] +name = "wasi" +version = "0.9.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" + +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + +[[package]] +name = "wasm-bindgen" +version = "0.2.93" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a82edfc16a6c469f5f44dc7b571814045d60404b55a0ee849f9bcfa2e63dd9b5" +dependencies = [ + "cfg-if", + "once_cell", + "wasm-bindgen-macro", +] + 
+[[package]] +name = "wasm-bindgen-backend" +version = "0.2.93" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9de396da306523044d3302746f1208fa71d7532227f15e347e2d93e4145dd77b" +dependencies = [ + "bumpalo", + "log", + "once_cell", + "proc-macro2", + "quote", + "syn 2.0.77", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.93" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "585c4c91a46b072c92e908d99cb1dcdf95c5218eeb6f3bf1efa991ee7a68cccf" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.93" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.77", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.93" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484" + +[[package]] +name = "which" +version = "4.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" +dependencies = [ + "either", + "home", + "once_cell", + "rustix", +] + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-util" +version = "0.1.9" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" +dependencies = [ + "windows-sys 0.52.0", +] + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-targets" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_gnullvm", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnullvm" 
+version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" + +[[package]] +name = "winnow" +version = "0.6.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68a9bda4691f099d435ad181000724da8e5899daa10713c2d432552b9ccd3a6f" +dependencies = [ + "memchr", +] + +[[package]] +name = "wtns-file" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d3b856452298f68a5879e3901918bac5d753ca9fa4be8a983a37a3d25dabf0a" +dependencies = [ + "byteorder", +] + +[[package]] +name = "wyz" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed" +dependencies = [ + "tap", +] + +[[package]] +name = "zerocopy" +version = "0.7.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" +dependencies = [ + "byteorder", + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version 
= "0.7.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.77", +] + +[[package]] +name = "zeroize" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" +dependencies = [ + "zeroize_derive", +] + +[[package]] +name = "zeroize_derive" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.77", +] diff --git a/frontend/Cargo.toml b/frontend/Cargo.toml new file mode 100644 index 0000000..46fcb44 --- /dev/null +++ b/frontend/Cargo.toml @@ -0,0 +1,44 @@ +[package] +name ="frontend" +version="0.1.0" +edition="2021" + +[dependencies] +client-side-prover={ path="../prover" } + +serde ={ workspace=true } +serde_json ={ workspace=true } +thiserror ={ workspace=true } +tracing ={ workspace=true } +hex ={ workspace=true } +ff ={ workspace=true } +bellpepper-core ={ workspace=true } +halo2curves ={ workspace=true } + +byteorder ={ workspace=true } +num-bigint={ workspace=true } +itertools ={ workspace=true } +bincode={ workspace=true } + +# noir +acvm ={ git="https://github.com/noir-lang/noir", rev="v1.0.0-beta.2" } +ark-bn254="0.5" + +#- Wasm target configuration ----------------------------------------------------------------------# +[target.'cfg(target_arch = "wasm32")'.dependencies] +serde-wasm-bindgen ="0.6.5" +wasm-bindgen ="0.2.87" +js-sys ="0.3.64" +wasm-bindgen-futures="0.4.37" + +[target.'cfg(not(target_arch = "wasm32"))'.dependencies] +circom_witnesscalc={ git="https://github.com/pluto/circom-witnesscalc" } # Fork is needed apparently?? 
+ +[dev-dependencies] +tracing-test="0.2.5" +tempdir ="0.3.7" +tokio = { version = "1.43", features = ["full"] } + +[features] +verify-steps=[] +timing =[] diff --git a/frontend/examples/noir_circuit_data/add_external.json b/frontend/examples/noir_circuit_data/add_external.json new file mode 100644 index 0000000..9dc5d4c --- /dev/null +++ b/frontend/examples/noir_circuit_data/add_external.json @@ -0,0 +1 @@ +{"noir_version":"1.0.0-beta.1+03b58fa2dfcc8acc8cf5198b1b23b55676fbdb02","hash":6167922121693004072,"abi":{"parameters":[{"name":"external","type":{"kind":"array","length":2,"type":{"kind":"field"}},"visibility":"private"},{"name":"step_in","type":{"kind":"array","length":2,"type":{"kind":"field"}},"visibility":"public"}],"return_type":{"abi_type":{"kind":"array","length":2,"type":{"kind":"field"}},"visibility":"public"},"error_types":{}},"bytecode":"H4sIAAAAAAAA/7WSQQoDMQhFJ5nMfTTqRHe9SkMz9z9CW8ZCaJd1Hoi4eeDnp+Vke01efll933wT7Myj1YGEd6jWVYCl74qKovKoSjSUtVm3BoZMAw8xOuAk0pUnF/wHljgXXJlfCnStgfltgfm9fyzumjuZpjt7tp+7eIe/eQIHkT9F4AIAAA==","debug_symbols":"XYxLCoAwDAXvkrUn8Coi0k9aAqEpsRWk9O5+cCFdzhveNPBoa9woBdlhXhqwOFNI0k2tT2CVmCluw3wYJWMZPww1uZ8tZ8bhn1Uc+qr4lF7X134B","file_map":{},"names":["main"],"brillig_names":[]} \ No newline at end of file diff --git a/frontend/examples/noir_circuit_data/square_zeroth.json b/frontend/examples/noir_circuit_data/square_zeroth.json new file mode 100644 index 0000000..70f42dc --- /dev/null +++ b/frontend/examples/noir_circuit_data/square_zeroth.json @@ -0,0 +1 @@ 
+{"noir_version":"1.0.0-beta.1+03b58fa2dfcc8acc8cf5198b1b23b55676fbdb02","hash":2978522905452580593,"abi":{"parameters":[{"name":"step_in","type":{"kind":"array","length":2,"type":{"kind":"field"}},"visibility":"public"}],"return_type":{"abi_type":{"kind":"array","length":2,"type":{"kind":"field"}},"visibility":"public"},"error_types":{}},"bytecode":"H4sIAAAAAAAA/62SSw6FMAhF+3kLggIWZm8rNtb9L0GNxm8c2ZMQEgY3HIJ3K3Gu4A781v9bJ+iYa04VCXtIVlSApXSKiqIyJCWqypqtWAZDpoqjGI2w8pYL38DQLgtO+vstWvn7hs6xofOy189duf9BOM2jezIBi8Z/+kQCAAA=","debug_symbols":"XYxLCoAwDAXvkrUn8Coi0k9aAqEpsRWk9O5+cCFdzhveNPBoa9woBdlhXhqwOFNI0k2tT2CVmCluw3wYJWMZPww1uZ8tZ8bhn1Uc+qr4lF7X134B","file_map":{},"names":["main"],"brillig_names":[]} \ No newline at end of file diff --git a/frontend/examples/noir_circuit_data/swap_memory.json b/frontend/examples/noir_circuit_data/swap_memory.json new file mode 100644 index 0000000..41bfe20 --- /dev/null +++ b/frontend/examples/noir_circuit_data/swap_memory.json @@ -0,0 +1 @@ +{"noir_version":"1.0.0-beta.1+03b58fa2dfcc8acc8cf5198b1b23b55676fbdb02","hash":16112970499548868061,"abi":{"parameters":[{"name":"step_in","type":{"kind":"array","length":2,"type":{"kind":"field"}},"visibility":"public"}],"return_type":{"abi_type":{"kind":"array","length":2,"type":{"kind":"field"}},"visibility":"public"},"error_types":{}},"bytecode":"H4sIAAAAAAAA/7WSQQqAMAwE2+qDkiaxyc2vWKz/f4KKCkXxZB0IITksLLveHXTbBPfk+o3nJhiYS4oFCSeIllWAJQ+KiqIyRyUqyposWwJDpoKLGC1w4Cst+AaGdlrwp2fX0HPX0POeRf+S946v7nB25M4K4O7a9UACAAA=","debug_symbols":"XYxLCoAwDAXvkrUn8Coi0k9aAqEpsRWk9O5+cCFdzhveNPBoa9woBdlhXhqwOFNI0k2tT2CVmCluw3wYJWMZPww1uZ8tZ8bhn1Uc+qr4lF7X134B","file_map":{},"names":["main"],"brillig_names":[]} \ No newline at end of file diff --git a/frontend/mock/fold.json b/frontend/mock/fold.json new file mode 100644 index 0000000..919d152 --- /dev/null +++ b/frontend/mock/fold.json @@ -0,0 +1 @@ 
+{"noir_version":"1.0.0-beta.1+03b58fa2dfcc8acc8cf5198b1b23b55676fbdb02","hash":12961822853839078970,"abi":{"parameters":[{"name":"x0","type":{"kind":"field"},"visibility":"private"},{"name":"w","type":{"kind":"array","length":2,"type":{"kind":"field"}},"visibility":"public"}],"return_type":{"abi_type":{"kind":"array","length":2,"type":{"kind":"field"}},"visibility":"public"},"error_types":{}},"bytecode":"H4sIAAAAAAAA/7WSWwoDMQhFJ48FaYwT/etWGmr2v4S2MxMa+lt7IEhADlw1bCf59eL2IVz1dlWCvVZrxZDwDkW7MFTuu6AgCz+KEJlUadq1gWIlw8FKA06mMzp74+KC38D0h7zeM3znTc7e4OCycWCe+8h+riNj/rrtOc/1Nuc/Lf0rT0Upl3gwAwAA","debug_symbols":"XYxLCoAwDAXvkrUn8Coi0k9aAqEpsRWk9O5+cCFdzhveNPBoa9woBdlhXhqwOFNI0k2tT2CVmCluw3wYJWMZPww1uZ8tZ8bhn1Uc+qr4lF7X134B","file_map":{},"names":["main"],"brillig_names":[]} \ No newline at end of file diff --git a/frontend/mock/mock.json b/frontend/mock/mock.json new file mode 100644 index 0000000..d1a9128 --- /dev/null +++ b/frontend/mock/mock.json @@ -0,0 +1 @@ +{"noir_version":"1.0.0-beta.1+03b58fa2dfcc8acc8cf5198b1b23b55676fbdb02","hash":2759475469106495053,"abi":{"parameters":[{"name":"x0","type":{"kind":"field"},"visibility":"public"},{"name":"w0","type":{"kind":"field"},"visibility":"private"},{"name":"w1","type":{"kind":"field"},"visibility":"private"}],"return_type":null,"error_types":{}},"bytecode":"H4sIAAAAAAAA/62QQQoAIAgEVXpQ9YL+/6qKWhTBkw6IexqWZXrIOSYFef3fcww4q71S55q3WzN7oLPfJmIDlIN1nEwBAAA=","debug_symbols":"TYxLCsMwDAXvonUWTVuy8FVKCf7IQWBsI9uFYnL3KCGB7DRv0HRwaNoyU/SpgPp0CMnqSikKdXgcU8k67lSq5gpqfI8DYHRyTc91AE8BQU2v9StgmEKgZb5nZP5pJm0CnuhbtDdb//ky13/mZNE1xr10OMlv","file_map":{"63":{"source":"use http::parse;\n\n// pub fn main(data: str<1024>) {\n// parse(data);\n// }\n\npub fn main(x0: pub Field, w0: Field, w1: Field) {\n assert(x0 * w0 + w1 + 2 == 0);\n}","path":"/Users/autoparallel/Code/noir-web-prover-circuits/bin/src/main.nr"}},"names":["main"],"brillig_names":[]} \ No newline at end of file diff --git a/frontend/src/circom/mod.rs 
b/frontend/src/circom/mod.rs new file mode 100644 index 0000000..b243310 --- /dev/null +++ b/frontend/src/circom/mod.rs @@ -0,0 +1,172 @@ +//! # Circom Module +//! +//! The `circom` module provides utilities for working with Circom circuits within the `proofs` +//! crate. It includes functionalities for handling R1CS (Rank-1 Constraint System) representations +//! of circuits, managing circuit inputs, and generating witnesses for the circuits. +//! +//! ## Modules +//! +//! - `r1cs`: Contains the implementation and utilities for working with R1CS representations of +//! Circom circuits. +//! - `wasm_witness`: Provides functionalities for generating witnesses using WebAssembly (only +//! available for `wasm32` target). +//! - `witness`: Contains utilities for generating witnesses for Circom circuits. +//! +//! ## Structs +//! +//! - `CircomInput`: Represents the input structure for Circom circuits, including step inputs and +//! additional parameters. +//! - `CircuitJson`: Represents the JSON structure of a Circom circuit, including constraints, +//! number of inputs, outputs, and variables. +//! - `CircomCircuit`: Represents a Circom circuit, including its R1CS representation and optional +//! witness data. 
+ +use std::{ + collections::{BTreeMap, HashMap}, + env::current_dir, + fs, + io::{BufReader, Cursor, Read, Seek, SeekFrom}, + path::PathBuf, + process::Command, + sync::Arc, +}; + +use bellpepper_core::{num::AllocatedNum, ConstraintSystem, LinearCombination, SynthesisError}; +use byteorder::{LittleEndian, ReadBytesExt}; +use ff::PrimeField; +use r1cs::R1CS; +use serde::{Deserialize, Serialize}; + +use super::*; + +pub mod r1cs; +#[cfg(target_arch = "wasm32")] pub mod wasm_witness; +pub mod witness; + +/// Circom input +#[derive(Debug, Serialize, Deserialize)] +pub struct CircomInput { + /// Step inputs + pub step_in: Vec, + /// Extra parameters + #[serde(flatten)] + pub extra: HashMap, +} + +/// Circuit JSON +#[derive(Serialize, Deserialize)] +pub struct CircuitJson { + /// Constraints + pub constraints: Vec>>, + /// Number of inputs + #[serde(rename = "nPubInputs")] + pub num_inputs: usize, + /// Number of outputs + #[serde(rename = "nOutputs")] + pub num_outputs: usize, + /// Number of variables + #[serde(rename = "nVars")] + pub num_variables: usize, +} + +/// Circom circuit +#[derive(Clone)] +pub struct CircomCircuit { + /// R1CS + pub r1cs: Arc, + /// Witness + pub witness: Option>>, +} + +// NOTE (Colin): This is added so we can cache only the active circuits we are using. +#[allow(clippy::derivable_impls)] +impl Default for CircomCircuit { + fn default() -> Self { Self { r1cs: Arc::new(R1CS::default()), witness: None } } +} + +impl CircomCircuit { + /// Return the arity of the circuit ie the number of public inputs + pub fn arity(&self) -> usize { self.r1cs.num_public_inputs } + + /// Vanilla synthesize + /// + /// This function synthesizes the circuit using the provided constraint system. + /// + /// # Arguments + /// + /// * `cs`: The constraint system to use for synthesis. + /// * `z`: The witness values to use for synthesis. 
+ pub fn vanilla_synthesize>>( + &self, + cs: &mut CS, + z: &[AllocatedNum>], + ) -> Result>>, SynthesisError> { + let witness = &self.witness; + + let mut vars: Vec>> = vec![]; + let mut z_out: Vec>> = vec![]; + let pub_output_count = self.r1cs.num_public_outputs; + + for i in 1..self.r1cs.num_inputs { + // Public inputs do not exist, so we alloc, and later enforce equality from z values + let f: F = { + match witness { + None => F::::ONE, + Some(w) => w[i], + } + }; + let v = AllocatedNum::alloc(cs.namespace(|| format!("public_{}", i)), || Ok(f))?; + + vars.push(v.clone()); + if i <= pub_output_count { + // public output + z_out.push(v); + } + } + for i in 0..self.r1cs.num_aux { + // Private witness trace + let f: F = { + match witness { + None => F::::ONE, + Some(w) => w[i + self.r1cs.num_inputs], + } + }; + + let v = AllocatedNum::alloc(cs.namespace(|| format!("aux_{}", i)), || Ok(f))?; + vars.push(v); + } + + let make_lc = |lc_data: Vec<(usize, F)>| { + let res = lc_data.iter().fold( + LinearCombination::>::zero(), + |lc: LinearCombination>, (index, coeff)| { + lc + if *index > 0 { + (*coeff, vars[*index - 1].get_variable()) + } else { + (*coeff, CS::one()) + } + }, + ); + res + }; + for (i, constraint) in self.r1cs.constraints.iter().enumerate() { + cs.enforce( + || format!("constraint {}", i), + |_| make_lc(constraint.0.clone()), + |_| make_lc(constraint.1.clone()), + |_| make_lc(constraint.2.clone()), + ); + } + + for i in (pub_output_count + 1)..self.r1cs.num_inputs { + cs.enforce( + || format!("pub input enforce {}", i), + |lc| lc + z[i - 1 - pub_output_count].get_variable(), + |lc| lc + CS::one(), + |lc| lc + vars[i - 1].get_variable(), + ); + } + + Ok(z_out) + } +} diff --git a/frontend/src/circom/r1cs.rs b/frontend/src/circom/r1cs.rs new file mode 100644 index 0000000..031dbb8 --- /dev/null +++ b/frontend/src/circom/r1cs.rs @@ -0,0 +1,277 @@ +//! # R1CS Module +//! +//! 
The `r1cs` module provides functionalities for handling Rank-1 Constraint System (R1CS) +//! representations of Circom circuits. It includes utilities for reading, parsing, and +//! managing R1CS data, which is essential for circuit synthesis and proof generation. +//! +//! ## Structs +//! +//! - `R1CS`: Represents the R1CS structure, including the number of inputs, outputs, variables, and +//! constraints. +//! - `Header`: Represents the header of an R1CS file, including field size and various counts. +//! +//! ## Type Definitions +//! +//! - `Constraint`: A type alias for a tuple representing a constraint in the R1CS, consisting of +//! vectors of pairs of indices and field elements. +//! +//! ## Functions +//! +//! - `read_field`: Reads a field element from a reader. +//! - `read_header`: Reads the header of an R1CS file from a reader. +//! - `read_constraint_vec`: Reads a vector of constraints from a reader. +//! - `read_constraints`: Reads all constraints from a reader based on the R1CS header. +use fs::OpenOptions; + +use super::*; +// This was borrowed from `nova-scotia`. Big thank you for this middleware! +// some codes borrowed from https://github.com/poma/zkutil/blob/master/src/r1cs_reader.rs + +/// Constraint type +pub type Constraint = (Vec<(usize, F)>, Vec<(usize, F)>, Vec<(usize, F)>); + +/// R1CS type +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] +pub struct R1CS { + /// Number of private inputs + pub num_private_inputs: usize, + /// Number of public inputs + pub num_public_inputs: usize, + /// Number of public outputs + pub num_public_outputs: usize, + /// Number of inputs + pub num_inputs: usize, + /// Number of auxiliary variables + pub num_aux: usize, + /// Number of variables + pub num_variables: usize, + /// Constraints + pub constraints: Vec, +} + +// NOTE (Colin): This is added so we can cache only the active circuits we are using. 
+#[allow(clippy::derivable_impls)] +impl Default for R1CS { + fn default() -> Self { + Self { + num_private_inputs: 0, + num_public_inputs: 0, + num_public_outputs: 0, + num_inputs: 0, + num_aux: 0, + num_variables: 0, + constraints: vec![], + } + } +} + +/// R1CSFile's header +#[derive(Debug, Default)] +pub struct Header { + /// Field size + pub field_size: u32, + /// Prime size + pub prime_size: Vec, + /// Number of wires + pub n_wires: u32, + /// Number of public outputs + pub n_pub_out: u32, + /// Number of public inputs + pub n_pub_in: u32, + /// Number of private inputs + pub n_prv_in: u32, + /// Number of labels + pub n_labels: u64, + /// Number of constraints + pub n_constraints: u32, +} + +impl TryFrom<&R1CSType> for R1CS { + type Error = ProofError; + + fn try_from(value: &R1CSType) -> Result { + match value { + R1CSType::File(path) => R1CS::try_from(path), + R1CSType::Raw(bytes) => R1CS::try_from(&bytes[..]), + } + } +} + +impl TryFrom<&[u8]> for R1CS { + type Error = ProofError; + + fn try_from(value: &[u8]) -> Result { + let cursor = BufReader::new(Cursor::new(value)); + from_reader(cursor) + } +} + +impl TryFrom<&PathBuf> for R1CS { + type Error = ProofError; + + fn try_from(filename: &PathBuf) -> Result { + let reader = BufReader::new(OpenOptions::new().read(true).open(filename)?); + from_reader(reader) + } +} + +/// Reads an R1CS from a reader +fn from_reader(mut reader: R) -> Result { + let mut magic = [0u8; 4]; + reader.read_exact(&mut magic)?; + assert_eq!(magic, [0x72, 0x31, 0x63, 0x73]); + + let version = reader.read_u32::()?; + assert_eq!(version, 1); + + let num_sections = reader.read_u32::()?; + + // section type -> file offset + let mut section_offsets = HashMap::::new(); + let mut section_sizes = HashMap::::new(); + + // get file offset of each section + for _ in 0..num_sections { + let section_type = reader.read_u32::()?; + let section_size = reader.read_u64::()?; + let offset = reader.stream_position()?; + 
section_offsets.insert(section_type, offset); + section_sizes.insert(section_type, section_size); + reader.seek(SeekFrom::Current(section_size as i64))?; + } + + let header_type = 1; + let constraint_type = 2; + let wire2label_type = 3; + + reader + .seek(SeekFrom::Start(*section_offsets.get(&header_type).ok_or(ProofError::MissingSection)?))?; + let header_size = section_sizes.get(&header_type).ok_or(ProofError::MissingSection)?; + let header = read_header(&mut reader, *header_size)?; + assert_eq!(header.field_size, 32); + + reader.seek(SeekFrom::Start( + *section_offsets.get(&constraint_type).ok_or(ProofError::MissingSection)?, + ))?; + let constraints = read_constraints(&mut reader, &header)?; + + reader.seek(SeekFrom::Start( + *section_offsets.get(&wire2label_type).ok_or(ProofError::MissingSection)?, + ))?; + + let num_public_inputs = header.n_pub_in as usize; + let num_private_inputs = header.n_prv_in as usize; + let num_public_outputs = header.n_pub_out as usize; + let num_variables = header.n_wires as usize; + let num_inputs = (1 + header.n_pub_in + header.n_pub_out) as usize; // TODO: This seems... odd... + let num_aux = num_variables - num_inputs; + Ok(R1CS { + num_private_inputs, + num_public_inputs, + num_public_outputs, + num_inputs, + num_aux, + num_variables, + constraints, + }) +} + +/// Reads a field from a reader +fn read_field(mut reader: R) -> Result, ProofError> { + let mut repr = F::::ZERO.to_repr(); + for digit in repr.as_mut().iter_mut() { + *digit = reader.read_u8()?; + } + let fr = F::::from_repr(repr); + if fr.is_some().into() { + Ok(fr.unwrap()) + } else { + Err(ProofError::Other("Failed to convert representation to field element".to_string())) + } +} + +/// Reads a header from a reader +/// +/// # Arguments +/// +/// * `reader`: The reader to read the header from. +/// * `size`: The size of the header. +/// +/// # Returns +/// +/// The header. 
+fn read_header(mut reader: R, size: u64) -> Result { + let field_size = reader.read_u32::()?; + let mut prime_size = vec![0u8; field_size as usize]; + reader.read_exact(&mut prime_size)?; + assert_eq!(size, 32 + field_size as u64); + + Ok(Header { + field_size, + prime_size, + n_wires: reader.read_u32::()?, + n_pub_out: reader.read_u32::()?, + n_pub_in: reader.read_u32::()?, + n_prv_in: reader.read_u32::()?, + n_labels: reader.read_u64::()?, + n_constraints: reader.read_u32::()?, + }) +} + +/// Reads a constraint vector from a reader +/// +/// # Arguments +/// +/// * `reader`: The reader to read the constraint vector from. +/// +/// # Returns +/// +/// The constraint vector. +fn read_constraint_vec(mut reader: R) -> Result)>, ProofError> { + let n_vec = reader.read_u32::()? as usize; + let mut vec = Vec::with_capacity(n_vec); + for _ in 0..n_vec { + vec.push((reader.read_u32::()? as usize, read_field::<&mut R>(&mut reader)?)); + } + Ok(vec) +} + +/// Reads constraints from a reader +/// +/// # Arguments +/// +/// * `reader`: The reader to read the constraints from. +/// * `header`: The header of the R1CS. +/// +/// # Returns +/// +/// The constraints. +fn read_constraints( + mut reader: R, + header: &Header, +) -> Result, ProofError> { + // todo check section size + let mut vec = Vec::with_capacity(header.n_constraints as usize); + for _ in 0..header.n_constraints { + let a = read_constraint_vec(&mut reader)?; + let b = read_constraint_vec(&mut reader)?; + let c = read_constraint_vec(&mut reader)?; + vec.push((a, b, c)); + } + Ok(vec) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + #[tracing_test::traced_test] + fn test_r1cs_from_bin() { + let r1cs = R1CS::try_from(crate::tests::inputs::ADD_EXTERNAL_R1CS).unwrap(); + assert_eq!(r1cs.num_inputs, 5); // TODO: What is the 5th input?? 
+ assert_eq!(r1cs.num_private_inputs, 2); + assert_eq!(r1cs.num_public_inputs, 2); + assert_eq!(r1cs.num_public_outputs, 2); + } +} diff --git a/frontend/src/circom/wasm_witness.rs b/frontend/src/circom/wasm_witness.rs new file mode 100644 index 0000000..34042d4 --- /dev/null +++ b/frontend/src/circom/wasm_witness.rs @@ -0,0 +1,36 @@ +use tracing::{debug, info}; +use wasm_bindgen::prelude::*; + +#[wasm_bindgen(getter_with_clone)] +#[derive(Debug)] +pub struct WitnessOutput { + pub data: js_sys::Uint8Array, +} + +#[wasm_bindgen] +impl WitnessOutput { + #[wasm_bindgen(constructor)] + pub fn new(wit: js_sys::Uint8Array) -> WitnessOutput { Self { data: wit } } +} + +#[wasm_bindgen] +extern "C" { + #[wasm_bindgen(js_namespace = witness, js_name = createWitness)] + async fn create_witness_js(input: &JsValue, opcode: u64) -> JsValue; +} + +#[wasm_bindgen] +pub async fn create_witness(input: JsValue, opcode: u64) -> Result { + // Convert the Rust WitnessInput to a JsValue + let js_witnesses_output = create_witness_js(&input, opcode).await; + + // Call JavaScript function and await the Promise + info!("result: {:?}", js_witnesses_output); + let js_obj = js_sys::Object::from(js_witnesses_output); + let data_value = js_sys::Reflect::get(&js_obj, &JsValue::from_str("data"))?; + let array = js_sys::Array::from(&data_value); + let data = js_sys::Uint8Array::new(&array); + + debug!("data: {:?}", data); + Ok(WitnessOutput { data }) +} diff --git a/frontend/src/circom/witness.rs b/frontend/src/circom/witness.rs new file mode 100644 index 0000000..8d68452 --- /dev/null +++ b/frontend/src/circom/witness.rs @@ -0,0 +1,261 @@ +//! # Witness Module +//! +//! The `witness` module provides functionalities for generating and loading witnesses from various +//! sources. It includes functions for generating witnesses from browser types, WASM files, and +//! witnesscalc files. +//! +//! ## Functions +//! +//! - `generate_witness_from_browser_type`: Generates a witness from a browser type. 
+//! - `generate_witness_from_generator_type`: Generates a witness from a generator type. +//! - `generate_witness_from_graph`: Generates a witness from a graph. +//! - `generate_witness_from_witnesscalc_file`: Generates a witness from a witnesscalc file. +//! - `load_witness_from_bin_reader`: Loads a witness from a bin reader. +//! - `read_field`: Reads a field from a reader. + +use fs::OpenOptions; + +use super::*; + +/// Generates a witness from a browser type +/// +/// # Arguments +/// +/// * `circom_input` - A `CircomInput` struct. +/// * `opcode` - A `u64` representing the opcode. +/// +/// # Returns +/// +/// A `Result` containing a vector of field elements. +#[allow(unused_variables)] +pub async fn generate_witness_from_browser_type( + circom_input: CircomInput, + opcode: u64, +) -> Result>, ProofError> { + #[cfg(target_arch = "wasm32")] + { + let js_witness_input = serde_wasm_bindgen::to_value(&circom_input).map_err(ProofError::from)?; + + let js_witness = + crate::circom::wasm_witness::create_witness(js_witness_input, opcode).await.unwrap(); + + let js_computed_witnesses: Vec = js_witness.data.to_vec(); + let witnesses = + load_witness_from_bin_reader(BufReader::new(Cursor::new(js_computed_witnesses)))?; + + return Ok(witnesses); + } + #[cfg(not(target_arch = "wasm32"))] + Err(ProofError::Other(String::from( + "Browser type witness generation cannot be generated in process", + ))) +} + +/// Generates a witness from a generator type +/// +/// # Arguments +/// +/// * `input_json` - A string slice that holds the input JSON. +/// * `witness_generator_type` - A `WitnessGeneratorType` enum. +/// +/// # Returns +/// +/// A `Result` containing a vector of field elements. 
+pub fn generate_witness_from_generator_type( + input_json: &str, + witness_generator_type: &WitnessGeneratorType, +) -> Result>, ProofError> { + match witness_generator_type { + WitnessGeneratorType::Browser => { + panic!("browser type witness generation cannot be generated in process") + }, + WitnessGeneratorType::Wasm { path, wtns_path } => + generate_witness_from_wasm_file(input_json, &PathBuf::from(path), &PathBuf::from(wtns_path)), + WitnessGeneratorType::Path(path) => generate_witness_from_witnesscalc_file(input_json, path), + WitnessGeneratorType::Raw(graph_data) => generate_witness_from_graph(input_json, graph_data), + } +} + +/// Generates a witness from a graph +/// +/// # Arguments +/// +/// * `input_json` - A string slice that holds the input JSON. +/// * `graph_data` - A reference to the graph data. +/// +/// # Returns +/// +/// A `Result` containing a vector of field elements. +pub fn generate_witness_from_graph( + input_json: &str, + graph_data: &[u8], +) -> Result::Scalar>, ProofError> { + #[cfg(not(target_arch = "wasm32"))] + { + let witness = circom_witnesscalc::calc_witness(input_json, graph_data)?; + let result = witness + .iter() + .map(|elem| { + as PrimeField>::from_str_vartime(elem.to_string().as_str()) + .ok_or_else(|| ProofError::Other("Failed to parse field element".to_string())) + }) + .collect::>, ProofError>>()?; + Ok(result) + } + #[cfg(target_arch = "wasm32")] + todo!("circom_witnesscalc not supported in wasm"); +} + +/// Generates a witness from a witnesscalc file +/// +/// # Arguments +/// +/// * `witness_input_json` - A string slice that holds the witness input JSON. +/// * `graph_path` - A reference to the path of the witnesscalc file. +/// +/// # Returns +/// +/// A `Result` containing a vector of field elements. 
+pub fn generate_witness_from_witnesscalc_file( + witness_input_json: &str, + graph_path: &PathBuf, +) -> Result>, ProofError> { + #[cfg(not(target_arch = "wasm32"))] + { + let mut file = std::fs::File::open(graph_path)?; + let mut graph_data = Vec::new(); + file.read_to_end(&mut graph_data)?; + + let witness = circom_witnesscalc::calc_witness(witness_input_json, &graph_data)?; + let result = witness + .iter() + .map(|elem| { + as PrimeField>::from_str_vartime(elem.to_string().as_str()) + .ok_or_else(|| ProofError::Other("Failed to parse field element".to_string())) + }) + .collect::>, ProofError>>()?; + Ok(result) + } + #[cfg(target_arch = "wasm32")] + todo!("circom_witnesscalc not supported in wasm"); +} + +#[warn(missing_docs, clippy::missing_docs_in_private_items)] +/// Generates a witness from a WASM file. +/// +/// # Arguments +/// +/// * `input_json` - A string slice that holds the input JSON. +/// * `wasm_path` - A reference to the path of the WASM file. +/// * `wtns_path` - A reference to the path of the witness file. +/// +/// # Returns +/// +/// A vector of field elements. +pub fn generate_witness_from_wasm_file( + input_json: &str, + wasm_path: &PathBuf, + wtns_path: &PathBuf, +) -> Result>, ProofError> { + let root = current_dir()?; + let witness_generator_input = root.join("circom_input.json"); + fs::write(&witness_generator_input, input_json)?; + + let witness_js = wasm_path + .parent() + .ok_or_else(|| ProofError::Other("Invalid wasm path".to_string()))? + .join("generate_witness.js"); + + let output = Command::new("node") + .arg(witness_js) + .arg(wasm_path) + .arg(&witness_generator_input) + .arg(wtns_path) + .output() + .expect("failed to execute process"); + if !output.stdout.is_empty() || !output.stderr.is_empty() { + debug!( + "{}", + std::str::from_utf8(&output.stdout).map_err(|e| ProofError::Other(e.to_string()))? + ); + error!( + "{}", + std::str::from_utf8(&output.stderr).map_err(|e| ProofError::Other(e.to_string()))? 
+ ); + } + fs::remove_file(witness_generator_input)?; + let reader = OpenOptions::new().read(true).open(wtns_path).expect("unable to open."); + let witness = load_witness_from_bin_reader(BufReader::new(reader)); + fs::remove_file(wtns_path)?; + witness +} + +/// Loads a witness from a bin reader +/// +/// # Arguments +/// +/// * `reader` - A reference to the reader. +/// +/// # Returns +/// +/// A `Result` containing a vector of field elements. +pub fn load_witness_from_bin_reader(mut reader: R) -> Result>, ProofError> { + let mut wtns_header = [0u8; 4]; + reader.read_exact(&mut wtns_header)?; + assert_eq!(wtns_header, [119, 116, 110, 115]); + + let version = reader.read_u32::()?; + assert!(version <= 2); + + let num_sections = reader.read_u32::()?; + assert_eq!(num_sections, 2); + + // read the first section + let sec_type = reader.read_u32::()?; + assert_eq!(sec_type, 1); + + let sec_size = reader.read_u64::()?; + assert_eq!(sec_size, 4 + 32 + 4); + + let field_size = reader.read_u32::()?; + assert_eq!(field_size, 32); + + let mut prime = vec![0u8; field_size as usize]; + reader.read_exact(&mut prime)?; + + let witness_len = reader.read_u32::()?; + + let sec_type = reader.read_u32::()?; + assert_eq!(sec_type, 2); + + let sec_size = reader.read_u64::()?; + assert_eq!(sec_size, (witness_len * field_size) as u64); + + let mut result = Vec::with_capacity(witness_len as usize); + for _ in 0..witness_len { + result.push(read_field(&mut reader)?); + } + Ok(result) +} + +/// Reads a field from a reader +/// +/// # Arguments +/// +/// * `reader` - A reference to the reader. +/// +/// # Returns +/// +/// A `Result` containing a field element. 
+pub(crate) fn read_field(mut reader: R) -> Result, ProofError> { + let mut repr = F::::ZERO.to_repr(); + for digit in repr.as_mut().iter_mut() { + *digit = reader.read_u8()?; + } + let fr = F::::from_repr(repr); + if fr.is_some().into() { + Ok(fr.unwrap()) + } else { + Err(ProofError::Other("Failed to convert representation to field element".to_string())) + } +} diff --git a/frontend/src/errors.rs b/frontend/src/errors.rs new file mode 100644 index 0000000..1f69d24 --- /dev/null +++ b/frontend/src/errors.rs @@ -0,0 +1,106 @@ +//! Error type for the `proofs` crate. +//! +//! This enum represents the various error conditions that can occur within the +//! `proofs` crate. It provides a unified way to handle and propagate errors +//! throughout the crate. +//! +//! The possible error variants include: +//! +//! - `Synthesis`: Represents an error that occurred during the synthesis process. +//! - `Io`: Represents an I/O error. +//! - `Serde`: Represents a serialization or deserialization error. +//! - `Other`: Represents any other error with a custom error message. +//! - `VerifyFailed`: Indicates that the proof verification failed. +//! - `Parse`: Represents an error that occurred while parsing a big integer. +//! - `WitnessCalc`: Represents an error that occurred during witness calculation (only available +//! when not targeting `wasm32`). +//! - `MissingSection`: Indicates that a required section is missing. +//! - `Bincode`: Represents a Bincode serialization or deserialization error. 
+use thiserror::Error; + +#[cfg(not(target_arch = "wasm32"))] +#[derive(Debug, Error)] +/// Wrapper for circom_witnesscalc::Error since it doesn't implement display +pub enum WitnessCalcError { + /// The error is a circom_witnesscalc::Error + Circom(circom_witnesscalc::Error), +} + +#[cfg(not(target_arch = "wasm32"))] +impl std::fmt::Display for WitnessCalcError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{:?}", self) } +} + +#[cfg(not(target_arch = "wasm32"))] +impl From for ProofError { + fn from(err: circom_witnesscalc::Error) -> ProofError { + ProofError::WitnessCalc(WitnessCalcError::Circom(err)) + } +} + +impl From> for ProofError { + fn from(err: Box) -> ProofError { ProofError::Bincode(*err) } +} + +/// Represents the various error conditions that can occur within the `proofs` +/// crate. +#[derive(Debug, Error)] +pub enum ProofError { + /// The error is a bellpepper_core::SynthesisError + #[error(transparent)] + Synthesis(#[from] bellpepper_core::SynthesisError), + + /// The error is a std::io::Error + #[error(transparent)] + Io(#[from] std::io::Error), + + /// The error is a serde_json::Error + #[error(transparent)] + Serde(#[from] serde_json::Error), + + /// The error is a custom error with a message + #[error("Other error: {0}")] + Other(String), + + /// The error is a failed proof verification + #[error("Failed to verify proof: {0}")] + VerifyFailed(String), + + /// The error is a num_bigint::ParseBigIntError + #[error(transparent)] + Parse(#[from] num_bigint::ParseBigIntError), + + /// The error is a WitnessCalcError + #[cfg(not(target_arch = "wasm32"))] + #[error(transparent)] + WitnessCalc(#[from] WitnessCalcError), + + /// The error is a missing header section + #[error("Missing header section")] + MissingSection, + + /// The error is a bincode::ErrorKind + #[error(transparent)] + Bincode(#[from] bincode::ErrorKind), + + /// The error is a client_side_prover::supernova::error::SuperNovaError + 
#[error(transparent)] + SuperNova(#[from] client_side_prover::supernova::error::SuperNovaError), + + /// The error is a json key error + #[error("json key not found: {0}")] + JsonKeyError(String), + + /// The error is an invalid circuit size + #[error("Invalid circuit size")] + InvalidCircuitSize, + + /// The error is a serde_wasm_bindgen::Error + #[cfg(target_arch = "wasm32")] + #[error(transparent)] + SerdeWasmBindgen(#[from] serde_wasm_bindgen::Error), + + /// The error is an invalid manifest + #[error("Invalid manifest: {0}")] + InvalidManifest(String), +} diff --git a/frontend/src/lib.rs b/frontend/src/lib.rs new file mode 100644 index 0000000..09f93ad --- /dev/null +++ b/frontend/src/lib.rs @@ -0,0 +1,91 @@ +#![warn(missing_docs, clippy::missing_docs_in_private_items)] + +//! # Proofs Crate +//! +//! The `proofs` crate provides a comprehensive framework for creating and +//! verifying zero-knowledge proofs. It includes various modules and utilities +//! to facilitate the construction of proofs, circuits, and the necessary +//! cryptographic primitives. +//! +//! ## Modules +//! +//! - `circom`: Contains utilities for working with Circom circuits. +//! - `circuits`: Provides the implementation of various circuits used in the proof system. +//! - `errors`: Defines error types used throughout the crate. +//! - `program`: Contains the core logic for setting up and running the proof system. +//! - `proof`: Provides the implementation of the proof generation and verification. +//! - `setup`: Contains utilities for setting up the proof system. +//! - `tests`: Contains tests for the proof system. +//! +//! ## Types +//! +//! - `E1`: Represents the first elliptic curve engine used in the proof system. +//! - `E2`: Represents the second elliptic curve engine used in the proof system. +//! - `G1`: Represents the group associated with the first elliptic curve engine. +//! - `G2`: Represents the group associated with the second elliptic curve engine. +//! 
- `EE1`: Represents the evaluation engine for the first elliptic curve. +//! - `EE2`: Represents the evaluation engine for the second elliptic curve. +//! - `S1`: Represents the SNARK for the first elliptic curve. +//! - `S2`: Represents the SNARK for the second elliptic curve. +//! - `F`: Represents the scalar field associated with a given group. +//! - `AuxParams`: Represents the auxiliary parameters needed to create `PublicParams`. +//! - `ProverKey`: Represents the prover key needed to create a `CompressedSNARK`. +//! - `VerifierKey`: Represents the verifier key needed to create a `CompressedSNARK`. + +use std::{collections::HashMap, path::PathBuf, str::FromStr}; + +use circom::CircomCircuit; +use client_side_prover::{ + provider::GrumpkinEngine, + spartan::batched::BatchedRelaxedR1CSSNARK, + supernova::{snark::CompressedSNARK, PublicParams, TrivialCircuit}, + traits::{Engine, Group}, +}; +use ff::Field; +use serde::{Deserialize, Serialize}; +use serde_json::Value; +use tracing::{debug, error, info}; + +use crate::{ + errors::ProofError, + program::data::{InstanceParams, Online, R1CSType, UninitializedSetup, WitnessGeneratorType}, +}; + +pub mod circom; +pub mod errors; +pub mod noir; +pub mod program; +pub mod proof; +pub mod setup; +#[cfg(test)] pub(crate) mod tests; + +/// Represents the first elliptic curve engine used in the proof system. +pub type E1 = client_side_prover::provider::Bn256EngineKZG; +/// Represents the second elliptic curve engine used in the proof system. +pub type E2 = GrumpkinEngine; +/// Represents the group associated with the first elliptic curve engine. +pub type G1 = ::GE; +/// Represents the group associated with the second elliptic curve engine. +pub type G2 = ::GE; +/// Represents the evaluation engine for the first elliptic curve. +pub type EE1 = + client_side_prover::provider::hyperkzg::EvaluationEngine; +/// Represents the evaluation engine for the second elliptic curve. 
+pub type EE2 = client_side_prover::provider::ipa_pc::EvaluationEngine; +/// Represents the SNARK for the first elliptic curve. +pub type S1 = BatchedRelaxedR1CSSNARK; +/// Represents the SNARK for the second elliptic curve. +pub type S2 = BatchedRelaxedR1CSSNARK; +/// Represents the scalar field associated with a given group. +pub type F = ::Scalar; + +/// Represents the params needed to create `PublicParams` alongside the +/// circuits' R1CSs. Specifically typed to the `proofs` crate choices of curves +/// and engines. +pub type AuxParams = client_side_prover::supernova::AuxParams; +/// The `ProverKey` needed to create a `CompressedSNARK` using the `proofs` +/// crate choices of curves and engines. +pub type ProverKey = client_side_prover::supernova::snark::ProverKey; +/// The `VerifierKey` needed to create a `CompressedSNARK` using the `proofs` +/// crate choices of curves and engines. +pub type VerifierKey = client_side_prover::supernova::snark::VerifierKey; diff --git a/frontend/src/noir/mod.rs b/frontend/src/noir/mod.rs new file mode 100644 index 0000000..9eefa9d --- /dev/null +++ b/frontend/src/noir/mod.rs @@ -0,0 +1,212 @@ +use acvm::{ + acir::{ + acir_field::GenericFieldElement, + circuit::{brillig::BrilligBytecode, Circuit, Opcode, Program}, + native_types::{Witness, WitnessMap}, + }, + blackbox_solver::StubbedBlackBoxSolver, + pwg::ACVM, + AcirField, +}; +use ark_bn254::Fr; +use bellpepper_core::{ + num::AllocatedNum, ConstraintSystem, Index, LinearCombination, SynthesisError, Variable, +}; +use ff::PrimeField; +use tracing::trace; + +use super::*; + +#[cfg(test)] mod tests; + +// TODO: If we deserialize more here and get metadata, we could more easily look at witnesses, etc. +// Especially if we want to output a constraint to the PC. Using the abi would be handy for +// assigning inputs. 
+#[derive(Clone, Serialize, Deserialize, Debug)] +pub struct NoirProgram { + #[serde( + serialize_with = "Program::serialize_program_base64", + deserialize_with = "Program::deserialize_program_base64" + )] + pub bytecode: Program>, + pub witness: Option>>, + // TODO: To make this more efficient, we could just store an option of the `&mut CS` inside of + // here so we don't actually need to rebuild it always, though the enforcement for the public + // inputs is tougher +} + +impl NoirProgram { + pub fn new(bin: &[u8]) -> Self { serde_json::from_slice(bin).unwrap() } + + pub fn arity(&self) -> usize { self.circuit().public_parameters.0.len() } + + pub fn circuit(&self) -> &Circuit> { &self.bytecode.functions[0] } + + pub fn unconstrained_functions(&self) -> &Vec>> { + &self.bytecode.unconstrained_functions + } + + pub fn set_private_inputs(&mut self, inputs: Vec>) { self.witness = Some(inputs); } + + // TODO: we now need to shift this to use the `z` values as the sole public inputs, the struct + // should only hold witness + // TODO: We should check if the constraints for z are actually done properly + // tell clippy to shut up + #[allow(clippy::too_many_lines)] + pub fn vanilla_synthesize>>( + &self, + cs: &mut CS, + z: &[AllocatedNum>], + ) -> Result>>, SynthesisError> { + dbg!(z); + let mut acvm = if self.witness.is_some() { + Some(ACVM::new( + &StubbedBlackBoxSolver(false), + &self.circuit().opcodes, + WitnessMap::new(), + self.unconstrained_functions(), + &[], + )) + } else { + None + }; + + // For folding in particular: + assert_eq!(self.circuit().return_values.0.len(), self.circuit().public_parameters.0.len()); + + // TODO: we could probably avoid this but i'm lazy + // Create a map to track allocated variables for the cs + let mut allocated_vars: HashMap>> = HashMap::new(); + + // TODO: Hacking here to get the first index of public, assuming the come in a block. 
This is + // really dirty too + let num_private_inputs = dbg!(self.circuit().private_parameters.len()); + + // Set up public inputs + self.circuit().public_parameters.0.iter().for_each(|witness| { + println!("public instance: {witness:?}"); + let var = z[witness.as_usize() - num_private_inputs].clone(); + if self.witness.is_some() { + trace!("overwriting public {witness:?} with {var:?}"); + // TODO: This is a bit hacky and assumes private inputs come first. I don't like that + acvm + .as_mut() + .unwrap() + .overwrite_witness(*witness, convert_to_acir_field(var.get_value().unwrap())); + } + // TODO: Fix unwrap + // Alloc 1 for now and update later as needed + // let var = AllocatedNum::alloc(&mut *cs, || Ok(F::::ONE)).unwrap(); + // println!("AllocatedNum pub input: {var:?}"); + + allocated_vars.insert(*witness, var); + }); + + // Set up private inputs + self.circuit().private_parameters.iter().for_each(|witness| { + let f = self.witness.as_ref().map(|inputs| { + let f = convert_to_acir_field(inputs[witness.as_usize()]); + acvm.as_mut().unwrap().overwrite_witness(*witness, f); + f + }); + let var = AllocatedNum::alloc(&mut *cs, || Ok(convert_to_halo2_field(f.unwrap_or_default()))) + .unwrap(); + allocated_vars.insert(*witness, var); + }); + + let acir_witness_map = if self.witness.is_some() { + let _status = acvm.as_mut().unwrap().solve(); + Some(acvm.unwrap().finalize()) + } else { + None + }; + + let get_witness_value = |witness: &Witness| -> F { + acir_witness_map.as_ref().map_or(F::::ONE, |map| { + map.get(witness).map_or(F::::ONE, |value| convert_to_halo2_field(*value)) + }) + }; + + // Helper to get or create a variable for a witness + let get_var = |witness: &Witness, + allocated_vars: &mut HashMap>>, + cs: &mut CS, + gate_idx: usize| + -> Result { + if let Some(var) = allocated_vars.get(witness) { + Ok(var.get_variable()) + } else { + let var = AllocatedNum::alloc(cs.namespace(|| format!("aux_{gate_idx}")), || { + Ok(get_witness_value(witness)) + })?; + 
allocated_vars.insert(*witness, var.clone()); + Ok(var.get_variable()) + } + }; + + // Process gates + for (gate_idx, opcode) in self.circuit().opcodes.iter().enumerate() { + if let Opcode::AssertZero(gate) = opcode { + // Initialize empty linear combinations for each part of our R1CS constraint + let mut left_terms = LinearCombination::zero(); + let mut right_terms = LinearCombination::zero(); + let mut final_terms = LinearCombination::zero(); + + // Process multiplication terms (these form the A and B matrices in R1CS) + for mul_term in &gate.mul_terms { + let coeff = convert_to_halo2_field(mul_term.0); + let left_var = get_var(&mul_term.1, &mut allocated_vars, cs, gate_idx)?; + let right_var = get_var(&mul_term.2, &mut allocated_vars, cs, gate_idx)?; + + // Build Az (left terms) with coefficient + left_terms = left_terms + (coeff, left_var); + // Build Bz (right terms) with coefficient 1 + right_terms = right_terms + (F::::one(), right_var); + } + + // Process addition terms (these contribute to the C matrix in R1CS) + for add_term in &gate.linear_combinations { + let coeff = convert_to_halo2_field(add_term.0); + let var = get_var(&add_term.1, &mut allocated_vars, cs, gate_idx)?; + final_terms = final_terms + (coeff, var); + } + + // Handle constant term if present + if !gate.q_c.is_zero() { + let const_coeff = convert_to_halo2_field(gate.q_c); + // Negate the constant term since we're moving it to the other side of the equation + final_terms = final_terms - (const_coeff, Variable::new_unchecked(Index::Input(0))); + } + + // Enforce the R1CS constraint: Az ∘ Bz = Cz + cs.enforce( + || format!("gate_{gate_idx}"), + |_| left_terms.clone(), + |_| right_terms.clone(), + |_| final_terms, + ); + } + } + + let mut z_out = vec![]; + for ret in &self.circuit().return_values.0 { + z_out.push(allocated_vars.get(ret).unwrap().clone()); + } + + Ok(dbg!(z_out)) + } +} + +fn convert_to_halo2_field(f: GenericFieldElement) -> F { + let bytes = f.to_be_bytes(); + let mut arr = 
[0u8; 32]; + arr.copy_from_slice(&bytes[..32]); + arr.reverse(); + F::::from_repr(arr).unwrap() +} + +fn convert_to_acir_field(f: F) -> GenericFieldElement { + let mut bytes = f.to_bytes(); + bytes.reverse(); + GenericFieldElement::from_be_bytes_reduce(&bytes) +} diff --git a/frontend/src/noir/tests.rs b/frontend/src/noir/tests.rs new file mode 100644 index 0000000..b6d8600 --- /dev/null +++ b/frontend/src/noir/tests.rs @@ -0,0 +1,246 @@ +// TODO: This module is so I can see if we can actually fold these noir circuits properly. I'm going +// to write code here to make it work that doesn't effect the circom build at all. I found bringing +// those together in some generic way is quite painful and truth be told would likely be easier to +// just completely rebuild. + +use std::path::Path; + +use client_side_prover::{ + supernova::{NonUniformCircuit, RecursiveSNARK, StepCircuit}, + traits::snark::default_ck_hint, +}; +use tracing::trace; +use tracing_test::traced_test; + +use super::*; +use crate::program::utils; + +const ADD_EXTERNAL: &[u8] = include_bytes!("../../examples/noir_circuit_data/add_external.json"); +const SQUARE_ZEROTH: &[u8] = include_bytes!("../../examples/noir_circuit_data/square_zeroth.json"); +const SWAP_MEMORY: &[u8] = include_bytes!("../../examples/noir_circuit_data/swap_memory.json"); + +#[derive(Debug, Clone)] +pub struct NoirMemory { + // TODO: Using a BTreeSet here would perhaps be preferable, or just some kind of set that checks + // over circuit indices + pub circuits: Vec, + // TODO: I really think the ROM can just be removed and we can clean this up, but leaving it for + // now is a bit easier + pub rom: Vec, + pub public_input: Vec>, +} + +#[derive(Clone, Debug)] +pub struct NoirRomCircuit { + pub circuit: NoirProgram, + // TODO: It would be nice to have the circuit index automatically be used in the memory, but + // perhaps we don't even need memory + pub circuit_index: usize, + // TODO: Not having ROM size here would be nice, but mayabe 
we don't even need ROM + pub rom_size: usize, +} + +impl NonUniformCircuit for NoirMemory { + type C1 = NoirRomCircuit; + type C2 = TrivialCircuit>; + + fn num_circuits(&self) -> usize { self.circuits.len() } + + fn primary_circuit(&self, circuit_index: usize) -> Self::C1 { + self.circuits[circuit_index].clone() + } + + fn secondary_circuit(&self) -> Self::C2 { TrivialCircuit::default() } + + // Use the initial input to set this + fn initial_circuit_index(&self) -> usize { self.rom[0] as usize } +} + +impl StepCircuit> for NoirRomCircuit { + fn arity(&self) -> usize { self.circuit.arity() + 1 + self.rom_size } + + fn circuit_index(&self) -> usize { self.circuit_index } + + fn synthesize>>( + &self, + cs: &mut CS, + pc: Option<&AllocatedNum>>, + z: &[AllocatedNum>], + ) -> Result<(Option>>, Vec>>), SynthesisError> { + let rom_index = &z[self.circuit.arity()]; // jump to where we pushed pc data into CS + let allocated_rom = &z[self.circuit.arity() + 1..]; // jump to where we pushed rom data into C + let (rom_index_next, pc_next) = utils::next_rom_index_and_pc( + &mut cs.namespace(|| "next and rom_index and pc"), + rom_index, + allocated_rom, + pc.ok_or(SynthesisError::AssignmentMissing)?, + )?; + let mut circuit_constraints = self.circuit.vanilla_synthesize(cs, z)?; + circuit_constraints.push(rom_index_next); + circuit_constraints.extend(z[self.circuit.arity() + 1..].iter().cloned()); + Ok((Some(pc_next), circuit_constraints)) + } +} + +pub fn run(memory: &NoirMemory) -> Result, ProofError> { + info!("Starting SuperNova program..."); + + info!("Setting up PublicParams..."); + // TODO: This is stupid to do, but I need to get around the original setting of the witness. 
+ // Having separate setup is the way (we already know this) + let mut memory_clone = memory.clone(); + memory_clone.circuits.iter_mut().for_each(|circ| circ.circuit.witness = None); + let public_params = PublicParams::setup(&memory_clone, &*default_ck_hint(), &*default_ck_hint()); + + let z0_primary = &memory.public_input; + let z0_secondary = &[F::::ZERO]; + + let mut recursive_snark_option = None; + + #[cfg(feature = "timing")] + let time = std::time::Instant::now(); + for (idx, &op_code) in memory.rom.iter().enumerate() { + info!("Step {} of ROM", idx); + debug!("Opcode = {:?}", op_code); + + let circuit_primary = memory.primary_circuit(op_code as usize); + let circuit_secondary = memory.secondary_circuit(); + + let mut recursive_snark = recursive_snark_option.unwrap_or_else(|| { + RecursiveSNARK::new( + &public_params, + memory, + &circuit_primary, + &circuit_secondary, + z0_primary, + z0_secondary, + ) + })?; + + info!("Proving single step..."); + recursive_snark.prove_step(&public_params, &circuit_primary, &circuit_secondary)?; + info!("Done proving single step..."); + + // TODO: For some reason this is failing + // info!("Verifying single step..."); + // recursive_snark.verify(&public_params, recursive_snark.z0_primary(), z0_secondary)?; + // info!("Single step verification done"); + + recursive_snark_option = Some(Ok(recursive_snark)); + } + // Note, this unwrap cannot fail + let recursive_snark = recursive_snark_option.unwrap(); + trace!("Recursive loop of `program::run()` elapsed: {:?}", time.elapsed()); + + Ok(recursive_snark?) 
+} + +// `fold.json` is: +// pub fn main(x0: Field, w: pub [Field;2]) -> pub [Field;2] { +// [x0 * w[0] + w[1] + 1, (x0 + 3) * w[1] + w[0]] +// } +fn noir_fold() -> NoirProgram { + let json_path = Path::new("./mock").join(format!("fold.json")); + let noir_json = std::fs::read(&json_path).unwrap(); + + NoirProgram::new(&noir_json) +} + +#[test] +fn test_conversions() { + let f = F::::from(5); + let acir_f = convert_to_acir_field(f); + assert_eq!(acir_f, GenericFieldElement::from_repr(Fr::from(5))); + + let f = GenericFieldElement::from_repr(Fr::from(3)); + let halo2_f = convert_to_halo2_field(f); + assert_eq!(halo2_f, F::::from(3)); +} + +#[test] +#[traced_test] +fn test_mock_noir_ivc() { + let mut circuit = noir_fold(); + circuit.set_private_inputs(vec![F::::from(3)]); + + let rom_circuit = NoirRomCircuit { circuit, circuit_index: 0, rom_size: 2 }; + + let memory = NoirMemory { + circuits: vec![rom_circuit], + rom: vec![0, 0], + public_input: vec![ + F::::from(1), // Actual input + F::::from(2), // Actual input + F::::from(0), // PC + F::::from(0), // ROM + F::::from(0), // ROM + ], + }; + + let snark = run(&memory).unwrap(); + let zi = snark.zi_primary(); + dbg!(zi); + // First fold: + // step_out[0] == 3 * 1 + 2 + 1 == 6 + // step_out[1] == (3 + 3) * 2 + 1 == 13 + // Second fold: + // step_out[0] == 3 * 6 + 13 + 1 == 32 + // step_out[1] == (3 + 3) * 13 + 6 == 84 + assert_eq!(zi[0], F::::from(32)); + assert_eq!(zi[1], F::::from(84)); + assert_eq!(zi[2], F::::from(2)); + assert_eq!(zi[3], F::::from(0)); + assert_eq!(zi[4], F::::from(0)); +} + +#[test] +#[traced_test] +fn test_mock_noir_nivc() { + let mut add_external = NoirProgram::new(ADD_EXTERNAL); + add_external.set_private_inputs(vec![F::::from(5), F::::from(7)]); + let add_external = + NoirRomCircuit { circuit: add_external, circuit_index: 0, rom_size: 3 }; + + // TODO: The issue is the private inputs need to be an empty vector or else this isn't computed at + // all. 
Be careful, this is insanely touchy and I hate that it is this way. + let mut square_zeroth = NoirProgram::new(SQUARE_ZEROTH); + square_zeroth.set_private_inputs(vec![]); + let square_zeroth = + NoirRomCircuit { circuit: square_zeroth, circuit_index: 1, rom_size: 3 }; + let mut swap_memory = NoirProgram::new(SWAP_MEMORY); + swap_memory.set_private_inputs(vec![]); + let swap_memory = + NoirRomCircuit { circuit: swap_memory, circuit_index: 2, rom_size: 3 }; + + let memory = NoirMemory { + circuits: vec![add_external, square_zeroth, swap_memory], + rom: vec![0, 1, 2], + public_input: vec![ + F::::from(1), // Actual input + F::::from(2), // Actual input + F::::from(0), // PC + F::::from(0), // ROM + F::::from(1), // ROM + F::::from(2), // ROM + ], + }; + + let snark = run(&memory).unwrap(); + let zi = snark.zi_primary(); + dbg!(zi); + // First fold: + // step_out[0] == 1 + 5 == 6 + // step_out[1] == 2 + 7 == 9 + // Second fold: + // step_out[0] == 6 ** 2 == 36 + // step_out[1] == 9 + // Third fold: + // step_out[0] == 9 + // step_out[1] == 36 + assert_eq!(zi[0], F::::from(9)); + assert_eq!(zi[1], F::::from(36)); + assert_eq!(zi[2], F::::from(3)); + assert_eq!(zi[3], F::::from(0)); + assert_eq!(zi[4], F::::from(1)); + assert_eq!(zi[5], F::::from(2)); +} diff --git a/frontend/src/program/data.rs b/frontend/src/program/data.rs new file mode 100644 index 0000000..318a481 --- /dev/null +++ b/frontend/src/program/data.rs @@ -0,0 +1,636 @@ +//! # Data Module +//! +//! The `data` module contains data structures and types used in the proof system. +//! +//! ## Structs +//! +//! - `FoldInput`: Represents the fold input for any circuit containing signal names and values. +//! - `R1CSType`: Represents the R1CS file type, which can be either a file path or raw bytes. 
+ +use std::{ + fs::{self, File}, + io::Write, + sync::Arc, +}; + +use client_side_prover::{fast_serde::FastSerde, supernova::get_circuit_shapes}; +use serde_json::json; + +use super::*; +use crate::setup::ProvingParams; + +/// Fold input for any circuit containing signals name and vector of values. Inputs are distributed +/// evenly across folds after the ROM is finalised by the prover. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct FoldInput { + /// circuit name and consolidated values + #[serde(flatten)] + pub value: HashMap>, +} + +impl FoldInput { + /// splits the inputs evenly across folds as per instruction frequency + pub fn split(&self, freq: usize) -> Vec> { + let mut res = vec![HashMap::new(); freq]; + + for (key, value) in self.value.clone().into_iter() { + debug!("key: {:?}, freq: {}, value_len: {}", key, freq, value.len()); + assert_eq!(value.len() % freq, 0); + let chunk_size = value.len() / freq; + let chunks: Vec> = value.chunks(chunk_size).map(|chunk| chunk.to_vec()).collect(); + for i in 0..freq { + res[i].insert(key.clone(), json!(chunks[i].clone())); + } + } + + res + } +} + +/// R1CS file type +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] +pub enum R1CSType { + /// file path to the R1CS file + #[serde(rename = "file")] + File(PathBuf), + /// raw bytes of the R1CS file + #[serde(rename = "raw")] + Raw(Vec), +} + +/// Witness generator type +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] +pub enum WitnessGeneratorType { + /// Browser witness generator + #[serde(rename = "browser")] + Browser, + /// Wasm witness generator + #[serde(rename = "wasm")] + Wasm { + /// Path to the Wasm binary for witness generation + path: String, + /// Path where the witness files are stored + wtns_path: String, + }, + /// Path to the witness generator + Path(PathBuf), + /// Raw bytes of the witness generator + #[serde(skip)] + Raw(Vec), // TODO: Would prefer to not alloc here, but i got lifetime hell lol +} + +/// 
Uninitialized Circuit Setup data, in this configuration the R1CS objects have not +/// been initialized and require a bulky initialize process. +#[derive(Clone, Debug, Default, Deserialize, Serialize, PartialEq)] +pub struct UninitializedSetup { + /// vector of all circuits' r1cs + pub r1cs_types: Vec, + /// vector of all circuits' witness generator + pub witness_generator_types: Vec, + /// NIVC max ROM length + pub max_rom_length: usize, +} + +/// Initialized Circuit Setup data, in this configuration the R1CS objects have been +/// fully loaded for proving. +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] +pub struct InitializedSetup { + /// vector of all circuits' r1cs + pub r1cs: Vec>, + /// vector of all circuits' witness generator + pub witness_generator_types: Vec, + /// NIVC max ROM length + pub max_rom_length: usize, +} + +// Note, the below are typestates that prevent misuse of our current API. +/// Setup status trait +pub trait SetupStatus { + /// Public parameters type + type PublicParams; + /// Setup data type + type SetupData; +} + +/// Online setup status +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct Online; +impl SetupStatus for Online { + type PublicParams = Arc>; + type SetupData = Arc; +} + +/// Offline setup status +#[derive(Serialize, Deserialize, Debug, PartialEq)] +pub struct Offline; +impl SetupStatus for Offline { + type PublicParams = Vec; + type SetupData = UninitializedSetup; +} + +/// Witness status trait +pub trait WitnessStatus { + /// Private input for a circuit containing signals name and vector of values + /// - For [`Expanded`] status, it is a vector of private inputs for each fold of a circuit + /// - For [`NotExpanded`] status, it is a tuple of private input and fold input of a circuit + type PrivateInputs; +} + +/// Expanded witness status +pub struct Expanded; +impl WitnessStatus for Expanded { + /// expanded input for each fold of each circuit in the ROM + type PrivateInputs = Vec>; +} + 
+/// Not expanded witness status +pub struct NotExpanded; +impl WitnessStatus for NotExpanded { + /// Private input and fold input for each circuit in the ROM + type PrivateInputs = (Vec>, HashMap); +} + +/// Auxiliary circuit data required to execute the ROM +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] +pub struct CircuitData { + /// circuit instruction opcode in [`S::SetupData`] + pub opcode: u64, +} + +/// ROM data type +pub type RomData = HashMap; +/// ROM type +pub type Rom = Vec; +/// NIVC input type +pub type NivcInput = Vec>; + +/// Represents configuration and circuit data required for initializing the proving system. +#[derive(Serialize, Deserialize, Clone, Debug)] +pub struct SetupParams { + /// Public parameters of the proving system. Maps to the client-side prover parameters. + pub public_params: S::PublicParams, + // TODO: Refactor this onto the PublicParams object and share the ProvingParams abstraction + /// Setup-specific verification key digest for the primary elliptic curve. + pub vk_digest_primary: ::Scalar, + /// Setup-specific verification key digest for the secondary elliptic curve. + pub vk_digest_secondary: as Engine>::Scalar, + /// Describes R1CS configurations used in proving setup. + pub setup_data: S::SetupData, + /// A mapping between ROM opcodes and circuit configuration. + pub rom_data: RomData, +} + +impl PartialEq for SetupParams +where S::SetupData: PartialEq +{ + fn eq(&self, other: &Self) -> bool { + // TODO: Supernova types are not supporting PartialEq + // self.public_params == other.public_params && + self.vk_digest_primary == other.vk_digest_primary + && self.vk_digest_secondary == other.vk_digest_secondary + && self.setup_data == other.setup_data + && self.rom_data == other.rom_data + } +} + +/// Defines the logic of the proof program. 
+pub struct ProofParams { + /// Represents sequence of circuit operations (circuit "bytecode") + pub rom: Rom, +} + +/// Contains inputs and state specific to a single proof generation instance. +#[derive(Debug)] +pub struct InstanceParams { + /// Initial public input for NIVC + pub nivc_input: NivcInput, + /// Private inputs for each fold + pub private_inputs: W::PrivateInputs, +} + +impl InstanceParams { + /// Converts proving instance parameters into an expanded form by distributing fold inputs across + /// their corresponding circuit instances in the ROM. + /// + /// This method performs the following steps: + /// 1. Creates a map of circuit names to their positions in the ROM + /// 2. Collects private inputs from each ROM opcode configuration + /// 3. Distributes fold inputs across matching circuit instances based on their labels + /// 4. Combines the distributed inputs with existing private inputs for each ROM position + /// + /// # Arguments + /// + /// * `self` - The program data instance to expand + /// + /// # Returns + /// + /// Returns a `Result` containing either: + /// * `Ok(InstanceParams)` - The expanded instance with distributed inputs + /// * `Err(ProofError)` - If the expansion process fails + /// + /// # Errors + /// + /// This function will return an error if: + /// * A circuit label in the inputs is not found in the ROM + /// * Input distribution fails + /// + /// # Details + /// + /// The expansion process handles fold inputs, which are inputs that need to be distributed across + /// multiple instances of the same circuit in the ROM. For each circuit label in the inputs: + /// 1. Finds all positions of that circuit in the ROM + /// 2. Splits the fold inputs into equal parts + /// 3. Assigns each part to the corresponding circuit instance + /// + /// The resulting expanded form contains individual private inputs for each ROM position, with + /// fold inputs properly distributed according to circuit usage. 
+ pub fn into_expanded( + self, + proof_params: &ProofParams, + ) -> Result, ProofError> { + assert_eq!(self.private_inputs.0.len(), proof_params.rom.len()); + + let mut instruction_usage: HashMap> = HashMap::new(); + for (index, circuit) in proof_params.rom.iter().enumerate() { + if let Some(usage) = instruction_usage.get_mut(circuit.as_str()) { + usage.push(index); + } else { + instruction_usage.insert(circuit.clone(), vec![index]); + } + } + let mut private_inputs: Vec> = self.private_inputs.0; + + // add fold input sliced to chunks and add to private input + for (circuit_label, fold_inputs) in self.private_inputs.1.iter() { + let inputs = match instruction_usage.get(circuit_label) { + Some(inputs) => inputs, + None => + Err(ProofError::Other(format!("Circuit label '{}' not found in rom", circuit_label)))?, + }; + let split_inputs = fold_inputs.split(inputs.len()); + for (idx, input) in inputs.iter().zip(split_inputs) { + private_inputs[*idx].extend(input); + } + } + + assert_eq!(private_inputs.len(), proof_params.rom.len()); + + let Self { nivc_input: initial_nivc_input, .. } = self; + Ok(InstanceParams { nivc_input: initial_nivc_input, private_inputs }) + } +} + +impl SetupParams { + /// Converts an offline setup parameters instance back into an online version by decompressing and + /// deserializing the public parameters and reconstructing the circuit shapes. + /// + /// This method performs the following steps: + /// 1. Deserializes raw bytes into an AuxParams object + /// 2. Initializes the circuit list from setup data + /// 3. Generates circuit shapes from the initialized memory + /// 4. Reconstructs full public parameters from circuit shapes and auxiliary parameters + /// 5. 
Constructs a new online program data instance + /// + /// # Arguments + /// + /// * `self` - The offline program data instance to convert + /// + /// # Returns + /// + /// Returns a `Result` containing either: + /// * `Ok(SetupParams)` - The converted online program data + /// * `Err(ProofError)` - If any step in the conversion process fails + /// + /// # Errors + /// + /// This function will return an error if: + /// * Circuit initialization fails + /// * Circuit shape generation fails + /// + /// # Features + /// + /// When compiled with the "timing" feature, this function will output timing information for: + /// * Reading and deserializing auxiliary parameters + /// * Generating circuit shapes + pub fn into_online(self) -> Result, ProofError> { + debug!("init proving params, proving_param_bytes={:?}", self.public_params.len()); + let proving_params = ProvingParams::from_bytes(&self.public_params).unwrap(); + + info!("init setup"); + let initialized_setup = initialize_setup_data(&self.setup_data).unwrap(); + + let circuits = initialize_circuit_list(&initialized_setup); + let memory = Memory { circuits, rom: vec![0; self.setup_data.max_rom_length] }; + + // TODO: This converts the r1cs memory into sparse matrices, which doubles + // the memory usage. Can we re-used these sparse matrices in our constraint + // system? + info!("init circuit shapes"); + let circuit_shapes = get_circuit_shapes(&memory); + + info!("init public params from parts"); + let public_params = + PublicParams::::from_parts_unchecked(circuit_shapes, proving_params.aux_params); + let Self { rom_data, .. 
} = self; + + Ok(SetupParams { + public_params: Arc::new(public_params), + vk_digest_primary: proving_params.vk_digest_primary, + vk_digest_secondary: proving_params.vk_digest_secondary, + setup_data: Arc::new(initialized_setup), + rom_data, + }) + } +} + +impl SetupParams { + /// Converts an online setup parameters instance into an offline version by serializing + /// the public parameters to disk. + /// + /// This method performs the following steps: + /// 1. Extracts auxiliary parameters from the public parameters + /// 2. Serializes the auxiliary parameters to bytes + /// 3. Writes the compressed data to the specified path + /// 4. Constructs a new offline program data instance + /// + /// # Arguments + /// + /// * `self` - The online program data instance to convert + /// * `path` - The file path where compressed public parameters will be saved + /// + /// # Returns + /// + /// Returns a `Result` containing either: + /// * `Ok(SetupParams)` - The converted offline program data + /// * `Err(ProofError)` - If any step in the conversion process fails + /// + /// # Errors + /// + /// This function will return an error if: + /// * Bytes serialization fails + /// * File system operations fail (creating directories or writing file) + pub fn into_offline(self, path: PathBuf) -> Result, ProofError> { + let exclusive = Arc::try_unwrap(self.public_params).unwrap(); + let (_, aux_params) = exclusive.into_parts(); + let vk_digest_primary = self.vk_digest_primary; + let vk_digest_secondary = self.vk_digest_secondary; + let proving_param_bytes = + ProvingParams { aux_params, vk_digest_primary, vk_digest_secondary }.to_bytes(); + + if let Some(parent) = path.parent() { + fs::create_dir_all(parent)?; + } + + let bytes_path = path.with_extension("bytes"); + debug!("bytes_path={:?}", bytes_path); + File::create(&bytes_path)?.write_all(&proving_param_bytes)?; + + let Self { rom_data, .. 
} = self; + Ok(SetupParams { + public_params: proving_param_bytes, + vk_digest_primary, + vk_digest_secondary, + // TODO: This approach is odd, refactor with #375 + setup_data: Default::default(), + rom_data, + }) + } + + /// Extends and prepares the public inputs for the zero-knowledge proof circuits. + /// + /// This function performs two main operations: + /// 1. Expands the ROM (Read-Only Memory) to the maximum length specified in `setup_data` + /// 2. Constructs the primary public input vector `z0_primary` by combining: + /// - The initial NIVC (Non-Interactive Verifiable Computation) input + /// - An initial ROM index of zero + /// - The expanded ROM opcodes converted to field elements + /// + /// # Arguments + /// + /// * `rom` - A reference to the ROM (sequence of circuit operations) containing circuit + /// configurations. + /// * `initial_nivc_input` - The initial public input required for NIVC. + /// + /// # Returns + /// + /// Returns a tuple containing: + /// - `Vec>`: The extended primary public input vector (z0_primary) + /// - `Vec`: The expanded ROM containing opcodes + /// + /// # Errors + /// + /// Returns a `ProofError` if: + /// - Any opcode configuration specified in the ROM is not found in `rom_data` + pub fn extend_public_inputs( + &self, + rom: &Rom, + initial_nivc_input: &NivcInput, + ) -> Result<(Vec>, Vec), ProofError> { + // TODO: This is currently enabled for _either_ Expanded or NotExpanded + let mut rom = rom + .iter() + .map(|opcode_config| { + self + .rom_data + .get(opcode_config) + .ok_or_else(|| { + ProofError::Other(format!("Opcode config '{}' not found in rom_data", opcode_config)) + }) + .map(|config| config.opcode) + }) + .collect::, ProofError>>()?; + + rom.resize(self.setup_data.max_rom_length, u64::MAX); + + let mut z0_primary: Vec> = initial_nivc_input.clone(); + z0_primary.push(F::::ZERO); // rom_index = 0 + z0_primary.extend(rom.iter().map(|opcode| ::Scalar::from(*opcode))); + debug!("z0_primary={:?}", z0_primary); + 
Ok((z0_primary, rom.clone())) + } +} + +impl SetupParams { + /// Generates NIVC proof from [`InstanceParams`] + /// - run NIVC recursive proving + /// - run CompressedSNARK to compress proof + /// - serialize proof + pub async fn generate_proof( + &self, + proof_params: &ProofParams, + instance_params: &InstanceParams, + ) -> Result, String>, ProofError> { + debug!("starting recursive proving"); + let program_output = program::run(self, proof_params, instance_params).await?; + + debug!("starting proof compression"); + let compressed_snark_proof = program::compress_proof_no_setup( + &program_output, + &self.public_params, + self.vk_digest_primary, + self.vk_digest_secondary, + )?; + compressed_snark_proof.serialize() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + const JSON: &str = r#" +{ + "input": [ + [ + {},{},{} + ], + { + "CIRCUIT_1": { + "external": [5,7], + "plaintext": [1,2,3,4] + }, + "CIRCUIT_2": { + "ciphertext": [1, 2, 3, 4], + "external": [2, 4] + }, + "CIRCUIT_3": { + "key": [2, 3], + "value": [4, 5] + } + } + ] +}"#; + + #[derive(Debug, Deserialize)] + struct MockInputs { + input: (Vec>, HashMap), + } + + // Helper function to create test program data + fn create_test_program_data() -> (SetupParams, ProofParams, InstanceParams) { + // Load add.r1cs from examples + let add_r1cs = crate::tests::inputs::ADD_EXTERNAL_R1CS.to_vec(); + let r1cs = R1CSType::Raw(add_r1cs.to_vec()); + // Create ROM data with proper circuit data + let mut rom_data = HashMap::new(); + rom_data.insert("add".to_string(), CircuitData { opcode: 1u64 }); + rom_data.insert("mul".to_string(), CircuitData { opcode: 2u64 }); + + // Rest of the function remains same + let rom: Vec = vec!["add".to_string(), "mul".to_string()]; + + let setup_data = UninitializedSetup { + max_rom_length: 4, + r1cs_types: vec![r1cs], + witness_generator_types: vec![WitnessGeneratorType::Raw(vec![])], + }; + let initialized_setup = initialize_setup_data(&setup_data).unwrap(); + + let public_params = 
program::setup(&setup_data); + let (prover_key, _vk) = CompressedSNARK::::setup(&public_params).unwrap(); + + let setup_params = SetupParams { + public_params: Arc::new(public_params), + setup_data: Arc::new(initialized_setup), + vk_digest_primary: prover_key.pk_primary.vk_digest, + vk_digest_secondary: prover_key.pk_secondary.vk_digest, + rom_data, + }; + let proof_params = ProofParams { rom }; + let instance_params = InstanceParams { + nivc_input: vec![F::::ONE], + private_inputs: vec![HashMap::new(), HashMap::new()], + }; + + (setup_params, proof_params, instance_params) + } + + #[test] + fn test_extend_public_inputs() { + // Setup test data + let (setup_params, proof_params, instance_params) = create_test_program_data(); + + // Test successful case + let result = setup_params.extend_public_inputs(&proof_params.rom, &instance_params.nivc_input); + assert!(result.is_ok()); + + let (z0_primary, expanded_rom) = result.unwrap(); + + // Verify z0_primary structure + assert_eq!( + z0_primary.len(), + instance_params.nivc_input.len() + 1 + setup_params.setup_data.max_rom_length + ); + assert_eq!(z0_primary[instance_params.nivc_input.len()], F::::ZERO); // Check ROM index is 0 + + // Verify ROM expansion + assert_eq!(expanded_rom.len(), setup_params.setup_data.max_rom_length); + assert_eq!(expanded_rom[0], 1u64); // First opcode + assert_eq!(expanded_rom[1], 2u64); // Second opcode + assert_eq!(expanded_rom[2], u64::MAX); // Padding + } + + #[test] + fn test_extend_public_inputs_missing_opcode() { + let (setup_params, mut proof_params, instance_params) = create_test_program_data(); + + // Add an opcode config that doesn't exist in rom_data + proof_params.rom.push("nonexistent".to_string()); + + let result = setup_params.extend_public_inputs(&proof_params.rom, &instance_params.nivc_input); + assert!(result.is_err()); + assert!(matches!( + result.unwrap_err(), + ProofError::Other(e) if e.contains("not found in rom_data") + )); + } + + #[test] + 
#[tracing_test::traced_test] + fn test_deserialize_inputs() { + let mock_inputs: MockInputs = serde_json::from_str(JSON).unwrap(); + dbg!(&mock_inputs.input); + assert!(mock_inputs.input.1.contains_key("CIRCUIT_1")); + assert!(mock_inputs.input.1.contains_key("CIRCUIT_2")); + assert!(mock_inputs.input.1.contains_key("CIRCUIT_3")); + } + + #[test] + #[tracing_test::traced_test] + fn test_expand_private_inputs() { + let mock_inputs: MockInputs = serde_json::from_str(JSON).unwrap(); + let proof_params = ProofParams { + rom: vec![String::from("CIRCUIT_1"), String::from("CIRCUIT_2"), String::from("CIRCUIT_3")], + }; + let instance_params = + InstanceParams:: { nivc_input: vec![], private_inputs: mock_inputs.input } + .into_expanded(&proof_params) + .unwrap(); + dbg!(&instance_params.private_inputs); + assert!(!instance_params.private_inputs[0].is_empty()); + assert!(!instance_params.private_inputs[1].is_empty()); + assert!(!instance_params.private_inputs[2].is_empty()); + } + + #[test] + fn test_online_to_offline_serialization_round_trip() { + let temp_dir = tempdir::TempDir::new("setup").unwrap(); + let offline_path = temp_dir.path().join("offline"); + + let (setup_params_online, ..) = create_test_program_data(); + let setup_params_offline = setup_params_online.into_offline(offline_path).unwrap(); + + // Matches itself + assert_eq!(setup_params_offline, setup_params_offline); + + // Verify round-trip serialization for `Offline` + let serialized_offline = serde_json::to_string(&setup_params_offline).unwrap(); + let deserialized_offline: SetupParams = + serde_json::from_str(&serialized_offline).unwrap(); + assert_eq!(setup_params_offline, deserialized_offline); + + // Can be "onlined" + let result = deserialized_offline.into_online(); + assert!(result.is_ok()); + } +} diff --git a/frontend/src/program/mod.rs b/frontend/src/program/mod.rs new file mode 100644 index 0000000..a818922 --- /dev/null +++ b/frontend/src/program/mod.rs @@ -0,0 +1,458 @@ +//! 
# Program Module +//! +//! The `program` module contains the core logic for setting up and running the +//! proof system. It provides functionality for initializing the setup, +//! generating proofs, and verifying proofs. +//! +//! ## Submodules +//! +//! - `data`: Contains data structures and types used in the proof system. +//! - `http`: Provides utilities for handling HTTP-related operations in the proof system. +//! - `manifest`: Contains the manifest structure and related utilities. +//! - `utils`: Provides utility functions used throughout the module. + +use std::sync::Arc; + +use bellpepper_core::{num::AllocatedNum, ConstraintSystem, SynthesisError}; +use circom::{r1cs::R1CS, witness::generate_witness_from_generator_type}; +use client_side_prover::{ + supernova::{NonUniformCircuit, RecursiveSNARK, StepCircuit}, + traits::{snark::default_ck_hint, Dual}, +}; +use data::{Expanded, InitializedSetup}; +use proof::FoldingProof; +#[cfg(feature = "timing")] use tracing::trace; +use utils::into_input_json; + +use super::*; +use crate::{ + circom::witness::generate_witness_from_browser_type, + program::{ + data::{ProofParams, SetupParams}, + utils::into_circom_input, + }, +}; + +pub mod data; +pub mod utils; + +// TODO: Consider moving contents of mod.rs files to a separate files. mod.rs +// files should only be used to adjust the visibility of exported items. + +/// Compressed proof type +pub type CompressedProof = FoldingProof, F>; + +/// Represents the memory used in the proof system. +/// +/// This struct contains the circuits and ROM data required for the proof +/// system. +pub struct Memory { + /// A vector of ROM circuits used in the proof system. + pub circuits: Vec, + /// A vector of ROM data, represented as 64-bit unsigned integers. + pub rom: Vec, +} + +/// Represents a ROM circuit used in the proof system. 
+/// +/// This struct encapsulates the necessary components and metadata for a ROM +/// circuit, including the circuit itself, its index, size, and various inputs. +#[derive(Clone)] +pub struct RomCircuit { + /// The Circom circuit associated with this ROM circuit. + pub circuit: CircomCircuit, + /// The index of this circuit within the ROM. + pub circuit_index: usize, + /// The size of the ROM. + pub rom_size: usize, + /// Optional NIVC I/O values for the circuit. + pub nivc_io: Option>>, + /// Optional private inputs for the circuit, mapped by their labels. + pub private_input: Option>, + /// The type of witness generator used for this circuit. + pub witness_generator_type: WitnessGeneratorType, +} + +// NOTE (Colin): This is added so we can cache only the active circuits we are +// using. +impl Default for RomCircuit { + fn default() -> Self { + Self { + circuit: CircomCircuit::default(), + circuit_index: usize::MAX - 1, + rom_size: 0, + nivc_io: None, + private_input: None, + witness_generator_type: WitnessGeneratorType::Raw(vec![]), + } + } +} + +impl NonUniformCircuit for Memory { + type C1 = RomCircuit; + type C2 = TrivialCircuit>; + + fn num_circuits(&self) -> usize { self.circuits.len() } + + fn primary_circuit(&self, circuit_index: usize) -> Self::C1 { + self.circuits[circuit_index].clone() + } + + fn secondary_circuit(&self) -> Self::C2 { Default::default() } + + fn initial_circuit_index(&self) -> usize { self.rom[0] as usize } +} + +impl StepCircuit> for RomCircuit { + fn arity(&self) -> usize { self.circuit.arity() + 1 + self.rom_size } + + fn circuit_index(&self) -> usize { self.circuit_index } + + fn synthesize>>( + &self, + cs: &mut CS, + pc: Option<&AllocatedNum>>, + z: &[AllocatedNum>], + ) -> Result<(Option>>, Vec>>), SynthesisError> { + let rom_index = &z[self.circuit.arity()]; // jump to where we pushed pc data into CS + let allocated_rom = &z[self.circuit.arity() + 1..]; // jump to where we pushed rom data into C + let (rom_index_next, 
pc_next) = utils::next_rom_index_and_pc( + &mut cs.namespace(|| "next and rom_index and pc"), + rom_index, + allocated_rom, + pc.ok_or(SynthesisError::AssignmentMissing)?, + )?; + let mut circuit_constraints = self.circuit.vanilla_synthesize(cs, z)?; + circuit_constraints.push(rom_index_next); + circuit_constraints.extend(z[self.circuit.arity() + 1..].iter().cloned()); + Ok((Some(pc_next), circuit_constraints)) + } +} + +// TODO: This is like a one-time use setup that overlaps some with +// `ProgramData::into_online()`. Worth checking out how to make this simpler, +// clearer, more efficient. +/// Setup function +pub fn setup(setup_data: &UninitializedSetup) -> PublicParams { + // Optionally time the setup stage for the program + #[cfg(feature = "timing")] + let time = std::time::Instant::now(); + + // TODO: I don't think we want to have to call `initialize_circuit_list` more + // than once on setup ever and it seems like it may get used more + // frequently. + let initilized_setup = initialize_setup_data(setup_data).unwrap(); + let circuits = initialize_circuit_list(&initilized_setup); // TODO, change the type signature of trait to use arbitrary error types. + let memory = Memory { circuits, rom: vec![0; setup_data.max_rom_length] }; // Note, `rom` here is not used in setup, only `circuits` + let public_params = PublicParams::setup(&memory, &*default_ck_hint(), &*default_ck_hint()); + + #[cfg(feature = "timing")] + trace!("`PublicParams::setup()` elapsed: {:?}", time.elapsed()); + + public_params +} + +/// Executes the SuperNova program with the provided setup, proof, and instance +/// parameters. +/// +/// This function initializes the public inputs, resizes the ROM, and +/// iteratively processes each step of the ROM to generate a recursive SNARK +/// proof. It handles the private inputs, witness generation, and circuit +/// execution for each opcode in the ROM. 
+/// +/// # Arguments +/// +/// * `setup_params` - The setup parameters containing the setup data and public parameters. +/// * `proof_params` - The proof parameters containing the ROM and other proof-related data. +/// * `instance_params` - The instance parameters containing the NIVC input and private inputs. +/// +/// # Returns +/// +/// A `Result` containing the generated `RecursiveSNARK` on success, or a +/// `ProofError` on failure. +/// +/// # Errors +/// +/// This function can return a `ProofError` if there are issues with the NIVC +/// input, private inputs, or witness generation. +pub async fn run( + setup_params: &SetupParams, + proof_params: &ProofParams, + instance_params: &InstanceParams, +) -> Result, ProofError> { + info!("Starting SuperNova program..."); + + // Resize the rom to be the `max_rom_length` committed to in the `S::SetupData` + let (z0_primary, resized_rom) = + setup_params.extend_public_inputs(&proof_params.rom, &instance_params.nivc_input)?; + let z0_secondary = vec![F::::ZERO]; + + let mut recursive_snark_option = None; + let mut next_public_input = z0_primary.clone(); + + // TODO (Colin): We are basically creating a `R1CS` for each circuit here, then + // also creating `R1CSWithArity` for the circuits in the `PublicParams`. + // Surely we don't need both? + let circuits = initialize_circuit_list(&setup_params.setup_data); // TODO: AwK? 
+ + let mut memory = Memory { rom: resized_rom.clone(), circuits }; + + #[cfg(feature = "timing")] + let time = std::time::Instant::now(); + for (idx, &op_code) in + resized_rom.iter().enumerate().take_while(|(_, &op_code)| op_code != u64::MAX) + { + info!("Step {} of ROM", idx); + debug!("Opcode = {:?}", op_code); + memory.circuits[op_code as usize].private_input = + Some(instance_params.private_inputs[idx].clone()); + // trace!("private input: {:?}", memory.circuits[op_code as + // usize].private_input); + memory.circuits[op_code as usize].nivc_io = Some(next_public_input); + + let wit_type = memory.circuits[op_code as usize].witness_generator_type.clone(); + let public_params = &setup_params.public_params; + + memory.circuits[op_code as usize].circuit.witness = + if wit_type == WitnessGeneratorType::Browser { + // When running in browser, the witness is passed as input. + // Some(witnesses[idx].clone()) + let arity = memory.circuits[op_code as usize].circuit.arity(); + let nivc_io = + &memory.circuits[op_code as usize].nivc_io.as_ref().ok_or_else(|| { + ProofError::Other(format!("nivc_io not found for op_code {}", op_code)) + })?[..arity]; + + let private_input = + memory.circuits[op_code as usize].private_input.as_ref().ok_or_else(|| { + ProofError::Other(format!("private_input not found for op_code {}", op_code)) + })?; + + let circom_input = into_circom_input(nivc_io, private_input); + let witness = generate_witness_from_browser_type(circom_input, op_code).await?; + Some(witness) + } else { + let arity = memory.circuits[op_code as usize].circuit.arity(); + let nivc_io = + &memory.circuits[op_code as usize].nivc_io.as_ref().ok_or_else(|| { + ProofError::Other(format!("nivc_io not found for op_code {}", op_code)) + })?[..arity]; + + let private_input = + memory.circuits[op_code as usize].private_input.as_ref().ok_or_else(|| { + ProofError::Other(format!("private_input not found for op_code {}", op_code)) + })?; + let in_json = into_input_json(nivc_io, 
private_input)?; + let witness = generate_witness_from_generator_type(&in_json, &wit_type)?; + Some(witness) + }; + + let circuit_primary = memory.primary_circuit(op_code as usize); + let circuit_secondary = memory.secondary_circuit(); + + let mut recursive_snark = recursive_snark_option.unwrap_or_else(|| { + RecursiveSNARK::new( + public_params, + &memory, + &circuit_primary, + &circuit_secondary, + &z0_primary, + &z0_secondary, + ) + })?; + + info!("Proving single step..."); + recursive_snark.prove_step(public_params, &circuit_primary, &circuit_secondary)?; + info!("Done proving single step..."); + + #[cfg(feature = "verify-steps")] + { + info!("Verifying single step..."); + recursive_snark.verify(public_params, &z0_primary, &z0_secondary)?; + info!("Single step verification done"); + } + + // Update everything now for next step + next_public_input = recursive_snark.zi_primary().clone(); + next_public_input.truncate(circuit_primary.arity()); + + recursive_snark_option = Some(Ok(recursive_snark)); + } + // Note, this unwrap cannot fail + let recursive_snark = recursive_snark_option.unwrap(); + #[cfg(feature = "timing")] + trace!("Recursive loop of `program::run()` elapsed: {:?}", time.elapsed()); + + Ok(recursive_snark?) +} + +/// Compresses a proof without performing the setup step. +/// +/// This function takes an existing `RecursiveSNARK` and compresses it into a +/// `CompressedProof` using pre-initialized proving keys. This is useful when +/// the setup step has already been performed and the proving keys are +/// available, allowing for more efficient proof generation. +/// +/// # Arguments +/// +/// * `recursive_snark` - A reference to the `RecursiveSNARK` that needs to be compressed. +/// * `public_params` - The public parameters required for the proof system. +/// * `vk_digest_primary` - The primary verification key digest. +/// * `vk_digest_secondary` - The secondary verification key digest. 
+/// +/// # Returns +/// +/// A `Result` containing the `CompressedProof` if successful, or a `ProofError` +/// if an error occurs. +/// +/// # Errors +/// +/// This function will return a `ProofError` if the compression process fails at +/// any step. +pub fn compress_proof_no_setup( + recursive_snark: &RecursiveSNARK, + public_params: &PublicParams, + vk_digest_primary: ::Scalar, + vk_digest_secondary: as Engine>::Scalar, +) -> Result { + let pk = CompressedSNARK::::initialize_pk( + public_params, + vk_digest_primary, + vk_digest_secondary, + ) + .unwrap(); + debug!( + "initialized pk pk_primary.digest={:?}, pk_secondary.digest={:?}", + pk.pk_primary.vk_digest, pk.pk_secondary.vk_digest + ); + + debug!("`CompressedSNARK::prove STARTING PROVING!"); + let proof = FoldingProof { + proof: CompressedSNARK::::prove(public_params, &pk, recursive_snark)?, + verifier_digest: pk.pk_primary.vk_digest, + }; + debug!("`CompressedSNARK::prove completed!"); + + Ok(proof) +} + +/// Compresses a proof by performing the setup step and generating a compressed +/// proof. +/// +/// This function initializes the proving keys by performing the setup step, and +/// then uses these keys to generate a compressed proof from an existing +/// `RecursiveSNARK`. This is useful when the setup step has not been performed +/// yet, and the proving keys need to be initialized before generating the +/// proof. +/// +/// # Arguments +/// +/// * `recursive_snark` - A reference to the `RecursiveSNARK` that needs to be compressed. +/// * `public_params` - The public parameters required for the proof system. +/// +/// # Returns +/// +/// A `Result` containing the `CompressedProof` if successful, or a `ProofError` +/// if an error occurs. +/// +/// # Errors +/// +/// This function will return a `ProofError` if the setup or compression process +/// fails at any step. 
+pub fn compress_proof( + recursive_snark: &RecursiveSNARK, + public_params: &PublicParams, +) -> Result { + debug!("Setting up `CompressedSNARK`"); + #[cfg(feature = "timing")] + let time = std::time::Instant::now(); + let (pk, _vk) = CompressedSNARK::::setup(public_params)?; + debug!("Done setting up `CompressedSNARK`"); + #[cfg(feature = "timing")] + trace!("`CompressedSNARK::setup` elapsed: {:?}", time.elapsed()); + + #[cfg(feature = "timing")] + let time = std::time::Instant::now(); + + let proof = FoldingProof { + proof: CompressedSNARK::::prove(public_params, &pk, recursive_snark)?, + verifier_digest: pk.pk_primary.vk_digest, + }; + debug!("`CompressedSNARK::prove completed!"); + + #[cfg(feature = "timing")] + trace!("`CompressedSNARK::prove` elapsed: {:?}", time.elapsed()); + + Ok(proof) +} + +/// Initializes the setup data for the program. +/// +/// This function takes an `UninitializedSetup` and converts it into an +/// `InitializedSetup` by iterating over the R1CS types and witness generator +/// types, creating `R1CS` instances and collecting them into vectors. It then +/// returns an `InitializedSetup` containing the R1CS and witness generator +/// types, along with the maximum ROM length. +/// +/// # Arguments +/// +/// * `setup_data` - The `UninitializedSetup` to initialize. +/// +/// # Returns +/// +/// A `Result` containing the `InitializedSetup` if successful, or a +/// `ProofError` if an error occurs. +pub fn initialize_setup_data( + setup_data: &UninitializedSetup, +) -> Result { + let (r1cs, witness_generator_types) = setup_data + .r1cs_types + .iter() + .zip(setup_data.witness_generator_types.iter()) + .map(|(r1cs_type, generator)| { + let r1cs = R1CS::try_from(r1cs_type)?; + Ok::<(Arc, data::WitnessGeneratorType), ProofError>(( + Arc::new(r1cs), + generator.clone(), + )) + }) + .collect::, _>>()? 
+ .into_iter() + .unzip(); + + Ok(InitializedSetup { r1cs, witness_generator_types, max_rom_length: setup_data.max_rom_length }) +} + +/// Initializes a list of ROM circuits from the provided setup data. +/// +/// This function takes an `InitializedSetup` and creates a vector of +/// `RomCircuit` instances. Each `RomCircuit` is constructed using the R1CS and +/// witness generator types from the setup data, and is assigned a unique +/// circuit index and the maximum ROM length. +/// +/// # Arguments +/// +/// * `setup_data` - The `InitializedSetup` containing the R1CS and witness generator types. +/// +/// # Returns +/// +/// A vector of `RomCircuit` instances initialized with the provided setup data. +pub fn initialize_circuit_list(setup_data: &InitializedSetup) -> Vec { + setup_data + .r1cs + .iter() + .zip(setup_data.witness_generator_types.iter()) + .enumerate() + .map(|(i, (r1cs, generator))| { + let circuit = circom::CircomCircuit { r1cs: r1cs.clone(), witness: None }; + RomCircuit { + circuit, + circuit_index: i, + rom_size: setup_data.max_rom_length, + nivc_io: None, + private_input: None, + witness_generator_type: generator.clone(), + } + }) + .collect::>() +} diff --git a/frontend/src/program/utils.rs b/frontend/src/program/utils.rs new file mode 100644 index 0000000..a295d40 --- /dev/null +++ b/frontend/src/program/utils.rs @@ -0,0 +1,182 @@ +//! # Utils Module +//! +//! The `utils` module contains utility functions used throughout the proof system. +//! +//! ## Functions +//! +//! - `next_rom_index_and_pc`: Computes the next ROM index and program counter. +use bellpepper_core::{ + boolean::{AllocatedBit, Boolean}, + LinearCombination, +}; +use itertools::Itertools; +use num_bigint::BigInt; + +use super::*; +use crate::circom::CircomInput; + +/// Computes the next ROM index and program counter. 
+#[allow(clippy::type_complexity)] +pub fn next_rom_index_and_pc>>( + cs: &mut CS, + rom_index: &AllocatedNum>, + allocated_rom: &[AllocatedNum>], + pc: &AllocatedNum>, +) -> Result<(AllocatedNum>, AllocatedNum>), SynthesisError> { + // Compute a selector for the current rom_index in allocated_rom + let current_rom_selector = + get_selector_vec_from_index(cs.namespace(|| "rom selector"), rom_index, allocated_rom.len())?; + + // Enforce that allocated_rom[rom_index] = pc + for (rom, bit) in allocated_rom.iter().zip_eq(current_rom_selector.iter()) { + // if bit = 1, then rom = pc + // bit * (rom - pc) = 0 + cs.enforce( + || "enforce bit = 1 => rom = pc", + |lc| lc + &bit.lc(CS::one(), F::::ONE), + |lc| lc + rom.get_variable() - pc.get_variable(), + |lc| lc, + ); + } + + // Get the index of the current rom, or the index of the invalid rom if no match + let current_rom_index = current_rom_selector + .iter() + .position(|bit| bit.get_value().is_some_and(|v| v)) + .unwrap_or_default(); + let next_rom_index = current_rom_index + 1; + + let rom_index_next = AllocatedNum::alloc_infallible(cs.namespace(|| "next rom index"), || { + F::::from(next_rom_index as u64) + }); + cs.enforce( + || " rom_index + 1 - next_rom_index_num = 0", + |lc| lc, + |lc| lc, + |lc| lc + rom_index.get_variable() + CS::one() - rom_index_next.get_variable(), + ); + + // Allocate the next pc without checking. + // The next iteration will check whether the next pc is valid. + let pc_next = AllocatedNum::alloc(cs.namespace(|| "next pc"), || { + let next_value = allocated_rom + .get(next_rom_index) + .and_then(|v| v.get_value()) + .and_then(|value| if value == F::::from(u64::MAX) { None } else { Some(value) }); + + let current_value = allocated_rom + .get(current_rom_index) + .and_then(|v| v.get_value()) + .ok_or(SynthesisError::AssignmentMissing)?; + + Ok(next_value.unwrap_or(current_value)) + })?; + + Ok((rom_index_next, pc_next)) +} + +/// Computes the selector vector from the given index. 
+pub fn get_selector_vec_from_index>>( + mut cs: CS, + target_index: &AllocatedNum>, + num_indices: usize, +) -> Result, SynthesisError> { + // TODO (Colin): This breaks currently with the hacky way of handling circuit in pp + // assert_ne!(num_indices, 0); + + // Compute the selector vector non-deterministically + let selector = (0..num_indices) + .map(|idx| { + // b <- idx == target_index + Ok(Boolean::Is(AllocatedBit::alloc( + cs.namespace(|| format!("allocate s_{:?}", idx)), + target_index.get_value().map(|v| v == F::::from(idx as u64)), + )?)) + }) + .collect::, SynthesisError>>()?; + + // Enforce ∑ selector[i] = 1 + { + let selected_sum = selector + .iter() + .fold(LinearCombination::zero(), |lc, bit| lc + &bit.lc(CS::one(), F::::ONE)); + cs.enforce( + || "exactly-one-selection", + |_| selected_sum, + |lc| lc + CS::one(), + |lc| lc + CS::one(), + ); + } + + // Enforce `target_index - ∑ i * selector[i] = 0`` + { + let selected_value = + selector.iter().enumerate().fold(LinearCombination::zero(), |lc, (i, bit)| { + lc + &bit.lc(CS::one(), F::::from(i as u64)) + }); + cs.enforce( + || "target_index - ∑ i * selector[i] = 0", + |lc| lc, + |lc| lc, + |lc| lc + target_index.get_variable() - &selected_value, + ); + } + + Ok(selector) +} + +/// Converts the given public and private inputs into a `CircomInput` struct. +pub fn into_circom_input( + public_input: &[F], + private_input: &HashMap, +) -> CircomInput { + let decimal_stringified_input: Vec = public_input + .iter() + .map(|x| BigInt::from_bytes_le(num_bigint::Sign::Plus, &x.to_bytes()).to_str_radix(10)) + .collect(); + + CircomInput { step_in: decimal_stringified_input, extra: private_input.clone() } +} + +/// Converts the given public and private inputs into a JSON string. 
+pub fn into_input_json( + public_input: &[F], + private_input: &HashMap, +) -> Result { + let decimal_stringified_input: Vec = public_input + .iter() + .map(|x| BigInt::from_bytes_le(num_bigint::Sign::Plus, &x.to_bytes()).to_str_radix(10)) + .collect(); + + let input = CircomInput { step_in: decimal_stringified_input, extra: private_input.clone() }; + Ok(serde_json::to_string(&input)?) +} + +/// Remaps the given input JSON string into a vector of tuples containing the key and value. +pub fn remap_inputs(input_json: &str) -> Result)>, ProofError> { + let circom_input: CircomInput = serde_json::from_str(input_json)?; + let mut remapped = vec![]; + + let step_in_values: Result, _> = circom_input + .step_in + .into_iter() + .map(|s| BigInt::from_str(&s).map_err(ProofError::from)) + .collect(); + remapped.push(("step_in".to_string(), step_in_values?)); + + for (k, v) in circom_input.extra { + let val = v + .as_array() + .ok_or_else(|| ProofError::Other(format!("Expected array for key {}", k)))? + .iter() + .map(|x| { + x.as_str() + .ok_or_else(|| ProofError::Other(format!("Expected string for key {}", k))) + .and_then(|s| BigInt::from_str(s).map_err(ProofError::from)) + }) + .collect::, ProofError>>()?; + remapped.push((k, val)); + } + + Ok(remapped) +} diff --git a/frontend/src/proof.rs b/frontend/src/proof.rs new file mode 100644 index 0000000..4fced4c --- /dev/null +++ b/frontend/src/proof.rs @@ -0,0 +1,63 @@ +//! # Proof Module +//! +//! This module provides the implementation for generating and verifying proofs. +//! It includes functionalities for serializing and deserializing folding proofs, +//! which are used in the proof system to ensure the integrity and correctness of computations. +//! +//! ## Structs +//! +//! - `FoldingProof`: Represents a folding proof with a generic proof type `T` and verifier +//! digest type `V`. +//! +//! ## Functions +//! +//! - `serialize`: Serializes a `FoldingProof` into a format suitable for storage or transmission. +//! 
- `deserialize`: Deserializes a `FoldingProof` from a stored or transmitted format back into its +//! original form. + +use hex; + +use super::*; +use crate::program::CompressedProof; + +/// Folding proof`` +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct FoldingProof { + /// Proof + pub proof: T, + /// Verifier digest + pub verifier_digest: V, +} + +impl CompressedProof { + /// Serializes a `FoldingProof` into a format suitable for storage or transmission. + /// + /// # Returns + /// + /// A `FoldingProof` with a `Vec` proof and a `String` verifier digest. + pub fn serialize(self) -> Result, String>, ProofError> { + let proof = bincode::serialize(&self.proof)?; + + Ok(FoldingProof { proof, verifier_digest: hex::encode(self.verifier_digest.to_bytes()) }) + } +} + +/// Folding proof implementation +impl FoldingProof, String> { + /// Deserializes a `FoldingProof` from a stored or transmitted format back into its original form. + /// + /// # Returns + /// + /// A `FoldingProof` with a `CompressedSNARK` proof and a `F` verifier digest. + pub fn deserialize(self) -> Result { + let proof = bincode::deserialize(&self.proof[..])?; + + Ok(FoldingProof { + proof, + verifier_digest: F::::from_bytes( + &hex::decode(&self.verifier_digest).unwrap().try_into().unwrap(), + ) + .unwrap(), + }) + } +} diff --git a/frontend/src/setup.rs b/frontend/src/setup.rs new file mode 100644 index 0000000..020bf35 --- /dev/null +++ b/frontend/src/setup.rs @@ -0,0 +1,130 @@ +//! # Setup Module +//! +//! The `setup` module contains utilities and structures for setting up the proof system. +//! +//! ## Structs +//! +//! - `ProvingParams`: Represents the parameters needed for proving, including auxiliary parameters +//! and verification key digests. +//! +//! ## Functions +//! +//! - `from_bytes`: Initializes `ProvingParams` from an efficiently serializable data format. +//! - `to_bytes`: Converts `ProvingParams` to an efficient serialization. +//! +//! ## Types +//! +//! 
- `AuxParams`: Represents the auxiliary parameters needed to create `PublicParams`. +//! - `ProverKey`: Represents the prover key needed to create a `CompressedSNARK`. +//! - `UninitializedSetup`: Represents the uninitialized setup data for circuits, including R1CS and +//! witness generator types. +//! - `WitnessGeneratorType`: Represents the type of witness generator, including raw bytes and +//! paths to Wasm binaries. + +use std::io::Cursor; + +use client_side_prover::{ + fast_serde::{self, FastSerde, SerdeByteError, SerdeByteTypes}, + supernova::snark::CompressedSNARK, + traits::{Dual, Engine}, +}; + +use crate::{ + errors::ProofError, program, program::data::R1CSType, AuxParams, ProverKey, UninitializedSetup, + WitnessGeneratorType, E1, S1, S2, +}; + +/// Proving parameters +#[derive(Debug)] +pub struct ProvingParams { + /// Auxiliary parameters + pub aux_params: AuxParams, + /// Primary verification key digest + pub vk_digest_primary: ::Scalar, + /// Secondary verification key digest + pub vk_digest_secondary: as Engine>::Scalar, +} + +impl FastSerde for ProvingParams { + /// Initialize ProvingParams from an efficiently serializable data format. + fn from_bytes(bytes: &Vec) -> Result { + let mut cursor = Cursor::new(bytes); + Self::validate_header(&mut cursor, SerdeByteTypes::ProverParams, 3)?; + + let aux_params = + Self::read_section_bytes(&mut cursor, 1).map(|bytes| AuxParams::from_bytes(&bytes))??; + + let vk_digest_primary = Self::read_section_bytes(&mut cursor, 2) + .and_then(|bytes| bytes.try_into().map_err(|_| SerdeByteError::G1DecodeError)) + .map(|bytes| ::Scalar::from_bytes(&bytes))? + .into_option() + .ok_or(SerdeByteError::G1DecodeError)?; + + let vk_digest_secondary = Self::read_section_bytes(&mut cursor, 3) + .and_then(|bytes| bytes.try_into().map_err(|_| SerdeByteError::G2DecodeError)) + .map(|bytes| as Engine>::Scalar::from_bytes(&bytes))? 
+ .into_option() + .ok_or(SerdeByteError::G1DecodeError)?; + + Ok(ProvingParams { aux_params, vk_digest_primary, vk_digest_secondary }) + } + + /// Convert ProvingParams to an efficient serialization. + fn to_bytes(&self) -> Vec { + let mut out = Vec::new(); + out.extend_from_slice(&fast_serde::MAGIC_NUMBER); + out.push(SerdeByteTypes::ProverParams as u8); + out.push(3); // num_sections + + Self::write_section_bytes(&mut out, 1, &self.aux_params.to_bytes()); + Self::write_section_bytes(&mut out, 2, &self.vk_digest_primary.to_bytes().to_vec()); + Self::write_section_bytes(&mut out, 3, &self.vk_digest_secondary.to_bytes().to_vec()); + + out + } +} + +impl ProvingParams { + /// Method used externally to initialize all the backend data needed to create a verifiable proof + /// with [`client_side_prover`] and `proofs` crate. Intended to be used in combination with setup, + /// which creates these values offline to be loaded at or before proof creation or verification. + /// + /// # Arguments + /// - `aux_params`: the data that defines what types of supernova programs can be run, i.e., + /// specified by a list of circuit R1CS and max ROM length. + /// - `prover_key`: The key used for generating proofs, allows us to pin a specific verifier. + pub fn new(aux_params: AuxParams, prover_key: ProverKey) -> Result { + Ok(ProvingParams { + aux_params, + vk_digest_primary: prover_key.pk_primary.vk_digest, + vk_digest_secondary: prover_key.pk_secondary.vk_digest, + }) + } +} + +/// Create a setup for a given list of R1CS files including the necessary +/// setup for compressed proving. 
+/// +/// # Arguments +/// - `r1cs_files`: A list of r1cs files that are accessible by the program using the setup +/// +/// # Returns +/// * `Result, ProofError>` - Bytes ready to be written to disk +pub fn setup(r1cs_files: &[R1CSType], rom_length: usize) -> Vec { + let setup_data = UninitializedSetup { + r1cs_types: r1cs_files.to_vec(), + witness_generator_types: vec![WitnessGeneratorType::Browser; r1cs_files.len()], + max_rom_length: rom_length, + }; + + let public_params = program::setup(&setup_data); + let (pk, _vk) = CompressedSNARK::::setup(&public_params).unwrap(); + let (_, aux_params) = public_params.into_parts(); + + ProvingParams { + aux_params, + vk_digest_primary: pk.pk_primary.vk_digest, + vk_digest_secondary: pk.pk_secondary.vk_digest, + } + .to_bytes() +} diff --git a/frontend/src/tests/examples/circuit_data/add_external.bin b/frontend/src/tests/examples/circuit_data/add_external.bin new file mode 100644 index 0000000000000000000000000000000000000000..8e7d5348de58af7ffdd4cd8047581539d19aaf52 GIT binary patch literal 114 zcmXRf$t%`NFG?)P&@(VFWM_Z?CN2gRE+!5}5Wxf@m_Y;!hY}kHlK`UxGlan;!J@^* v%D~FT$Rs4d#gST3l3J9Pm?Ol(!7RWe#LvZET#{N4pP2^~U=(1ohnfulGzty8 literal 0 HcmV?d00001 diff --git a/frontend/src/tests/examples/circuit_data/add_external.circom b/frontend/src/tests/examples/circuit_data/add_external.circom new file mode 100644 index 0000000..137d8e2 --- /dev/null +++ b/frontend/src/tests/examples/circuit_data/add_external.circom @@ -0,0 +1,13 @@ +pragma circom 2.1.9; + +template AddIntoZeroth() { + signal input step_in[2]; + signal input external[2]; + + signal output step_out[2]; + + step_out[0] <== step_in[0] + external[0]; + step_out[1] <== step_in[1] + external[1]; +} + +component main {public [step_in] } = AddIntoZeroth(); \ No newline at end of file diff --git a/frontend/src/tests/examples/circuit_data/add_external.r1cs b/frontend/src/tests/examples/circuit_data/add_external.r1cs new file mode 100644 index 
0000000000000000000000000000000000000000..5c71f869f325dd4475b09be6efa20556d2be4327 GIT binary patch literal 152 zcmXRiOfF_*U|?VdVkRJl0g#vjl%oKof&34Xzdm%HSg^Cw*J SS`eR^C&a?RD8Qrz)dT>kxCs*g literal 0 HcmV?d00001 diff --git a/frontend/src/tests/examples/circuit_data/square_zeroth.circom b/frontend/src/tests/examples/circuit_data/square_zeroth.circom new file mode 100644 index 0000000..703a2f5 --- /dev/null +++ b/frontend/src/tests/examples/circuit_data/square_zeroth.circom @@ -0,0 +1,12 @@ +pragma circom 2.1.9; + +template SquareZeroth() { + signal input step_in[2]; + + signal output step_out[2]; + + step_out[0] <== step_in[0] * step_in[0]; + step_out[1] <== step_in[1]; +} + +component main { public [step_in] } = SquareZeroth(); \ No newline at end of file diff --git a/frontend/src/tests/examples/circuit_data/square_zeroth.r1cs b/frontend/src/tests/examples/circuit_data/square_zeroth.r1cs new file mode 100644 index 0000000000000000000000000000000000000000..ebfa91bbc5623e36315e8acf62c1c90e2af27611 GIT binary patch literal 356 zcmXRiOfF_*U|?VdVkRIy1H>Qz69Y0oO#b@Nd1ArNN{<)D8nF?LjoVxUc4#gzd|2d{ zVgOSIqVWNcJX8%{^I&Fx+ycTtfLA>WPysP^JAh0A0tFxjs>f?4D^LxR{R|*J2!O;u W;iLiLAOV>DQ1^n>fTTfg0|5Zm1UwS} literal 0 HcmV?d00001 diff --git a/frontend/src/tests/examples/circuit_data/swap_memory.bin b/frontend/src/tests/examples/circuit_data/swap_memory.bin new file mode 100644 index 0000000000000000000000000000000000000000..3da9b5c8f6a2cc05b843b5702bed3ba787451b27 GIT binary patch literal 68 zcmXRf$t%`NFG?)P&@(VFWM+T>CN2gRE+!5}5WytD#md0M$jBtb&&6I`l3Ea-nJ2`; L!6?9_3{?#Pc@zk< literal 0 HcmV?d00001 diff --git a/frontend/src/tests/examples/circuit_data/swap_memory.circom b/frontend/src/tests/examples/circuit_data/swap_memory.circom new file mode 100644 index 0000000..68604d9 --- /dev/null +++ b/frontend/src/tests/examples/circuit_data/swap_memory.circom @@ -0,0 +1,12 @@ +pragma circom 2.1.9; + +template SwapMemory() { + signal input step_in[2]; + + signal output step_out[2]; + + step_out[0] 
<== step_in[1]; + step_out[1] <== step_in[0]; +} + +component main {public [step_in] } = SwapMemory(); \ No newline at end of file diff --git a/frontend/src/tests/examples/circuit_data/swap_memory.r1cs b/frontend/src/tests/examples/circuit_data/swap_memory.r1cs new file mode 100644 index 0000000000000000000000000000000000000000..ba2766164c608b830fa07d38817318f081255344 GIT binary patch literal 320 zcmXRiOfF_*U|?VdVkRJ70mR4vBnFZPf)A6wK6IW~u(Q(Rg|S9#L}TMN*MJ?G3k)9? z`K1`J02M&hffeH;KxTq45a2Zv UninitializedSetup { + UninitializedSetup { + r1cs_types: vec![ + R1CSType::Raw(ADD_EXTERNAL_R1CS.to_vec()), + R1CSType::Raw(SQUARE_ZEROTH_R1CS.to_vec()), + R1CSType::Raw(SWAP_MEMORY_R1CS.to_vec()), + ], + witness_generator_types: vec![ + WitnessGeneratorType::Raw(ADD_EXTERNAL_GRAPH.to_vec()), + WitnessGeneratorType::Raw(SQUARE_ZEROTH_GRAPH.to_vec()), + WitnessGeneratorType::Raw(SWAP_MEMORY_GRAPH.to_vec()), + ], + max_rom_length: MAX_ROM_LENGTH, + } +} + +async fn run_entry( + setup_data: UninitializedSetup, +) -> Result<(SetupParams, RecursiveSNARK), ProofError> { + let mut external_input0: HashMap = HashMap::new(); + external_input0.insert("external".to_string(), json!(EXTERNAL_INPUTS[0])); + let mut external_input1: HashMap = HashMap::new(); + external_input1.insert("external".to_string(), json!(EXTERNAL_INPUTS[1])); + let rom_data = HashMap::from([ + (String::from("ADD_EXTERNAL"), CircuitData { opcode: 0 }), + (String::from("SQUARE_ZEROTH"), CircuitData { opcode: 1 }), + (String::from("SWAP_MEMORY"), CircuitData { opcode: 2 }), + ]); + + let mut private_inputs = vec![]; + + let mut rom = vec![String::from("ADD_EXTERNAL")]; + private_inputs.push(external_input0); + + rom.push(String::from("SQUARE_ZEROTH")); + private_inputs.push(HashMap::new()); + + rom.push(String::from("SWAP_MEMORY")); + private_inputs.push(HashMap::new()); + + rom.push(String::from("ADD_EXTERNAL")); + private_inputs.push(external_input1); + + rom.push(String::from("SQUARE_ZEROTH")); + 
private_inputs.push(HashMap::new()); + + rom.push(String::from("SWAP_MEMORY")); + private_inputs.push(HashMap::new()); + let public_params = program::setup(&setup_data); + let initialized_setup = initialize_setup_data(&setup_data)?; + + let setup_params = SetupParams:: { + public_params: Arc::new(public_params), + setup_data: Arc::new(initialized_setup), + rom_data, + vk_digest_primary: F::::ZERO, + vk_digest_secondary: F::::ZERO, + }; + let proof_params = ProofParams { rom }; + let instance_params = InstanceParams:: { + nivc_input: vec![F::::from(1), F::::from(2)], + private_inputs: (private_inputs, HashMap::new()), + } + .into_expanded(&proof_params)?; + let recursive_snark = program::run(&setup_params, &proof_params, &instance_params).await?; + Ok((setup_params, recursive_snark)) +} + +#[tokio::test] +#[tracing_test::traced_test] +async fn test_run() { + let setup_data = get_setup_data(); + let (_, proof) = run_entry(setup_data).await.unwrap(); + // [1,2] + [5,7] + // --> [6,9] + // --> [36,9] + // --> [9,36] + [13,1] + // --> [22,37] + // --> [484,37] + // [37,484] + let final_mem = [ + F::::from(37), + F::::from(484), + F::::from(6), + F::::from(0), + F::::from(1), + F::::from(2), + F::::from(0), + F::::from(1), + F::::from(2), + F::::from(u64::MAX), + F::::from(u64::MAX), + F::::from(u64::MAX), + F::::from(u64::MAX), + ]; + assert_eq!(&final_mem.to_vec(), proof.zi_primary()); +} + +#[tokio::test] +#[tracing_test::traced_test] +async fn test_run_serialized_verify() { + let setup_data = get_setup_data(); + let (instance_params, recursive_snark) = run_entry(setup_data.clone()).await.unwrap(); + + // Pseudo-offline the `SetupParams` and regenerate it + let mut setup_params = + instance_params.into_offline(PathBuf::from_str(TEST_OFFLINE_PATH).unwrap()).unwrap(); + setup_params.setup_data = setup_data.clone(); + let setup_params = setup_params.into_online().unwrap(); + + // Create the compressed proof with the offlined `PublicParams` + let proof = 
program::compress_proof(&recursive_snark, &setup_params.public_params).unwrap(); + let serialized_compressed_proof = proof.serialize().unwrap(); + let proof = serialized_compressed_proof.deserialize().unwrap(); + + // Extend the initial state input with the ROM (happens internally inside `program::run`, so + // we do it out here just for the test) + let mut z0_primary = vec![F::::ONE, F::::from(2)]; + z0_primary.push(F::::ZERO); + let mut rom = vec![ + F::::ZERO, + F::::ONE, + F::::from(2), + F::::ZERO, + F::::ONE, + F::::from(2), + ]; + rom.resize(MAX_ROM_LENGTH, F::::from(u64::MAX)); + z0_primary.extend_from_slice(&rom); + + // Check that it verifies with offlined `PublicParams` regenerated pkey vkey + let (_pk, vk) = CompressedSNARK::::setup(&setup_params.public_params).unwrap(); + let res = proof.proof.verify(&setup_params.public_params, &vk, &z0_primary, &[F::::ZERO]); + assert!(res.is_ok()); + std::fs::remove_file(PathBuf::from_str(TEST_OFFLINE_PATH).unwrap()).unwrap(); +} diff --git a/prover/Cargo.toml b/prover/Cargo.toml new file mode 100644 index 0000000..b4bf4a7 --- /dev/null +++ b/prover/Cargo.toml @@ -0,0 +1,84 @@ +[package] +name ="client-side-prover" +version ="0.1.0" +authors =["Pluto Engineering"] +edition ="2021" +description ="Client side proving" +readme ="README.md" +repository ="https://github.com/pluto/client-side-prover" +license-file="LICENSE" +keywords =["zkSNARKs", "cryptography", "proofs"] + +[dependencies] +bellpepper-core ={ workspace=true } +bellpepper ={ workspace=true } +ff ={ workspace=true } +digest ={ workspace=true } +halo2curves ={ workspace=true } +sha3 ={ workspace=true } +rayon ={ workspace=true } +rand_core ={ workspace=true } +rand_chacha ={ workspace=true } +subtle ={ workspace=true } +neptune ={ workspace=true } +generic-array ={ workspace=true } +num-bigint ={ workspace=true } +num-traits ={ workspace=true } +num-integer ={ workspace=true } +serde ={ workspace=true } +bincode ={ workspace=true } +bitvec ={ workspace=true 
} +byteorder ={ workspace=true } +thiserror ={ workspace=true } +group ={ workspace=true } +pairing ={ workspace=true } +tracing ={ workspace=true } +cfg-if ={ workspace=true } +once_cell ={ workspace=true } +itertools ={ workspace=true } +rand ={ workspace=true } +ref-cast ={ workspace=true } +static_assertions={ workspace=true } +rayon-scan ={ workspace=true } + +[target.'cfg(any(target_arch = "x86_64", target_arch = "aarch64"))'.dependencies] +# grumpkin-msm has been patched to support MSMs for the pasta curve cycle +# see: https://github.com/argumentcomputer/grumpkin-msm/pull/3 +grumpkin-msm={ workspace=true } + +[target.'cfg(target_arch = "wasm32")'.dependencies] +getrandom={ workspace=true } + +[target.'cfg(not(target_arch = "wasm32"))'.dependencies] +proptest={ workspace=true } + +[target.'cfg(not(target_arch = "wasm32"))'.dev-dependencies] +criterion={ version="0.5", features=["html_reports"] } + +[dev-dependencies] +flate2 ={ workspace=true } +hex ={ workspace=true } +sha2 ={ workspace=true } +tracing-test ={ workspace=true } +expect-test ={ workspace=true } +anyhow ={ workspace=true } +tap ={ workspace=true } +tracing-texray ={ workspace=true } +tracing-subscriber={ workspace=true } +handlebars ={ workspace=true } +serde_json ={ workspace=true } + +# [build-dependencies] +# vergen = { workspace = true } + +[features] +default=["grumpkin-msm/portable"] +# asm = ["halo2curves/asm"] +# Compiles in portable mode, w/o ISA extensions => binary can be executed on all systems. +# portable = ["grumpkin-msm/portable"] +# cuda = ["grumpkin-msm/cuda"] + +[profile.release] +lto =true +codegen-units=1 +panic ="abort" diff --git a/prover/src/bellpepper/mod.rs b/prover/src/bellpepper/mod.rs new file mode 100644 index 0000000..4c19d83 --- /dev/null +++ b/prover/src/bellpepper/mod.rs @@ -0,0 +1,62 @@ +//! Support for generating R1CS from [Bellpepper]. +//! +//! 
[Bellpepper]: https://github.com/argumentcomputer/bellpepper + +pub mod r1cs; +pub mod shape_cs; +pub mod solver; +pub mod test_shape_cs; + +#[cfg(test)] +mod tests { + use bellpepper_core::{num::AllocatedNum, ConstraintSystem}; + use ff::PrimeField; + + use crate::{ + bellpepper::{ + r1cs::{NovaShape, NovaWitness}, + shape_cs::ShapeCS, + solver::SatisfyingAssignment, + }, + provider::Bn256EngineKZG, + traits::{snark::default_ck_hint, Engine}, + }; + + fn synthesize_alloc_bit>(cs: &mut CS) { + // get two bits as input and check that they are indeed bits + let a = AllocatedNum::alloc_infallible(cs.namespace(|| "a"), || Fr::ONE); + let _ = a.inputize(cs.namespace(|| "a is input")); + cs.enforce( + || "check a is 0 or 1", + |lc| lc + CS::one() - a.get_variable(), + |lc| lc + a.get_variable(), + |lc| lc, + ); + let b = AllocatedNum::alloc_infallible(cs.namespace(|| "b"), || Fr::ONE); + let _ = b.inputize(cs.namespace(|| "b is input")); + cs.enforce( + || "check b is 0 or 1", + |lc| lc + CS::one() - b.get_variable(), + |lc| lc + b.get_variable(), + |lc| lc, + ); + } + + fn test_alloc_bit_with() { + // First create the shape + let mut cs: ShapeCS = ShapeCS::new(); + synthesize_alloc_bit(&mut cs); + let (shape, ck) = cs.r1cs_shape_and_key(&*default_ck_hint()); + + // Now get the assignment + let mut cs = SatisfyingAssignment::::new(); + synthesize_alloc_bit(&mut cs); + let (inst, witness) = cs.r1cs_instance_and_witness(&shape, &ck).unwrap(); + + // Make sure that this is satisfiable + shape.is_sat(&ck, &inst, &witness).unwrap(); + } + + #[test] + fn test_alloc_bit() { test_alloc_bit_with::(); } +} diff --git a/prover/src/bellpepper/r1cs.rs b/prover/src/bellpepper/r1cs.rs new file mode 100644 index 0000000..0cbb9e8 --- /dev/null +++ b/prover/src/bellpepper/r1cs.rs @@ -0,0 +1,147 @@ +//! Support for generating R1CS using bellpepper. 
+ +#![allow(non_snake_case)] + +use bellpepper_core::{Index, LinearCombination}; +use ff::PrimeField; + +use super::{shape_cs::ShapeCS, solver::SatisfyingAssignment, test_shape_cs::TestShapeCS}; +use crate::{ + errors::NovaError, + r1cs::{commitment_key, CommitmentKeyHint, R1CSInstance, R1CSShape, R1CSWitness, SparseMatrix}, + traits::Engine, + CommitmentKey, +}; + +/// `NovaWitness` provide a method for acquiring an `R1CSInstance` and +/// `R1CSWitness` from implementers. +pub trait NovaWitness { + /// Return an instance and witness, given a shape and ck. + fn r1cs_instance_and_witness( + self, + shape: &R1CSShape, + ck: &CommitmentKey, + ) -> Result<(R1CSInstance, R1CSWitness), NovaError>; +} + +/// `NovaShape` provides methods for acquiring `R1CSShape` and `CommitmentKey` +/// from implementers. +pub trait NovaShape { + /// Return an appropriate `R1CSShape` and `CommitmentKey` structs. + /// A `CommitmentKeyHint` should be provided to help guide the construction + /// of the `CommitmentKey`. This parameter is documented in + /// `r1cs::R1CS::commitment_key`. + fn r1cs_shape_and_key(&self, ck_hint: &CommitmentKeyHint) -> (R1CSShape, CommitmentKey) { + let S = self.r1cs_shape(); + let ck = commitment_key(&S, ck_hint); + + (S, ck) + } + /// Return an appropriate `R1CSShape`. + fn r1cs_shape(&self) -> R1CSShape; +} + +impl NovaWitness for SatisfyingAssignment { + fn r1cs_instance_and_witness( + self, + shape: &R1CSShape, + ck: &CommitmentKey, + ) -> Result<(R1CSInstance, R1CSWitness), NovaError> { + let (input_assignment, aux_assignment) = self.to_assignments(); + let W = R1CSWitness::::new(shape, aux_assignment)?; + let X = input_assignment[1..].to_owned(); + + let comm_W = W.commit(ck); + + let instance = R1CSInstance::::new(shape, comm_W, X)?; + + Ok((instance, W)) + } +} + +macro_rules! 
impl_nova_shape { + ($name:ident) => { + impl NovaShape for $name + where E::Scalar: PrimeField + { + fn r1cs_shape(&self) -> R1CSShape { + let mut A = SparseMatrix::::empty(); + let mut B = SparseMatrix::::empty(); + let mut C: SparseMatrix<::Scalar> = SparseMatrix::::empty(); + + let mut num_cons_added = 0; + let mut X = (&mut A, &mut B, &mut C, &mut num_cons_added); + let num_inputs = self.num_inputs(); + let num_constraints = self.num_constraints(); + let num_vars = self.num_aux(); + + for constraint in self.constraints.iter() { + add_constraint(&mut X, num_vars, &constraint.0, &constraint.1, &constraint.2); + } + assert_eq!(num_cons_added, num_constraints); + + A.cols = num_vars + num_inputs; + B.cols = num_vars + num_inputs; + C.cols = num_vars + num_inputs; + + // Don't count One as an input for shape's purposes. + let res = R1CSShape::new(num_constraints, num_vars, num_inputs - 1, A, B, C); + res.unwrap() + } + } + }; +} + +impl_nova_shape!(ShapeCS); +impl_nova_shape!(TestShapeCS); + +fn add_constraint( + X: &mut (&mut SparseMatrix, &mut SparseMatrix, &mut SparseMatrix, &mut usize), + num_vars: usize, + a_lc: &LinearCombination, + b_lc: &LinearCombination, + c_lc: &LinearCombination, +) { + let (A, B, C, nn) = X; + let n = **nn; + assert_eq!(n, A.num_rows(), "A: invalid shape"); + assert_eq!(n, B.num_rows(), "B: invalid shape"); + assert_eq!(n, C.num_rows(), "C: invalid shape"); + + let add_constraint_component = |index: Index, coeff: &S, M: &mut SparseMatrix| { + // we add constraints to the matrix only if the associated coefficient is + // non-zero + if *coeff != S::ZERO { + match index { + Index::Input(idx) => { + // Inputs come last, with input 0, representing 'one', + // at position num_vars within the witness vector. 
+ let idx = idx + num_vars; + M.data.push(*coeff); + M.indices.push(idx); + }, + Index::Aux(idx) => { + M.data.push(*coeff); + M.indices.push(idx); + }, + } + } + }; + + for (index, coeff) in a_lc.iter() { + add_constraint_component(index.0, coeff, A); + } + A.indptr.push(A.indices.len()); + + for (index, coeff) in b_lc.iter() { + add_constraint_component(index.0, coeff, B) + } + B.indptr.push(B.indices.len()); + + for (index, coeff) in c_lc.iter() { + add_constraint_component(index.0, coeff, C) + } + C.indptr.push(C.indices.len()); + + **nn += 1; +} diff --git a/prover/src/bellpepper/shape_cs.rs b/prover/src/bellpepper/shape_cs.rs new file mode 100644 index 0000000..9752680 --- /dev/null +++ b/prover/src/bellpepper/shape_cs.rs @@ -0,0 +1,82 @@ +//! Support for generating R1CS shape using bellpepper. + +use bellpepper_core::{ConstraintSystem, Index, LinearCombination, SynthesisError, Variable}; +use ff::PrimeField; + +use crate::traits::Engine; + +/// `ShapeCS` is a `ConstraintSystem` for creating `R1CSShape`s for a circuit. +pub struct ShapeCS +where E::Scalar: PrimeField { + /// All constraints added to the `ShapeCS`. + pub constraints: + Vec<(LinearCombination, LinearCombination, LinearCombination)>, + inputs: usize, + aux: usize, +} + +impl ShapeCS { + /// Create a new, default `ShapeCS`, + pub fn new() -> Self { Self::default() } + + /// Returns the number of constraints defined for this `ShapeCS`. + pub fn num_constraints(&self) -> usize { self.constraints.len() } + + /// Returns the number of inputs defined for this `ShapeCS`. + pub fn num_inputs(&self) -> usize { self.inputs } + + /// Returns the number of aux inputs defined for this `ShapeCS`. 
+ pub fn num_aux(&self) -> usize { self.aux } +} + +impl Default for ShapeCS { + fn default() -> Self { Self { constraints: vec![], inputs: 1, aux: 0 } } +} + +impl ConstraintSystem for ShapeCS { + type Root = Self; + + fn alloc(&mut self, _annotation: A, _f: F) -> Result + where + F: FnOnce() -> Result, + A: FnOnce() -> AR, + AR: Into, { + self.aux += 1; + + Ok(Variable::new_unchecked(Index::Aux(self.aux - 1))) + } + + fn alloc_input(&mut self, _annotation: A, _f: F) -> Result + where + F: FnOnce() -> Result, + A: FnOnce() -> AR, + AR: Into, { + self.inputs += 1; + + Ok(Variable::new_unchecked(Index::Input(self.inputs - 1))) + } + + fn enforce(&mut self, _annotation: A, a: LA, b: LB, c: LC) + where + A: FnOnce() -> AR, + AR: Into, + LA: FnOnce(LinearCombination) -> LinearCombination, + LB: FnOnce(LinearCombination) -> LinearCombination, + LC: FnOnce(LinearCombination) -> LinearCombination, { + let a = a(LinearCombination::zero()); + let b = b(LinearCombination::zero()); + let c = c(LinearCombination::zero()); + + self.constraints.push((a, b, c)); + } + + fn push_namespace(&mut self, _name_fn: N) + where + NR: Into, + N: FnOnce() -> NR, { + } + + fn pop_namespace(&mut self) {} + + fn get_root(&mut self) -> &mut Self::Root { self } +} diff --git a/src/bellpepper/solver.rs b/prover/src/bellpepper/solver.rs similarity index 100% rename from src/bellpepper/solver.rs rename to prover/src/bellpepper/solver.rs diff --git a/prover/src/bellpepper/test_shape_cs.rs b/prover/src/bellpepper/test_shape_cs.rs new file mode 100644 index 0000000..01e5098 --- /dev/null +++ b/prover/src/bellpepper/test_shape_cs.rs @@ -0,0 +1,297 @@ +//! Support for generating R1CS shape using bellpepper. +//! `TestShapeCS` implements a superset of `ShapeCS`, adding non-trivial +//! namespace support for use in testing. 
+ +use core::fmt::Write; +use std::{ + cmp::Ordering, + collections::{BTreeMap, HashMap}, +}; + +use bellpepper_core::{ConstraintSystem, Index, LinearCombination, SynthesisError, Variable}; +use ff::{Field, PrimeField}; + +use crate::traits::Engine; + +#[derive(Clone, Copy)] +struct OrderedVariable(Variable); + +#[allow(dead_code)] +#[derive(Debug)] +enum NamedObject { + Constraint(usize), + Var(Variable), + Namespace, +} + +impl Eq for OrderedVariable {} +impl PartialEq for OrderedVariable { + fn eq(&self, other: &Self) -> bool { + match (self.0.get_unchecked(), other.0.get_unchecked()) { + (Index::Input(ref a), Index::Input(ref b)) | (Index::Aux(ref a), Index::Aux(ref b)) => a == b, + _ => false, + } + } +} +impl PartialOrd for OrderedVariable { + fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } +} +impl Ord for OrderedVariable { + fn cmp(&self, other: &Self) -> Ordering { + match (self.0.get_unchecked(), other.0.get_unchecked()) { + (Index::Input(ref a), Index::Input(ref b)) | (Index::Aux(ref a), Index::Aux(ref b)) => + a.cmp(b), + (Index::Input(_), Index::Aux(_)) => Ordering::Less, + (Index::Aux(_), Index::Input(_)) => Ordering::Greater, + } + } +} + +/// `TestShapeCS` is a `ConstraintSystem` for creating `R1CSShape`s for a +/// circuit. +pub struct TestShapeCS { + named_objects: HashMap, + current_namespace: Vec, + /// All constraints added to the `TestShapeCS`. 
+ pub constraints: Vec<( + LinearCombination, + LinearCombination, + LinearCombination, + String, + )>, + inputs: Vec, + aux: Vec, +} + +fn proc_lc( + terms: &LinearCombination, +) -> BTreeMap { + let mut map = BTreeMap::new(); + for (var, &coeff) in terms.iter() { + map.entry(OrderedVariable(var)).or_insert_with(|| Scalar::ZERO).add_assign(&coeff); + } + + // Remove terms that have a zero coefficient to normalize + let mut to_remove = vec![]; + for (var, coeff) in map.iter() { + if coeff.is_zero().into() { + to_remove.push(*var) + } + } + + for var in to_remove { + map.remove(&var); + } + + map +} + +impl TestShapeCS +where E::Scalar: PrimeField +{ + #[allow(unused)] + /// Create a new, default `TestShapeCS`, + pub fn new() -> Self { Self::default() } + + /// Returns the number of constraints defined for this `TestShapeCS`. + pub fn num_constraints(&self) -> usize { self.constraints.len() } + + /// Returns the number of inputs defined for this `TestShapeCS`. + pub fn num_inputs(&self) -> usize { self.inputs.len() } + + /// Returns the number of aux inputs defined for this `TestShapeCS`. + pub fn num_aux(&self) -> usize { self.aux.len() } + + /// Print all public inputs, aux inputs, and constraint names. + #[allow(dead_code)] + pub fn pretty_print_list(&self) -> Vec { + let mut result = Vec::new(); + + for input in &self.inputs { + result.push(format!("INPUT {input}")); + } + for aux in &self.aux { + result.push(format!("AUX {aux}")); + } + + for (_a, _b, _c, name) in &self.constraints { + result.push(name.to_string()); + } + + result + } + + /// Print all iputs and a detailed representation of each constraint. 
+ #[allow(dead_code)] + pub fn pretty_print(&self) -> String { + let mut s = String::new(); + + for input in &self.inputs { + writeln!(s, "INPUT {}", &input).unwrap() + } + + let negone = -::ONE; + + let powers_of_two = (0..E::Scalar::NUM_BITS) + .map(|i| E::Scalar::from(2u64).pow_vartime([u64::from(i)])) + .collect::>(); + + let pp = |s: &mut String, lc: &LinearCombination| { + s.push('('); + let mut is_first = true; + for (var, coeff) in proc_lc::(lc) { + if coeff == negone { + s.push_str(" - ") + } else if !is_first { + s.push_str(" + ") + } + is_first = false; + + if coeff != ::ONE && coeff != negone { + for (i, x) in powers_of_two.iter().enumerate() { + if x == &coeff { + write!(s, "2^{i} . ").unwrap(); + break; + } + } + + write!(s, "{coeff:?} . ").unwrap() + } + + match var.0.get_unchecked() { + Index::Input(i) => { + write!(s, "`I{}`", &self.inputs[i]).unwrap(); + }, + Index::Aux(i) => { + write!(s, "`A{}`", &self.aux[i]).unwrap(); + }, + } + } + if is_first { + // Nothing was visited, print 0. + s.push('0'); + } + s.push(')'); + }; + + for (a, b, c, name) in &self.constraints { + s.push('\n'); + + write!(s, "{name}: ").unwrap(); + pp(&mut s, a); + write!(s, " * ").unwrap(); + pp(&mut s, b); + s.push_str(" = "); + pp(&mut s, c); + } + + s.push('\n'); + + s + } + + /// Associate `NamedObject` with `path`. + /// `path` must not already have an associated object. 
+ fn set_named_obj(&mut self, path: String, to: NamedObject) { + assert!( + !self.named_objects.contains_key(&path), + "tried to create object at existing path: {path}" + ); + + self.named_objects.insert(path, to); + } +} + +impl Default for TestShapeCS { + fn default() -> Self { + let mut map = HashMap::new(); + map.insert("ONE".into(), NamedObject::Var(Self::one())); + Self { + named_objects: map, + current_namespace: vec![], + constraints: vec![], + inputs: vec![String::from("ONE")], + aux: vec![], + } + } +} + +impl ConstraintSystem for TestShapeCS +where E::Scalar: PrimeField +{ + type Root = Self; + + fn alloc(&mut self, annotation: A, _f: F) -> Result + where + F: FnOnce() -> Result, + A: FnOnce() -> AR, + AR: Into, { + let path = compute_path(&self.current_namespace, &annotation().into()); + self.aux.push(path); + + Ok(Variable::new_unchecked(Index::Aux(self.aux.len() - 1))) + } + + fn alloc_input(&mut self, annotation: A, _f: F) -> Result + where + F: FnOnce() -> Result, + A: FnOnce() -> AR, + AR: Into, { + let path = compute_path(&self.current_namespace, &annotation().into()); + self.inputs.push(path); + + Ok(Variable::new_unchecked(Index::Input(self.inputs.len() - 1))) + } + + fn enforce(&mut self, annotation: A, a: LA, b: LB, c: LC) + where + A: FnOnce() -> AR, + AR: Into, + LA: FnOnce(LinearCombination) -> LinearCombination, + LB: FnOnce(LinearCombination) -> LinearCombination, + LC: FnOnce(LinearCombination) -> LinearCombination, { + let path = compute_path(&self.current_namespace, &annotation().into()); + let index = self.constraints.len(); + self.set_named_obj(path.clone(), NamedObject::Constraint(index)); + + let a = a(LinearCombination::zero()); + let b = b(LinearCombination::zero()); + let c = c(LinearCombination::zero()); + + self.constraints.push((a, b, c, path)); + } + + fn push_namespace(&mut self, name_fn: N) + where + NR: Into, + N: FnOnce() -> NR, { + let name = name_fn().into(); + let path = compute_path(&self.current_namespace, &name); + 
self.set_named_obj(path, NamedObject::Namespace); + self.current_namespace.push(name); + } + + fn pop_namespace(&mut self) { + assert!(self.current_namespace.pop().is_some()); + } + + fn get_root(&mut self) -> &mut Self::Root { self } +} + +fn compute_path(ns: &[String], this: &str) -> String { + assert!(!this.contains('/'), "'/' is not allowed in names"); + + let mut name = String::new(); + + let mut needs_separation = false; + for ns in ns.iter().chain(Some(this.to_string()).iter()) { + if needs_separation { + name += "/"; + } + + name += ns; + needs_separation = true; + } + + name +} diff --git a/prover/src/circuit.rs b/prover/src/circuit.rs new file mode 100644 index 0000000..8976cc6 --- /dev/null +++ b/prover/src/circuit.rs @@ -0,0 +1,496 @@ +//! There are two augmented circuits: the primary and the secondary. +//! Each of them is over a curve in a 2-cycle of elliptic curves. +//! We have two running instances. Each circuit takes as input 2 hashes: one for +//! each of the running instances. Each of these hashes is H(params = H(shape, +//! ck), i, z0, zi, U). Each circuit folds the last invocation of the other into +//! 
the running instance + +use bellpepper::gadgets::{boolean_utils::conditionally_select_slice, Assignment}; +use bellpepper_core::{ + boolean::{AllocatedBit, Boolean}, + num::AllocatedNum, + ConstraintSystem, SynthesisError, +}; +use ff::Field; +use serde::{Deserialize, Serialize}; + +use crate::{ + constants::{NIO_NOVA_FOLD, NUM_FE_WITHOUT_IO_FOR_CRHF, NUM_HASH_BITS}, + gadgets::{ + alloc_num_equals, alloc_scalar_as_base, alloc_zero, le_bits_to_num, AllocatedPoint, + AllocatedR1CSInstance, AllocatedRelaxedR1CSInstance, + }, + r1cs::{R1CSInstance, RelaxedR1CSInstance}, + supernova::StepCircuit, + traits::{commitment::CommitmentTrait, Engine, ROCircuitTrait, ROConstantsCircuit}, + Commitment, +}; + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct NovaAugmentedCircuitParams { + limb_width: usize, + n_limbs: usize, + is_primary_circuit: bool, // A boolean indicating if this is the primary circuit +} + +impl NovaAugmentedCircuitParams { + pub const fn new(limb_width: usize, n_limbs: usize, is_primary_circuit: bool) -> Self { + Self { limb_width, n_limbs, is_primary_circuit } + } +} + +// NOTES: All these options here seem to point towards using a typestate pattern +// or something. 
+ +#[derive(Debug, Serialize)] +#[serde(bound = "")] +pub struct NovaAugmentedCircuitInputs { + params: E::Scalar, + i: E::Base, + z0: Vec, + zi: Option>, + U: Option>, + u: Option>, + T: Option>, +} + +impl NovaAugmentedCircuitInputs { + /// Create new inputs/witness for the verification circuit + pub fn new( + params: E::Scalar, + i: E::Base, + z0: Vec, + zi: Option>, + U: Option>, + u: Option>, + T: Option>, + ) -> Self { + Self { params, i, z0, zi, U, u, T } + } +} + +/// The augmented circuit F' in Nova that includes a step circuit F +/// and the circuit for the verifier in Nova's non-interactive folding scheme +pub struct NovaAugmentedCircuit<'a, E: Engine, SC: StepCircuit> { + params: &'a NovaAugmentedCircuitParams, + ro_consts: ROConstantsCircuit, + inputs: Option>, + step_circuit: &'a SC, // The function that is applied for each step +} + +impl<'a, E: Engine, SC: StepCircuit> NovaAugmentedCircuit<'a, E, SC> { + /// Create a new verification circuit for the input relaxed r1cs instances + pub const fn new( + params: &'a NovaAugmentedCircuitParams, + inputs: Option>, + step_circuit: &'a SC, + ro_consts: ROConstantsCircuit, + ) -> Self { + Self { params, inputs, step_circuit, ro_consts } + } + + /// Allocate all witnesses and return + fn alloc_witness::Base>>( + &self, + mut cs: CS, + arity: usize, + ) -> Result< + ( + AllocatedNum, + AllocatedNum, + Vec>, + Vec>, + AllocatedRelaxedR1CSInstance, + AllocatedR1CSInstance, + AllocatedPoint, + ), + SynthesisError, + > { + // Allocate the params + let params = alloc_scalar_as_base::( + cs.namespace(|| "params"), + self.inputs.as_ref().map(|inputs| inputs.params), + )?; + + // Allocate i + let i = AllocatedNum::alloc(cs.namespace(|| "i"), || Ok(self.inputs.get()?.i))?; + + // Allocate z0 + let z_0 = (0..arity) + .map(|i| { + AllocatedNum::alloc(cs.namespace(|| format!("z0_{i}")), || Ok(self.inputs.get()?.z0[i])) + }) + .collect::>, _>>()?; + + // Allocate zi. 
If inputs.zi is not provided (base case) allocate default value + // 0 + let zero = vec![E::Base::ZERO; arity]; + let z_i = (0..arity) + .map(|i| { + AllocatedNum::alloc(cs.namespace(|| format!("zi_{i}")), || { + Ok(self.inputs.get()?.zi.as_ref().unwrap_or(&zero)[i]) + }) + }) + .collect::>, _>>()?; + + // Allocate the running instance + let U: AllocatedRelaxedR1CSInstance = AllocatedRelaxedR1CSInstance::alloc( + cs.namespace(|| "Allocate U"), + self.inputs.as_ref().and_then(|inputs| inputs.U.as_ref()), + self.params.limb_width, + self.params.n_limbs, + )?; + + // Allocate the instance to be folded in + let u = AllocatedR1CSInstance::alloc( + cs.namespace(|| "allocate instance u to fold"), + self.inputs.as_ref().and_then(|inputs| inputs.u.as_ref()), + )?; + + // Allocate T + let T = AllocatedPoint::alloc( + cs.namespace(|| "allocate T"), + self.inputs.as_ref().and_then(|inputs| inputs.T.map(|T| T.to_coordinates())), + )?; + T.check_on_curve(cs.namespace(|| "check T on curve"))?; + + Ok((params, i, z_0, z_i, U, u, T)) + } + + /// Synthesizes base case and returns the new relaxed `R1CSInstance` + fn synthesize_base_case::Base>>( + &self, + mut cs: CS, + u: AllocatedR1CSInstance, + ) -> Result, SynthesisError> { + let U_default: AllocatedRelaxedR1CSInstance = + if self.params.is_primary_circuit { + // The primary circuit just returns the default R1CS instance + AllocatedRelaxedR1CSInstance::default( + cs.namespace(|| "Allocate U_default"), + self.params.limb_width, + self.params.n_limbs, + )? + } else { + // The secondary circuit returns the incoming R1CS instance + AllocatedRelaxedR1CSInstance::from_r1cs_instance( + cs.namespace(|| "Allocate U_default"), + u, + self.params.limb_width, + self.params.n_limbs, + )? 
+ }; + Ok(U_default) + } + + /// Synthesizes non base case and returns the new relaxed `R1CSInstance` + /// And a boolean indicating if all checks pass + fn synthesize_non_base_case::Base>>( + &self, + mut cs: CS, + params: &AllocatedNum, + i: &AllocatedNum, + z_0: &[AllocatedNum], + z_i: &[AllocatedNum], + U: &AllocatedRelaxedR1CSInstance, + u: &AllocatedR1CSInstance, + T: &AllocatedPoint, + arity: usize, + ) -> Result<(AllocatedRelaxedR1CSInstance, AllocatedBit), SynthesisError> { + // Check that u.x[0] = Hash(params, U, i, z0, zi) + let mut ro = E::ROCircuit::new(self.ro_consts.clone(), NUM_FE_WITHOUT_IO_FOR_CRHF + 2 * arity); + ro.absorb(params); + ro.absorb(i); + for e in z_0 { + ro.absorb(e); + } + for e in z_i { + ro.absorb(e); + } + U.absorb_in_ro(cs.namespace(|| "absorb U"), &mut ro)?; + + let hash_bits = ro.squeeze(cs.namespace(|| "Input hash"), NUM_HASH_BITS)?; + let hash = le_bits_to_num(cs.namespace(|| "bits to hash"), &hash_bits)?; + let check_pass = alloc_num_equals( + cs.namespace(|| "check consistency of u.X[0] with H(params, U, i, z0, zi)"), + &u.X[0], + &hash, + )?; + + // Run NIFS Verifier + let U_fold = U.fold_with_r1cs( + cs.namespace(|| "compute fold of U and u"), + params, + u, + T, + self.ro_consts.clone(), + self.params.limb_width, + self.params.n_limbs, + )?; + + Ok((U_fold, check_pass)) + } +} + +impl<'a, E: Engine, SC: StepCircuit> NovaAugmentedCircuit<'a, E, SC> { + /// synthesize circuit giving constraint system + pub fn synthesize::Base>>( + self, + cs: &mut CS, + ) -> Result>, SynthesisError> { + let arity = self.step_circuit.arity(); + + // Allocate all witnesses + let (params, i, z_0, z_i, U, u, T) = + self.alloc_witness(cs.namespace(|| "allocate the circuit witness"), arity)?; + + // Compute variable indicating if this is the base case + let zero = alloc_zero(cs.namespace(|| "zero")); + let is_base_case = alloc_num_equals(cs.namespace(|| "Check if base case"), &i.clone(), &zero)?; + + // Synthesize the circuit for the base case 
and get the new running instance + let Unew_base = self.synthesize_base_case(cs.namespace(|| "base case"), u.clone())?; + + // Synthesize the circuit for the non-base case and get the new running + // instance along with a boolean indicating if all checks have passed + let (Unew_non_base, check_non_base_pass) = self.synthesize_non_base_case( + cs.namespace(|| "synthesize non base case"), + ¶ms, + &i, + &z_0, + &z_i, + &U, + &u, + &T, + arity, + )?; + + // Either check_non_base_pass=true or we are in the base case + let should_be_false = AllocatedBit::nor( + cs.namespace(|| "check_non_base_pass nor base_case"), + &check_non_base_pass, + &is_base_case, + )?; + cs.enforce( + || "check_non_base_pass nor base_case = false", + |lc| lc + should_be_false.get_variable(), + |lc| lc + CS::one(), + |lc| lc, + ); + + // Compute the U_new + let Unew = Unew_base.conditionally_select( + cs.namespace(|| "compute U_new"), + &Unew_non_base, + &Boolean::from(is_base_case.clone()), + )?; + + // Compute i + 1 + let i_new = + AllocatedNum::alloc(cs.namespace(|| "i + 1"), || Ok(*i.get_value().get()? + E::Base::ONE))?; + cs.enforce( + || "check i + 1", + |lc| lc, + |lc| lc, + |lc| lc + i_new.get_variable() - CS::one() - i.get_variable(), + ); + + // Compute z_{i+1} + let z_input = conditionally_select_slice( + cs.namespace(|| "select input to F"), + &z_0, + &z_i, + &Boolean::from(is_base_case), + )?; + + // TODO: Note, I changed this here because I removed the other `StepCircuit` + // trait. 
+ let (_pc, z_next) = self.step_circuit.synthesize(&mut cs.namespace(|| "F"), None, &z_input)?; + + if z_next.len() != arity { + return Err(SynthesisError::IncompatibleLengthVector("z_next".to_string())); + } + + // Compute the new hash H(params, Unew, i+1, z0, z_{i+1}) + let mut ro = E::ROCircuit::new(self.ro_consts, NUM_FE_WITHOUT_IO_FOR_CRHF + 2 * arity); + ro.absorb(¶ms); + ro.absorb(&i_new); + for e in &z_0 { + ro.absorb(e); + } + for e in &z_next { + ro.absorb(e); + } + Unew.absorb_in_ro(cs.namespace(|| "absorb U_new"), &mut ro)?; + let hash_bits = ro.squeeze(cs.namespace(|| "output hash bits"), NUM_HASH_BITS)?; + let hash = le_bits_to_num(cs.namespace(|| "convert hash to num"), &hash_bits)?; + + // Outputs the computed hash and u.X[1] that corresponds to the hash of the + // other circuit + u.X[1].inputize(cs.namespace(|| "Output unmodified hash of the other circuit"))?; + hash.inputize(cs.namespace(|| "output new hash of this circuit"))?; + + Ok(z_next) + } +} + +// #[cfg(test)] +// mod tests { +// use expect_test::{expect, Expect}; + +// use super::*; +// use crate::{ +// bellpepper::{ +// r1cs::{NovaShape, NovaWitness}, +// solver::SatisfyingAssignment, +// test_shape_cs::TestShapeCS, +// }, +// constants::{BN_LIMB_WIDTH, BN_N_LIMBS}, +// gadgets::scalar_as_base, +// provider::{ +// poseidon::PoseidonConstantsCircuit, Bn256EngineKZG, +// GrumpkinEngine, PallasEngine, Secp256k1Engine, Secq256k1Engine, +// VestaEngine, }, +// traits::{snark::default_ck_hint, CurveCycleEquipped, Dual}, +// }; + +// // In the following we use 1 to refer to the primary, and 2 to refer to +// the // secondary circuit +// fn test_recursive_circuit_with( +// primary_params: &NovaAugmentedCircuitParams, +// secondary_params: &NovaAugmentedCircuitParams, +// ro_consts1: ROConstantsCircuit>, +// ro_consts2: ROConstantsCircuit, +// expected_num_constraints_primary: &Expect, +// expected_num_constraints_secondary: &Expect, +// ) where +// E1: CurveCycleEquipped, +// { +// let tc1 = 
TrivialCircuit::default(); +// // Initialize the shape and ck for the primary +// let circuit1: NovaAugmentedCircuit< +// '_, +// Dual, +// TrivialCircuit< as Engine>::Base>, +// > = NovaAugmentedCircuit::new(primary_params, None, &tc1, +// > ro_consts1.clone()); +// let mut cs: TestShapeCS = TestShapeCS::new(); +// let _ = circuit1.synthesize(&mut cs); +// let (shape1, ck1) = cs.r1cs_shape_and_key(&*default_ck_hint()); + +// expected_num_constraints_primary.assert_eq(&cs.num_constraints(). +// to_string()); + +// let tc2 = TrivialCircuit::default(); +// // Initialize the shape and ck for the secondary +// let circuit2: NovaAugmentedCircuit<'_, E1, TrivialCircuit<::Base>> = NovaAugmentedCircuit::new(secondary_params, +// None, &tc2, ro_consts2.clone()); let mut cs: TestShapeCS> = +// TestShapeCS::new(); let _ = circuit2.synthesize(&mut cs); +// let (shape2, ck2) = cs.r1cs_shape_and_key(&*default_ck_hint()); + +// expected_num_constraints_secondary.assert_eq(&cs.num_constraints(). +// to_string()); + +// // Execute the base case for the primary +// let zero1 = < as Engine>::Base as Field>::ZERO; +// let mut cs1 = SatisfyingAssignment::::new(); +// let inputs1: NovaAugmentedCircuitInputs> = +// NovaAugmentedCircuitInputs::new( scalar_as_base::(zero1), // +// pass zero for testing zero1, +// vec![zero1], +// None, +// None, +// None, +// None, +// ); +// let circuit1: NovaAugmentedCircuit< +// '_, +// Dual, +// TrivialCircuit< as Engine>::Base>, +// > = NovaAugmentedCircuit::new(primary_params, Some(inputs1), &tc1, +// > ro_consts1); +// let _ = circuit1.synthesize(&mut cs1); +// let (inst1, witness1) = cs1.r1cs_instance_and_witness(&shape1, +// &ck1).unwrap(); // Make sure that this is satisfiable +// shape1.is_sat(&ck1, &inst1, &witness1).unwrap(); + +// // Execute the base case for the secondary +// let zero2 = <::Base as Field>::ZERO; +// let mut cs2 = SatisfyingAssignment::>::new(); +// let inputs2: NovaAugmentedCircuitInputs = +// 
NovaAugmentedCircuitInputs::new( +// scalar_as_base::>(zero2), // pass zero for testing +// zero2, vec![zero2], +// None, +// None, +// Some(inst1), +// None, +// ); +// let circuit2: NovaAugmentedCircuit<'_, E1, TrivialCircuit<::Base>> = NovaAugmentedCircuit::new(secondary_params, +// Some(inputs2), &tc2, ro_consts2); let _ = circuit2.synthesize(&mut +// cs2); let (inst2, witness2) = cs2.r1cs_instance_and_witness(&shape2, +// &ck2).unwrap(); // Make sure that it is satisfiable +// shape2.is_sat(&ck2, &inst2, &witness2).unwrap(); +// } + +// #[test] +// fn test_recursive_circuit_pasta() { +// // this test checks against values that must be replicated in +// benchmarks if // changed here +// let params1 = NovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, +// BN_N_LIMBS, true); let params2 = +// NovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, false); +// let ro_consts1: ROConstantsCircuit = +// PoseidonConstantsCircuit::default(); let ro_consts2: +// ROConstantsCircuit = PoseidonConstantsCircuit::default(); + +// test_recursive_circuit_with::( +// ¶ms1, +// ¶ms2, +// ro_consts1, +// ro_consts2, +// &expect!["9817"], +// &expect!["10349"], +// ); +// } + +// #[test] +// fn test_recursive_circuit_bn256_grumpkin() { +// let params1 = NovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, +// BN_N_LIMBS, true); let params2 = +// NovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, false); +// let ro_consts1: ROConstantsCircuit = +// PoseidonConstantsCircuit::default(); let ro_consts2: +// ROConstantsCircuit = PoseidonConstantsCircuit::default(); + +// test_recursive_circuit_with::( +// ¶ms1, +// ¶ms2, +// ro_consts1, +// ro_consts2, +// &expect!["9985"], +// &expect!["10538"], +// ); +// } + +// #[test] +// fn test_recursive_circuit_secpq() { +// let params1 = NovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, +// BN_N_LIMBS, true); let params2 = +// NovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, false); +// let ro_consts1: ROConstantsCircuit = +// 
PoseidonConstantsCircuit::default(); let ro_consts2: +// ROConstantsCircuit = PoseidonConstantsCircuit::default(); + +// test_recursive_circuit_with::( +// ¶ms1, +// ¶ms2, +// ro_consts1, +// ro_consts2, +// &expect!["10264"], +// &expect!["10961"], +// ); +// } +// } diff --git a/src/constants.rs b/prover/src/constants.rs similarity index 100% rename from src/constants.rs rename to prover/src/constants.rs diff --git a/prover/src/cyclefold/circuit.rs b/prover/src/cyclefold/circuit.rs new file mode 100644 index 0000000..9d4e7d3 --- /dev/null +++ b/prover/src/cyclefold/circuit.rs @@ -0,0 +1,256 @@ +//! This module defines Cyclefold circuit + +use bellpepper::gadgets::boolean_utils::conditionally_select; +use bellpepper_core::{ + boolean::{AllocatedBit, Boolean}, + ConstraintSystem, SynthesisError, +}; +use ff::Field; +use neptune::{circuit2::poseidon_hash_allocated, poseidon::PoseidonConstants}; + +use crate::{ + constants::NUM_CHALLENGE_BITS, + gadgets::{alloc_zero, le_bits_to_num, AllocatedPoint}, + traits::{commitment::CommitmentTrait, Engine}, + Commitment, +}; + +/// A structure containing the CycleFold circuit inputs and implementing the +/// synthesize function +pub struct CycleFoldCircuit { + commit_1: Option>, + commit_2: Option>, + scalar: Option<[bool; NUM_CHALLENGE_BITS]>, + poseidon_constants: PoseidonConstants, +} + +impl Default for CycleFoldCircuit { + fn default() -> Self { + let poseidon_constants = PoseidonConstants::new(); + Self { commit_1: None, commit_2: None, scalar: None, poseidon_constants } + } +} +impl CycleFoldCircuit { + /// Create a new CycleFold circuit with the given inputs + pub fn new( + commit_1: Option>, + commit_2: Option>, + scalar: Option<[bool; NUM_CHALLENGE_BITS]>, + ) -> Self { + let poseidon_constants = PoseidonConstants::new(); + Self { commit_1, commit_2, scalar, poseidon_constants } + } + + fn alloc_witness::Base>>( + &self, + mut cs: CS, + ) -> Result< + ( + AllocatedPoint, // commit_1 + AllocatedPoint, // commit_2 + 
Vec, // scalar + ), + SynthesisError, + > { + let commit_1 = AllocatedPoint::alloc( + cs.namespace(|| "allocate C_1"), + self.commit_1.map(|C_1| C_1.to_coordinates()), + )?; + commit_1.check_on_curve(cs.namespace(|| "commit_1 on curve"))?; + + let commit_2 = AllocatedPoint::alloc( + cs.namespace(|| "allocate C_2"), + self.commit_2.map(|C_2| C_2.to_coordinates()), + )?; + commit_2.check_on_curve(cs.namespace(|| "commit_2 on curve"))?; + + let scalar: Vec = self + .scalar + .unwrap_or([false; NUM_CHALLENGE_BITS]) + .into_iter() + .enumerate() + .map(|(idx, bit)| { + AllocatedBit::alloc(cs.namespace(|| format!("scalar bit {idx}")), Some(bit)) + }) + .collect::, _>>()?; + + Ok((commit_1, commit_2, scalar)) + } + + /// Synthesize the CycleFold circuit + pub fn synthesize::Base>>( + &self, + mut cs: CS, + ) -> Result<(), SynthesisError> { + let (C_1, C_2, r) = self.alloc_witness(cs.namespace(|| "allocate circuit witness"))?; + + // Calculate C_final + let r_C_2 = C_2.scalar_mul(cs.namespace(|| "r * C_2"), &r)?; + + let C_final = C_1.add(cs.namespace(|| "C_1 + r * C_2"), &r_C_2)?; + + self.inputize_point(&C_1, cs.namespace(|| "inputize C_1"))?; + self.inputize_point(&C_2, cs.namespace(|| "inputize C_2"))?; + self.inputize_point(&C_final, cs.namespace(|| "inputize C_final"))?; + + let scalar = le_bits_to_num(cs.namespace(|| "get scalar"), &r)?; + + scalar.inputize(cs.namespace(|| "scalar"))?; + + Ok(()) + } + + // Represent the point in the public IO as its 2-ary Poseidon hash + fn inputize_point( + &self, + point: &AllocatedPoint, + mut cs: CS, + ) -> Result<(), SynthesisError> + where + E: Engine, + CS: ConstraintSystem, + { + let (x, y, is_infinity) = point.get_coordinates(); + let preimage = vec![x.clone(), y.clone()]; + let val = + poseidon_hash_allocated(cs.namespace(|| "hash point"), preimage, &self.poseidon_constants)?; + + let zero = alloc_zero(cs.namespace(|| "zero")); + + let is_infinity_bit = AllocatedBit::alloc( + cs.namespace(|| "is_infinity"), + 
Some(is_infinity.get_value().unwrap_or(E::Base::ONE) == E::Base::ONE), + )?; + + cs.enforce( + || "infinity_bit matches", + |lc| lc, + |lc| lc, + |lc| lc + is_infinity_bit.get_variable() - is_infinity.get_variable(), + ); + + // Output 0 when it is the point at infinity + let output = conditionally_select( + cs.namespace(|| "select output"), + &zero, + &val, + &Boolean::from(is_infinity_bit), + )?; + + output.inputize(cs.namespace(|| "inputize hash"))?; + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use expect_test::{expect, Expect}; + use ff::{Field, PrimeField, PrimeFieldBits}; + use neptune::Poseidon; + use rand_core::OsRng; + + use super::*; + use crate::{ + bellpepper::{ + r1cs::{NovaShape, NovaWitness}, + shape_cs::ShapeCS, + solver::SatisfyingAssignment, + }, + constants::NIO_CYCLE_FOLD, + gadgets::scalar_as_base, + provider::Bn256EngineKZG, + traits::{commitment::CommitmentEngineTrait, snark::default_ck_hint, CurveCycleEquipped, Dual}, + }; + + fn test_cyclefold_circuit_size_with(expected_constraints: &Expect, expected_vars: &Expect) + where E1: CurveCycleEquipped { + // Instantiate the circuit with trivial inputs + let circuit: CycleFoldCircuit> = CycleFoldCircuit::default(); + + // Synthesize the R1CS shape + let mut cs: ShapeCS = ShapeCS::new(); + let _ = circuit.synthesize(cs.namespace(|| "synthesizing shape")); + + // Extract the number of constraints and variables + let num_constraints = cs.num_constraints(); + let num_variables = cs.num_aux(); + let num_io = cs.num_inputs(); + + // Check the number of constraints and variables match the expected values + expected_constraints.assert_eq(&num_constraints.to_string()); + expected_vars.assert_eq(&num_variables.to_string()); + assert_eq!(num_io, NIO_CYCLE_FOLD + 1); // includes 1 + } + + #[test] + fn test_cyclefold_circuit_size() { + test_cyclefold_circuit_size_with::(&expect!("2090"), &expect!("2081")); + } + + fn test_cyclefold_circuit_sat_with() { + let rng = OsRng; + + let ck = < as Engine>::CE as 
CommitmentEngineTrait>>::setup(b"test", 5); + + // Generate random vectors to commit to + let v1 = + (0..5).map(|_| < as Engine>::Scalar as Field>::random(rng)).collect::>(); + let v2 = + (0..5).map(|_| < as Engine>::Scalar as Field>::random(rng)).collect::>(); + + // Calculate the random commitments + let C_1 = < as Engine>::CE as CommitmentEngineTrait>>::commit(&ck, &v1); + let C_2 = < as Engine>::CE as CommitmentEngineTrait>>::commit(&ck, &v2); + + // Generate a random scalar + let val: u128 = rand::random(); + let r = < as Engine>::Scalar as PrimeField>::from_u128(val); + let r_bits = r.to_le_bits().into_iter().take(128).collect::>().try_into().unwrap(); + + let circuit: CycleFoldCircuit> = + CycleFoldCircuit::new(Some(C_1), Some(C_2), Some(r_bits)); + + // Calculate the result out of circuit + let native_result = C_1 + C_2 * r; + + // Generate the R1CS shape and commitment key + let mut cs: ShapeCS = ShapeCS::new(); + let _ = circuit.synthesize(cs.namespace(|| "synthesizing shape")); + let (shape, ck) = cs.r1cs_shape_and_key(&*default_ck_hint()); + + // Synthesize the R1CS circuit on the random inputs + let mut cs = SatisfyingAssignment::::new(); + circuit.synthesize(cs.namespace(|| "synthesizing witness")).unwrap(); + + let (instance, witness) = cs.r1cs_instance_and_witness(&shape, &ck).unwrap(); + let X = &instance.X; + + // Helper functio to calculate the hash + let compute_hash = |P: Commitment>| -> E::Scalar { + let (x, y, is_infinity) = P.to_coordinates(); + if is_infinity { + return E::Scalar::ZERO; + } + + let mut hasher = Poseidon::new_with_preimage(&[x, y], &circuit.poseidon_constants); + + hasher.hash() + }; + + // Check the circuit calculates the right thing + let hash_1 = compute_hash(C_1); + assert_eq!(hash_1, X[0]); + let hash_2 = compute_hash(C_2); + assert_eq!(hash_2, X[1]); + let hash_res = compute_hash(native_result); + assert_eq!(hash_res, X[2]); + assert_eq!(r, scalar_as_base::(X[3])); + + // Check the R1CS equation is satisfied + 
shape.is_sat(&ck, &instance, &witness).unwrap(); + } + + #[test] + fn test_cyclefold_circuit_sat() { test_cyclefold_circuit_sat_with::(); } +} diff --git a/prover/src/cyclefold/gadgets.rs b/prover/src/cyclefold/gadgets.rs new file mode 100644 index 0000000..58dafbc --- /dev/null +++ b/prover/src/cyclefold/gadgets.rs @@ -0,0 +1,632 @@ +//! This module defines many of the gadgets needed in the primary folding +//! circuit + +use bellpepper::gadgets::Assignment; +use bellpepper_core::{num::AllocatedNum, ConstraintSystem, SynthesisError}; +use ff::Field; +use itertools::Itertools; + +use super::util::FoldingData; +use crate::{ + constants::{BN_N_LIMBS, NIO_CYCLE_FOLD, NUM_CHALLENGE_BITS}, + gadgets::{ + alloc_bignat_constant, f_to_nat, le_bits_to_num, AllocatedPoint, AllocatedRelaxedR1CSInstance, + BigNat, Num, + }, + r1cs::R1CSInstance, + traits::{commitment::CommitmentTrait, Engine, Group, ROCircuitTrait, ROConstantsCircuit}, +}; + +// An allocated version of the R1CS instance obtained from a single cyclefold +// invocation +pub struct AllocatedCycleFoldInstance { + W: AllocatedPoint, + X: [BigNat; NIO_CYCLE_FOLD], +} + +impl AllocatedCycleFoldInstance { + pub fn alloc>( + mut cs: CS, + inst: Option<&R1CSInstance>, + limb_width: usize, + n_limbs: usize, + ) -> Result { + let W = AllocatedPoint::alloc( + cs.namespace(|| "allocate W"), + inst.map(|u| u.comm_W.to_coordinates()), + )?; + W.check_on_curve(cs.namespace(|| "check W on curve"))?; + + if let Some(inst) = inst { + if inst.X.len() != NIO_CYCLE_FOLD { + return Err(SynthesisError::IncompatibleLengthVector(String::from( + "R1CS instance has wrong arity", + ))); + } + } + + let X: [BigNat; NIO_CYCLE_FOLD] = (0..NIO_CYCLE_FOLD) + .map(|idx| { + BigNat::alloc_from_nat( + cs.namespace(|| format!("allocating IO {idx}")), + || Ok(f_to_nat(inst.map_or(&E::Scalar::ZERO, |inst| &inst.X[idx]))), + limb_width, + n_limbs, + ) + }) + .collect::, _>>()? 
+ .try_into() + .map_err(|err: Vec<_>| { + SynthesisError::IncompatibleLengthVector(format!("{} != {NIO_CYCLE_FOLD}", err.len())) + })?; + + Ok(Self { W, X }) + } + + pub fn absorb_in_ro( + &self, + mut cs: CS, + ro: &mut impl ROCircuitTrait, + ) -> Result<(), SynthesisError> + where + CS: ConstraintSystem, + { + ro.absorb(&self.W.x); + ro.absorb(&self.W.y); + ro.absorb(&self.W.is_infinity); + self.X.iter().enumerate().try_for_each(|(io_idx, x)| -> Result<(), SynthesisError> { + x.as_limbs().iter().enumerate().try_for_each( + |(limb_idx, limb)| -> Result<(), SynthesisError> { + ro.absorb(&limb.as_allocated_num( + cs.namespace(|| format!("convert limb {limb_idx} of X[{io_idx}] to num")), + )?); + Ok(()) + }, + ) + })?; + + Ok(()) + } +} + +/// An circuit allocated version of the `FoldingData` coming from a CycleFold +/// invocation. +pub struct AllocatedCycleFoldData { + pub U: AllocatedRelaxedR1CSInstance, + pub u: AllocatedCycleFoldInstance, + pub T: AllocatedPoint, +} + +impl AllocatedCycleFoldData { + pub fn alloc>( + mut cs: CS, + inst: Option<&FoldingData>, + limb_width: usize, + n_limbs: usize, + ) -> Result { + let U = AllocatedRelaxedR1CSInstance::alloc( + cs.namespace(|| "U"), + inst.map(|x| &x.U), + limb_width, + n_limbs, + )?; + + let u = AllocatedCycleFoldInstance::alloc( + cs.namespace(|| "u"), + inst.map(|x| &x.u), + limb_width, + n_limbs, + )?; + + let T = AllocatedPoint::alloc(cs.namespace(|| "T"), inst.map(|x| x.T.to_coordinates()))?; + T.check_on_curve(cs.namespace(|| "T on curve"))?; + + Ok(Self { U, u, T }) + } + + /// The NIFS verifier which folds the CycleFold instance into a running + /// relaxed R1CS instance. 
+ pub fn apply_fold( + &self, + mut cs: CS, + params: &AllocatedNum, + ro_consts: ROConstantsCircuit, + limb_width: usize, + n_limbs: usize, + ) -> Result, SynthesisError> + where + CS: ConstraintSystem, + { + // Compute r: + let mut ro = E::ROCircuit::new( + ro_consts, + 1 + (3 + 3 + 1 + NIO_CYCLE_FOLD * BN_N_LIMBS) + (3 + NIO_CYCLE_FOLD * BN_N_LIMBS) + 3, /* digest + (U) + (u) + T */ + ); + ro.absorb(params); + + self.U.absorb_in_ro(cs.namespace(|| "absorb cyclefold running instance"), &mut ro)?; + // running instance `U` does not need to absorbed since u.X[0] = Hash(params, U, + // i, z0, zi) + self.u.absorb_in_ro(cs.namespace(|| "absorb cyclefold instance"), &mut ro)?; + + ro.absorb(&self.T.x); + ro.absorb(&self.T.y); + ro.absorb(&self.T.is_infinity); + let r_bits = ro.squeeze(cs.namespace(|| "r bits"), NUM_CHALLENGE_BITS)?; + let r = le_bits_to_num(cs.namespace(|| "r"), &r_bits)?; + + // W_fold = self.W + r * u.W + let rW = self.u.W.scalar_mul(cs.namespace(|| "r * u.W"), &r_bits)?; + let W_fold = self.U.W.add(cs.namespace(|| "self.W + r * u.W"), &rW)?; + + // E_fold = self.E + r * T + let rT = self.T.scalar_mul(cs.namespace(|| "r * T"), &r_bits)?; + let E_fold = self.U.E.add(cs.namespace(|| "self.E + r * T"), &rT)?; + + // u_fold = u_r + r + let u_fold = AllocatedNum::alloc(cs.namespace(|| "u_fold"), || { + Ok(*self.U.u.get_value().get()? + r.get_value().get()?) 
+ })?; + cs.enforce( + || "Check u_fold", + |lc| lc, + |lc| lc, + |lc| lc + u_fold.get_variable() - self.U.u.get_variable() - r.get_variable(), + ); + + // Fold the IO: + // Analyze r into limbs + let r_bn = + BigNat::from_num(cs.namespace(|| "allocate r_bn"), &Num::from(r), limb_width, n_limbs)?; + + // Allocate the order of the non-native field as a constant + let m_bn = alloc_bignat_constant( + cs.namespace(|| "alloc m"), + &E::GE::group_params().2, + limb_width, + n_limbs, + )?; + + let mut X_fold = vec![]; + + // Calculate the + for (idx, (X, x)) in self.U.X.iter().zip_eq(self.u.X.iter()).enumerate() { + let (_, r) = x.mult_mod(cs.namespace(|| format!("r*u.X[{idx}]")), &r_bn, &m_bn)?; + let r_new = X.add(&r)?; + let X_i_fold = r_new.red_mod(cs.namespace(|| format!("reduce folded X[{idx}]")), &m_bn)?; + X_fold.push(X_i_fold); + } + + let X_fold = X_fold.try_into().map_err(|err: Vec<_>| { + SynthesisError::IncompatibleLengthVector(format!("{} != {NIO_CYCLE_FOLD}", err.len())) + })?; + + Ok(AllocatedRelaxedR1CSInstance { W: W_fold, E: E_fold, u: u_fold, X: X_fold }) + } +} + +pub mod emulated { + use bellpepper::gadgets::{boolean_utils::conditionally_select, Assignment}; + use bellpepper_core::{ + boolean::{AllocatedBit, Boolean}, + num::AllocatedNum, + ConstraintSystem, SynthesisError, + }; + use ff::Field; + + use super::FoldingData; + use crate::{ + constants::{NUM_CHALLENGE_BITS, NUM_FE_IN_EMULATED_POINT}, + gadgets::{ + alloc_bignat_constant, alloc_zero, conditionally_select_allocated_bit, + conditionally_select_bignat, f_to_nat, le_bits_to_num, BigNat, + }, + traits::{commitment::CommitmentTrait, Engine, Group, ROCircuitTrait, ROConstantsCircuit}, + RelaxedR1CSInstance, + }; + + /// An allocated version of a curve point from the non-native curve + #[derive(Clone)] + pub struct AllocatedEmulPoint + where G: Group { + pub x: BigNat, + pub y: BigNat, + pub is_infinity: AllocatedBit, + } + + impl AllocatedEmulPoint + where G: Group + { + pub fn alloc( + mut cs: 
CS, + coords: Option<(G::Scalar, G::Scalar, bool)>, + limb_width: usize, + n_limbs: usize, + ) -> Result + where + CS: ConstraintSystem<::Base>, + { + let x = BigNat::alloc_from_nat( + cs.namespace(|| "x"), + || Ok(f_to_nat(&coords.map_or(::ZERO, |val| val.0))), + limb_width, + n_limbs, + )?; + + let y = BigNat::alloc_from_nat( + cs.namespace(|| "y"), + || Ok(f_to_nat(&coords.map_or(::ZERO, |val| val.1))), + limb_width, + n_limbs, + )?; + + let is_infinity = AllocatedBit::alloc( + cs.namespace(|| "alloc is_infinity"), + coords.map_or(Some(true), |(_, _, is_infinity)| Some(is_infinity)), + )?; + + Ok(Self { x, y, is_infinity }) + } + + pub fn absorb_in_ro( + &self, + mut cs: CS, + ro: &mut impl ROCircuitTrait, + ) -> Result<(), SynthesisError> + where + CS: ConstraintSystem, + { + let x_bn = self + .x + .as_limbs() + .iter() + .enumerate() + .map(|(i, limb)| { + limb.as_allocated_num(cs.namespace(|| format!("convert limb {i} of x to num"))) + }) + .collect::>, _>>()?; + + for limb in x_bn { + ro.absorb(&limb) + } + + let y_bn = self + .y + .as_limbs() + .iter() + .enumerate() + .map(|(i, limb)| { + limb.as_allocated_num(cs.namespace(|| format!("convert limb {i} of y to num"))) + }) + .collect::>, _>>()?; + + for limb in y_bn { + ro.absorb(&limb) + } + + let is_infinity_num: AllocatedNum = + AllocatedNum::alloc(cs.namespace(|| "is_infinity"), || { + self.is_infinity.get_value().map_or(Err(SynthesisError::AssignmentMissing), |bit| { + if bit { + Ok(G::Base::ONE) + } else { + Ok(G::Base::ZERO) + } + }) + })?; + + cs.enforce( + || "constrain num equals bit", + |lc| lc, + |lc| lc, + |lc| lc + is_infinity_num.get_variable() - self.is_infinity.get_variable(), + ); + + ro.absorb(&is_infinity_num); + + Ok(()) + } + + fn conditionally_select>( + &self, + mut cs: CS, + other: &Self, + condition: &Boolean, + ) -> Result { + let x = conditionally_select_bignat( + cs.namespace(|| "x = cond ? 
self.x : other.x"), + &self.x, + &other.x, + condition, + )?; + + let y = conditionally_select_bignat( + cs.namespace(|| "y = cond ? self.y : other.y"), + &self.y, + &other.y, + condition, + )?; + + let is_infinity = conditionally_select_allocated_bit( + cs.namespace(|| "is_infinity = cond ? self.is_infinity : other.is_infinity"), + &self.is_infinity, + &other.is_infinity, + condition, + )?; + + Ok(Self { x, y, is_infinity }) + } + + pub fn default>( + mut cs: CS, + limb_width: usize, + n_limbs: usize, + ) -> Result { + let x = alloc_bignat_constant( + cs.namespace(|| "allocate x_default = 0"), + &f_to_nat(&G::Base::ZERO), + limb_width, + n_limbs, + )?; + let y = alloc_bignat_constant( + cs.namespace(|| "allocate y_default = 0"), + &f_to_nat(&G::Base::ZERO), + limb_width, + n_limbs, + )?; + + let is_infinity = AllocatedBit::alloc(cs.namespace(|| "allocate is_infinity"), Some(true))?; + cs.enforce( + || "is_infinity = 1", + |lc| lc, + |lc| lc, + |lc| lc + CS::one() - is_infinity.get_variable(), + ); + + Ok(Self { x, y, is_infinity }) + } + } + + /// A non-native circuit version of a `RelaxedR1CSInstance`. 
This is used + /// for the in-circuit representation of the primary running instance + pub struct AllocatedEmulRelaxedR1CSInstance { + pub comm_W: AllocatedEmulPoint, + pub comm_E: AllocatedEmulPoint, + u: AllocatedNum, + x0: AllocatedNum, + x1: AllocatedNum, + } + + impl AllocatedEmulRelaxedR1CSInstance + where E: Engine + { + pub fn alloc>( + mut cs: CS, + inst: Option<&RelaxedR1CSInstance>, + limb_width: usize, + n_limbs: usize, + ) -> Result + where + CS: ConstraintSystem<::Base>, + { + let comm_W = AllocatedEmulPoint::alloc( + cs.namespace(|| "allocate comm_W"), + inst.map(|x| x.comm_W.to_coordinates()), + limb_width, + n_limbs, + )?; + + let comm_E = AllocatedEmulPoint::alloc( + cs.namespace(|| "allocate comm_E"), + inst.map(|x| x.comm_E.to_coordinates()), + limb_width, + n_limbs, + )?; + + let u = AllocatedNum::alloc(cs.namespace(|| "allocate u"), || { + inst.map_or(Ok(E::Base::ZERO), |inst| Ok(inst.u)) + })?; + + let x0 = AllocatedNum::alloc(cs.namespace(|| "allocate x0"), || { + inst.map_or(Ok(E::Base::ZERO), |inst| Ok(inst.X[0])) + })?; + + let x1 = AllocatedNum::alloc(cs.namespace(|| "allocate x1"), || { + inst.map_or(Ok(E::Base::ZERO), |inst| Ok(inst.X[1])) + })?; + + Ok(Self { comm_W, comm_E, u, x0, x1 }) + } + + /// Performs a folding of a primary R1CS instance (`u_W`, `u_x0`, + /// `u_x1`) into a running `AllocatedEmulRelaxedR1CSInstance` + /// As the curve operations are performed in the CycleFold circuit and + /// provided to the primary circuit as non-deterministic advice, + /// this folding simply sets those values as the new witness and + /// error vector commitments. 
+ pub fn fold_with_r1cs::Base>>( + &self, + mut cs: CS, + pp_digest: &AllocatedNum, + W_new: AllocatedEmulPoint, + E_new: AllocatedEmulPoint, + u_W: &AllocatedEmulPoint, + u_x0: &AllocatedNum, + u_x1: &AllocatedNum, + comm_T: &AllocatedEmulPoint, + ro_consts: ROConstantsCircuit, + ) -> Result { + let mut ro = E::ROCircuit::new( + ro_consts, + 1 + NUM_FE_IN_EMULATED_POINT + 2 + NUM_FE_IN_EMULATED_POINT, /* pp_digest + u.W + u.x + + * comm_T */ + ); + ro.absorb(pp_digest); + + // Absorb u + // Absorb the witness + u_W.absorb_in_ro(cs.namespace(|| "absorb u_W"), &mut ro)?; + // Absorb public IO + ro.absorb(u_x0); + ro.absorb(u_x1); + + // Absorb comm_T + comm_T.absorb_in_ro(cs.namespace(|| "absorb comm_T"), &mut ro)?; + + let r_bits = ro.squeeze(cs.namespace(|| "r bits"), NUM_CHALLENGE_BITS)?; + let r = le_bits_to_num(cs.namespace(|| "r"), &r_bits)?; + + let u_fold = self.u.add(cs.namespace(|| "u_fold = u + r"), &r)?; + let x0_fold = AllocatedNum::alloc(cs.namespace(|| "x0"), || { + Ok(*self.x0.get_value().get()? + *r.get_value().get()? * *u_x0.get_value().get()?) + })?; + cs.enforce( + || "x0_fold = x0 + r * u_x0", + |lc| lc + r.get_variable(), + |lc| lc + u_x0.get_variable(), + |lc| lc + x0_fold.get_variable() - self.x0.get_variable(), + ); + + let x1_fold = AllocatedNum::alloc(cs.namespace(|| "x1"), || { + Ok(*self.x1.get_value().get()? + *r.get_value().get()? * *u_x1.get_value().get()?) 
+ })?; + cs.enforce( + || "x1_fold = x1 + r * u_x1", + |lc| lc + r.get_variable(), + |lc| lc + u_x1.get_variable(), + |lc| lc + x1_fold.get_variable() - self.x1.get_variable(), + ); + + Ok(Self { comm_W: W_new, comm_E: E_new, u: u_fold, x0: x0_fold, x1: x1_fold }) + } + + pub fn absorb_in_ro( + &self, + mut cs: CS, + ro: &mut impl ROCircuitTrait, + ) -> Result<(), SynthesisError> + where + CS: ConstraintSystem<::Base>, + { + self.comm_W.absorb_in_ro(cs.namespace(|| "absorb comm_W"), ro)?; + self.comm_E.absorb_in_ro(cs.namespace(|| "absorb comm_E"), ro)?; + + ro.absorb(&self.u); + ro.absorb(&self.x0); + ro.absorb(&self.x1); + + Ok(()) + } + + pub fn conditionally_select::Base>>( + &self, + mut cs: CS, + other: &Self, + condition: &Boolean, + ) -> Result { + let comm_W = self.comm_W.conditionally_select( + cs.namespace(|| "comm_W = cond ? self.comm_W : other.comm_W"), + &other.comm_W, + condition, + )?; + + let comm_E = self.comm_E.conditionally_select( + cs.namespace(|| "comm_E = cond? self.comm_E : other.comm_E"), + &other.comm_E, + condition, + )?; + + let u = conditionally_select( + cs.namespace(|| "u = cond ? self.u : other.u"), + &self.u, + &other.u, + condition, + )?; + + let x0 = conditionally_select( + cs.namespace(|| "x0 = cond ? self.x0 : other.x0"), + &self.x0, + &other.x0, + condition, + )?; + + let x1 = conditionally_select( + cs.namespace(|| "x1 = cond ? self.x1 : other.x1"), + &self.x1, + &other.x1, + condition, + )?; + + Ok(Self { comm_W, comm_E, u, x0, x1 }) + } + + pub fn default::Base>>( + mut cs: CS, + limb_width: usize, + n_limbs: usize, + ) -> Result { + let comm_W = + AllocatedEmulPoint::default(cs.namespace(|| "default comm_W"), limb_width, n_limbs)?; + let comm_E = comm_W.clone(); + + let u = alloc_zero(cs.namespace(|| "u = 0")); + + let x0 = u.clone(); + let x1 = u.clone(); + + Ok(Self { comm_W, comm_E, u, x0, x1 }) + } + } + + /// The in-circuit representation of the primary folding data. 
+ pub struct AllocatedFoldingData { + pub U: AllocatedEmulRelaxedR1CSInstance, + pub u_W: AllocatedEmulPoint, + pub u_x0: AllocatedNum, + pub u_x1: AllocatedNum, + pub T: AllocatedEmulPoint, + } + + impl AllocatedFoldingData { + pub fn alloc>( + mut cs: CS, + inst: Option<&FoldingData>, + limb_width: usize, + n_limbs: usize, + ) -> Result + where + CS: ConstraintSystem<::Base>, + { + let U = AllocatedEmulRelaxedR1CSInstance::alloc( + cs.namespace(|| "allocate U"), + inst.map(|x| &x.U), + limb_width, + n_limbs, + )?; + + let u_W = AllocatedEmulPoint::alloc( + cs.namespace(|| "allocate u_W"), + inst.map(|x| x.u.comm_W.to_coordinates()), + limb_width, + n_limbs, + )?; + + let u_x0 = AllocatedNum::alloc(cs.namespace(|| "allocate u_x0"), || { + inst.map_or(Ok(E::Base::ZERO), |inst| Ok(inst.u.X[0])) + })?; + + let u_x1 = AllocatedNum::alloc(cs.namespace(|| "allocate u_x1"), || { + inst.map_or(Ok(E::Base::ZERO), |inst| Ok(inst.u.X[1])) + })?; + + let T = AllocatedEmulPoint::alloc( + cs.namespace(|| "allocate T"), + inst.map(|x| x.T.to_coordinates()), + limb_width, + n_limbs, + )?; + + Ok(Self { U, u_W, u_x0, u_x1, T }) + } + } +} diff --git a/src/cyclefold/mod.rs b/prover/src/cyclefold/mod.rs similarity index 100% rename from src/cyclefold/mod.rs rename to prover/src/cyclefold/mod.rs diff --git a/prover/src/cyclefold/nifs.rs b/prover/src/cyclefold/nifs.rs new file mode 100644 index 0000000..95e008c --- /dev/null +++ b/prover/src/cyclefold/nifs.rs @@ -0,0 +1,141 @@ +//! 
This module defines the needed wrong-field NIFS prover + +use std::marker::PhantomData; + +use super::util::{absorb_cyclefold_r1cs, absorb_primary_commitment, absorb_primary_r1cs}; +use crate::{ + constants::{NIO_CYCLE_FOLD, NUM_CHALLENGE_BITS, NUM_FE_IN_EMULATED_POINT}, + errors::NovaError, + gadgets::scalar_as_base, + r1cs::{R1CSInstance, R1CSShape, R1CSWitness, RelaxedR1CSInstance, RelaxedR1CSWitness}, + traits::{commitment::CommitmentTrait, AbsorbInROTrait, Engine, ROConstants, ROTrait}, + CommitmentKey, CompressedCommitment, +}; + +/// A SNARK that holds the proof of a step of an incremental computation of the +/// primary circuit in the CycleFold folding scheme. +/// The difference of this folding scheme from the Nova NIFS in `src/nifs.rs` is +/// that this +#[derive(Debug)] +pub struct PrimaryNIFS +where + E1: Engine::Scalar>, + E2: Engine::Scalar>, { + pub(crate) comm_T: CompressedCommitment, + _p: PhantomData, +} + +impl PrimaryNIFS +where + E1: Engine::Scalar>, + E2: Engine::Scalar>, +{ + /// Takes a relaxed R1CS instance-witness pair (U1, W1) and an R1CS + /// instance-witness pair (U2, W2) and folds them into a new relaxed + /// R1CS instance-witness pair (U, W) and a commitment to the cross term + /// T. It also provides the challenge r used to fold the instances. 
+ pub fn prove( + ck: &CommitmentKey, + ro_consts: &ROConstants, + pp_digest: &E1::Scalar, + S: &R1CSShape, + U1: &RelaxedR1CSInstance, + W1: &RelaxedR1CSWitness, + U2: &R1CSInstance, + W2: &R1CSWitness, + ) -> Result<(Self, (RelaxedR1CSInstance, RelaxedR1CSWitness), E1::Scalar), NovaError> { + let arity = U1.X.len(); + + if arity != U2.X.len() { + return Err(NovaError::InvalidInputLength); + } + + let mut ro = E2::RO::new( + ro_consts.clone(), + 1 + NUM_FE_IN_EMULATED_POINT + arity + NUM_FE_IN_EMULATED_POINT, /* pp_digest + u.W + * + u.X + T */ + ); + + ro.absorb(*pp_digest); + + absorb_primary_r1cs::(U2, &mut ro); + + let (T, comm_T) = S.commit_T(ck, U1, W1, U2, W2)?; + + absorb_primary_commitment::(&comm_T, &mut ro); + + let r = scalar_as_base::(ro.squeeze(NUM_CHALLENGE_BITS)); + + let U = U1.fold(U2, &comm_T, &r); + + let W = W1.fold(W2, &T, &r)?; + + Ok((Self { comm_T: comm_T.compress(), _p: PhantomData }, (U, W), r)) + } +} + +/// A SNARK that holds the proof of a step of an incremental computation of the +/// CycleFold circuit The difference of this folding scheme from the Nova NIFS +/// in `src/nifs.rs` is that this folding prover and verifier must fold in the +/// `RelaxedR1CSInstance` accumulator because the optimization in the +#[derive(Debug)] +pub struct CycleFoldNIFS { + pub(crate) comm_T: CompressedCommitment, +} + +impl CycleFoldNIFS { + /// Folds an R1CS instance/witness pair (U2, W2) into a relaxed R1CS + /// instance/witness (U1, W1) returning the new folded accumulator and a + /// commitment to the cross-term. 
+ pub fn prove( + ck: &CommitmentKey, + ro_consts: &ROConstants, + pp_digest: &E::Scalar, + S: &R1CSShape, + U1: &RelaxedR1CSInstance, + W1: &RelaxedR1CSWitness, + U2: &R1CSInstance, + W2: &R1CSWitness, + ) -> Result<(Self, (RelaxedR1CSInstance, RelaxedR1CSWitness)), NovaError> { + // Check `U1` and `U2` have the same arity + if U2.X.len() != NIO_CYCLE_FOLD || U1.X.len() != NIO_CYCLE_FOLD { + return Err(NovaError::InvalidInputLength); + } + + // initialize a new RO + let mut ro = E::RO::new( + ro_consts.clone(), + 46, /* 1 + (3 + 3 + 1 + NIO_CYCLE_FOLD * BN_N_LIMBS) + (3 + NIO_CYCLE_FOLD * + * BN_N_LIMBS) + 3, // digest + (U) + (u) + T */ + ); + + // append the digest of pp to the transcript + ro.absorb(scalar_as_base::(*pp_digest)); + + // append U1 to the transcript. + // NOTE: this must be here because the IO for `U2` does not have the data of the + // hash of `U1` + U1.absorb_in_ro(&mut ro); + + // append U2 to transcript + absorb_cyclefold_r1cs(U2, &mut ro); + + // compute a commitment to the cross-term + let (T, comm_T) = S.commit_T(ck, U1, W1, U2, W2)?; + + // append `comm_T` to the transcript and obtain a challenge + comm_T.absorb_in_ro(&mut ro); + + // compute a challenge from the RO + let r = ro.squeeze(NUM_CHALLENGE_BITS); + + // fold the instance using `r` and `comm_T` + let U = U1.fold(U2, &comm_T, &r); + + // fold the witness using `r` and `T` + let W = W1.fold(W2, &T, &r)?; + + // return the folded instance and witness + Ok((Self { comm_T: comm_T.compress() }, (U, W))) + } +} diff --git a/prover/src/cyclefold/nova_circuit.rs b/prover/src/cyclefold/nova_circuit.rs new file mode 100644 index 0000000..5ca44ce --- /dev/null +++ b/prover/src/cyclefold/nova_circuit.rs @@ -0,0 +1,510 @@ +//! 
This module defines the Nova augmented circuit used for Cyclefold + +use bellpepper::gadgets::{ + boolean::Boolean, boolean_utils::conditionally_select_slice, num::AllocatedNum, Assignment, +}; +use bellpepper_core::{boolean::AllocatedBit, ConstraintSystem, SynthesisError}; +use ff::Field; +use serde::{Deserialize, Serialize}; + +use super::{ + gadgets::{emulated, AllocatedCycleFoldData}, + util::FoldingData, +}; +use crate::{ + constants::{BN_N_LIMBS, NIO_CYCLE_FOLD, NUM_FE_IN_EMULATED_POINT, NUM_HASH_BITS}, + gadgets::{ + alloc_num_equals, alloc_scalar_as_base, alloc_zero, le_bits_to_num, + AllocatedRelaxedR1CSInstance, + }, + supernova::StepCircuit, + traits::{commitment::CommitmentTrait, Engine, ROCircuitTrait, ROConstantsCircuit}, + Commitment, +}; + +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct AugmentedCircuitParams { + limb_width: usize, + n_limbs: usize, +} + +impl AugmentedCircuitParams { + pub const fn new(limb_width: usize, n_limbs: usize) -> Self { Self { limb_width, n_limbs } } +} + +#[derive(Debug, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct AugmentedCircuitInputs +where + E1: Engine::Scalar>, + E2: Engine::Scalar>, { + pp_digest: E1::Scalar, + i: E1::Base, + z0: Vec, + + zi: Option>, + data_p: Option>, + + data_c_1: Option>, + data_c_2: Option>, + + E_new: Option>, + W_new: Option>, +} + +impl AugmentedCircuitInputs +where + E1: Engine::Scalar>, + E2: Engine::Scalar>, +{ + pub fn new( + pp_digest: E1::Scalar, + i: E1::Base, + z0: Vec, + zi: Option>, + data_p: Option>, + data_c_1: Option>, + data_c_2: Option>, + E_new: Option>, + W_new: Option>, + ) -> Self { + Self { pp_digest, i, z0, zi, data_p, data_c_1, data_c_2, E_new, W_new } + } +} +pub struct AugmentedCircuit<'a, E1, E2, SC> +where + E1: Engine::Scalar>, + E2: Engine::Scalar>, + SC: StepCircuit, { + params: &'a AugmentedCircuitParams, + ro_consts: ROConstantsCircuit, + inputs: Option>, + step_circuit: &'a SC, +} + +impl<'a, E1, E2, SC> 
AugmentedCircuit<'a, E1, E2, SC> +where + E1: Engine::Scalar>, + E2: Engine::Scalar>, + SC: StepCircuit, +{ + pub const fn new( + params: &'a AugmentedCircuitParams, + ro_consts: ROConstantsCircuit, + inputs: Option>, + step_circuit: &'a SC, + ) -> Self { + Self { params, ro_consts, inputs, step_circuit } + } + + fn alloc_witness::Base>>( + &self, + mut cs: CS, + arity: usize, + ) -> Result< + ( + AllocatedNum, // pp_digest + AllocatedNum, // i + Vec>, // z0 + Vec>, // zi + emulated::AllocatedFoldingData, // data_p + AllocatedCycleFoldData, // data_c_1 + AllocatedCycleFoldData, // data_c_2 + emulated::AllocatedEmulPoint, // E_new + emulated::AllocatedEmulPoint, // W_new + ), + SynthesisError, + > { + let pp_digest = alloc_scalar_as_base::( + cs.namespace(|| "params"), + self.inputs.as_ref().map(|inputs| inputs.pp_digest), + )?; + + let i = AllocatedNum::alloc(cs.namespace(|| "i"), || Ok(self.inputs.get()?.i))?; + + let z_0 = (0..arity) + .map(|i| { + AllocatedNum::alloc(cs.namespace(|| format!("z0_{i}")), || Ok(self.inputs.get()?.z0[i])) + }) + .collect::>, _>>()?; + + // Allocate zi. 
If inputs.zi is not provided (base case) allocate default value + // 0 + let zero = vec![E1::Base::ZERO; arity]; + let z_i = (0..arity) + .map(|i| { + AllocatedNum::alloc(cs.namespace(|| format!("zi_{i}")), || { + Ok(self.inputs.get()?.zi.as_ref().unwrap_or(&zero)[i]) + }) + }) + .collect::>, _>>()?; + + let data_p = emulated::AllocatedFoldingData::alloc( + cs.namespace(|| "data_p"), + self.inputs.as_ref().and_then(|inputs| inputs.data_p.as_ref()), + self.params.limb_width, + self.params.n_limbs, + )?; + + let data_c_1 = AllocatedCycleFoldData::alloc( + cs.namespace(|| "data_c_1"), + self.inputs.as_ref().and_then(|inputs| inputs.data_c_1.as_ref()), + self.params.limb_width, + self.params.n_limbs, + )?; + + let data_c_2 = AllocatedCycleFoldData::alloc( + cs.namespace(|| "data_c_2"), + self.inputs.as_ref().and_then(|inputs| inputs.data_c_2.as_ref()), + self.params.limb_width, + self.params.n_limbs, + )?; + + let E_new = emulated::AllocatedEmulPoint::alloc( + cs.namespace(|| "E_new"), + self + .inputs + .as_ref() + .and_then(|inputs| inputs.E_new.as_ref()) + .map(|E_new| E_new.to_coordinates()), + self.params.limb_width, + self.params.n_limbs, + )?; + + let W_new = emulated::AllocatedEmulPoint::alloc( + cs.namespace(|| "W_new"), + self + .inputs + .as_ref() + .and_then(|inputs| inputs.W_new.as_ref()) + .map(|W_new| W_new.to_coordinates()), + self.params.limb_width, + self.params.n_limbs, + )?; + + Ok((pp_digest, i, z_0, z_i, data_p, data_c_1, data_c_2, E_new, W_new)) + } + + pub fn synthesize_base_case::Base>>( + &self, + mut cs: CS, + ) -> Result< + ( + AllocatedRelaxedR1CSInstance, + emulated::AllocatedEmulRelaxedR1CSInstance, + ), + SynthesisError, + > { + let U_c_default = AllocatedRelaxedR1CSInstance::default( + cs.namespace(|| "Allocate U_c_default"), + self.params.limb_width, + self.params.n_limbs, + )?; + + let U_p_default = emulated::AllocatedEmulRelaxedR1CSInstance::default( + cs.namespace(|| "Allocated U_p_default"), + self.params.limb_width, + 
self.params.n_limbs, + )?; + + // In the first folding step return the default relaxed instances for both the + // CycleFold and primary running accumulators + Ok((U_c_default, U_p_default)) + } + + pub fn synthesize_non_base_case::Base>>( + &self, + mut cs: CS, + pp_digest: &AllocatedNum, + i: &AllocatedNum, + z_0: &[AllocatedNum], + z_i: &[AllocatedNum], + data_p: &emulated::AllocatedFoldingData, + data_c_1: &AllocatedCycleFoldData, + data_c_2: &AllocatedCycleFoldData, + E_new: emulated::AllocatedEmulPoint, + W_new: emulated::AllocatedEmulPoint, + arity: usize, + ) -> Result< + ( + AllocatedRelaxedR1CSInstance, + emulated::AllocatedEmulRelaxedR1CSInstance, + AllocatedBit, + ), + SynthesisError, + > { + // Follows the outline written down here https://hackmd.io/@argumentcomputer/HybHrnNFT + + // Calculate the hash of the non-deterministic advice for the primary circuit + let mut ro_p = + E1::ROCircuit::new(self.ro_consts.clone(), 2 + 2 * arity + 2 * NUM_FE_IN_EMULATED_POINT + 3); + + ro_p.absorb(pp_digest); + ro_p.absorb(i); + for e in z_0 { + ro_p.absorb(e) + } + for e in z_i { + ro_p.absorb(e) + } + data_p.U.absorb_in_ro(cs.namespace(|| "absorb U_p"), &mut ro_p)?; + + let hash_bits_p = ro_p.squeeze(cs.namespace(|| "primary hash bits"), NUM_HASH_BITS)?; + let hash_p = le_bits_to_num(cs.namespace(|| "primary hash"), &hash_bits_p)?; + + // check the hash matches the public IO from the last primary instance + let check_primary = alloc_num_equals( + cs.namespace(|| "u.X[0] = H(params, i, z0, zi, U_p)"), + &data_p.u_x0, + &hash_p, + )?; + + // Calculate the hash of the non-dterministic advice for the secondary circuit + let mut ro_c = E1::ROCircuit::new( + self.ro_consts.clone(), + 1 + 1 + 3 + 3 + 1 + NIO_CYCLE_FOLD * BN_N_LIMBS, // pp + i + W + E + u + X + ); + + ro_c.absorb(pp_digest); + ro_c.absorb(i); + data_c_1.U.absorb_in_ro(cs.namespace(|| "absorb U_c"), &mut ro_c)?; + let hash_c_bits = ro_c.squeeze(cs.namespace(|| "cyclefold hash bits"), NUM_HASH_BITS)?; + let 
hash_c = le_bits_to_num(cs.namespace(|| "cyclefold hash"), &hash_c_bits)?; + + // check the hash matches the public IO from the last primary instance + let check_cyclefold = + alloc_num_equals(cs.namespace(|| "u.X[1] = H(params, U_c)"), &data_p.u_x1, &hash_c)?; + + let check_io = + AllocatedBit::and(cs.namespace(|| "both IOs match"), &check_primary, &check_cyclefold)?; + + // Run NIVC.V on U_c, u_c_1, T_c_1 + let U_int = data_c_1.apply_fold( + cs.namespace(|| "fold u_c_1 into U_c"), + pp_digest, + self.ro_consts.clone(), + self.params.limb_width, + self.params.n_limbs, + )?; + + // Calculate h_int = H(pp, U_c_int) + let mut ro_c_int = E1::ROCircuit::new( + self.ro_consts.clone(), + 1 + 3 + 3 + 1 + NIO_CYCLE_FOLD * BN_N_LIMBS, // pp + W + E + u + X + ); + ro_c_int.absorb(pp_digest); + U_int.absorb_in_ro(cs.namespace(|| "absorb U_c_int"), &mut ro_c_int)?; + let h_c_int_bits = + ro_c_int.squeeze(cs.namespace(|| "intermediate hash bits"), NUM_HASH_BITS)?; + let h_c_int = le_bits_to_num(cs.namespace(|| "intermediate hash"), &h_c_int_bits)?; + + // Calculate h_1 = H(pp, U_c_1) + let mut ro_c_1 = E1::ROCircuit::new( + self.ro_consts.clone(), + 1 + 3 + 3 + 1 + NIO_CYCLE_FOLD * BN_N_LIMBS, // pp + W + E + u + X + ); + + ro_c_1.absorb(pp_digest); + data_c_2.U.absorb_in_ro(cs.namespace(|| "absorb U_c_1"), &mut ro_c_1)?; + let h_c_1_bits = ro_c_1.squeeze(cs.namespace(|| "cyclefold_1 hash bits"), NUM_HASH_BITS)?; + let h_c_1 = le_bits_to_num(cs.namespace(|| "cyclefold_1 hash"), &h_c_1_bits)?; + + // Check the intermediate-calculated running instance matches the + // non-deterministic advice provided to the prover + let check_cyclefold_int = alloc_num_equals(cs.namespace(|| "h_int = h_c_1"), &h_c_int, &h_c_1)?; + + let checks_pass = + AllocatedBit::and(cs.namespace(|| "all checks passed"), &check_io, &check_cyclefold_int)?; + + // calculate the folded CycleFold accumulator + let U_c = data_c_2.apply_fold( + cs.namespace(|| "fold u_c_2 into U_c_1"), + pp_digest, + 
self.ro_consts.clone(), + self.params.limb_width, + self.params.n_limbs, + )?; + + // calculate the folded primary circuit accumulator + let U_p = data_p.U.fold_with_r1cs( + cs.namespace(|| "fold u_p into U_p"), + pp_digest, + W_new, + E_new, + &data_p.u_W, + &data_p.u_x0, + &data_p.u_x1, + &data_p.T, + self.ro_consts.clone(), + )?; + + Ok((U_c, U_p, checks_pass)) + } + + pub fn synthesize::Base>>( + self, + cs: &mut CS, + ) -> Result>, SynthesisError> { + // Circuit is documented here: https://hackmd.io/SBvAur_2RQmaduDi7gYbhw + let arity = self.step_circuit.arity(); + + // Allocate the witness + let (pp_digest, i, z_0, z_i, data_p, data_c_1, data_c_2, E_new, W_new) = + self.alloc_witness(cs.namespace(|| "alloc_witness"), arity)?; + + let zero = alloc_zero(cs.namespace(|| "zero")); + let is_base_case = alloc_num_equals(cs.namespace(|| "is base case"), &i, &zero)?; + + let (U_new_c_base, U_new_p_base) = self.synthesize_base_case(cs.namespace(|| "base case"))?; + + let (U_new_c_non_base, U_new_p_non_base, check_non_base_pass) = self.synthesize_non_base_case( + cs.namespace(|| "synthesize non base case"), + &pp_digest, + &i, + &z_0, + &z_i, + &data_p, + &data_c_1, + &data_c_2, + E_new, + W_new, + arity, + )?; + + let should_be_false = AllocatedBit::nor( + cs.namespace(|| "check_non_base_pass nor base_case"), + &check_non_base_pass, + &is_base_case, + )?; + cs.enforce( + || "check_non_base_pass nor base_case = false", + |lc| lc + should_be_false.get_variable(), + |lc| lc + CS::one(), + |lc| lc, + ); + + // select the new running primary instance + let Unew_p = U_new_p_base.conditionally_select( + cs.namespace(|| "compute Unew_p"), + &U_new_p_non_base, + &Boolean::from(is_base_case.clone()), + )?; + + // select the new running CycleFold instance + let Unew_c = U_new_c_base.conditionally_select( + cs.namespace(|| "compute Unew_c"), + &U_new_c_non_base, + &Boolean::from(is_base_case.clone()), + )?; + + // Compute i + 1 + let i_new = + AllocatedNum::alloc(cs.namespace(|| 
"i + 1"), || Ok(*i.get_value().get()? + E1::Base::ONE))?; + cs.enforce( + || "check i + 1", + |lc| lc, + |lc| lc, + |lc| lc + i_new.get_variable() - CS::one() - i.get_variable(), + ); + + // Compute z_{i+1} + let z_input = conditionally_select_slice( + cs.namespace(|| "select input to F"), + &z_0, + &z_i, + &Boolean::from(is_base_case), + )?; + + let (_pc, z_next) = self.step_circuit.synthesize(&mut cs.namespace(|| "F"), None, &z_input)?; + + if z_next.len() != arity { + return Err(SynthesisError::IncompatibleLengthVector("z_next".to_string())); + } + + // Calculate the first component of the public IO as the hash of the calculated + // primary running instance + let mut ro_p = E1::ROCircuit::new( + self.ro_consts.clone(), + 2 + 2 * arity + (2 * NUM_FE_IN_EMULATED_POINT + 3), // pp + i + z_0 + z_next + (U_p) + ); + ro_p.absorb(&pp_digest); + ro_p.absorb(&i_new); + for e in &z_0 { + ro_p.absorb(e); + } + for e in &z_next { + ro_p.absorb(e); + } + Unew_p.absorb_in_ro(cs.namespace(|| "absorb Unew_p"), &mut ro_p)?; + let hash_p_bits = ro_p.squeeze(cs.namespace(|| "hash_p_bits"), NUM_HASH_BITS)?; + let hash_p = le_bits_to_num(cs.namespace(|| "hash_p"), &hash_p_bits)?; + + // Calculate the second component of the public IO as the hash of the calculated + // CycleFold running instance + let mut ro_c = E1::ROCircuit::new( + self.ro_consts, + 1 + 1 + 3 + 3 + 1 + NIO_CYCLE_FOLD * BN_N_LIMBS, // pp + i + W + E + u + X + ); + ro_c.absorb(&pp_digest); + ro_c.absorb(&i_new); + Unew_c.absorb_in_ro(cs.namespace(|| "absorb Unew_c"), &mut ro_c)?; + let hash_c_bits = ro_c.squeeze(cs.namespace(|| "hash_c_bits"), NUM_HASH_BITS)?; + let hash_c = le_bits_to_num(cs.namespace(|| "hash_c"), &hash_c_bits)?; + + hash_p.inputize(cs.namespace(|| "u_p.x[0] = hash_p"))?; + hash_c.inputize(cs.namespace(|| "u_p.x[1] = hash_c"))?; + + Ok(z_next) + } +} + +// #[cfg(test)] +// mod test { +// use expect_test::{expect, Expect}; + +// use super::*; +// use crate::{ +// 
bellpepper::test_shape_cs::TestShapeCS, +// constants::{BN_LIMB_WIDTH, BN_N_LIMBS}, +// provider::{Bn256EngineKZG, PallasEngine, Secp256k1Engine}, +// traits::{circuit::TrivialCircuit, CurveCycleEquipped, Dual}, +// }; + +// fn test_augmented_circuit_size_with(expected_cons: &Expect, +// expected_var: &Expect) where +// E: CurveCycleEquipped, +// { +// let params = AugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS); + +// let ro_consts = ROConstantsCircuit::::default(); + +// let step_circuit = TrivialCircuit::::default(); + +// let circuit = AugmentedCircuit::, +// TrivialCircuit>::new( ¶ms, +// ro_consts, +// None, +// &step_circuit, +// ); +// let mut cs: TestShapeCS> = TestShapeCS::default(); + +// let res = circuit.synthesize(&mut cs); + +// res.unwrap(); + +// let num_constraints = cs.num_constraints(); +// let num_variables = cs.num_aux(); + +// expected_cons.assert_eq(&num_constraints.to_string()); +// expected_var.assert_eq(&num_variables.to_string()); +// } + +// #[test] +// fn test_augmented_circuit_size() { +// test_augmented_circuit_size_with::(&expect!["33289"], +// &expect!["33323"]); +// test_augmented_circuit_size_with::(&expect!["35125" +// ], &expect!["35159"]); +// test_augmented_circuit_size_with::(&expect!["33856"], +// &expect!["33890"]); } +// } diff --git a/prover/src/cyclefold/snark.rs b/prover/src/cyclefold/snark.rs new file mode 100644 index 0000000..1ac176a --- /dev/null +++ b/prover/src/cyclefold/snark.rs @@ -0,0 +1,555 @@ +//! This module defines the Cyclefold `RecursiveSNARK` type with its `new`, +//! `prove_step`, and `verify` methods. 
+ +use bellpepper_core::{ConstraintSystem, SynthesisError}; +use ff::PrimeFieldBits; +use once_cell::sync::OnceCell; +use serde::{Deserialize, Serialize}; + +use super::{ + nifs::{CycleFoldNIFS, PrimaryNIFS}, + nova_circuit::{AugmentedCircuit, AugmentedCircuitInputs, AugmentedCircuitParams}, + util::{absorb_primary_relaxed_r1cs, FoldingData}, +}; +use crate::{ + bellpepper::{ + r1cs::{NovaShape, NovaWitness}, + shape_cs::ShapeCS, + solver::SatisfyingAssignment, + }, + constants::{ + BN_LIMB_WIDTH, BN_N_LIMBS, NIO_CYCLE_FOLD, NUM_CHALLENGE_BITS, NUM_FE_IN_EMULATED_POINT, + NUM_HASH_BITS, + }, + cyclefold::circuit::CycleFoldCircuit, + errors::NovaError, + gadgets::scalar_as_base, + r1cs::{ + self, CommitmentKeyHint, R1CSInstance, R1CSResult, R1CSWitness, RelaxedR1CSInstance, + RelaxedR1CSWitness, + }, + supernova::StepCircuit, + traits::{ + commitment::CommitmentTrait, AbsorbInROTrait, CurveCycleEquipped, Dual, Engine, + ROConstantsCircuit, ROTrait, + }, + Commitment, CommitmentKey, DigestComputer, R1CSWithArity, ROConstants, ResourceBuffer, + SimpleDigestible, +}; + +/// The public parameters used in the CycleFold recursive SNARK proof and +/// verification +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct PublicParams +where E1: CurveCycleEquipped { + F_arity_primary: usize, + ro_consts_primary: ROConstants>, + ro_consts_circuit_primary: ROConstantsCircuit>, + ck_primary: CommitmentKey, + circuit_shape_primary: R1CSWithArity, + augmented_circuit_params: AugmentedCircuitParams, + + ro_consts_cyclefold: ROConstants>, + ck_cyclefold: CommitmentKey>, + circuit_shape_cyclefold: R1CSWithArity>, + #[serde(skip, default = "OnceCell::new")] + digest: OnceCell, +} + +impl PublicParams +where E1: CurveCycleEquipped +{ + /// Builds the public parameters for the circuit `C1`. 
+ /// The same note for public parameter hints apply as in the case for Nova's + /// public parameters: For some final compressing SNARKs the size of the + /// commitment key must be larger, so we include `ck_hint_primary` and + /// `ck_hint_cyclefold` parameters to accommodate this. + pub fn setup>( + c_primary: &C1, + ck_hint_primary: &CommitmentKeyHint, + ck_hint_cyclefold: &CommitmentKeyHint>, + ) -> Self { + let F_arity_primary = c_primary.arity(); + let ro_consts_primary = ROConstants::>::default(); + let ro_consts_circuit_primary = ROConstantsCircuit::>::default(); + + let augmented_circuit_params = AugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS); + let circuit_primary: AugmentedCircuit<'_, Dual, E1, C1> = AugmentedCircuit::new( + &augmented_circuit_params, + ro_consts_circuit_primary.clone(), + None, + c_primary, + ); + let mut cs: ShapeCS = ShapeCS::new(); + let _ = circuit_primary.synthesize(&mut cs); + let (r1cs_shape_primary, ck_primary) = cs.r1cs_shape_and_key(ck_hint_primary); + let circuit_shape_primary = R1CSWithArity::new(r1cs_shape_primary, F_arity_primary); + + let ro_consts_cyclefold = ROConstants::>::default(); + let mut cs: ShapeCS> = ShapeCS::new(); + let circuit_cyclefold: CycleFoldCircuit = CycleFoldCircuit::default(); + let _ = circuit_cyclefold.synthesize(&mut cs); + let (r1cs_shape_cyclefold, ck_cyclefold) = cs.r1cs_shape_and_key(ck_hint_cyclefold); + let circuit_shape_cyclefold = R1CSWithArity::new(r1cs_shape_cyclefold, 0); + + Self { + F_arity_primary, + ro_consts_primary, + ro_consts_circuit_primary, + ck_primary, + circuit_shape_primary, + augmented_circuit_params, + ro_consts_cyclefold, + ck_cyclefold, + circuit_shape_cyclefold, + digest: OnceCell::new(), + } + } + + /// Calculate the digest of the public parameters. 
+ pub fn digest(&self) -> E1::Scalar { + self + .digest + .get_or_try_init(|| DigestComputer::new(self).digest()) + .cloned() + .expect("Failure in retrieving digest") + } + + /// Returns the number of constraints in the primary and cyclefold circuits + pub const fn num_constraints(&self) -> (usize, usize) { + ( + self.circuit_shape_primary.r1cs_shape.num_cons, + self.circuit_shape_cyclefold.r1cs_shape.num_cons, + ) + } + + /// Returns the number of variables in the primary and cyclefold circuits + pub const fn num_variables(&self) -> (usize, usize) { + ( + self.circuit_shape_primary.r1cs_shape.num_vars, + self.circuit_shape_cyclefold.r1cs_shape.num_vars, + ) + } +} + +impl SimpleDigestible for PublicParams where E1: CurveCycleEquipped {} + +/// A SNARK that proves the correct execution of an incremental computation in +/// the CycleFold folding scheme. +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct RecursiveSNARK +where E1: CurveCycleEquipped { + // Input + z0_primary: Vec, + + // primary circuit data + r_W_primary: RelaxedR1CSWitness, + r_U_primary: RelaxedR1CSInstance, + l_w_primary: R1CSWitness, + l_u_primary: R1CSInstance, + + // cyclefold circuit data + r_W_cyclefold: RelaxedR1CSWitness>, + r_U_cyclefold: RelaxedR1CSInstance>, + + // memory buffers for folding steps + buffer_primary: ResourceBuffer, + buffer_cyclefold: ResourceBuffer>, + + i: usize, + zi_primary: Vec, +} + +impl RecursiveSNARK +where E1: CurveCycleEquipped +{ + /// Create a new instance of a recursive SNARK + pub fn new>( + pp: &PublicParams, + c_primary: &C1, + z0_primary: &[E1::Scalar], + ) -> Result { + if z0_primary.len() != pp.F_arity_primary { + return Err(NovaError::InvalidInitialInputLength); + } + + let r1cs_primary = &pp.circuit_shape_primary.r1cs_shape; + let r1cs_cyclefold = &pp.circuit_shape_cyclefold.r1cs_shape; + + let r_U_cyclefold = RelaxedR1CSInstance::default(&pp.ck_cyclefold, r1cs_cyclefold); + let r_W_cyclefold = 
RelaxedR1CSWitness::default(r1cs_cyclefold); + + let mut cs_primary = SatisfyingAssignment::::new(); + let inputs_primary: AugmentedCircuitInputs, E1> = AugmentedCircuitInputs::new( + scalar_as_base::(pp.digest()), + as Engine>::Base::from(0u64), + z0_primary.to_vec(), + None, + None, + None, + None, + None, + None, + ); + + let circuit_primary = AugmentedCircuit::new( + &pp.augmented_circuit_params, + pp.ro_consts_circuit_primary.clone(), + Some(inputs_primary), + c_primary, + ); + + let zi_primary = circuit_primary.synthesize(&mut cs_primary)?; + let (l_u_primary, l_w_primary) = + cs_primary.r1cs_instance_and_witness(r1cs_primary, &pp.ck_primary)?; + + let r_U_primary = RelaxedR1CSInstance::default(&pp.ck_primary, r1cs_primary); + let r_W_primary = RelaxedR1CSWitness::default(r1cs_primary); + + let zi_primary = zi_primary + .iter() + .map(|v| v.get_value().ok_or(SynthesisError::AssignmentMissing)) + .collect::, _>>()?; + + let buffer_primary = ResourceBuffer { + l_w: None, + l_u: None, + ABC_Z_1: R1CSResult::default(r1cs_primary.num_cons), + ABC_Z_2: R1CSResult::default(r1cs_primary.num_cons), + T: r1cs::default_T::(r1cs_primary.num_cons), + }; + + let buffer_cyclefold = ResourceBuffer { + l_w: None, + l_u: None, + ABC_Z_1: R1CSResult::default(r1cs_cyclefold.num_cons), + ABC_Z_2: R1CSResult::default(r1cs_cyclefold.num_cons), + T: r1cs::default_T::>(r1cs_cyclefold.num_cons), + }; + + Ok(Self { + z0_primary: z0_primary.to_vec(), + r_W_primary, + r_U_primary, + l_w_primary, + l_u_primary, + r_W_cyclefold, + r_U_cyclefold, + buffer_primary, + buffer_cyclefold, + i: 0, + zi_primary, + }) + } + + /// Update the `RecursiveSNARK` by proving a step of the incremental + /// computation. 
+ pub fn prove_step>( + &mut self, + pp: &PublicParams, + c_primary: &C1, + ) -> Result<(), NovaError> { + if self.i == 0 { + self.i = 1; + return Ok(()); + } + + let (nifs_primary, (r_U_primary, r_W_primary), r) = PrimaryNIFS::>::prove( + &pp.ck_primary, + &pp.ro_consts_primary, + &pp.digest(), + &pp.circuit_shape_primary.r1cs_shape, + &self.r_U_primary, + &self.r_W_primary, + &self.l_u_primary, + &self.l_w_primary, + )?; + + let r_bools = r + .to_le_bits() + .iter() + .map(|b| Some(*b)) + .take(NUM_CHALLENGE_BITS) + .collect::>>() + .map(|v| v.try_into().unwrap()); + + let comm_T = Commitment::::decompress(&nifs_primary.comm_T)?; + let E_new = self.r_U_primary.comm_E + comm_T * r; + + let W_new = self.r_U_primary.comm_W + self.l_u_primary.comm_W * r; + + let mut cs_cyclefold_E = SatisfyingAssignment::>::with_capacity( + pp.circuit_shape_cyclefold.r1cs_shape.num_io + 1, + pp.circuit_shape_cyclefold.r1cs_shape.num_vars, + ); + + let circuit_cyclefold_E: CycleFoldCircuit = + CycleFoldCircuit::new(Some(self.r_U_primary.comm_E), Some(comm_T), r_bools); + + let _ = circuit_cyclefold_E.synthesize(&mut cs_cyclefold_E); + + let (l_u_cyclefold_E, l_w_cyclefold_E) = cs_cyclefold_E + .r1cs_instance_and_witness(&pp.circuit_shape_cyclefold.r1cs_shape, &pp.ck_cyclefold) + .map_err(|_| NovaError::UnSat)?; + + // TODO: check if this is better or worse than `prove_mut` with a clone of + // `self.r_U_cyclefold` + let (nifs_cyclefold_E, (r_U_cyclefold_E, r_W_cyclefold_E)) = CycleFoldNIFS::prove( + &pp.ck_cyclefold, + &pp.ro_consts_cyclefold, + &scalar_as_base::(pp.digest()), + &pp.circuit_shape_cyclefold.r1cs_shape, + &self.r_U_cyclefold, + &self.r_W_cyclefold, + &l_u_cyclefold_E, + &l_w_cyclefold_E, + )?; + + let comm_T_E = Commitment::>::decompress(&nifs_cyclefold_E.comm_T)?; + + let mut cs_cyclefold_W = SatisfyingAssignment::>::with_capacity( + pp.circuit_shape_cyclefold.r1cs_shape.num_io + 1, + pp.circuit_shape_cyclefold.r1cs_shape.num_vars, + ); + + let circuit_cyclefold_W: 
CycleFoldCircuit = + CycleFoldCircuit::new(Some(self.r_U_primary.comm_W), Some(self.l_u_primary.comm_W), r_bools); + + let _ = circuit_cyclefold_W.synthesize(&mut cs_cyclefold_W); + + let (l_u_cyclefold_W, l_w_cyclefold_W) = cs_cyclefold_W + .r1cs_instance_and_witness(&pp.circuit_shape_cyclefold.r1cs_shape, &pp.ck_cyclefold) + .map_err(|_| NovaError::UnSat)?; + + // TODO: check if this is better or worse than `prove_mut` with a clone of + // r_U_cyclefold_E + let (nifs_cyclefold_W, (r_U_cyclefold_W, r_W_cyclefold_W)) = CycleFoldNIFS::prove( + &pp.ck_cyclefold, + &pp.ro_consts_cyclefold, + &scalar_as_base::(pp.digest()), + &pp.circuit_shape_cyclefold.r1cs_shape, + &r_U_cyclefold_E, + &r_W_cyclefold_E, + &l_u_cyclefold_W, + &l_w_cyclefold_W, + )?; + + let comm_T_W = Commitment::>::decompress(&nifs_cyclefold_W.comm_T)?; + + let mut cs_primary = SatisfyingAssignment::::with_capacity( + pp.circuit_shape_primary.r1cs_shape.num_io + 1, + pp.circuit_shape_primary.r1cs_shape.num_vars, + ); + + let data_p = FoldingData::new(self.r_U_primary.clone(), self.l_u_primary.clone(), comm_T); + let data_c_E = FoldingData::new(self.r_U_cyclefold.clone(), l_u_cyclefold_E, comm_T_E); + let data_c_W = FoldingData::new(r_U_cyclefold_E, l_u_cyclefold_W, comm_T_W); + + let inputs_primary: AugmentedCircuitInputs, E1> = AugmentedCircuitInputs::new( + scalar_as_base::(pp.digest()), + as Engine>::Base::from(self.i as u64), + self.z0_primary.clone(), + Some(self.zi_primary.clone()), + Some(data_p), + Some(data_c_E), + Some(data_c_W), + Some(E_new), + Some(W_new), + ); + + let circuit_primary: AugmentedCircuit<'_, Dual, E1, C1> = AugmentedCircuit::new( + &pp.augmented_circuit_params, + pp.ro_consts_circuit_primary.clone(), + Some(inputs_primary), + c_primary, + ); + + let zi_primary = circuit_primary.synthesize(&mut cs_primary)?; + + let (l_u_primary, l_w_primary) = cs_primary + .r1cs_instance_and_witness(&pp.circuit_shape_primary.r1cs_shape, &pp.ck_primary) + .map_err(|_| NovaError::UnSat)?; + + 
self.zi_primary = zi_primary + .iter() + .map(|v| v.get_value().ok_or(SynthesisError::AssignmentMissing)) + .collect::, _>>()?; + + self.r_U_primary = r_U_primary; + self.r_W_primary = r_W_primary; + self.l_u_primary = l_u_primary; + self.l_w_primary = l_w_primary; + self.r_U_cyclefold = r_U_cyclefold_W; + self.r_W_cyclefold = r_W_cyclefold_W; + + self.i += 1; + + Ok(()) + } + + /// Verify the correctness of the `RecursiveSNARK` + pub fn verify( + &self, + pp: &PublicParams, + num_steps: usize, + z0_primary: &[E1::Scalar], + ) -> Result, NovaError> { + // number of steps cannot be zero + let is_num_steps_zero = num_steps == 0; + + // check if the provided proof has executed num_steps + let is_num_steps_not_match = self.i != num_steps; + + // check if the initial inputs match + let is_inputs_not_match = self.z0_primary != z0_primary; + + // check if the (relaxed) R1CS instances have two public outputs + let is_instance_has_two_outputs = self.r_U_primary.X.len() != 2; + + if is_num_steps_zero + || is_num_steps_not_match + || is_inputs_not_match + || is_instance_has_two_outputs + { + return Err(NovaError::ProofVerifyError); + } + + // Calculate the hashes of the primary running instance and cyclefold running + // instance + let (hash_primary, hash_cyclefold) = { + let mut hasher = as Engine>::RO::new( + pp.ro_consts_primary.clone(), + 2 + 2 * pp.F_arity_primary + 2 * NUM_FE_IN_EMULATED_POINT + 3, + ); + hasher.absorb(pp.digest()); + hasher.absorb(E1::Scalar::from(num_steps as u64)); + for e in z0_primary { + hasher.absorb(*e); + } + for e in &self.zi_primary { + hasher.absorb(*e); + } + absorb_primary_relaxed_r1cs::>(&self.r_U_primary, &mut hasher); + let hash_primary = hasher.squeeze(NUM_HASH_BITS); + + let mut hasher = as Engine>::RO::new( + pp.ro_consts_cyclefold.clone(), + 1 + 1 + 3 + 3 + 1 + NIO_CYCLE_FOLD * BN_N_LIMBS, + ); + hasher.absorb(pp.digest()); + hasher.absorb(E1::Scalar::from(num_steps as u64)); + self.r_U_cyclefold.absorb_in_ro(&mut hasher); + let 
hash_cyclefold = hasher.squeeze(NUM_HASH_BITS); + + (hash_primary, hash_cyclefold) + }; + + // Verify the hashes equal the public IO for the final primary instance + if scalar_as_base::>(hash_primary) != self.l_u_primary.X[0] + || scalar_as_base::>(hash_cyclefold) != self.l_u_primary.X[1] + { + return Err(NovaError::ProofVerifyError); + } + + // Verify the satisfiability of running relaxed instances, and the final primary + // instance. + let (res_r_primary, (res_l_primary, res_r_cyclefold)) = rayon::join( + || { + pp.circuit_shape_primary.r1cs_shape.is_sat_relaxed( + &pp.ck_primary, + &self.r_U_primary, + &self.r_W_primary, + ) + }, + || { + rayon::join( + || { + pp.circuit_shape_primary.r1cs_shape.is_sat( + &pp.ck_primary, + &self.l_u_primary, + &self.l_w_primary, + ) + }, + || { + pp.circuit_shape_cyclefold.r1cs_shape.is_sat_relaxed( + &pp.ck_cyclefold, + &self.r_U_cyclefold, + &self.r_W_cyclefold, + ) + }, + ) + }, + ); + + res_r_primary?; + res_l_primary?; + res_r_cyclefold?; + + Ok(self.zi_primary.clone()) + } +} + +// #[cfg(test)] +// mod test { +// use std::marker::PhantomData; + +// use bellpepper_core::num::AllocatedNum; + +// use super::*; +// use crate::{ +// provider::{Bn256EngineKZG, PallasEngine, Secp256k1Engine}, +// traits::snark::default_ck_hint, +// }; + +// #[derive(Clone)] +// struct SquareCircuit { +// _p: PhantomData, +// } + +// impl StepCircuit for SquareCircuit { +// fn arity(&self) -> usize { +// 1 +// } + +// fn synthesize>( +// &self, +// cs: &mut CS, +// z: &[AllocatedNum], +// ) -> Result>, SynthesisError> { +// let x = &z[0]; +// let x_sq = x.square(cs.namespace(|| "x_sq"))?; + +// Ok(vec![x_sq]) +// } +// } + +// fn test_trivial_cyclefold_prove_verify_with() { +// let primary_circuit = SquareCircuit:: { _p: PhantomData }; + +// let pp = +// PublicParams::::setup(&primary_circuit, &*default_ck_hint(), +// &*default_ck_hint()); + +// let z0 = vec![E::Scalar::from(2u64)]; + +// let mut recursive_snark = RecursiveSNARK::new(&pp, 
&primary_circuit, +// &z0).unwrap(); + +// (1..5).for_each(|iter| { +// let res_proof = recursive_snark.prove_step(&pp, +// &primary_circuit); res_proof.unwrap(); + +// let res_verify = recursive_snark.verify(&pp, iter, &z0); +// res_verify.unwrap(); +// }); +// } + +// #[test] +// fn test_cyclefold_prove_verify() { +// test_trivial_cyclefold_prove_verify_with::(); +// test_trivial_cyclefold_prove_verify_with::(); +// test_trivial_cyclefold_prove_verify_with::(); +// } +// } diff --git a/prover/src/cyclefold/util.rs b/prover/src/cyclefold/util.rs new file mode 100644 index 0000000..a23e8b4 --- /dev/null +++ b/prover/src/cyclefold/util.rs @@ -0,0 +1,87 @@ +//! This module defines some useful utilities for RO absorbing, and the Folding +//! data used in the CycleFold folding scheme. + +use ff::Field; +use serde::{Deserialize, Serialize}; + +use crate::{ + constants::{BN_LIMB_WIDTH, BN_N_LIMBS}, + gadgets::{f_to_nat, nat_to_limbs, scalar_as_base}, + r1cs::{R1CSInstance, RelaxedR1CSInstance}, + traits::{commitment::CommitmentTrait, AbsorbInROTrait, Engine, ROTrait}, + Commitment, +}; + +/// Absorb a commitment over engine `E1` into an RO over engine `E2` by +/// absorbing the limbs +pub(super) fn absorb_primary_commitment( + comm: &impl CommitmentTrait, + ro: &mut impl ROTrait, +) where + E1: Engine::Scalar>, + E2: Engine::Scalar>, +{ + let (x, y, is_infinity) = comm.to_coordinates(); + + let x_limbs = nat_to_limbs(&f_to_nat(&x), BN_LIMB_WIDTH, BN_N_LIMBS).unwrap(); + let y_limbs = nat_to_limbs(&f_to_nat(&y), BN_LIMB_WIDTH, BN_N_LIMBS).unwrap(); + + for limb in x_limbs { + ro.absorb(scalar_as_base::(limb)); + } + for limb in y_limbs { + ro.absorb(scalar_as_base::(limb)); + } + if is_infinity { + ro.absorb(::Scalar::ONE); + } else { + ro.absorb(::Scalar::ZERO); + } +} + +pub(super) fn absorb_primary_r1cs( + u: &R1CSInstance, + ro: &mut impl ROTrait, +) where + E1: Engine::Scalar>, + E2: Engine::Scalar>, +{ + absorb_primary_commitment::(&u.comm_W, ro); + for x in &u.X { 
+ ro.absorb(*x); + } +} + +pub(super) fn absorb_cyclefold_r1cs(u: &R1CSInstance, ro: &mut E::RO) { + u.comm_W.absorb_in_ro(ro); + u.X.iter().for_each(|x| { + let limbs: Vec = nat_to_limbs(&f_to_nat(x), BN_LIMB_WIDTH, BN_N_LIMBS).unwrap(); + limbs.into_iter().for_each(|limb| ro.absorb(scalar_as_base::(limb))); + }); +} + +pub(super) fn absorb_primary_relaxed_r1cs(U: &RelaxedR1CSInstance, ro: &mut E2::RO) +where + E1: Engine::Scalar>, + E2: Engine::Scalar>, { + absorb_primary_commitment::(&U.comm_W, ro); + absorb_primary_commitment::(&U.comm_E, ro); + ro.absorb(U.u); + for e in &U.X { + ro.absorb(*e); + } +} + +#[derive(Debug, Serialize, Deserialize)] +#[serde(bound = "")] +pub(super) struct FoldingData { + pub U: RelaxedR1CSInstance, + pub u: R1CSInstance, + pub T: Commitment, +} + +impl FoldingData { + pub fn new(U: RelaxedR1CSInstance, u: R1CSInstance, T: Commitment) -> Self { + Self { U, u, T } + } +} diff --git a/src/digest.rs b/prover/src/digest.rs similarity index 66% rename from src/digest.rs rename to prover/src/digest.rs index 48b32f9..6be08e3 100644 --- a/src/digest.rs +++ b/prover/src/digest.rs @@ -10,8 +10,8 @@ use crate::constants::NUM_HASH_BITS; /// Trait for components with potentially discrete digests to be included in /// their container's digest. pub trait Digestible { - /// Write the byte representation of Self in a byte buffer - fn write_bytes(&self, byte_sink: &mut W) -> Result<(), io::Error>; + /// Write the byte representation of Self in a byte buffer + fn write_bytes(&self, byte_sink: &mut W) -> Result<(), io::Error>; } /// Marker trait to be implemented for types that implement `Digestible` and @@ -19,61 +19,52 @@ pub trait Digestible { pub trait SimpleDigestible: Serialize {} impl Digestible for T { - fn write_bytes(&self, byte_sink: &mut W) -> Result<(), io::Error> { - let config = bincode::DefaultOptions::new() - .with_little_endian() - .with_fixint_encoding(); - // Note: bincode recursively length-prefixes every field! 
- config - .serialize_into(byte_sink, self) - .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e)) - } + fn write_bytes(&self, byte_sink: &mut W) -> Result<(), io::Error> { + let config = bincode::DefaultOptions::new().with_little_endian().with_fixint_encoding(); + // Note: bincode recursively length-prefixes every field! + config + .serialize_into(byte_sink, self) + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e)) + } } pub struct DigestComputer<'a, F, T> { - inner: &'a T, - _phantom: PhantomData, + inner: &'a T, + _phantom: PhantomData, } impl<'a, F: PrimeField, T: Digestible> DigestComputer<'a, F, T> { - fn hasher() -> Sha3_256 { - Sha3_256::new() - } - - fn map_to_field(digest: &[u8]) -> F { - let bv = (0..NUM_HASH_BITS).map(|i| { - let (byte_pos, bit_pos) = (i / 8, i % 8); - let bit = (digest[byte_pos] >> bit_pos) & 1; - bit == 1 - }); - - // turn the bit vector into a scalar - let mut digest = F::ZERO; - let mut coeff = F::ONE; - for bit in bv { - if bit { - digest += coeff; - } - coeff += coeff; - } - digest - } - - /// Create a new `DigestComputer` - pub fn new(inner: &'a T) -> Self { - DigestComputer { - inner, - _phantom: PhantomData, - } - } - - /// Compute the digest of a `Digestible` instance. 
- pub fn digest(&self) -> Result { - let mut hasher = Self::hasher(); - self.inner.write_bytes(&mut hasher)?; - let bytes: [u8; 32] = hasher.finalize().into(); - Ok(Self::map_to_field(&bytes)) + fn hasher() -> Sha3_256 { Sha3_256::new() } + + fn map_to_field(digest: &[u8]) -> F { + let bv = (0..NUM_HASH_BITS).map(|i| { + let (byte_pos, bit_pos) = (i / 8, i % 8); + let bit = (digest[byte_pos] >> bit_pos) & 1; + bit == 1 + }); + + // turn the bit vector into a scalar + let mut digest = F::ZERO; + let mut coeff = F::ONE; + for bit in bv { + if bit { + digest += coeff; + } + coeff += coeff; } + digest + } + + /// Create a new `DigestComputer` + pub fn new(inner: &'a T) -> Self { DigestComputer { inner, _phantom: PhantomData } } + + /// Compute the digest of a `Digestible` instance. + pub fn digest(&self) -> Result { + let mut hasher = Self::hasher(); + self.inner.write_bytes(&mut hasher)?; + let bytes: [u8; 32] = hasher.finalize().into(); + Ok(Self::map_to_field(&bytes)) + } } // #[cfg(test)] diff --git a/prover/src/errors.rs b/prover/src/errors.rs new file mode 100644 index 0000000..1e91e53 --- /dev/null +++ b/prover/src/errors.rs @@ -0,0 +1,99 @@ +//! This module defines errors returned by the library. 
+use core::fmt::Debug; + +use thiserror::Error; + +/// Errors returned by Nova +#[derive(Debug, Eq, PartialEq, Error)] +#[non_exhaustive] +pub enum NovaError { + /// returned if the supplied row or col in (row,col,val) tuple is out of + /// range + #[error("InvalidIndex")] + InvalidIndex, + /// returned if the step circuit calls inputize or alloc_io in its + /// synthesize method instead of passing output with the return value + #[error("InvalidStepCircuitIO")] + InvalidStepCircuitIO, + /// returned if the supplied input is not of the right length + #[error("InvalidInputLength")] + InvalidInputLength, + /// returned if the supplied witness is not of the right length + #[error("InvalidWitnessLength")] + InvalidWitnessLength, + /// returned if the supplied witness is not a satisfying witness to a given + /// shape and instance + #[error("UnSat")] + UnSat, + /// returned if the supplied witness is not a satisfying witness to a given + /// shape and instance, with error constraint index + #[error("UnSatIndex")] + UnSatIndex(usize), + /// returned when the supplied compressed commitment cannot be decompressed + #[error("DecompressionError")] + DecompressionError, + /// returned if proof verification fails + #[error("ProofVerifyError")] + ProofVerifyError, + /// returned if the provided commitment key is not of sufficient length + #[error("InvalidCommitmentKeyLength")] + InvalidCommitmentKeyLength, + /// returned if the provided number of steps is zero + #[error("InvalidNumSteps")] + InvalidNumSteps, + /// returned if there is an error in the proof/verification of a PCS + #[error("PCSError")] + PCSError(#[from] PCSError), + /// returned when an invalid sum-check proof is provided + #[error("InvalidSumcheckProof")] + InvalidSumcheckProof, + /// returned when the initial input to an incremental computation differs + /// from a previously declared arity + #[error("InvalidInitialInputLength")] + InvalidInitialInputLength, + /// returned when the step execution produces an 
output whose length differs + /// from a previously declared arity + #[error("InvalidStepOutputLength")] + InvalidStepOutputLength, + /// returned when the transcript engine encounters an overflow of the round + /// number + #[error("InternalTranscriptError")] + InternalTranscriptError, + /// returned when the multiset check fails + #[error("InvalidMultisetProof")] + InvalidMultisetProof, + /// returned when the product proof check fails + #[error("InvalidProductProof")] + InvalidProductProof, + /// returned when the consistency with public IO and assignment used fails + #[error("IncorrectWitness")] + IncorrectWitness, + /// return when error during synthesis + #[error("SynthesisError: {0}")] + SynthesisError(String), + /// returned when there is an error creating a digest + #[error("DigestError")] + DigestError, + /// returned when the prover cannot prove the provided statement due to + /// completeness error + #[error("InternalError")] + InternalError, +} + +/// Errors specific to the Polynomial commitment scheme +#[derive(Debug, Eq, PartialEq, Error)] +pub enum PCSError { + /// returned when an invalid PCS evaluation argument is provided + #[error("InvalidPCS")] + InvalidPCS, + /// returned when there is a Zeromorph error + #[error("ZMError")] + ZMError, + /// returned when a length check fails in a PCS + #[error("LengthError")] + LengthError, +} + +impl From for NovaError { + fn from(err: bellpepper_core::SynthesisError) -> Self { Self::SynthesisError(err.to_string()) } +} diff --git a/prover/src/fast_serde.rs b/prover/src/fast_serde.rs new file mode 100644 index 0000000..4c6facc --- /dev/null +++ b/prover/src/fast_serde.rs @@ -0,0 +1,100 @@ +//! This module implements fast serde for reading and writing +//! key objects requires for proof generation and verification. +//! With WASM in particular, serializing via standard binary serializers +//! like bincode causes a dramatic decrease in performance. This simple +//! serializers parses in bytes very efficiently. 
+//! +//! In the future, it can be extended to do direct memory access to the +//! javascript runtime. For now it does a single copy of the data into +//! the rust runtime. + +use std::io::{Cursor, Read}; + +use thiserror::Error; + +pub static MAGIC_NUMBER: [u8; 4] = [0x50, 0x4C, 0x55, 0x54]; +pub enum SerdeByteTypes { + AuxParams = 0x01, + UniversalKZGParam = 0x02, + CommitmentKey = 0x03, + ProverParams = 0x04, +} + +#[derive(Debug, Error)] +pub enum SerdeByteError { + #[error("{}", "invalid magic number")] + InvalidMagicNumber, + #[error("{}", "invalid serde type")] + InvalidSerdeType, + #[error("{}", "invalid section count")] + InvalidSectionCount, + #[error("{}", "invalid section type")] + InvalidSectionType, + #[error("{}", "invalid section size")] + InvalidSectionSize, + #[error(transparent)] + IoError(#[from] std::io::Error), + #[error(transparent)] + BincodeError(#[from] Box), + #[error("{}", "g1 decode error")] + G1DecodeError, + #[error("{}", "g2 decode error")] + G2DecodeError, +} + +/// A trait for fast conversions to bytes +pub trait FastSerde: Sized { + fn to_bytes(&self) -> Vec; + fn from_bytes(bytes: &Vec) -> Result; + + fn validate_header( + cursor: &mut Cursor<&Vec>, + expected_type: SerdeByteTypes, + expected_sections: u8, + ) -> Result<(), SerdeByteError> { + let mut magic = [0u8; 4]; + cursor.read_exact(&mut magic)?; + if magic != MAGIC_NUMBER { + return Err(SerdeByteError::InvalidMagicNumber); + } + + let mut serde_type = [0u8; 1]; + cursor.read_exact(&mut serde_type)?; + if serde_type[0] != expected_type as u8 { + return Err(SerdeByteError::InvalidSerdeType); + } + + let mut num_sections = [0u8; 1]; + cursor.read_exact(&mut num_sections)?; + if num_sections[0] != expected_sections { + return Err(SerdeByteError::InvalidSectionCount); + } + + Ok(()) + } + + fn read_section_bytes( + cursor: &mut Cursor<&Vec>, + expected_type: u8, + ) -> Result, SerdeByteError> { + let mut section_type = [0u8; 1]; + cursor.read_exact(&mut section_type)?; + if 
section_type[0] != expected_type { + return Err(SerdeByteError::InvalidSectionType); + } + + let mut section_size = [0u8; 4]; + cursor.read_exact(&mut section_size)?; + let size = u32::from_le_bytes(section_size) as usize; + let mut section_data = vec![0u8; size]; + cursor.read_exact(&mut section_data)?; + + Ok(section_data) + } + + fn write_section_bytes(out: &mut Vec, section_type: u8, data: &Vec) { + out.push(section_type); + out.extend_from_slice(&(data.len() as u32).to_le_bytes()); + out.extend_from_slice(data); + } +} diff --git a/prover/src/gadgets/ecc.rs b/prover/src/gadgets/ecc.rs new file mode 100644 index 0000000..ee6d182 --- /dev/null +++ b/prover/src/gadgets/ecc.rs @@ -0,0 +1,1071 @@ +//! This module implements various elliptic curve gadgets +#![allow(non_snake_case)] +use bellpepper::gadgets::{boolean_utils::conditionally_select, Assignment}; +use bellpepper_core::{ + boolean::{AllocatedBit, Boolean}, + num::AllocatedNum, + ConstraintSystem, SynthesisError, +}; +use ff::{Field, PrimeField}; + +use crate::{ + gadgets::utils::{ + alloc_num_equals, alloc_one, alloc_zero, conditionally_select2, select_num_or_one, + select_num_or_zero, select_num_or_zero2, select_one_or_diff2, select_one_or_num2, + select_zero_or_num2, + }, + traits::Group, +}; + +/// `AllocatedPoint` provides an elliptic curve abstraction inside a circuit. +#[derive(Debug, Clone)] +pub struct AllocatedPoint { + pub(crate) x: AllocatedNum, + pub(crate) y: AllocatedNum, + pub(crate) is_infinity: AllocatedNum, +} + +impl AllocatedPoint { + /// Allocates a new point on the curve using coordinates provided by + /// `coords`. 
If coords = None, it allocates the default infinity point + pub fn alloc>( + mut cs: CS, + coords: Option<(G::Base, G::Base, bool)>, + ) -> Result { + let x = + AllocatedNum::alloc(cs.namespace(|| "x"), || Ok(coords.map_or(G::Base::ZERO, |c| c.0)))?; + let y = + AllocatedNum::alloc(cs.namespace(|| "y"), || Ok(coords.map_or(G::Base::ZERO, |c| c.1)))?; + let is_infinity = AllocatedNum::alloc(cs.namespace(|| "is_infinity"), || { + Ok(if coords.map_or(true, |c| c.2) { G::Base::ONE } else { G::Base::ZERO }) + })?; + cs.enforce( + || "is_infinity is bit", + |lc| lc + is_infinity.get_variable(), + |lc| lc + CS::one() - is_infinity.get_variable(), + |lc| lc, + ); + + Ok(Self { x, y, is_infinity }) + } + + /// checks if `self` is on the curve or if it is infinity + pub fn check_on_curve(&self, mut cs: CS) -> Result<(), SynthesisError> + where CS: ConstraintSystem { + // check that (x,y) is on the curve if it is not infinity + // we will check that (1- is_infinity) * y^2 = (1-is_infinity) * (x^3 + Ax + B) + // note that is_infinity is already restricted to be in the set {0, 1} + let y_square = self.y.square(cs.namespace(|| "y_square"))?; + let x_square = self.x.square(cs.namespace(|| "x_square"))?; + let x_cube = self.x.mul(cs.namespace(|| "x_cube"), &x_square)?; + + let rhs = AllocatedNum::alloc(cs.namespace(|| "rhs"), || { + if *self.is_infinity.get_value().get()? == G::Base::ONE { + Ok(G::Base::ZERO) + } else { + Ok( + *x_cube.get_value().get()? + + *self.x.get_value().get()? 
* G::group_params().0 + + G::group_params().1, + ) + } + })?; + + cs.enforce( + || "rhs = (1-is_infinity) * (x^3 + Ax + B)", + |lc| { + lc + x_cube.get_variable() + + (G::group_params().0, self.x.get_variable()) + + (G::group_params().1, CS::one()) + }, + |lc| lc + CS::one() - self.is_infinity.get_variable(), + |lc| lc + rhs.get_variable(), + ); + + // check that (1-infinity) * y_square = rhs + cs.enforce( + || "check that y_square * (1 - is_infinity) = rhs", + |lc| lc + y_square.get_variable(), + |lc| lc + CS::one() - self.is_infinity.get_variable(), + |lc| lc + rhs.get_variable(), + ); + + Ok(()) + } + + /// Allocates a default point on the curve, set to the identity point. + pub fn default>(mut cs: CS) -> Self { + let zero = alloc_zero(cs.namespace(|| "zero")); + let one = alloc_one(cs.namespace(|| "one")); + + Self { x: zero.clone(), y: zero, is_infinity: one } + } + + /// Returns coordinates associated with the point. + #[allow(unused)] + pub const fn get_coordinates( + &self, + ) -> (&AllocatedNum, &AllocatedNum, &AllocatedNum) { + (&self.x, &self.y, &self.is_infinity) + } + + /// Negates the provided point + pub fn negate>(&self, mut cs: CS) -> Result { + let y = AllocatedNum::alloc(cs.namespace(|| "y"), || Ok(-*self.y.get_value().get()?))?; + + cs.enforce( + || "check y = - self.y", + |lc| lc + self.y.get_variable(), + |lc| lc + CS::one(), + |lc| lc - y.get_variable(), + ); + + Ok(Self { x: self.x.clone(), y, is_infinity: self.is_infinity.clone() }) + } + + /// Add two points (may be equal) + pub fn add>( + &self, + mut cs: CS, + other: &Self, + ) -> Result { + // Compute boolean equal indicating if self = other + + let equal_x = alloc_num_equals(cs.namespace(|| "check self.x == other.x"), &self.x, &other.x)?; + + let equal_y = alloc_num_equals(cs.namespace(|| "check self.y == other.y"), &self.y, &other.y)?; + + // Compute the result of the addition and the result of double self + let result_from_add = self.add_internal(cs.namespace(|| "add internal"), 
other, &equal_x)?; + let result_from_double = self.double(cs.namespace(|| "double"))?; + + // Output: + // If (self == other) { + // return double(self) + // }else { + // if (self.x == other.x){ + // return infinity [negation] + // } else { + // return add(self, other) + // } + // } + let result_for_equal_x = Self::select_point_or_infinity( + cs.namespace(|| "equal_y ? result_from_double : infinity"), + &result_from_double, + &Boolean::from(equal_y), + )?; + + Self::conditionally_select( + cs.namespace(|| "equal ? result_from_double : result_from_add"), + &result_for_equal_x, + &result_from_add, + &Boolean::from(equal_x), + ) + } + + /// Adds other point to this point and returns the result. Assumes that the + /// two points are different and that both `other.is_infinity` and + /// `this.is_infinity` are bits + pub fn add_internal>( + &self, + mut cs: CS, + other: &Self, + equal_x: &AllocatedBit, + ) -> Result { + //************************************************************************/ + // lambda = (other.y - self.y) * (other.x - self.x).invert().unwrap(); + //************************************************************************/ + // First compute (other.x - self.x).inverse() + // If either self or other are the infinity point or self.x = other.x then + // compute bogus values Specifically, + // x_diff = self != inf && other != inf && self.x == other.x ? (other.x - + // self.x) : 1 + + // Compute self.is_infinity OR other.is_infinity = + // NOT(NOT(self.is_ifninity) AND NOT(other.is_infinity)) + let at_least_one_inf = AllocatedNum::alloc(cs.namespace(|| "at least one inf"), || { + Ok( + G::Base::ONE + - (G::Base::ONE - *self.is_infinity.get_value().get()?) 
+ * (G::Base::ONE - *other.is_infinity.get_value().get()?), + ) + })?; + cs.enforce( + || "1 - at least one inf = (1-self.is_infinity) * (1-other.is_infinity)", + |lc| lc + CS::one() - self.is_infinity.get_variable(), + |lc| lc + CS::one() - other.is_infinity.get_variable(), + |lc| lc + CS::one() - at_least_one_inf.get_variable(), + ); + + // Now compute x_diff_is_actual = at_least_one_inf OR equal_x + let x_diff_is_actual = + AllocatedNum::alloc(cs.namespace(|| "allocate x_diff_is_actual"), || { + Ok(if *equal_x.get_value().get()? { + G::Base::ONE + } else { + *at_least_one_inf.get_value().get()? + }) + })?; + cs.enforce( + || "1 - x_diff_is_actual = (1-equal_x) * (1-at_least_one_inf)", + |lc| lc + CS::one() - at_least_one_inf.get_variable(), + |lc| lc + CS::one() - equal_x.get_variable(), + |lc| lc + CS::one() - x_diff_is_actual.get_variable(), + ); + + // x_diff = 1 if either self.is_infinity or other.is_infinity or self.x = + // other.x else self.x - other.x + let x_diff = + select_one_or_diff2(cs.namespace(|| "Compute x_diff"), &other.x, &self.x, &x_diff_is_actual)?; + + let lambda = AllocatedNum::alloc(cs.namespace(|| "lambda"), || { + let x_diff_inv = if *x_diff_is_actual.get_value().get()? == G::Base::ONE { + // Set to default + G::Base::ONE + } else { + // Set to the actual inverse + (*other.x.get_value().get()? - *self.x.get_value().get()?).invert().unwrap() + }; + + Ok((*other.y.get_value().get()? - *self.y.get_value().get()?) * x_diff_inv) + })?; + cs.enforce( + || "Check that lambda is correct", + |lc| lc + lambda.get_variable(), + |lc| lc + x_diff.get_variable(), + |lc| lc + other.y.get_variable() - self.y.get_variable(), + ); + + //************************************************************************/ + // x = lambda * lambda - self.x - other.x; + //************************************************************************/ + let x = AllocatedNum::alloc(cs.namespace(|| "x"), || { + Ok( + *lambda.get_value().get()? * lambda.get_value().get()? 
+ - *self.x.get_value().get()? + - *other.x.get_value().get()?, + ) + })?; + cs.enforce( + || "check that x is correct", + |lc| lc + lambda.get_variable(), + |lc| lc + lambda.get_variable(), + |lc| lc + x.get_variable() + self.x.get_variable() + other.x.get_variable(), + ); + + //************************************************************************/ + // y = lambda * (self.x - x) - self.y; + //************************************************************************/ + let y = AllocatedNum::alloc(cs.namespace(|| "y"), || { + Ok( + *lambda.get_value().get()? * (*self.x.get_value().get()? - *x.get_value().get()?) + - *self.y.get_value().get()?, + ) + })?; + + cs.enforce( + || "Check that y is correct", + |lc| lc + lambda.get_variable(), + |lc| lc + self.x.get_variable() - x.get_variable(), + |lc| lc + y.get_variable() + self.y.get_variable(), + ); + + //************************************************************************/ + // We only return the computed x, y if neither of the points is infinity and + // self.x != other.y if self.is_infinity return other.clone() + // elif other.is_infinity return self.clone() + // elif self.x == other.x return infinity + // Otherwise return the computed points. + //************************************************************************/ + // Now compute the output x + + let x1 = conditionally_select2( + cs.namespace(|| "x1 = other.is_infinity ? self.x : x"), + &self.x, + &x, + &other.is_infinity, + )?; + + let x = conditionally_select2( + cs.namespace(|| "x = self.is_infinity ? other.x : x1"), + &other.x, + &x1, + &self.is_infinity, + )?; + + let y1 = conditionally_select2( + cs.namespace(|| "y1 = other.is_infinity ? self.y : y"), + &self.y, + &y, + &other.is_infinity, + )?; + + let y = conditionally_select2( + cs.namespace(|| "y = self.is_infinity ? other.y : y1"), + &other.y, + &y1, + &self.is_infinity, + )?; + + let is_infinity1 = select_num_or_zero2( + cs.namespace(|| "is_infinity1 = other.is_infinity ? 
self.is_infinity : 0"), + &self.is_infinity, + &other.is_infinity, + )?; + + let is_infinity = conditionally_select2( + cs.namespace(|| "is_infinity = self.is_infinity ? other.is_infinity : is_infinity1"), + &other.is_infinity, + &is_infinity1, + &self.is_infinity, + )?; + + Ok(Self { x, y, is_infinity }) + } + + /// Doubles the supplied point. + pub fn double>(&self, mut cs: CS) -> Result { + //*************************************************************/ + // lambda = (G::Base::from(3) * self.x * self.x + G::GG::A()) + // * (G::Base::from(2)) * self.y).invert().unwrap(); + // ********************************************************** + + // Compute tmp = (G::Base::ONE + G::Base::ONE)* self.y ? self != inf : 1 + let tmp_actual = AllocatedNum::alloc(cs.namespace(|| "tmp_actual"), || { + Ok(*self.y.get_value().get()? + *self.y.get_value().get()?) + })?; + cs.enforce( + || "check tmp_actual", + |lc| lc + CS::one() + CS::one(), + |lc| lc + self.y.get_variable(), + |lc| lc + tmp_actual.get_variable(), + ); + + let tmp = select_one_or_num2(cs.namespace(|| "tmp"), &tmp_actual, &self.is_infinity)?; + + // Now compute lambda as (G::Base::from(3) * self.x * self.x + G::GG::A()) * + // tmp_inv + + let prod_1 = AllocatedNum::alloc(cs.namespace(|| "alloc prod 1"), || { + Ok(G::Base::from(3) * self.x.get_value().get()? * self.x.get_value().get()?) + })?; + cs.enforce( + || "Check prod 1", + |lc| lc + (G::Base::from(3), self.x.get_variable()), + |lc| lc + self.x.get_variable(), + |lc| lc + prod_1.get_variable(), + ); + + let lambda = AllocatedNum::alloc(cs.namespace(|| "alloc lambda"), || { + let tmp_inv = if *self.is_infinity.get_value().get()? == G::Base::ONE { + // Return default value 1 + G::Base::ONE + } else { + // Return the actual inverse + (*tmp.get_value().get()?).invert().unwrap() + }; + + Ok(tmp_inv * (*prod_1.get_value().get()? 
+ G::group_params().0)) + })?; + + cs.enforce( + || "Check lambda", + |lc| lc + tmp.get_variable(), + |lc| lc + lambda.get_variable(), + |lc| lc + prod_1.get_variable() + (G::group_params().0, CS::one()), + ); + + // ********************************************************** + // x = lambda * lambda - self.x - self.x; + // ********************************************************** + let x = AllocatedNum::alloc(cs.namespace(|| "x"), || { + Ok( + ((*lambda.get_value().get()?) * (*lambda.get_value().get()?)) + - *self.x.get_value().get()? + - self.x.get_value().get()?, + ) + })?; + cs.enforce( + || "Check x", + |lc| lc + lambda.get_variable(), + |lc| lc + lambda.get_variable(), + |lc| lc + x.get_variable() + self.x.get_variable() + self.x.get_variable(), + ); + + // ********************************************************** + // y = lambda * (self.x - x) - self.y; + // ********************************************************** + let y = AllocatedNum::alloc(cs.namespace(|| "y"), || { + Ok( + (*lambda.get_value().get()?) * (*self.x.get_value().get()? - x.get_value().get()?) + - self.y.get_value().get()?, + ) + })?; + cs.enforce( + || "Check y", + |lc| lc + lambda.get_variable(), + |lc| lc + self.x.get_variable() - x.get_variable(), + |lc| lc + y.get_variable() + self.y.get_variable(), + ); + + // ********************************************************** + // Only return the computed x and y if the point is not infinity + // ********************************************************** + // x + let x = select_zero_or_num2(cs.namespace(|| "final x"), &x, &self.is_infinity)?; + + // y + let y = select_zero_or_num2(cs.namespace(|| "final y"), &y, &self.is_infinity)?; + + // is_infinity + let is_infinity = self.is_infinity.clone(); + + Ok(Self { x, y, is_infinity }) + } + + /// A gadget for scalar multiplication, optimized to use incomplete addition + /// law. 
The optimization here is analogous to , + /// except we use complete addition law over affine coordinates instead of + /// projective coordinates for the tail bits + pub fn scalar_mul>( + &self, + mut cs: CS, + scalar_bits: &[AllocatedBit], + ) -> Result { + let split_len = core::cmp::min(scalar_bits.len(), (G::Base::NUM_BITS - 2) as usize); + let (incomplete_bits, complete_bits) = scalar_bits.split_at(split_len); + + // we convert AllocatedPoint into AllocatedPointNonInfinity; we deal with the + // case where self.is_infinity = 1 below + let mut p = AllocatedPointNonInfinity::from_allocated_point(self); + + // we assume the first bit to be 1, so we must initialize acc to self and double + // it we remove this assumption below + let mut acc = p; + p = acc.double_incomplete(cs.namespace(|| "double"))?; + + // perform the double-and-add loop to compute the scalar mul using incomplete + // addition law + for (i, bit) in incomplete_bits.iter().enumerate().skip(1) { + let temp = acc.add_incomplete(cs.namespace(|| format!("add {i}")), &p)?; + acc = AllocatedPointNonInfinity::conditionally_select( + cs.namespace(|| format!("acc_iteration_{i}")), + &temp, + &acc, + &Boolean::from(bit.clone()), + )?; + + p = p.double_incomplete(cs.namespace(|| format!("double {i}")))?; + } + + // convert back to AllocatedPoint + let res = { + // we set acc.is_infinity = self.is_infinity + let acc = acc.to_allocated_point(&self.is_infinity); + + // we remove the initial slack if bits[0] is as not as assumed (i.e., it is not + // 1) + let acc_minus_initial = { + let neg = self.negate(cs.namespace(|| "negate"))?; + acc.add(cs.namespace(|| "res minus self"), &neg) + }?; + + Self::conditionally_select( + cs.namespace(|| "remove slack if necessary"), + &acc, + &acc_minus_initial, + &Boolean::from(scalar_bits[0].clone()), + )? 
+ }; + + // when self.is_infinity = 1, return the default point, else return res + // we already set res.is_infinity to be self.is_infinity, so we do not need to + // set it here + let default = Self::default(cs.namespace(|| "default")); + let x = conditionally_select2( + cs.namespace(|| "check if self.is_infinity is zero (x)"), + &default.x, + &res.x, + &self.is_infinity, + )?; + + let y = conditionally_select2( + cs.namespace(|| "check if self.is_infinity is zero (y)"), + &default.y, + &res.y, + &self.is_infinity, + )?; + + // we now perform the remaining scalar mul using complete addition law + let mut acc = Self { x, y, is_infinity: res.is_infinity }; + let mut p_complete = p.to_allocated_point(&self.is_infinity); + + for (i, bit) in complete_bits.iter().enumerate() { + let temp = acc.add(cs.namespace(|| format!("add_complete {i}")), &p_complete)?; + acc = Self::conditionally_select( + cs.namespace(|| format!("acc_complete_iteration_{i}")), + &temp, + &acc, + &Boolean::from(bit.clone()), + )?; + + p_complete = p_complete.double(cs.namespace(|| format!("double_complete {i}")))?; + } + + Ok(acc) + } + + /// If condition outputs a otherwise outputs b + pub fn conditionally_select>( + mut cs: CS, + a: &Self, + b: &Self, + condition: &Boolean, + ) -> Result { + let x = conditionally_select(cs.namespace(|| "select x"), &a.x, &b.x, condition)?; + + let y = conditionally_select(cs.namespace(|| "select y"), &a.y, &b.y, condition)?; + + let is_infinity = conditionally_select( + cs.namespace(|| "select is_infinity"), + &a.is_infinity, + &b.is_infinity, + condition, + )?; + + Ok(Self { x, y, is_infinity }) + } + + /// If condition outputs a otherwise infinity + pub fn select_point_or_infinity>( + mut cs: CS, + a: &Self, + condition: &Boolean, + ) -> Result { + let x = select_num_or_zero(cs.namespace(|| "select x"), &a.x, condition)?; + + let y = select_num_or_zero(cs.namespace(|| "select y"), &a.y, condition)?; + + let is_infinity = + select_num_or_one(cs.namespace(|| 
"select is_infinity"), &a.is_infinity, condition)?; + + Ok(Self { x, y, is_infinity }) + } +} + +#[derive(Clone, Debug)] +/// `AllocatedPoint` but one that is guaranteed to be not infinity +pub struct AllocatedPointNonInfinity { + x: AllocatedNum, + y: AllocatedNum, +} + +impl AllocatedPointNonInfinity { + /// Creates a new `AllocatedPointNonInfinity` from the specified coordinates + #[allow(unused)] + pub const fn new(x: AllocatedNum, y: AllocatedNum) -> Self { Self { x, y } } + + /// Allocates a new point on the curve using coordinates provided by + /// `coords`. + #[allow(unused)] + pub fn alloc>( + mut cs: CS, + coords: Option<(G::Base, G::Base)>, + ) -> Result { + let x = AllocatedNum::alloc(cs.namespace(|| "x"), || { + coords.map_or(Err(SynthesisError::AssignmentMissing), |c| Ok(c.0)) + })?; + let y = AllocatedNum::alloc(cs.namespace(|| "y"), || { + coords.map_or(Err(SynthesisError::AssignmentMissing), |c| Ok(c.1)) + })?; + + Ok(Self { x, y }) + } + + /// Turns an `AllocatedPoint` into an `AllocatedPointNonInfinity` (assumes + /// it is not infinity) + pub fn from_allocated_point(p: &AllocatedPoint) -> Self { + Self { x: p.x.clone(), y: p.y.clone() } + } + + /// Returns an `AllocatedPoint` from an `AllocatedPointNonInfinity` + pub fn to_allocated_point(&self, is_infinity: &AllocatedNum) -> AllocatedPoint { + AllocatedPoint { + x: self.x.clone(), + y: self.y.clone(), + is_infinity: is_infinity.clone(), + } + } + + /// Returns coordinates associated with the point. + #[allow(unused)] + pub const fn get_coordinates(&self) -> (&AllocatedNum, &AllocatedNum) { + (&self.x, &self.y) + } + + /// Add two points assuming self != +/- other + pub fn add_incomplete(&self, mut cs: CS, other: &Self) -> Result + where CS: ConstraintSystem { + // allocate a free variable that an honest prover sets to lambda = + // (y2-y1)/(x2-x1) + let lambda = AllocatedNum::alloc(cs.namespace(|| "lambda"), || { + if *other.x.get_value().get()? == *self.x.get_value().get()? 
{ + Ok(G::Base::ONE) + } else { + Ok( + (*other.y.get_value().get()? - *self.y.get_value().get()?) + * (*other.x.get_value().get()? - *self.x.get_value().get()?).invert().unwrap(), + ) + } + })?; + cs.enforce( + || "Check that lambda is computed correctly", + |lc| lc + lambda.get_variable(), + |lc| lc + other.x.get_variable() - self.x.get_variable(), + |lc| lc + other.y.get_variable() - self.y.get_variable(), + ); + + //************************************************************************/ + // x = lambda * lambda - self.x - other.x; + //************************************************************************/ + let x = AllocatedNum::alloc(cs.namespace(|| "x"), || { + Ok( + *lambda.get_value().get()? * lambda.get_value().get()? + - *self.x.get_value().get()? + - *other.x.get_value().get()?, + ) + })?; + cs.enforce( + || "check that x is correct", + |lc| lc + lambda.get_variable(), + |lc| lc + lambda.get_variable(), + |lc| lc + x.get_variable() + self.x.get_variable() + other.x.get_variable(), + ); + + //************************************************************************/ + // y = lambda * (self.x - x) - self.y; + //************************************************************************/ + let y = AllocatedNum::alloc(cs.namespace(|| "y"), || { + Ok( + *lambda.get_value().get()? * (*self.x.get_value().get()? - *x.get_value().get()?) 
+ - *self.y.get_value().get()?, + ) + })?; + + cs.enforce( + || "Check that y is correct", + |lc| lc + lambda.get_variable(), + |lc| lc + self.x.get_variable() - x.get_variable(), + |lc| lc + y.get_variable() + self.y.get_variable(), + ); + + Ok(Self { x, y }) + } + + /// doubles the point; since this is called with a point not at infinity, it + /// is guaranteed to be not infinity + pub fn double_incomplete>( + &self, + mut cs: CS, + ) -> Result { + // lambda = (3 x^2 + a) / 2 * y + + let x_sq = self.x.square(cs.namespace(|| "x_sq"))?; + + let lambda = AllocatedNum::alloc(cs.namespace(|| "lambda"), || { + let n = G::Base::from(3) * x_sq.get_value().get()? + G::group_params().0; + let d = G::Base::from(2) * *self.y.get_value().get()?; + if d == G::Base::ZERO { + Ok(G::Base::ONE) + } else { + Ok(n * d.invert().unwrap()) + } + })?; + cs.enforce( + || "Check that lambda is computed correctly", + |lc| lc + lambda.get_variable(), + |lc| lc + (G::Base::from(2), self.y.get_variable()), + |lc| lc + (G::Base::from(3), x_sq.get_variable()) + (G::group_params().0, CS::one()), + ); + + let x = AllocatedNum::alloc(cs.namespace(|| "x"), || { + Ok( + *lambda.get_value().get()? * *lambda.get_value().get()? + - *self.x.get_value().get()? + - *self.x.get_value().get()?, + ) + })?; + + cs.enforce( + || "check that x is correct", + |lc| lc + lambda.get_variable(), + |lc| lc + lambda.get_variable(), + |lc| lc + x.get_variable() + (G::Base::from(2), self.x.get_variable()), + ); + + let y = AllocatedNum::alloc(cs.namespace(|| "y"), || { + Ok( + *lambda.get_value().get()? * (*self.x.get_value().get()? - *x.get_value().get()?) 
+ - *self.y.get_value().get()?, + ) + })?; + + cs.enforce( + || "Check that y is correct", + |lc| lc + lambda.get_variable(), + |lc| lc + self.x.get_variable() - x.get_variable(), + |lc| lc + y.get_variable() + self.y.get_variable(), + ); + + Ok(Self { x, y }) + } + + /// If condition outputs a otherwise outputs b + pub fn conditionally_select>( + mut cs: CS, + a: &Self, + b: &Self, + condition: &Boolean, + ) -> Result { + let x = conditionally_select(cs.namespace(|| "select x"), &a.x, &b.x, condition)?; + let y = conditionally_select(cs.namespace(|| "select y"), &a.y, &b.y, condition)?; + + Ok(Self { x, y }) + } +} + +#[cfg(test)] +mod tests { + use expect_test::{expect, Expect}; + use ff::{Field, PrimeFieldBits}; + use group::Curve; + use halo2curves::CurveAffine; + use rand::rngs::OsRng; + + use super::*; + use crate::{ + bellpepper::{ + r1cs::{NovaShape, NovaWitness}, + solver::SatisfyingAssignment, + test_shape_cs::TestShapeCS, + }, + provider::{ + bn256_grumpkin::{bn256, grumpkin}, + Bn256EngineIPA, Bn256EngineKZG, GrumpkinEngine, + }, + traits::{snark::default_ck_hint, Engine}, + }; + + #[derive(Debug, Clone)] + pub struct Point { + x: G::Base, + y: G::Base, + is_infinity: bool, + } + + impl Point { + pub fn new(x: G::Base, y: G::Base, is_infinity: bool) -> Self { Self { x, y, is_infinity } } + + pub fn random_vartime() -> Self { + loop { + let x = G::Base::random(&mut OsRng); + let y = (x.square() * x + G::group_params().1).sqrt(); + if y.is_some().unwrap_u8() == 1 { + return Self { x, y: y.unwrap(), is_infinity: false }; + } + } + } + + /// Add any two points + pub fn add(&self, other: &Self) -> Self { + if self.x == other.x { + // If self == other then call double + if self.y == other.y { + self.double() + } else { + // if self.x == other.x and self.y != other.y then return infinity + Self { x: G::Base::ZERO, y: G::Base::ZERO, is_infinity: true } + } + } else { + self.add_internal(other) + } + } + + /// Add two different points + pub fn 
add_internal(&self, other: &Self) -> Self {
      if self.is_infinity {
        return other.clone();
      }

      if other.is_infinity {
        return self.clone();
      }

      // Chord slope; well-defined because callers guarantee self.x != other.x.
      let lambda = (other.y - self.y) * (other.x - self.x).invert().unwrap();
      let x = lambda * lambda - self.x - other.x;
      let y = lambda * (self.x - x) - self.y;
      Self { x, y, is_infinity: false }
    }

    pub fn double(&self) -> Self {
      if self.is_infinity {
        return Self { x: G::Base::ZERO, y: G::Base::ZERO, is_infinity: true };
      }

      // Tangent slope 3*x^2 / (2*y).
      // NOTE(review): the curve a-term is omitted here — assumes a = 0;
      // confirm this holds for every G these tests are instantiated with.
      let lambda = G::Base::from(3)
        * self.x
        * self.x
        * ((G::Base::ONE + G::Base::ONE) * self.y).invert().unwrap();
      let x = lambda * lambda - self.x - self.x;
      let y = lambda * (self.x - x) - self.y;
      Self { x, y, is_infinity: false }
    }

    pub fn scalar_mul(&self, scalar: &G::Scalar) -> Self {
      // Plain double-and-add over the little-endian bits of the scalar,
      // scanning from the most significant bit downward; accumulator starts
      // at the point at infinity.
      let mut res =
        Self { x: G::Base::ZERO, y: G::Base::ZERO, is_infinity: true };

      let bits = scalar.to_le_bits();
      for i in (0..bits.len()).rev() {
        res = res.double();
        if bits[i] {
          res = self.add(&res);
        }
      }
      res
    }
  }

  // Allocate a random point.
Only used for testing + pub fn alloc_random_point>( + mut cs: CS, + ) -> Result, SynthesisError> { + // get a random point + let p = Point::::random_vartime(); + AllocatedPoint::alloc(cs.namespace(|| "alloc p"), Some((p.x, p.y, p.is_infinity))) + } + + /// Make the point io + pub fn inputize_allocated_point>( + p: &AllocatedPoint, + mut cs: CS, + ) { + let _ = p.x.inputize(cs.namespace(|| "Input point.x")); + let _ = p.y.inputize(cs.namespace(|| "Input point.y")); + let _ = p.is_infinity.inputize(cs.namespace(|| "Input point.is_infinity")); + } + + #[test] + fn test_ecc_ops() { + test_ecc_ops_with::::GE>(); + test_ecc_ops_with::::GE>(); + } + + fn test_ecc_ops_with() + where + G: Group, + C: CurveAffine, { + // perform some curve arithmetic + let a = Point::::random_vartime(); + let b = Point::::random_vartime(); + let c = a.add(&b); + let d = a.double(); + let s = G::Scalar::random(&mut OsRng); + let e = a.scalar_mul(&s); + + // perform the same computation by translating to curve types + let a_curve = C::from_xy( + C::Base::from_repr(a.x.to_repr()).unwrap(), + C::Base::from_repr(a.y.to_repr()).unwrap(), + ) + .unwrap(); + let b_curve = C::from_xy( + C::Base::from_repr(b.x.to_repr()).unwrap(), + C::Base::from_repr(b.y.to_repr()).unwrap(), + ) + .unwrap(); + let c_curve = (a_curve + b_curve).to_affine(); + let d_curve = (a_curve + a_curve).to_affine(); + let e_curve = a_curve.mul(C::Scalar::from_repr(s.to_repr()).unwrap()).to_affine(); + + // transform c, d, and e into curve types + let c_curve_2 = C::from_xy( + C::Base::from_repr(c.x.to_repr()).unwrap(), + C::Base::from_repr(c.y.to_repr()).unwrap(), + ) + .unwrap(); + let d_curve_2 = C::from_xy( + C::Base::from_repr(d.x.to_repr()).unwrap(), + C::Base::from_repr(d.y.to_repr()).unwrap(), + ) + .unwrap(); + let e_curve_2 = C::from_xy( + C::Base::from_repr(e.x.to_repr()).unwrap(), + C::Base::from_repr(e.y.to_repr()).unwrap(), + ) + .unwrap(); + + // check that we have the same outputs + assert_eq!(c_curve, c_curve_2); 
+ assert_eq!(d_curve, d_curve_2); + assert_eq!(e_curve, e_curve_2); + } + + fn synthesize_smul(mut cs: CS) -> (AllocatedPoint, AllocatedPoint, G::Scalar) + where + G: Group, + CS: ConstraintSystem, { + let a = alloc_random_point(cs.namespace(|| "a")).unwrap(); + inputize_allocated_point(&a, cs.namespace(|| "inputize a")); + + let s = G::Scalar::random(&mut OsRng); + // Allocate bits for s + let bits: Vec = s + .to_le_bits() + .into_iter() + .enumerate() + .map(|(i, bit)| AllocatedBit::alloc(cs.namespace(|| format!("bit {i}")), Some(bit))) + .collect::, SynthesisError>>() + .unwrap(); + let e = a.scalar_mul(cs.namespace(|| "Scalar Mul"), &bits).unwrap(); + inputize_allocated_point(&e, cs.namespace(|| "inputize e")); + (a, e, s) + } + + #[test] + fn test_ecc_circuit_ops() { + test_ecc_circuit_ops_with::(&expect!["2738"], &expect!["2724"]); + test_ecc_circuit_ops_with::(&expect!["2738"], &expect!["2724"]); + } + + fn test_ecc_circuit_ops_with(expected_constraints: &Expect, expected_variables: &Expect) + where + E1: Engine::Scalar>, + E2: Engine::Scalar>, { + // First create the shape + let mut cs: TestShapeCS = TestShapeCS::new(); + let _ = synthesize_smul::(cs.namespace(|| "synthesize")); + expected_constraints.assert_eq(&cs.num_constraints().to_string()); + expected_variables.assert_eq(&cs.num_aux().to_string()); + let (shape, ck) = cs.r1cs_shape_and_key(&*default_ck_hint()); + + // Then the satisfying assignment + let mut cs = SatisfyingAssignment::::new(); + let (a, e, s) = synthesize_smul::(cs.namespace(|| "synthesize")); + let (inst, witness) = cs.r1cs_instance_and_witness(&shape, &ck).unwrap(); + + let a_p: Point = Point::new( + a.x.get_value().unwrap(), + a.y.get_value().unwrap(), + a.is_infinity.get_value().unwrap() == ::Base::ONE, + ); + let e_p: Point = Point::new( + e.x.get_value().unwrap(), + e.y.get_value().unwrap(), + e.is_infinity.get_value().unwrap() == ::Base::ONE, + ); + let e_new = a_p.scalar_mul(&s); + assert!(e_p.x == e_new.x && e_p.y == 
e_new.y); + // Make sure that this is satisfiable + shape.is_sat(&ck, &inst, &witness).unwrap(); + } + + fn synthesize_add_equal(mut cs: CS) -> (AllocatedPoint, AllocatedPoint) + where + G: Group, + CS: ConstraintSystem, { + let a = alloc_random_point(cs.namespace(|| "a")).unwrap(); + inputize_allocated_point(&a, cs.namespace(|| "inputize a")); + let e = a.add(cs.namespace(|| "add a to a"), &a).unwrap(); + inputize_allocated_point(&e, cs.namespace(|| "inputize e")); + (a, e) + } + + #[test] + fn test_ecc_circuit_add_equal() { + test_ecc_circuit_add_equal_with::(); + test_ecc_circuit_add_equal_with::(); + } + + fn test_ecc_circuit_add_equal_with() + where + E1: Engine::Scalar>, + E2: Engine::Scalar>, { + // First create the shape + let mut cs: TestShapeCS = TestShapeCS::new(); + let _ = synthesize_add_equal::(cs.namespace(|| "synthesize add equal")); + println!("Number of constraints: {}", cs.num_constraints()); + let (shape, ck) = cs.r1cs_shape_and_key(&*default_ck_hint()); + + // Then the satisfying assignment + let mut cs = SatisfyingAssignment::::new(); + let (a, e) = synthesize_add_equal::(cs.namespace(|| "synthesize add equal")); + let (inst, witness) = cs.r1cs_instance_and_witness(&shape, &ck).unwrap(); + let a_p: Point = Point::new( + a.x.get_value().unwrap(), + a.y.get_value().unwrap(), + a.is_infinity.get_value().unwrap() == ::Base::ONE, + ); + let e_p: Point = Point::new( + e.x.get_value().unwrap(), + e.y.get_value().unwrap(), + e.is_infinity.get_value().unwrap() == ::Base::ONE, + ); + let e_new = a_p.add(&a_p); + assert!(e_p.x == e_new.x && e_p.y == e_new.y); + // Make sure that it is satisfiable + shape.is_sat(&ck, &inst, &witness).unwrap(); + } + + fn synthesize_add_negation(mut cs: CS) -> AllocatedPoint + where + G: Group, + CS: ConstraintSystem, { + let a = alloc_random_point(cs.namespace(|| "a")).unwrap(); + inputize_allocated_point(&a, cs.namespace(|| "inputize a")); + let b = &mut a.clone(); + b.y = + AllocatedNum::alloc(cs.namespace(|| "allocate 
negation of a"), || Ok(G::Base::ZERO)).unwrap(); + inputize_allocated_point(b, cs.namespace(|| "inputize b")); + let e = a.add(cs.namespace(|| "add a to b"), b).unwrap(); + e + } + + #[test] + fn test_ecc_circuit_add_negation() { + test_ecc_circuit_add_negation_with::(&expect!["39"], &expect![ + "34" + ]); + test_ecc_circuit_add_negation_with::(&expect!["39"], &expect![ + "34" + ]); + } + + fn test_ecc_circuit_add_negation_with( + expected_constraints: &Expect, + expected_variables: &Expect, + ) where + E1: Engine::Scalar>, + E2: Engine::Scalar>, + { + // First create the shape + let mut cs: TestShapeCS = TestShapeCS::new(); + let _ = synthesize_add_negation::(cs.namespace(|| "synthesize add equal")); + expected_constraints.assert_eq(&cs.num_constraints().to_string()); + expected_variables.assert_eq(&cs.num_aux().to_string()); + let (shape, ck) = cs.r1cs_shape_and_key(&*default_ck_hint()); + + // Then the satisfying assignment + let mut cs = SatisfyingAssignment::::new(); + let e = synthesize_add_negation::(cs.namespace(|| "synthesize add negation")); + let (inst, witness) = cs.r1cs_instance_and_witness(&shape, &ck).unwrap(); + let e_p: Point = Point::new( + e.x.get_value().unwrap(), + e.y.get_value().unwrap(), + e.is_infinity.get_value().unwrap() == ::Base::ONE, + ); + assert!(e_p.is_infinity); + // Make sure that it is satisfiable + shape.is_sat(&ck, &inst, &witness).unwrap(); + } +} diff --git a/prover/src/gadgets/mod.rs b/prover/src/gadgets/mod.rs new file mode 100644 index 0000000..4345bbd --- /dev/null +++ b/prover/src/gadgets/mod.rs @@ -0,0 +1,24 @@ +//! This module implements various gadgets necessary for Nova and applications +//! built with Nova. 
mod ecc;
pub(crate) use ecc::AllocatedPoint;

mod nonnative;
pub(crate) use nonnative::{
  bignat::{nat_to_limbs, BigNat},
  util::{f_to_nat, Num},
};

mod r1cs;
pub(crate) use r1cs::{
  conditionally_select_alloc_relaxed_r1cs,
  conditionally_select_vec_allocated_relaxed_r1cs_instance, AllocatedR1CSInstance,
  AllocatedRelaxedR1CSInstance,
};

mod utils;
#[cfg(test)] pub(crate) use utils::alloc_one;
pub(crate) use utils::{
  alloc_bignat_constant, alloc_num_equals, alloc_scalar_as_base, alloc_zero,
  conditionally_select_allocated_bit, conditionally_select_bignat, le_bits_to_num, scalar_as_base,
};
diff --git a/prover/src/gadgets/nonnative/bignat.rs b/prover/src/gadgets/nonnative/bignat.rs
new file mode 100644
index 0000000..2561777
--- /dev/null
+++ b/prover/src/gadgets/nonnative/bignat.rs
@@ -0,0 +1,849 @@
use std::{
  borrow::Borrow,
  cmp::{max, min},
  convert::From,
};

use bellpepper_core::{ConstraintSystem, LinearCombination, SynthesisError};
use ff::PrimeField;
use itertools::Itertools as _;
use num_bigint::BigInt;
use num_traits::cast::ToPrimitive;

use super::{
  util::{f_to_nat, nat_to_f, Bitvector, Num},
  OptionExt,
};

/// Compute the natural number represented by an array of limbs.
/// The limbs are assumed to be in base `2^limb_width`, least-significant
/// limb first.
pub fn limbs_to_nat<Scalar: PrimeField, B: Borrow<Scalar>, I: DoubleEndedIterator<Item = B>>(
  limbs: I,
  limb_width: usize,
) -> BigInt {
  // Horner evaluation from the most-significant limb down: shift the
  // accumulator by one limb, then add the next limb's field value.
  limbs.rev().fold(BigInt::from(0), |mut acc, limb| {
    acc <<= limb_width as u32;
    acc += f_to_nat(limb.borrow());
    acc
  })
}

// Returns the integer 2^n - 1, i.e. a mask of n one-bits.
fn int_with_n_ones(n: usize) -> BigInt {
  let mut m = BigInt::from(1);
  m <<= n as u32;
  m -= 1;
  m
}

/// Compute the limbs encoding a natural number.
/// The limbs are produced in base `2^limb_width`, least-significant limb
/// first.
pub fn nat_to_limbs<Scalar: PrimeField>(
  nat: &BigInt,
  limb_width: usize,
  n_limbs: usize,
) -> Result<Vec<Scalar>, SynthesisError> {
  let mask = int_with_n_ones(limb_width);
  let mut nat = nat.clone();
  if nat.bits() as usize <= n_limbs * limb_width {
    Ok(
      (0..n_limbs)
        .map(|_| {
          // Take the low limb_width bits, then shift them out for the next limb.
          let r = &nat & &mask;
          nat >>= limb_width as u32;
          nat_to_f(&r).unwrap()
        })
        .collect(),
    )
  } else {
    // The value cannot be represented in the requested number of limbs.
    eprintln!("nat {nat} does not fit in {n_limbs} limbs of width {limb_width}");
    Err(SynthesisError::Unsatisfiable)
  }
}

#[derive(Clone, PartialEq, Eq)]
pub struct BigNatParams {
  // Known lower bound (in bits) on the represented value; 0 when unknown.
  pub min_bits: usize,
  // Maximum value any single limb may take — limbs can exceed limb_width
  // bits before a carry pass normalizes them.
  pub max_word: BigInt,
  pub limb_width: usize,
  pub n_limbs: usize,
}

impl BigNatParams {
  pub fn new(limb_width: usize, n_limbs: usize) -> Self {
    // Fresh limbs are bounded by 2^limb_width - 1.
    let mut max_word = BigInt::from(1) << limb_width as u32;
    max_word -= 1;
    Self { max_word, n_limbs, limb_width, min_bits: 0 }
  }
}

/// A representation of a large natural number (a member of {0, 1, 2, ... })
#[derive(Clone)]
pub struct BigNat<Scalar: PrimeField> {
  /// The linear combinations which constrain the value of each limb of the
  /// number
  pub limbs: Vec<LinearCombination<Scalar>>,
  /// The witness values for each limb (filled at witness-time)
  pub limb_values: Option<Vec<Scalar>>,
  /// The value of the whole number (filled at witness-time)
  pub value: Option<BigInt>,
  /// Parameters
  pub params: BigNatParams,
}

impl<Scalar: PrimeField> PartialEq for BigNat<Scalar> {
  // Equality compares witness value and parameters only, not the limb
  // linear combinations.
  fn eq(&self, other: &Self) -> bool { self.value == other.value && self.params == other.params }
}
impl<Scalar: PrimeField> Eq for BigNat<Scalar> {}

impl<Scalar: PrimeField> From<BigNat<Scalar>> for Polynomial<Scalar> {
  // A BigNat is reinterpreted as a polynomial whose coefficients are its limbs.
  fn from(other: BigNat<Scalar>) -> Self {
    Self { coefficients: other.limbs, values: other.limb_values }
  }
}

impl<Scalar: PrimeField> BigNat<Scalar> {
  /// Allocates a `BigNat` in the circuit with `n_limbs` limbs of width
  /// `limb_width` each. If `max_word` is missing, then it is assumed to
  /// be `2^limb_width - 1`. The value is provided by a closure
  /// returning limb values.
+ pub fn alloc_from_limbs( + mut cs: CS, + f: F, + max_word: Option, + limb_width: usize, + n_limbs: usize, + ) -> Result + where + CS: ConstraintSystem, + F: FnOnce() -> Result, SynthesisError>, + { + let values_cell = f(); + let mut value = None; + let mut limb_values = None; + let limbs = (0..n_limbs) + .map(|limb_i| { + cs.alloc( + || format!("limb {limb_i}"), + || match values_cell { + Ok(ref vs) => { + if vs.len() != n_limbs { + eprintln!("Values do not match stated limb count"); + return Err(SynthesisError::Unsatisfiable); + } + if value.is_none() { + value = Some(limbs_to_nat::(vs.iter(), limb_width)); + } + if limb_values.is_none() { + limb_values = Some(vs.clone()); + } + Ok(vs[limb_i]) + }, + // Hack b/c SynthesisError and io::Error don't implement Clone + Err(ref e) => Err(SynthesisError::from(std::io::Error::new( + std::io::ErrorKind::Other, + format!("{e}"), + ))), + }, + ) + .map(|v| LinearCombination::zero() + v) + }) + .collect::, _>>()?; + Ok(Self { + value, + limb_values, + limbs, + params: BigNatParams { + min_bits: 0, + n_limbs, + max_word: max_word.unwrap_or_else(|| int_with_n_ones(limb_width)), + limb_width, + }, + }) + } + + /// Allocates a `BigNat` in the circuit with `n_limbs` limbs of width + /// `limb_width` each. The `max_word` is guaranteed to be `(2 << + /// limb_width) - 1`. The value is provided by a closure returning a + /// natural number. 
+ pub fn alloc_from_nat( + mut cs: CS, + f: F, + limb_width: usize, + n_limbs: usize, + ) -> Result + where + CS: ConstraintSystem, + F: FnOnce() -> Result, + { + let all_values_cell = + f().and_then(|v| Ok((nat_to_limbs::(&v, limb_width, n_limbs)?, v))); + let mut value = None; + let mut limb_values = Vec::new(); + let limbs = (0..n_limbs) + .map(|limb_i| { + cs.alloc( + || format!("limb {limb_i}"), + || match all_values_cell { + Ok((ref vs, ref v)) => { + if value.is_none() { + value = Some(v.clone()); + } + limb_values.push(vs[limb_i]); + Ok(vs[limb_i]) + }, + // Hack b/c SynthesisError and io::Error don't implement Clone + Err(ref e) => Err(SynthesisError::from(std::io::Error::new( + std::io::ErrorKind::Other, + format!("{e}"), + ))), + }, + ) + .map(|v| LinearCombination::zero() + v) + }) + .collect::, _>>()?; + Ok(Self { + value, + limb_values: (!limb_values.is_empty()).then_some(limb_values), + limbs, + params: BigNatParams::new(limb_width, n_limbs), + }) + } + + /// Allocates a `BigNat` in the circuit with `n_limbs` limbs of width + /// `limb_width` each. The `max_word` is guaranteed to be `(2 << + /// limb_width) - 1`. The value is provided by an allocated number + pub fn from_num>( + mut cs: CS, + n: &Num, + limb_width: usize, + n_limbs: usize, + ) -> Result { + let bignat = Self::alloc_from_nat( + cs.namespace(|| "bignat"), + || Ok({ n.value.as_ref().map(|n| f_to_nat(n)).ok_or(SynthesisError::AssignmentMissing)? 
}), + limb_width, + n_limbs, + )?; + + // check if bignat equals n + // (1) decompose `bignat` into a bitvector `bv` + let bv = bignat.decompose(cs.namespace(|| "bv"))?; + // (2) recompose bits and check if it equals n + n.is_equal(cs.namespace(|| "n"), &bv); + + Ok(bignat) + } + + pub fn as_limbs(&self) -> Vec> { + let mut limbs = Vec::new(); + for (i, lc) in self.limbs.iter().enumerate() { + limbs.push(Num::new(self.limb_values.as_ref().map(|vs| vs[i]), lc.clone())); + } + limbs + } + + pub fn assert_well_formed>( + &self, + mut cs: CS, + ) -> Result<(), SynthesisError> { + // swap the option and iterator + let limb_values_split = + (0..self.limbs.len()).map(|i| self.limb_values.as_ref().map(|vs| vs[i])); + for (i, (limb, limb_value)) in self.limbs.iter().zip_eq(limb_values_split).enumerate() { + Num::new(limb_value, limb.clone()) + .fits_in_bits(cs.namespace(|| format!("{i}")), self.params.limb_width)?; + } + Ok(()) + } + + /// Break `self` up into a bit-vector. + pub fn decompose>( + &self, + mut cs: CS, + ) -> Result, SynthesisError> { + let limb_values_split = + (0..self.limbs.len()).map(|i| self.limb_values.as_ref().map(|vs| vs[i])); + let bitvectors: Vec> = self + .limbs + .iter() + .zip_eq(limb_values_split) + .enumerate() + .map(|(i, (limb, limb_value))| { + Num::new(limb_value, limb.clone()) + .decompose(cs.namespace(|| format!("subdecmop {i}")), self.params.limb_width) + }) + .collect::, _>>()?; + let mut bits = Vec::new(); + let mut values = Vec::new(); + let mut allocations = Vec::new(); + for bv in bitvectors { + bits.extend(bv.bits); + if let Some(vs) = bv.values { + values.extend(vs) + }; + allocations.extend(bv.allocations); + } + let values = (!values.is_empty()).then_some(values); + Ok(Bitvector { bits, values, allocations }) + } + + pub fn enforce_limb_width_agreement( + &self, + other: &Self, + location: &str, + ) -> Result { + if self.params.limb_width == other.params.limb_width { + Ok(self.params.limb_width) + } else { + eprintln!( + "Limb 
widths {}, {}, do not agree at {}", + self.params.limb_width, other.params.limb_width, location + ); + Err(SynthesisError::Unsatisfiable) + } + } + + pub fn from_poly(poly: Polynomial, limb_width: usize, max_word: BigInt) -> Self { + Self { + params: BigNatParams { + min_bits: 0, + max_word, + n_limbs: poly.coefficients.len(), + limb_width, + }, + limbs: poly.coefficients, + value: poly + .values + .as_ref() + .map(|limb_values| limbs_to_nat::(limb_values.iter(), limb_width)), + limb_values: poly.values, + } + } + + /// Constrain `self` to be equal to `other`, after carrying both. + pub fn equal_when_carried>( + &self, + mut cs: CS, + other: &Self, + ) -> Result<(), SynthesisError> { + self.enforce_limb_width_agreement(other, "equal_when_carried")?; + + // We'll propagate carries over the first `n` limbs. + let n = min(self.limbs.len(), other.limbs.len()); + let target_base = BigInt::from(1u8) << self.params.limb_width as u32; + let mut accumulated_extra = BigInt::from(0usize); + let max_word = max(&self.params.max_word, &other.params.max_word); + let carry_bits = (((max_word.to_f64().unwrap() * 2.0).log2() - self.params.limb_width as f64) + .ceil() + + 0.1) as usize; + let mut carry_in = Num::new(Some(Scalar::ZERO), LinearCombination::zero()); + + for i in 0..n { + let carry = Num::alloc(cs.namespace(|| format!("carry value {i}")), || { + Ok( + nat_to_f( + &((f_to_nat(&self.limb_values.grab()?[i]) + + f_to_nat(&carry_in.value.unwrap()) + + max_word + - f_to_nat(&other.limb_values.grab()?[i])) + / &target_base), + ) + .unwrap(), + ) + })?; + accumulated_extra += max_word; + + cs.enforce( + || format!("carry {i}"), + |lc| lc, + |lc| lc, + |lc| { + lc + &carry_in.num + &self.limbs[i] - &other.limbs[i] + + (nat_to_f(max_word).unwrap(), CS::one()) + - (nat_to_f(&target_base).unwrap(), &carry.num) + - (nat_to_f(&(&accumulated_extra % &target_base)).unwrap(), CS::one()) + }, + ); + + accumulated_extra /= &target_base; + + if i < n - 1 { + 
carry.fits_in_bits(cs.namespace(|| format!("carry {i} decomp")), carry_bits)?; + } else { + cs.enforce( + || format!("carry {i} is out"), + |lc| lc, + |lc| lc, + |lc| lc + &carry.num - (nat_to_f(&accumulated_extra).unwrap(), CS::one()), + ); + } + carry_in = carry; + } + + for (i, zero_limb) in self.limbs.iter().enumerate().skip(n) { + cs.enforce(|| format!("zero self {i}"), |lc| lc, |lc| lc, |lc| lc + zero_limb); + } + for (i, zero_limb) in other.limbs.iter().enumerate().skip(n) { + cs.enforce(|| format!("zero other {i}"), |lc| lc, |lc| lc, |lc| lc + zero_limb); + } + Ok(()) + } + + /// Constrain `self` to be equal to `other`, after carrying both. + /// Uses regrouping internally to take full advantage of the field size and + /// reduce the amount of carrying. + pub fn equal_when_carried_regroup>( + &self, + mut cs: CS, + other: &Self, + ) -> Result<(), SynthesisError> { + self.enforce_limb_width_agreement(other, "equal_when_carried_regroup")?; + let max_word = max(&self.params.max_word, &other.params.max_word); + let carry_bits = (((max_word.to_f64().unwrap() * 2.0).log2() - self.params.limb_width as f64) + .ceil() + + 0.1) as usize; + let limbs_per_group = max((Scalar::CAPACITY as usize - carry_bits) / self.params.limb_width, 1); + + let self_grouped = self.group_limbs(limbs_per_group); + let other_grouped = other.group_limbs(limbs_per_group); + self_grouped.equal_when_carried(cs.namespace(|| "grouped"), &other_grouped) + } + + pub fn add(&self, other: &Self) -> Result { + self.enforce_limb_width_agreement(other, "add")?; + let n_limbs = max(self.params.n_limbs, other.params.n_limbs); + let max_word = &self.params.max_word + &other.params.max_word; + let limbs: Vec> = (0..n_limbs) + .map(|i| match (self.limbs.get(i), other.limbs.get(i)) { + (Some(a), Some(b)) => a.clone() + b, + (Some(a), None) => a.clone(), + (None, Some(b)) => b.clone(), + (None, None) => unreachable!(), + }) + .collect(); + let limb_values: Option> = self.limb_values.as_ref().and_then(|x| { + 
other.limb_values.as_ref().map(|y| { + (0..n_limbs) + .map(|i| match (x.get(i), y.get(i)) { + (Some(a), Some(b)) => { + let mut t = *a; + t.add_assign(b); + t + }, + (Some(a), None) | (None, Some(a)) => *a, + (None, None) => unreachable!(), + }) + .collect() + }) + }); + let value = self.value.as_ref().and_then(|x| other.value.as_ref().map(|y| x + y)); + Ok(Self { + limb_values, + value, + limbs, + params: BigNatParams { + min_bits: max(self.params.min_bits, other.params.min_bits), + n_limbs, + max_word, + limb_width: self.params.limb_width, + }, + }) + } + + /// Compute a `BigNat` constrained to be equal to `self * other % modulus`. + pub fn mult_mod>( + &self, + mut cs: CS, + other: &Self, + modulus: &Self, + ) -> Result<(Self, Self), SynthesisError> { + self.enforce_limb_width_agreement(other, "mult_mod")?; + let limb_width = self.params.limb_width; + let quotient_bits = (self.n_bits() + other.n_bits()).saturating_sub(modulus.params.min_bits); + let quotient_limbs = quotient_bits.saturating_sub(1) / limb_width + 1; + let quotient = Self::alloc_from_nat( + cs.namespace(|| "quotient"), + || { + Ok({ + let mut x = self.value.grab()?.clone(); + x *= other.value.grab()?; + x /= modulus.value.grab()?; + x + }) + }, + self.params.limb_width, + quotient_limbs, + )?; + quotient.assert_well_formed(cs.namespace(|| "quotient rangecheck"))?; + let remainder = Self::alloc_from_nat( + cs.namespace(|| "remainder"), + || { + Ok({ + let mut x = self.value.grab()?.clone(); + x *= other.value.grab()?; + x %= modulus.value.grab()?; + x + }) + }, + self.params.limb_width, + modulus.limbs.len(), + )?; + remainder.assert_well_formed(cs.namespace(|| "remainder rangecheck"))?; + let a_poly = Polynomial::from(self.clone()); + let b_poly = Polynomial::from(other.clone()); + let mod_poly = Polynomial::from(modulus.clone()); + let q_poly = Polynomial::from(quotient.clone()); + let r_poly = Polynomial::from(remainder.clone()); + + // a * b + let left = a_poly.alloc_product(cs.namespace(|| 
"left"), &b_poly)?; + let right_product = q_poly.alloc_product(cs.namespace(|| "right_product"), &mod_poly)?; + // q * m + r + let right = right_product.sum(&r_poly); + + let left_max_word = { + let mut x = BigInt::from(min(self.limbs.len(), other.limbs.len())); + x *= &self.params.max_word; + x *= &other.params.max_word; + x + }; + let right_max_word = { + let mut x = BigInt::from(min(quotient.limbs.len(), modulus.limbs.len())); + x *= "ient.params.max_word; + x *= &modulus.params.max_word; + x += &remainder.params.max_word; + x + }; + + let left_int = Self::from_poly(left, limb_width, left_max_word); + let right_int = Self::from_poly(right, limb_width, right_max_word); + left_int.equal_when_carried_regroup(cs.namespace(|| "carry"), &right_int)?; + Ok((quotient, remainder)) + } + + /// Compute a `BigNat` constrained to be equal to `self * other % modulus`. + pub fn red_mod>( + &self, + mut cs: CS, + modulus: &Self, + ) -> Result { + self.enforce_limb_width_agreement(modulus, "red_mod")?; + let limb_width = self.params.limb_width; + let quotient_bits = self.n_bits().saturating_sub(modulus.params.min_bits); + let quotient_limbs = quotient_bits.saturating_sub(1) / limb_width + 1; + let quotient = Self::alloc_from_nat( + cs.namespace(|| "quotient"), + || Ok(self.value.grab()? / modulus.value.grab()?), + self.params.limb_width, + quotient_limbs, + )?; + quotient.assert_well_formed(cs.namespace(|| "quotient rangecheck"))?; + let remainder = Self::alloc_from_nat( + cs.namespace(|| "remainder"), + || Ok(self.value.grab()? 
% modulus.value.grab()?), + self.params.limb_width, + modulus.limbs.len(), + )?; + remainder.assert_well_formed(cs.namespace(|| "remainder rangecheck"))?; + let mod_poly = Polynomial::from(modulus.clone()); + let q_poly = Polynomial::from(quotient.clone()); + let r_poly = Polynomial::from(remainder.clone()); + + // q * m + r + let right_product = q_poly.alloc_product(cs.namespace(|| "right_product"), &mod_poly)?; + let right = right_product.sum(&r_poly); + + let right_max_word = { + let mut x = BigInt::from(min(quotient.limbs.len(), modulus.limbs.len())); + x *= "ient.params.max_word; + x *= &modulus.params.max_word; + x += &remainder.params.max_word; + x + }; + + let right_int = Self::from_poly(right, limb_width, right_max_word); + self.equal_when_carried_regroup(cs.namespace(|| "carry"), &right_int)?; + Ok(remainder) + } + + /// Combines limbs into groups. + pub fn group_limbs(&self, limbs_per_group: usize) -> Self { + let n_groups = (self.limbs.len() - 1) / limbs_per_group + 1; + let limb_values = self.limb_values.as_ref().map(|vs| { + let mut values: Vec = vec![Scalar::ZERO; n_groups]; + let mut shift = Scalar::ONE; + let limb_block = (0..self.params.limb_width).fold(Scalar::ONE, |mut l, _| { + l = l.double(); + l + }); + for (i, v) in vs.iter().enumerate() { + if i % limbs_per_group == 0 { + shift = Scalar::ONE; + } + let mut a = shift; + a *= v; + values[i / limbs_per_group].add_assign(&a); + shift.mul_assign(&limb_block); + } + values + }); + let limbs = { + let mut limbs: Vec> = vec![LinearCombination::zero(); n_groups]; + let mut shift = Scalar::ONE; + let limb_block = (0..self.params.limb_width).fold(Scalar::ONE, |mut l, _| { + l = l.double(); + l + }); + for (i, limb) in self.limbs.iter().enumerate() { + if i % limbs_per_group == 0 { + shift = Scalar::ONE; + } + limbs[i / limbs_per_group] = + std::mem::replace(&mut limbs[i / limbs_per_group], LinearCombination::zero()) + + (shift, limb); + shift.mul_assign(&limb_block); + } + limbs + }; + let max_word = 
(0..limbs_per_group).fold(BigInt::from(0u8), |mut acc, i| { + acc.set_bit((i * self.params.limb_width) as u64, true); + acc + }) * &self.params.max_word; + Self { + params: BigNatParams { + min_bits: self.params.min_bits, + limb_width: self.params.limb_width * limbs_per_group, + n_limbs: limbs.len(), + max_word, + }, + limbs, + limb_values, + value: self.value.clone(), + } + } + + pub fn n_bits(&self) -> usize { + assert!(self.params.n_limbs > 0); + self.params.limb_width * (self.params.n_limbs - 1) + self.params.max_word.bits() as usize + } +} + +pub struct Polynomial { + pub coefficients: Vec>, + pub values: Option>, +} + +impl Polynomial { + pub fn alloc_product>( + &self, + mut cs: CS, + other: &Self, + ) -> Result { + let n_product_coeffs = self.coefficients.len() + other.coefficients.len() - 1; + let values = self.values.as_ref().and_then(|self_vs| { + other.values.as_ref().map(|other_vs| { + let mut values: Vec = + std::iter::repeat_with(|| Scalar::ZERO).take(n_product_coeffs).collect(); + for (self_i, self_v) in self_vs.iter().enumerate() { + for (other_i, other_v) in other_vs.iter().enumerate() { + let mut v = *self_v; + v.mul_assign(other_v); + values[self_i + other_i].add_assign(&v); + } + } + values + }) + }); + let coefficients = (0..n_product_coeffs) + .map(|i| { + Ok(LinearCombination::zero() + cs.alloc(|| format!("prod {i}"), || Ok(values.grab()?[i]))?) 
+ }) + .collect::>, SynthesisError>>()?; + let product = Self { coefficients, values }; + let one = Scalar::ONE; + let mut x = Scalar::ZERO; + for _ in 1..(n_product_coeffs + 1) { + x.add_assign(&one); + cs.enforce( + || format!("pointwise product @ {x:?}"), + |lc| { + let mut i = Scalar::ONE; + self.coefficients.iter().fold(lc, |lc, c| { + let r = lc + (i, c); + i.mul_assign(&x); + r + }) + }, + |lc| { + let mut i = Scalar::ONE; + other.coefficients.iter().fold(lc, |lc, c| { + let r = lc + (i, c); + i.mul_assign(&x); + r + }) + }, + |lc| { + let mut i = Scalar::ONE; + product.coefficients.iter().fold(lc, |lc, c| { + let r = lc + (i, c); + i.mul_assign(&x); + r + }) + }, + ) + } + Ok(product) + } + + pub fn sum(&self, other: &Self) -> Self { + let n_coeffs = max(self.coefficients.len(), other.coefficients.len()); + let values = self.values.as_ref().and_then(|self_vs| { + other.values.as_ref().map(|other_vs| { + (0..n_coeffs) + .map(|i| { + let mut s = Scalar::ZERO; + if i < self_vs.len() { + s.add_assign(&self_vs[i]); + } + if i < other_vs.len() { + s.add_assign(&other_vs[i]); + } + s + }) + .collect() + }) + }); + let coefficients = (0..n_coeffs) + .map(|i| { + let mut lc = LinearCombination::zero(); + if i < self.coefficients.len() { + lc = lc + &self.coefficients[i]; + } + if i < other.coefficients.len() { + lc = lc + &other.coefficients[i]; + } + lc + }) + .collect(); + Self { coefficients, values } + } +} + +#[cfg(test)] +mod tests { + use bellpepper_core::{test_cs::TestConstraintSystem, Circuit}; + #[cfg(not(target_arch = "wasm32"))] + use proptest::prelude::*; + + use super::*; + use crate::provider::bn256_grumpkin::bn256::Scalar; + + pub struct PolynomialMultiplier { + pub a: Vec, + pub b: Vec, + } + + impl Circuit for PolynomialMultiplier { + fn synthesize>(self, cs: &mut CS) -> Result<(), SynthesisError> { + let a = Polynomial { + coefficients: self + .a + .iter() + .enumerate() + .map(|(i, x)| { + Ok(LinearCombination::zero() + cs.alloc(|| 
format!("coeff_a {i}"), || Ok(*x))?) + }) + .collect::>, SynthesisError>>()?, + values: Some(self.a), + }; + let b = Polynomial { + coefficients: self + .b + .iter() + .enumerate() + .map(|(i, x)| { + Ok(LinearCombination::zero() + cs.alloc(|| format!("coeff_b {i}"), || Ok(*x))?) + }) + .collect::>, SynthesisError>>()?, + values: Some(self.b), + }; + let _prod = a.alloc_product(cs.namespace(|| "product"), &b)?; + Ok(()) + } + } + + #[test] + fn test_polynomial_multiplier_circuit() { + let mut cs = TestConstraintSystem::::new(); + + let circuit = PolynomialMultiplier { + a: [1, 1, 1].iter().map(|i| Scalar::from_u128(*i)).collect(), + b: [1, 1].iter().map(|i| Scalar::from_u128(*i)).collect(), + }; + + circuit.synthesize(&mut cs).expect("synthesis failed"); + + if let Some(token) = cs.which_is_unsatisfied() { + eprintln!("Error: {} is unsatisfied", token); + } + } + + #[derive(Debug)] + pub struct BigNatBitDecompInputs { + pub n: BigInt, + } + + pub struct BigNatBitDecompParams { + pub limb_width: usize, + pub n_limbs: usize, + } + + pub struct BigNatBitDecomp { + inputs: Option, + params: BigNatBitDecompParams, + } + + impl Circuit for BigNatBitDecomp { + fn synthesize>(self, cs: &mut CS) -> Result<(), SynthesisError> { + let n = BigNat::alloc_from_nat( + cs.namespace(|| "n"), + || Ok(self.inputs.grab()?.n.clone()), + self.params.limb_width, + self.params.n_limbs, + )?; + n.decompose(cs.namespace(|| "decomp"))?; + Ok(()) + } + } + + #[cfg(not(target_arch = "wasm32"))] + proptest! { + #![proptest_config(ProptestConfig { + cases: 10, // this test is costlier as max n gets larger + .. 
ProptestConfig::default() + })] + #[test] + fn test_big_nat_can_decompose(n in any::(), limb_width in 40u8..200) { + let n = n as usize; + + let n_limbs = if n == 0 { + 1 + } else { + (n - 1) / limb_width as usize + 1 + }; + + let circuit = BigNatBitDecomp { + inputs: Some(BigNatBitDecompInputs { + n: BigInt::from(n), + }), + params: BigNatBitDecompParams { + limb_width: limb_width as usize, + n_limbs, + }, + }; + let mut cs = TestConstraintSystem::::new(); + circuit.synthesize(&mut cs).expect("synthesis failed"); + prop_assert!(cs.is_satisfied()); + } + } +} diff --git a/prover/src/gadgets/nonnative/mod.rs b/prover/src/gadgets/nonnative/mod.rs new file mode 100644 index 0000000..f81b709 --- /dev/null +++ b/prover/src/gadgets/nonnative/mod.rs @@ -0,0 +1,35 @@ +//! This module implements various gadgets necessary for doing non-native +//! arithmetic Code in this module is adapted from [bellman-bignat](https://github.com/alex-ozdemir/bellman-bignat), which is licenced under MIT + +use bellpepper_core::SynthesisError; +use ff::PrimeField; + +trait OptionExt { + fn grab(&self) -> Result<&T, SynthesisError>; +} + +impl OptionExt for Option { + fn grab(&self) -> Result<&T, SynthesisError> { + self.as_ref().ok_or(SynthesisError::AssignmentMissing) + } +} + +trait BitAccess { + fn get_bit(&self, i: usize) -> Option; +} + +impl BitAccess for Scalar { + fn get_bit(&self, i: usize) -> Option { + if i as u32 >= Scalar::NUM_BITS { + return None; + } + + let (byte_pos, bit_pos) = (i / 8, i % 8); + let byte = self.to_repr().as_ref()[byte_pos]; + let bit = byte >> bit_pos & 1; + Some(bit == 1) + } +} + +pub mod bignat; +pub mod util; diff --git a/prover/src/gadgets/nonnative/util.rs b/prover/src/gadgets/nonnative/util.rs new file mode 100644 index 0000000..e5f8d78 --- /dev/null +++ b/prover/src/gadgets/nonnative/util.rs @@ -0,0 +1,254 @@ +use std::{ + convert::From, + io::{self, Write}, +}; + +use bellpepper_core::{ + num::AllocatedNum, ConstraintSystem, LinearCombination, 
SynthesisError, Variable, +}; +use byteorder::WriteBytesExt; +use ff::PrimeField; +use num_bigint::{BigInt, Sign}; + +use super::{BitAccess, OptionExt}; + +#[derive(Clone)] +/// A representation of a bit +pub struct Bit { + /// The linear combination which constrain the value of the bit + pub bit: LinearCombination, + #[allow(unused)] + /// The value of the bit (filled at witness-time) + pub value: Option, +} + +#[derive(Clone)] +/// A representation of a bit-vector +pub struct Bitvector { + /// The linear combination which constrain the values of the bits + pub bits: Vec>, + /// The value of the bits (filled at witness-time) + pub values: Option>, + /// Allocated bit variables + pub allocations: Vec>, +} + +impl Bit { + /// Allocate a variable in the constraint system which can only be a + /// boolean value. + pub fn alloc>( + mut cs: CS, + value: Option, + ) -> Result { + let var = cs.alloc( + || "boolean", + || { + if *value.grab()? { + Ok(Scalar::ONE) + } else { + Ok(Scalar::ZERO) + } + }, + )?; + + // Constrain: (1 - a) * a = 0 + // This constrains a to be either 0 or 1. + cs.enforce(|| "boolean constraint", |lc| lc + CS::one() - var, |lc| lc + var, |lc| lc); + + Ok(Self { bit: LinearCombination::zero() + var, value }) + } +} + +pub struct Num { + pub num: LinearCombination, + pub value: Option, +} + +impl Num { + pub const fn new(value: Option, num: LinearCombination) -> Self { + Self { value, num } + } + + pub fn alloc(mut cs: CS, value: F) -> Result + where + CS: ConstraintSystem, + F: FnOnce() -> Result, { + let mut new_value = None; + let var = cs.alloc( + || "num", + || { + let tmp = value()?; + + new_value = Some(tmp); + + Ok(tmp) + }, + )?; + + Ok(Self { value: new_value, num: LinearCombination::zero() + var }) + } + + pub fn fits_in_bits>( + &self, + mut cs: CS, + n_bits: usize, + ) -> Result<(), SynthesisError> { + let v = self.value; + + // Allocate all but the first bit. 
+ let bits: Vec = (1..n_bits) + .map(|i| { + cs.alloc( + || format!("bit {i}"), + || { + let r = if *v.grab()?.get_bit(i).grab()? { Scalar::ONE } else { Scalar::ZERO }; + Ok(r) + }, + ) + }) + .collect::>()?; + + for (i, v) in bits.iter().enumerate() { + cs.enforce(|| format!("{i} is bit"), |lc| lc + *v, |lc| lc + CS::one() - *v, |lc| lc) + } + + // Last bit + cs.enforce( + || "last bit", + |mut lc| { + let mut f = Scalar::ONE; + lc = lc + &self.num; + for v in bits.iter() { + f = f.double(); + lc = lc - (f, *v); + } + lc + }, + |mut lc| { + lc = lc + CS::one(); + let mut f = Scalar::ONE; + lc = lc - &self.num; + for v in bits.iter() { + f = f.double(); + lc = lc + (f, *v); + } + lc + }, + |lc| lc, + ); + Ok(()) + } + + /// Checks if the natural number equals an array of bits. + pub fn is_equal>(&self, mut cs: CS, other: &Bitvector) { + let mut f = Scalar::ONE; + let sum = other.allocations.iter().fold(LinearCombination::zero(), |lc, bit| { + let l = lc + (f, &bit.bit); + f = f.double(); + l + }); + let sum_lc = LinearCombination::zero() + &self.num - ∑ + cs.enforce(|| "sum", |lc| lc + &sum_lc, |lc| lc + CS::one(), |lc| lc); + } + + /// Compute the natural number represented by an array of limbs. + /// The limbs are assumed to be based the `limb_width` power of 2. 
+ /// Low-index bits are low-order + pub fn decompose>( + &self, + mut cs: CS, + n_bits: usize, + ) -> Result, SynthesisError> { + let values: Option> = self.value.as_ref().map(|v| { + let num = *v; + (0..n_bits).map(|i| num.get_bit(i).unwrap()).collect() + }); + let allocations: Vec> = (0..n_bits) + .map(|bit_i| { + Bit::alloc(cs.namespace(|| format!("bit{bit_i}")), values.as_ref().map(|vs| vs[bit_i])) + }) + .collect::, _>>()?; + let mut f = Scalar::ONE; + let sum = allocations.iter().fold(LinearCombination::zero(), |lc, bit| { + let l = lc + (f, &bit.bit); + f = f.double(); + l + }); + let sum_lc = LinearCombination::zero() + &self.num - ∑ + cs.enforce(|| "sum", |lc| lc + &sum_lc, |lc| lc + CS::one(), |lc| lc); + let bits: Vec> = + allocations.iter().map(|a| LinearCombination::zero() + &a.bit).collect(); + Ok(Bitvector { allocations, values, bits }) + } + + pub fn as_allocated_num>( + &self, + mut cs: CS, + ) -> Result, SynthesisError> { + let new = AllocatedNum::alloc(cs.namespace(|| "alloc"), || Ok(*self.value.grab()?))?; + cs.enforce(|| "eq", |lc| lc, |lc| lc, |lc| lc + new.get_variable() - &self.num); + Ok(new) + } +} + +impl From> for Num { + fn from(a: AllocatedNum) -> Self { + Self::new(a.get_value(), LinearCombination::zero() + a.get_variable()) + } +} + +fn write_be(f: &F, mut writer: W) -> io::Result<()> { + for digit in f.to_repr().as_ref().iter().rev() { + writer.write_u8(*digit)?; + } + + Ok(()) +} + +/// Convert a field element to a natural number +pub fn f_to_nat(f: &Scalar) -> BigInt { + let mut s = Vec::new(); + write_be(f, &mut s).unwrap(); + BigInt::from_bytes_le(Sign::Plus, f.to_repr().as_ref()) +} + +/// Convert a natural number to a field element. +/// Returns `None` if the number is too big for the field. 
+pub fn nat_to_f(n: &BigInt) -> Option { + Scalar::from_str_vartime(&format!("{n}")) +} + +#[cfg(test)] +mod tests { + use bitvec::field::BitField as _; + use ff::PrimeFieldBits; + use rand::SeedableRng; + use rand_chacha::ChaCha20Rng; + + // the write_be function above assumes Field::to_repr() outputs a representation + // that's an instance of `AsRef<[u8]>` in lower endian. We test that here, + // as this is not what the I2OSP standard recommends and may change in some + // implementations. + fn test_repr_is_le_with() { + let mut rng = ChaCha20Rng::from_seed([0u8; 32]); + for _i in 0..50 { + let f = F::random(&mut rng); + // This is guaranteed to be in LE + let le_bits = f.to_le_bits(); + let leftmost_u64 = le_bits[..64].load_le::(); + + // This is not + let f_repr = f.to_repr(); + let bytes: [u8; 8] = f_repr.as_ref()[..8].try_into().unwrap(); + let u64_from_repr = u64::from_le_bytes(bytes); + + assert_eq!(leftmost_u64, u64_from_repr); + } + } + + #[test] + fn test_repr_is_le() { + test_repr_is_le_with::(); + test_repr_is_le_with::(); + } +} diff --git a/prover/src/gadgets/r1cs.rs b/prover/src/gadgets/r1cs.rs new file mode 100644 index 0000000..c958331 --- /dev/null +++ b/prover/src/gadgets/r1cs.rs @@ -0,0 +1,395 @@ +//! This module implements various gadgets necessary for folding R1CS types. 
+use bellpepper::gadgets::{ + boolean::Boolean, boolean_utils::conditionally_select, num::AllocatedNum, Assignment, +}; +use bellpepper_core::{ConstraintSystem, SynthesisError}; +use ff::Field; +use itertools::Itertools as _; + +use super::nonnative::{ + bignat::BigNat, + util::{f_to_nat, Num}, +}; +use crate::{ + constants::{NUM_CHALLENGE_BITS, NUM_FE_WITHOUT_IO_FOR_NOVA_FOLD}, + gadgets::{ + ecc::AllocatedPoint, + utils::{ + alloc_bignat_constant, alloc_one, alloc_scalar_as_base, conditionally_select_bignat, + le_bits_to_num, + }, + }, + r1cs::{R1CSInstance, RelaxedR1CSInstance}, + traits::{commitment::CommitmentTrait, Engine, Group, ROCircuitTrait, ROConstantsCircuit}, +}; + +/// An Allocated R1CS Instance +#[derive(Clone)] +pub struct AllocatedR1CSInstance { + pub(crate) W: AllocatedPoint, + pub(crate) X: [AllocatedNum; N], +} + +impl AllocatedR1CSInstance { + /// Takes the r1cs instance and creates a new allocated r1cs instance + pub fn alloc::Base>>( + mut cs: CS, + u: Option<&R1CSInstance>, + ) -> Result { + let W = + AllocatedPoint::alloc(cs.namespace(|| "allocate W"), u.map(|u| u.comm_W.to_coordinates()))?; + W.check_on_curve(cs.namespace(|| "check W on curve"))?; + + let X: [AllocatedNum; N] = (0..N) + .map(|idx| { + alloc_scalar_as_base::( + cs.namespace(|| format!("allocating X[{idx}]")), + u.map(|u| u.X[idx]), + ) + }) + .collect::, _>>()? 
+ .try_into() + .map_err(|err: Vec<_>| { + SynthesisError::IncompatibleLengthVector(format!("{} != {N}", err.len())) + })?; + + Ok(Self { W, X }) + } + + /// Absorb the provided instance in the RO + pub fn absorb_in_ro(&self, ro: &mut E::ROCircuit) { + ro.absorb(&self.W.x); + ro.absorb(&self.W.y); + ro.absorb(&self.W.is_infinity); + self.X.iter().for_each(|x| ro.absorb(x)); + } +} + +/// An Allocated Relaxed R1CS Instance +#[derive(Clone)] +pub struct AllocatedRelaxedR1CSInstance { + pub(crate) W: AllocatedPoint, + pub(crate) E: AllocatedPoint, + pub(crate) u: AllocatedNum, + pub(crate) X: [BigNat; N], +} + +impl AllocatedRelaxedR1CSInstance { + /// Allocates the given `RelaxedR1CSInstance` as a witness of the circuit + pub fn alloc::Base>>( + mut cs: CS, + inst: Option<&RelaxedR1CSInstance>, + limb_width: usize, + n_limbs: usize, + ) -> Result { + // We do not need to check that W or E are well-formed (e.g., on the curve) as + // we do a hash check in the Nova augmented circuit, which ensures that + // the relaxed instance came from a prior iteration of Nova. + let W = AllocatedPoint::alloc( + cs.namespace(|| "allocate W"), + inst.map(|inst| inst.comm_W.to_coordinates()), + )?; + + let E = AllocatedPoint::alloc( + cs.namespace(|| "allocate E"), + inst.map(|inst| inst.comm_E.to_coordinates()), + )?; + + // u << |E::Base| despite the fact that u is a scalar. + // So we parse all of its bytes as a E::Base element + let u = alloc_scalar_as_base::(cs.namespace(|| "allocate u"), inst.map(|inst| inst.u))?; + + // Allocate X. If the input instance is None then allocate components as zero. + let X = (0..N) + .map(|idx| { + BigNat::alloc_from_nat( + cs.namespace(|| format!("allocate X[{idx}]")), + || Ok(f_to_nat(&inst.map_or(E::Scalar::ZERO, |inst| inst.X[idx]))), + limb_width, + n_limbs, + ) + }) + .collect::, _>>()? 
+ .try_into() + .map_err(|err: Vec<_>| { + SynthesisError::IncompatibleLengthVector(format!("{} != {N}", err.len())) + })?; + + Ok(Self { W, E, u, X }) + } + + /// Allocates the hardcoded default `RelaxedR1CSInstance` in the circuit. + /// W = E = 0, u = 0, X0 = X1 = 0 + pub fn default::Base>>( + mut cs: CS, + limb_width: usize, + n_limbs: usize, + ) -> Result { + let W = AllocatedPoint::default(cs.namespace(|| "allocate W")); + let E = W.clone(); + + let u = W.x.clone(); // In the default case, W.x = u = 0 + + // X is allocated and in the honest prover case set to zero + // If the prover is malicious, it can set to arbitrary values, but the resulting + // relaxed R1CS instance with the the checked default values of W, E, and u must + // still be satisfying + + let X = (0..N) + .map(|idx| { + BigNat::alloc_from_nat( + cs.namespace(|| format!("allocate X_default[{idx}]")), + || Ok(f_to_nat(&E::Scalar::ZERO)), + limb_width, + n_limbs, + ) + }) + .collect::, _>>()? + .try_into() + .map_err(|err: Vec<_>| { + SynthesisError::IncompatibleLengthVector(format!("{} != {N}", err.len())) + })?; + + Ok(Self { W, E, u, X }) + } + + /// Allocates the R1CS Instance as a `RelaxedR1CSInstance` in the circuit. + /// E = 0, u = 1 + pub fn from_r1cs_instance::Base>>( + mut cs: CS, + inst: AllocatedR1CSInstance, + limb_width: usize, + n_limbs: usize, + ) -> Result { + let E = AllocatedPoint::default(cs.namespace(|| "allocate default E")); + + let u = alloc_one(cs.namespace(|| "one")); + + let X = inst + .X + .into_iter() + .enumerate() + .map(|(idx, x)| { + BigNat::from_num( + cs.namespace(|| format!("allocate X[{idx}] from relaxed r1cs")), + &Num::from(x), + limb_width, + n_limbs, + ) + }) + .collect::, _>>()? 
+ .try_into() + .map_err(|err: Vec<_>| { + SynthesisError::IncompatibleLengthVector(format!("{} != {N}", err.len())) + })?; + + Ok(Self { W: inst.W, E, u, X }) + } + + /// Absorb the provided instance in the RO + pub fn absorb_in_ro::Base>>( + &self, + mut cs: CS, + ro: &mut E::ROCircuit, + ) -> Result<(), SynthesisError> { + ro.absorb(&self.W.x); + ro.absorb(&self.W.y); + ro.absorb(&self.W.is_infinity); + ro.absorb(&self.E.x); + ro.absorb(&self.E.y); + ro.absorb(&self.E.is_infinity); + ro.absorb(&self.u); + + self.X.iter().enumerate().try_for_each(|(idx, X)| { + X.as_limbs().iter().enumerate().try_for_each(|(i, limb)| -> Result<(), SynthesisError> { + ro.absorb( + &limb + .as_allocated_num(cs.namespace(|| format!("convert limb {i} of X_r[{idx}] to num")))?, + ); + Ok(()) + }) + })?; + + Ok(()) + } + + /// Folds self with a relaxed r1cs instance and returns the result + pub fn fold_with_r1cs::Base>>( + &self, + mut cs: CS, + params: &AllocatedNum, // hash of R1CSShape of F' + u: &AllocatedR1CSInstance, + T: &AllocatedPoint, + ro_consts: ROConstantsCircuit, + limb_width: usize, + n_limbs: usize, + ) -> Result { + // Compute r: + let mut ro = E::ROCircuit::new(ro_consts, NUM_FE_WITHOUT_IO_FOR_NOVA_FOLD + N); + ro.absorb(params); + + // running instance `U` does not need to absorbed since u.X[0] = Hash(params, U, + // i, z0, zi) + u.absorb_in_ro(&mut ro); + + ro.absorb(&T.x); + ro.absorb(&T.y); + ro.absorb(&T.is_infinity); + let r_bits = ro.squeeze(cs.namespace(|| "r bits"), NUM_CHALLENGE_BITS)?; + let r = le_bits_to_num(cs.namespace(|| "r"), &r_bits)?; + + // W_fold = self.W + r * u.W + let rW = u.W.scalar_mul(cs.namespace(|| "r * u.W"), &r_bits)?; + let W_fold = self.W.add(cs.namespace(|| "self.W + r * u.W"), &rW)?; + + // E_fold = self.E + r * T + let rT = T.scalar_mul(cs.namespace(|| "r * T"), &r_bits)?; + let E_fold = self.E.add(cs.namespace(|| "self.E + r * T"), &rT)?; + + // u_fold = u_r + r + let u_fold = AllocatedNum::alloc(cs.namespace(|| "u_fold"), || { + 
Ok(*self.u.get_value().get()? + r.get_value().get()?) + })?; + cs.enforce( + || "Check u_fold", + |lc| lc, + |lc| lc, + |lc| lc + u_fold.get_variable() - self.u.get_variable() - r.get_variable(), + ); + + // Fold the IO: + // Analyze r into limbs + let r_bn = + BigNat::from_num(cs.namespace(|| "allocate r_bn"), &Num::from(r), limb_width, n_limbs)?; + + // Allocate the order of the non-native field as a constant + let m_bn = alloc_bignat_constant( + cs.namespace(|| "alloc m"), + &E::GE::group_params().2, + limb_width, + n_limbs, + )?; + + let mut X_fold = vec![]; + + for (idx, (X, x)) in self.X.iter().zip_eq(u.X.iter()).enumerate() { + let x_bn = BigNat::from_num( + cs.namespace(|| format!("allocate u.X[{idx}]_bn")), + &Num::from(x.clone()), + limb_width, + n_limbs, + )?; + + let (_, r) = x_bn.mult_mod(cs.namespace(|| format!("r*u.X[{idx}]")), &r_bn, &m_bn)?; + let r_new = X.add(&r)?; + let X_i_fold = r_new.red_mod(cs.namespace(|| format!("reduce folded X[{idx}]")), &m_bn)?; + X_fold.push(X_i_fold); + } + + let X_fold = X_fold.try_into().map_err(|err: Vec<_>| { + SynthesisError::IncompatibleLengthVector(format!("{} != {N}", err.len())) + })?; + + Ok(Self { W: W_fold, E: E_fold, u: u_fold, X: X_fold }) + } + + /// If the condition is true then returns this otherwise it returns the + /// other + pub fn conditionally_select::Base>>( + &self, + cs: CS, + other: &Self, + condition: &Boolean, + ) -> Result { + conditionally_select_alloc_relaxed_r1cs(cs, self, other, condition) + } +} + +/// c = cond ? a: b, where a, b: `AllocatedRelaxedR1CSInstance` +pub fn conditionally_select_alloc_relaxed_r1cs< + E: Engine, + CS: ConstraintSystem<::Base>, + const N: usize, +>( + mut cs: CS, + a: &AllocatedRelaxedR1CSInstance, + b: &AllocatedRelaxedR1CSInstance, + condition: &Boolean, +) -> Result, SynthesisError> { + let c_X = a + .X + .iter() + .zip_eq(b.X.iter()) + .enumerate() + .map(|(idx, (a, b))| { + conditionally_select_bignat( + cs.namespace(|| format!("X[{idx}] = cond ? 
a.X[{idx}] : b.X[{idx}]")), + a, + b, + condition, + ) + }) + .collect::, _>>()?; + + let c_X = c_X.try_into().map_err(|err: Vec<_>| { + SynthesisError::IncompatibleLengthVector(format!("{} != {N}", err.len())) + })?; + + let c = AllocatedRelaxedR1CSInstance { + W: conditionally_select_point(cs.namespace(|| "W = cond ? a.W : b.W"), &a.W, &b.W, condition)?, + E: conditionally_select_point(cs.namespace(|| "E = cond ? a.E : b.E"), &a.E, &b.E, condition)?, + u: conditionally_select(cs.namespace(|| "u = cond ? a.u : b.u"), &a.u, &b.u, condition)?, + X: c_X, + }; + Ok(c) +} + +/// c = cond ? a: b, where a, b: `Vec` +pub fn conditionally_select_vec_allocated_relaxed_r1cs_instance< + E: Engine, + CS: ConstraintSystem<::Base>, + const N: usize, +>( + mut cs: CS, + a: &[AllocatedRelaxedR1CSInstance], + b: &[AllocatedRelaxedR1CSInstance], + condition: &Boolean, +) -> Result>, SynthesisError> { + a.iter() + .enumerate() + .zip_eq(b.iter()) + .map(|((i, a), b)| { + a.conditionally_select(cs.namespace(|| format!("cond ? a[{}]: b[{}]", i, i)), b, condition) + }) + .collect::>, _>>() +} + +/// c = cond ? a: b, where a, b: `AllocatedPoint` +pub fn conditionally_select_point>( + mut cs: CS, + a: &AllocatedPoint, + b: &AllocatedPoint, + condition: &Boolean, +) -> Result, SynthesisError> { + let c = AllocatedPoint { + x: conditionally_select( + cs.namespace(|| "x = cond ? a.x : b.x"), + &a.x, + &b.x, + condition, + )?, + y: conditionally_select( + cs.namespace(|| "y = cond ? a.y : b.y"), + &a.y, + &b.y, + condition, + )?, + is_infinity: conditionally_select( + cs.namespace(|| "is_infinity = cond ? a.is_infinity : b.is_infinity"), + &a.is_infinity, + &b.is_infinity, + condition, + )?, + }; + Ok(c) +} diff --git a/prover/src/gadgets/utils.rs b/prover/src/gadgets/utils.rs new file mode 100644 index 0000000..a5daa02 --- /dev/null +++ b/prover/src/gadgets/utils.rs @@ -0,0 +1,385 @@ +//! 
This module implements various low-level gadgets +use bellpepper::gadgets::Assignment; +use bellpepper_core::{ + boolean::{AllocatedBit, Boolean}, + num::AllocatedNum, + ConstraintSystem, LinearCombination, SynthesisError, +}; +use ff::{Field, PrimeField, PrimeFieldBits}; +use num_bigint::BigInt; + +use super::nonnative::bignat::{nat_to_limbs, BigNat}; +use crate::traits::Engine; + +/// Gets as input the little indian representation of a number and spits out the +/// number +pub fn le_bits_to_num( + mut cs: CS, + bits: &[AllocatedBit], +) -> Result, SynthesisError> +where + Scalar: PrimeField + PrimeFieldBits, + CS: ConstraintSystem, +{ + // We loop over the input bits and construct the constraint + // and the field element that corresponds to the result + let mut lc = LinearCombination::zero(); + let mut coeff = Scalar::ONE; + let mut fe = Some(Scalar::ZERO); + for bit in bits.iter() { + lc = lc + (coeff, bit.get_variable()); + fe = bit.get_value().map(|val| if val { fe.unwrap() + coeff } else { fe.unwrap() }); + coeff = coeff.double(); + } + let num = AllocatedNum::alloc(cs.namespace(|| "Field element"), || { + fe.ok_or(SynthesisError::AssignmentMissing) + })?; + lc = lc - num.get_variable(); + cs.enforce(|| "compute number from bits", |lc| lc, |lc| lc, |_| lc); + Ok(num) +} + +/// Allocate a variable that is set to zero +pub fn alloc_zero>(mut cs: CS) -> AllocatedNum { + let zero = AllocatedNum::alloc_infallible(cs.namespace(|| "alloc"), || F::ZERO); + cs.enforce(|| "check zero is valid", |lc| lc, |lc| lc, |lc| lc + zero.get_variable()); + zero +} + +/// Allocate a variable that is set to one +pub fn alloc_one>(mut cs: CS) -> AllocatedNum { + let one = AllocatedNum::alloc_infallible(cs.namespace(|| "alloc"), || F::ONE); + cs.enforce( + || "check one is valid", + |lc| lc + CS::one(), + |lc| lc + CS::one(), + |lc| lc + one.get_variable(), + ); + + one +} + +/// Allocate a scalar as a base. Only to be used is the scalar fits in base! 
+pub fn alloc_scalar_as_base( + mut cs: CS, + input: Option, +) -> Result, SynthesisError> +where + E: Engine, + CS: ConstraintSystem<::Base>, +{ + AllocatedNum::alloc(cs.namespace(|| "allocate scalar as base"), || { + let val = scalar_as_base::(input.unwrap_or(E::Scalar::ZERO)); + Ok(val) + }) +} + +/// interpret scalar as base +pub fn scalar_as_base(input: E::Scalar) -> E::Base { + let input_bits = input.to_le_bits(); + let mut mult = E::Base::ONE; + let mut val = E::Base::ZERO; + for bit in input_bits { + if bit { + val += mult; + } + mult = mult + mult; + } + val +} + +/// Allocate bignat a constant +pub fn alloc_bignat_constant>( + mut cs: CS, + val: &BigInt, + limb_width: usize, + n_limbs: usize, +) -> Result, SynthesisError> { + let limbs = nat_to_limbs(val, limb_width, n_limbs).unwrap(); + let bignat = BigNat::alloc_from_limbs( + cs.namespace(|| "alloc bignat"), + || Ok(limbs.clone()), + None, + limb_width, + n_limbs, + )?; + // Now enforce that the limbs are all equal to the constants + (0..n_limbs).for_each(|i| { + cs.enforce( + || format!("check limb {i}"), + |lc| lc + &bignat.limbs[i], + |lc| lc + CS::one(), + |lc| lc + (limbs[i], CS::one()), + ); + }); + Ok(bignat) +} + +/// Check that two numbers are equal and return a bit +pub fn alloc_num_equals>( + mut cs: CS, + a: &AllocatedNum, + b: &AllocatedNum, +) -> Result { + // Allocate and constrain `r`: result boolean bit. + // It equals `true` if `a` equals `b`, `false` otherwise + let r_value = match (a.get_value(), b.get_value()) { + (Some(a), Some(b)) => Some(a == b), + _ => None, + }; + + let r = AllocatedBit::alloc(cs.namespace(|| "r"), r_value)?; + + // Allocate t s.t. 
t=1 if a == b else 1/(a - b) + + let t = AllocatedNum::alloc(cs.namespace(|| "t"), || { + let a_val = *a.get_value().get()?; + let b_val = *b.get_value().get()?; + Ok(if a_val == b_val { F::ONE } else { (a_val - b_val).invert().unwrap() }) + })?; + + cs.enforce( + || "t*(a - b) = 1 - r", + |lc| lc + t.get_variable(), + |lc| lc + a.get_variable() - b.get_variable(), + |lc| lc + CS::one() - r.get_variable(), + ); + + cs.enforce( + || "r*(a - b) = 0", + |lc| lc + r.get_variable(), + |lc| lc + a.get_variable() - b.get_variable(), + |lc| lc, + ); + + Ok(r) +} + +// TODO: Figure out if this can be done better +pub fn conditionally_select_allocated_bit>( + mut cs: CS, + a: &AllocatedBit, + b: &AllocatedBit, + condition: &Boolean, +) -> Result { + let c = AllocatedBit::alloc( + cs.namespace(|| "conditionally select result"), + if condition.get_value().unwrap_or(false) { a.get_value() } else { b.get_value() }, + )?; + + // a * condition + b*(1-condition) = c -> + // a * condition - b*condition = c - b + cs.enforce( + || "conditional select constraint", + |lc| lc + a.get_variable() - b.get_variable(), + |_| condition.lc(CS::one(), F::ONE), + |lc| lc + c.get_variable() - b.get_variable(), + ); + + Ok(c) +} +/// If condition return a otherwise b where a and b are `BigNats` +pub fn conditionally_select_bignat>( + mut cs: CS, + a: &BigNat, + b: &BigNat, + condition: &Boolean, +) -> Result, SynthesisError> { + assert!(a.limbs.len() == b.limbs.len()); + let c = BigNat::alloc_from_nat( + cs.namespace(|| "conditional select result"), + || { + if *condition.get_value().get()? 
{ + Ok(a.value.get()?.clone()) + } else { + Ok(b.value.get()?.clone()) + } + }, + a.params.limb_width, + a.params.n_limbs, + )?; + + // a * condition + b*(1-condition) = c -> + // a * condition - b*condition = c - b + for i in 0..c.limbs.len() { + cs.enforce( + || format!("conditional select constraint {i}"), + |lc| lc + &a.limbs[i] - &b.limbs[i], + |_| condition.lc(CS::one(), F::ONE), + |lc| lc + &c.limbs[i] - &b.limbs[i], + ); + } + Ok(c) +} + +/// Same as the above but Condition is an `AllocatedNum` that needs to be +/// 0 or 1. 1 => True, 0 => False +pub fn conditionally_select2>( + mut cs: CS, + a: &AllocatedNum, + b: &AllocatedNum, + condition: &AllocatedNum, +) -> Result, SynthesisError> { + let c = AllocatedNum::alloc(cs.namespace(|| "conditional select result"), || { + if *condition.get_value().get()? == F::ONE { + Ok(*a.get_value().get()?) + } else { + Ok(*b.get_value().get()?) + } + })?; + + // a * condition + b*(1-condition) = c -> + // a * condition - b*condition = c - b + cs.enforce( + || "conditional select constraint", + |lc| lc + a.get_variable() - b.get_variable(), + |lc| lc + condition.get_variable(), + |lc| lc + c.get_variable() - b.get_variable(), + ); + + Ok(c) +} + +/// If condition set to 0 otherwise a. Condition is an allocated num +pub fn select_zero_or_num2>( + mut cs: CS, + a: &AllocatedNum, + condition: &AllocatedNum, +) -> Result, SynthesisError> { + let c = AllocatedNum::alloc(cs.namespace(|| "conditional select result"), || { + if *condition.get_value().get()? == F::ONE { + Ok(F::ZERO) + } else { + Ok(*a.get_value().get()?) + } + })?; + + // a * (1 - condition) = c + cs.enforce( + || "conditional select constraint", + |lc| lc + a.get_variable(), + |lc| lc + CS::one() - condition.get_variable(), + |lc| lc + c.get_variable(), + ); + + Ok(c) +} + +/// If condition set to a otherwise 0. 
Condition is an allocated num +pub fn select_num_or_zero2>( + mut cs: CS, + a: &AllocatedNum, + condition: &AllocatedNum, +) -> Result, SynthesisError> { + let c = AllocatedNum::alloc(cs.namespace(|| "conditional select result"), || { + if *condition.get_value().get()? == F::ONE { + Ok(*a.get_value().get()?) + } else { + Ok(F::ZERO) + } + })?; + + cs.enforce( + || "conditional select constraint", + |lc| lc + a.get_variable(), + |lc| lc + condition.get_variable(), + |lc| lc + c.get_variable(), + ); + + Ok(c) +} + +/// If condition set to a otherwise 0 +pub fn select_num_or_zero>( + mut cs: CS, + a: &AllocatedNum, + condition: &Boolean, +) -> Result, SynthesisError> { + let c = AllocatedNum::alloc(cs.namespace(|| "conditional select result"), || { + if *condition.get_value().get()? { + Ok(*a.get_value().get()?) + } else { + Ok(F::ZERO) + } + })?; + + cs.enforce( + || "conditional select constraint", + |lc| lc + a.get_variable(), + |_| condition.lc(CS::one(), F::ONE), + |lc| lc + c.get_variable(), + ); + + Ok(c) +} + +/// If condition set to 1 otherwise a +pub fn select_one_or_num2>( + mut cs: CS, + a: &AllocatedNum, + condition: &AllocatedNum, +) -> Result, SynthesisError> { + let c = AllocatedNum::alloc(cs.namespace(|| "conditional select result"), || { + if *condition.get_value().get()? == F::ONE { + Ok(F::ONE) + } else { + Ok(*a.get_value().get()?) + } + })?; + + cs.enforce( + || "conditional select constraint", + |lc| lc + CS::one() - a.get_variable(), + |lc| lc + condition.get_variable(), + |lc| lc + c.get_variable() - a.get_variable(), + ); + Ok(c) +} + +/// If condition set to 1 otherwise a - b +pub fn select_one_or_diff2>( + mut cs: CS, + a: &AllocatedNum, + b: &AllocatedNum, + condition: &AllocatedNum, +) -> Result, SynthesisError> { + let c = AllocatedNum::alloc(cs.namespace(|| "conditional select result"), || { + if *condition.get_value().get()? == F::ONE { + Ok(F::ONE) + } else { + Ok(*a.get_value().get()? - *b.get_value().get()?) 
+ } + })?; + + cs.enforce( + || "conditional select constraint", + |lc| lc + CS::one() - a.get_variable() + b.get_variable(), + |lc| lc + condition.get_variable(), + |lc| lc + c.get_variable() - a.get_variable() + b.get_variable(), + ); + Ok(c) +} + +/// If condition set to a otherwise 1 for boolean conditions +pub fn select_num_or_one>( + mut cs: CS, + a: &AllocatedNum, + condition: &Boolean, +) -> Result, SynthesisError> { + let c = AllocatedNum::alloc(cs.namespace(|| "conditional select result"), || { + if *condition.get_value().get()? { + Ok(*a.get_value().get()?) + } else { + Ok(F::ONE) + } + })?; + + cs.enforce( + || "conditional select constraint", + |lc| lc + a.get_variable() - CS::one(), + |_| condition.lc(CS::one(), F::ONE), + |lc| lc + c.get_variable() - CS::one(), + ); + + Ok(c) +} diff --git a/prover/src/lib.rs b/prover/src/lib.rs new file mode 100644 index 0000000..d0ed78b --- /dev/null +++ b/prover/src/lib.rs @@ -0,0 +1,1625 @@ +#![allow(non_snake_case)] + +// private modules +pub mod bellpepper; +pub mod circuit; +pub mod digest; +pub mod nifs; + +// public modules +pub mod constants; +pub mod errors; +pub mod fast_serde; +pub mod gadgets; +pub mod provider; +pub mod r1cs; +pub mod spartan; +pub mod traits; + +pub mod cyclefold; +pub mod supernova; + +use std::sync::Arc; + +use bellpepper_core::{ConstraintSystem, SynthesisError}; +use circuit::{NovaAugmentedCircuit, NovaAugmentedCircuitInputs, NovaAugmentedCircuitParams}; +use constants::{BN_LIMB_WIDTH, BN_N_LIMBS, NUM_FE_WITHOUT_IO_FOR_CRHF, NUM_HASH_BITS}; +use errors::NovaError; +use ff::Field; +use gadgets::scalar_as_base; +use nifs::NIFS; +use once_cell::sync::OnceCell; +use r1cs::{ + CommitmentKeyHint, R1CSInstance, R1CSShape, R1CSWitness, RelaxedR1CSInstance, RelaxedR1CSWitness, +}; +use serde::{Deserialize, Serialize}; +use supernova::StepCircuit; +use traits::{ + commitment::{CommitmentEngineTrait, CommitmentTrait}, + snark::RelaxedR1CSSNARKTrait, + AbsorbInROTrait, CurveCycleEquipped, 
Dual, Engine, ROConstants, ROConstantsCircuit, ROTrait, +}; + +use crate::{ + bellpepper::{ + r1cs::{NovaShape, NovaWitness}, + shape_cs::ShapeCS, + solver::SatisfyingAssignment, + }, + digest::{DigestComputer, SimpleDigestible}, + r1cs::R1CSResult, +}; + +/// A type that holds parameters for the primary and secondary circuits of Nova +/// and SuperNova +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct R1CSWithArity { + F_arity: usize, + r1cs_shape: R1CSShape, +} + +impl SimpleDigestible for R1CSWithArity {} + +impl R1CSWithArity { + /// Create a new `R1CSWithArity` + pub fn new(r1cs_shape: R1CSShape, F_arity: usize) -> Self { Self { F_arity, r1cs_shape } } + + /// Return the [`R1CSWithArity`]' digest. + pub fn digest(&self) -> E::Scalar { + let dc: DigestComputer<'_, ::Scalar, Self> = DigestComputer::new(self); + dc.digest().expect("Failure in computing digest") + } +} + +/// A type that holds public parameters of Nova +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct PublicParams +where E: CurveCycleEquipped { + F_arity_primary: usize, + F_arity_secondary: usize, + ro_consts_primary: ROConstants, + ro_consts_circuit_primary: ROConstantsCircuit>, + pub ck_primary: Arc>, + circuit_shape_primary: R1CSWithArity, + ro_consts_secondary: ROConstants>, + ro_consts_circuit_secondary: ROConstantsCircuit, + pub ck_secondary: Arc>>, + circuit_shape_secondary: R1CSWithArity>, + augmented_circuit_params_primary: NovaAugmentedCircuitParams, + augmented_circuit_params_secondary: NovaAugmentedCircuitParams, + #[serde(skip, default = "OnceCell::new")] + digest: OnceCell, +} + +impl SimpleDigestible for PublicParams where E1: CurveCycleEquipped {} + +impl PublicParams +where E1: CurveCycleEquipped +{ + /// Set up builder to create `PublicParams` for a pair of circuits `C1` and + /// `C2`. 
+ /// + /// # Note + /// + /// Public parameters set up a number of bases for the homomorphic + /// commitment scheme of Nova. + /// + /// Some final compressing SNARKs, like variants of Spartan, use computation + /// commitments that require larger sizes for these parameters. These + /// SNARKs provide a hint for these values by implementing + /// `RelaxedR1CSSNARKTrait::ck_floor()`, which can be passed to this + /// function. + /// + /// If you're not using such a SNARK, pass + /// `arecibo::traits::snark::default_ck_hint()` instead. + /// + /// # Arguments + /// + /// * `c_primary`: The primary circuit of type `C1`. + /// * `c_secondary`: The secondary circuit of type `C2`. + /// * `ck_hint1`: A `CommitmentKeyHint` for `G1`, which is a function that provides a hint for the + /// number of generators required in the commitment scheme for the primary circuit. + /// * `ck_hint2`: A `CommitmentKeyHint` for `G2`, similar to `ck_hint1`, but for the secondary + /// circuit. + /// + /// # Example + /// + /// ```rust + /// # use client_side_prover::spartan::ppsnark::RelaxedR1CSSNARK; + /// # use client_side_prover::provider::ipa_pc::EvaluationEngine; + /// # use client_side_prover::provider::{PallasEngine, VestaEngine}; + /// # use client_side_prover::traits::{circuit::TrivialCircuit, Engine, snark::RelaxedR1CSSNARKTrait}; + /// use client_side_prover::PublicParams; + /// + /// type E1 = PallasEngine; + /// type E2 = VestaEngine; + /// type EE = EvaluationEngine; + /// type SPrime = RelaxedR1CSSNARK>; + /// + /// let circuit1 = TrivialCircuit::<::Scalar>::default(); + /// let circuit2 = TrivialCircuit::<::Scalar>::default(); + /// // Only relevant for a SNARK using computation commitmnets, pass &(|_| 0) + /// // or &*nova_snark::traits::snark::default_ck_hint() otherwise. 
+ /// let ck_hint1 = &*SPrime::::ck_floor(); + /// let ck_hint2 = &*SPrime::::ck_floor(); + /// + /// let pp = PublicParams::setup(&circuit1, &circuit2, ck_hint1, ck_hint2).unwrap(); + /// ``` + pub fn setup, C2: StepCircuit< as Engine>::Scalar>>( + c_primary: &C1, + c_secondary: &C2, + ck_hint1: &CommitmentKeyHint, + ck_hint2: &CommitmentKeyHint>, + ) -> Result { + let augmented_circuit_params_primary = + NovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, true); + let augmented_circuit_params_secondary = + NovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, false); + + let ro_consts_primary: ROConstants = ROConstants::::default(); + let ro_consts_secondary: ROConstants> = ROConstants::>::default(); + + let F_arity_primary = c_primary.arity(); + let F_arity_secondary = c_secondary.arity(); + + // ro_consts_circuit_primary are parameterized by E2 because the type alias uses + // E2::Base = E1::Scalar + let ro_consts_circuit_primary: ROConstantsCircuit> = + ROConstantsCircuit::>::default(); + let ro_consts_circuit_secondary: ROConstantsCircuit = ROConstantsCircuit::::default(); + + // Initialize ck for the primary + let circuit_primary: NovaAugmentedCircuit<'_, Dual, C1> = NovaAugmentedCircuit::new( + &augmented_circuit_params_primary, + None, + c_primary, + ro_consts_circuit_primary.clone(), + ); + let mut cs: ShapeCS = ShapeCS::new(); + let _ = circuit_primary.synthesize(&mut cs); + let (r1cs_shape_primary, ck_primary) = cs.r1cs_shape_and_key(ck_hint1); + let ck_primary = Arc::new(ck_primary); + + // Initialize ck for the secondary + let circuit_secondary: NovaAugmentedCircuit<'_, E1, C2> = NovaAugmentedCircuit::new( + &augmented_circuit_params_secondary, + None, + c_secondary, + ro_consts_circuit_secondary.clone(), + ); + let mut cs: ShapeCS> = ShapeCS::new(); + let _ = circuit_secondary.synthesize(&mut cs); + let (r1cs_shape_secondary, ck_secondary) = cs.r1cs_shape_and_key(ck_hint2); + let ck_secondary = Arc::new(ck_secondary); + + if 
r1cs_shape_primary.num_io != 2 || r1cs_shape_secondary.num_io != 2 { + return Err(NovaError::InvalidStepCircuitIO); + } + + let circuit_shape_primary = R1CSWithArity::new(r1cs_shape_primary, F_arity_primary); + let circuit_shape_secondary = R1CSWithArity::new(r1cs_shape_secondary, F_arity_secondary); + + Ok(Self { + F_arity_primary, + F_arity_secondary, + ro_consts_primary, + ro_consts_circuit_primary, + ck_primary, + circuit_shape_primary, + ro_consts_secondary, + ro_consts_circuit_secondary, + ck_secondary, + circuit_shape_secondary, + augmented_circuit_params_primary, + augmented_circuit_params_secondary, + digest: OnceCell::new(), + }) + } + + /// Retrieve the digest of the public parameters. + pub fn digest(&self) -> E1::Scalar { + self + .digest + .get_or_try_init(|| DigestComputer::new(self).digest()) + .cloned() + .expect("Failure in retrieving digest") + } + + /// Returns the number of constraints in the primary and secondary circuits + pub const fn num_constraints(&self) -> (usize, usize) { + ( + self.circuit_shape_primary.r1cs_shape.num_cons, + self.circuit_shape_secondary.r1cs_shape.num_cons, + ) + } + + /// Returns the number of variables in the primary and secondary circuits + pub const fn num_variables(&self) -> (usize, usize) { + ( + self.circuit_shape_primary.r1cs_shape.num_vars, + self.circuit_shape_secondary.r1cs_shape.num_vars, + ) + } +} + +/// A resource buffer for [`RecursiveSNARK`] for storing scratch values that are +/// computed by `prove_step`, which allows the reuse of memory allocations and +/// avoids unnecessary new allocations in the critical section. 
+#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct ResourceBuffer { + l_w: Option>, + l_u: Option>, + + ABC_Z_1: R1CSResult, + ABC_Z_2: R1CSResult, + + /// buffer for `commit_T` + T: Vec, +} + +/// A SNARK that proves the correct execution of an incremental computation +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct RecursiveSNARK +where E1: CurveCycleEquipped { + z0_primary: Vec, + z0_secondary: Vec< as Engine>::Scalar>, + r_W_primary: RelaxedR1CSWitness, + r_U_primary: RelaxedR1CSInstance, + r_W_secondary: RelaxedR1CSWitness>, + r_U_secondary: RelaxedR1CSInstance>, + l_w_secondary: R1CSWitness>, + l_u_secondary: R1CSInstance>, + + /// Buffer for memory needed by the primary fold-step + buffer_primary: ResourceBuffer, + /// Buffer for memory needed by the secondary fold-step + buffer_secondary: ResourceBuffer>, + + i: usize, + zi_primary: Vec, + zi_secondary: Vec< as Engine>::Scalar>, +} + +impl RecursiveSNARK +where E1: CurveCycleEquipped +{ + /// Create new instance of recursive SNARK + pub fn new, C2: StepCircuit< as Engine>::Scalar>>( + pp: &PublicParams, + c_primary: &C1, + c_secondary: &C2, + z0_primary: &[E1::Scalar], + z0_secondary: &[ as Engine>::Scalar], + ) -> Result { + if z0_primary.len() != pp.F_arity_primary || z0_secondary.len() != pp.F_arity_secondary { + return Err(NovaError::InvalidInitialInputLength); + } + + let r1cs_primary = &pp.circuit_shape_primary.r1cs_shape; + let r1cs_secondary = &pp.circuit_shape_secondary.r1cs_shape; + + // base case for the primary + let mut cs_primary = SatisfyingAssignment::::new(); + let inputs_primary: NovaAugmentedCircuitInputs> = NovaAugmentedCircuitInputs::new( + scalar_as_base::(pp.digest()), + E1::Scalar::ZERO, + z0_primary.to_vec(), + None, + None, + None, + None, + ); + + let circuit_primary: NovaAugmentedCircuit<'_, Dual, C1> = NovaAugmentedCircuit::new( + &pp.augmented_circuit_params_primary, + Some(inputs_primary), + c_primary, + 
pp.ro_consts_circuit_primary.clone(), + ); + let zi_primary = circuit_primary.synthesize(&mut cs_primary)?; + let (u_primary, w_primary) = + cs_primary.r1cs_instance_and_witness(r1cs_primary, &pp.ck_primary)?; + + // base case for the secondary + let mut cs_secondary = SatisfyingAssignment::>::new(); + let inputs_secondary: NovaAugmentedCircuitInputs = NovaAugmentedCircuitInputs::new( + pp.digest(), + as Engine>::Scalar::ZERO, + z0_secondary.to_vec(), + None, + None, + Some(u_primary.clone()), + None, + ); + let circuit_secondary: NovaAugmentedCircuit<'_, E1, C2> = NovaAugmentedCircuit::new( + &pp.augmented_circuit_params_secondary, + Some(inputs_secondary), + c_secondary, + pp.ro_consts_circuit_secondary.clone(), + ); + let zi_secondary = circuit_secondary.synthesize(&mut cs_secondary)?; + let (u_secondary, w_secondary) = cs_secondary + .r1cs_instance_and_witness(&pp.circuit_shape_secondary.r1cs_shape, &pp.ck_secondary)?; + + // IVC proof for the primary circuit + let l_w_primary = w_primary; + let l_u_primary = u_primary; + let r_W_primary = RelaxedR1CSWitness::from_r1cs_witness(r1cs_primary, l_w_primary); + let r_U_primary = RelaxedR1CSInstance::from_r1cs_instance( + &*pp.ck_primary, + &pp.circuit_shape_primary.r1cs_shape, + l_u_primary, + ); + + // IVC proof for the secondary circuit + let l_w_secondary = w_secondary; + let l_u_secondary = u_secondary; + let r_W_secondary = RelaxedR1CSWitness::>::default(r1cs_secondary); + let r_U_secondary = RelaxedR1CSInstance::>::default(&pp.ck_secondary, r1cs_secondary); + + assert!( + !(zi_primary.len() != pp.F_arity_primary || zi_secondary.len() != pp.F_arity_secondary), + "Invalid step length" + ); + + let zi_primary = zi_primary + .iter() + .map(|v| v.get_value().ok_or(SynthesisError::AssignmentMissing)) + .collect::::Scalar>, _>>()?; + + let zi_secondary = zi_secondary + .iter() + .map(|v| v.get_value().ok_or(SynthesisError::AssignmentMissing)) + .collect:: as Engine>::Scalar>, _>>()?; + + let buffer_primary = 
ResourceBuffer { + l_w: None, + l_u: None, + ABC_Z_1: R1CSResult::default(r1cs_primary.num_cons), + ABC_Z_2: R1CSResult::default(r1cs_primary.num_cons), + T: r1cs::default_T::(r1cs_primary.num_cons), + }; + + let buffer_secondary = ResourceBuffer { + l_w: None, + l_u: None, + ABC_Z_1: R1CSResult::default(r1cs_secondary.num_cons), + ABC_Z_2: R1CSResult::default(r1cs_secondary.num_cons), + T: r1cs::default_T::>(r1cs_secondary.num_cons), + }; + + Ok(Self { + z0_primary: z0_primary.to_vec(), + z0_secondary: z0_secondary.to_vec(), + r_W_primary, + r_U_primary, + r_W_secondary, + r_U_secondary, + l_w_secondary, + l_u_secondary, + + buffer_primary, + buffer_secondary, + i: 0, + zi_primary, + zi_secondary, + }) + } + + /// Inputs of the primary circuits + pub fn z0_primary(&self) -> &Vec { &self.z0_primary } + + /// Outputs of the primary circuits + pub fn zi_primary(&self) -> &Vec { &self.zi_primary } + + /// Create a new `RecursiveSNARK` (or updates the provided `RecursiveSNARK`) + /// by executing a step of the incremental computation + #[tracing::instrument(skip_all, name = "nova::RecursiveSNARK::prove_step")] + pub fn prove_step, C2: StepCircuit< as Engine>::Scalar>>( + &mut self, + pp: &PublicParams, + c_primary: &C1, + c_secondary: &C2, + ) -> Result<(), NovaError> { + // first step was already done in the constructor + if self.i == 0 { + self.i = 1; + return Ok(()); + } + + // save the inputs before proceeding to the `i+1`th step + let r_U_primary_i = self.r_U_primary.clone(); + let r_U_secondary_i = self.r_U_secondary.clone(); + let l_u_secondary_i = self.l_u_secondary.clone(); + + // fold the secondary circuit's instance + let (nifs_secondary, _) = NIFS::prove_mut( + &*pp.ck_secondary, + &pp.ro_consts_secondary, + &scalar_as_base::(pp.digest()), + &pp.circuit_shape_secondary.r1cs_shape, + &mut self.r_U_secondary, + &mut self.r_W_secondary, + &self.l_u_secondary, + &self.l_w_secondary, + &mut self.buffer_secondary.T, + &mut self.buffer_secondary.ABC_Z_1, + &mut 
self.buffer_secondary.ABC_Z_2, + )?; + + let mut cs_primary = SatisfyingAssignment::::with_capacity( + pp.circuit_shape_primary.r1cs_shape.num_io + 1, + pp.circuit_shape_primary.r1cs_shape.num_vars, + ); + let inputs_primary: NovaAugmentedCircuitInputs> = NovaAugmentedCircuitInputs::new( + scalar_as_base::(pp.digest()), + E1::Scalar::from(self.i as u64), + self.z0_primary.to_vec(), + Some(self.zi_primary.clone()), + Some(r_U_secondary_i), + Some(l_u_secondary_i), + Some(Commitment::>::decompress(&nifs_secondary.comm_T)?), + ); + + let circuit_primary: NovaAugmentedCircuit<'_, Dual, C1> = NovaAugmentedCircuit::new( + &pp.augmented_circuit_params_primary, + Some(inputs_primary), + c_primary, + pp.ro_consts_circuit_primary.clone(), + ); + + let zi_primary = circuit_primary.synthesize(&mut cs_primary)?; + + let (l_u_primary, l_w_primary) = + cs_primary.r1cs_instance_and_witness(&pp.circuit_shape_primary.r1cs_shape, &pp.ck_primary)?; + + // fold the primary circuit's instance + let (nifs_primary, _) = NIFS::prove_mut( + &*pp.ck_primary, + &pp.ro_consts_primary, + &pp.digest(), + &pp.circuit_shape_primary.r1cs_shape, + &mut self.r_U_primary, + &mut self.r_W_primary, + &l_u_primary, + &l_w_primary, + &mut self.buffer_primary.T, + &mut self.buffer_primary.ABC_Z_1, + &mut self.buffer_primary.ABC_Z_2, + )?; + + let mut cs_secondary = SatisfyingAssignment::>::with_capacity( + pp.circuit_shape_secondary.r1cs_shape.num_io + 1, + pp.circuit_shape_secondary.r1cs_shape.num_vars, + ); + let inputs_secondary: NovaAugmentedCircuitInputs = NovaAugmentedCircuitInputs::new( + pp.digest(), + as Engine>::Scalar::from(self.i as u64), + self.z0_secondary.to_vec(), + Some(self.zi_secondary.clone()), + Some(r_U_primary_i), + Some(l_u_primary), + Some(Commitment::::decompress(&nifs_primary.comm_T)?), + ); + + let circuit_secondary: NovaAugmentedCircuit<'_, E1, C2> = NovaAugmentedCircuit::new( + &pp.augmented_circuit_params_secondary, + Some(inputs_secondary), + c_secondary, + 
pp.ro_consts_circuit_secondary.clone(), + ); + let zi_secondary = circuit_secondary.synthesize(&mut cs_secondary)?; + + let (l_u_secondary, l_w_secondary) = cs_secondary + .r1cs_instance_and_witness(&pp.circuit_shape_secondary.r1cs_shape, &pp.ck_secondary) + .map_err(|_e| NovaError::UnSat)?; + + // update the running instances and witnesses + self.zi_primary = zi_primary + .iter() + .map(|v| v.get_value().ok_or(SynthesisError::AssignmentMissing)) + .collect::::Scalar>, _>>()?; + self.zi_secondary = zi_secondary + .iter() + .map(|v| v.get_value().ok_or(SynthesisError::AssignmentMissing)) + .collect:: as Engine>::Scalar>, _>>()?; + + self.l_u_secondary = l_u_secondary; + self.l_w_secondary = l_w_secondary; + + self.i += 1; + + Ok(()) + } + + /// Verify the correctness of the `RecursiveSNARK` + pub fn verify( + &self, + pp: &PublicParams, + num_steps: usize, + z0_primary: &[E1::Scalar], + z0_secondary: &[ as Engine>::Scalar], + ) -> Result<(Vec, Vec< as Engine>::Scalar>), NovaError> { + // number of steps cannot be zero + let is_num_steps_zero = num_steps == 0; + + // check if the provided proof has executed num_steps + let is_num_steps_not_match = self.i != num_steps; + + // check if the initial inputs match + let is_inputs_not_match = self.z0_primary != z0_primary || self.z0_secondary != z0_secondary; + + // check if the (relaxed) R1CS instances have two public outputs + let is_instance_has_two_outputs = self.l_u_secondary.X.len() != 2 + || self.r_U_primary.X.len() != 2 + || self.r_U_secondary.X.len() != 2; + + if is_num_steps_zero + || is_num_steps_not_match + || is_inputs_not_match + || is_instance_has_two_outputs + { + return Err(NovaError::ProofVerifyError); + } + + // check if the output hashes in R1CS instances point to the right running + // instances + let (hash_primary, hash_secondary) = { + let mut hasher = as Engine>::RO::new( + pp.ro_consts_secondary.clone(), + NUM_FE_WITHOUT_IO_FOR_CRHF + 2 * pp.F_arity_primary, + ); + hasher.absorb(pp.digest()); + 
hasher.absorb(E1::Scalar::from(num_steps as u64)); + for e in z0_primary { + hasher.absorb(*e); + } + for e in &self.zi_primary { + hasher.absorb(*e); + } + self.r_U_secondary.absorb_in_ro(&mut hasher); + + let mut hasher2 = ::RO::new( + pp.ro_consts_primary.clone(), + NUM_FE_WITHOUT_IO_FOR_CRHF + 2 * pp.F_arity_secondary, + ); + hasher2.absorb(scalar_as_base::(pp.digest())); + hasher2.absorb( as Engine>::Scalar::from(num_steps as u64)); + for e in z0_secondary { + hasher2.absorb(*e); + } + for e in &self.zi_secondary { + hasher2.absorb(*e); + } + self.r_U_primary.absorb_in_ro(&mut hasher2); + + (hasher.squeeze(NUM_HASH_BITS), hasher2.squeeze(NUM_HASH_BITS)) + }; + + if hash_primary != self.l_u_secondary.X[0] + || hash_secondary != scalar_as_base::>(self.l_u_secondary.X[1]) + { + return Err(NovaError::ProofVerifyError); + } + + // check the satisfiability of the provided instances + let (res_r_primary, (res_r_secondary, res_l_secondary)) = rayon::join( + || { + pp.circuit_shape_primary.r1cs_shape.is_sat_relaxed( + &pp.ck_primary, + &self.r_U_primary, + &self.r_W_primary, + ) + }, + || { + rayon::join( + || { + pp.circuit_shape_secondary.r1cs_shape.is_sat_relaxed( + &pp.ck_secondary, + &self.r_U_secondary, + &self.r_W_secondary, + ) + }, + || { + pp.circuit_shape_secondary.r1cs_shape.is_sat( + &pp.ck_secondary, + &self.l_u_secondary, + &self.l_w_secondary, + ) + }, + ) + }, + ); + + // check the returned res objects + res_r_primary?; + res_r_secondary?; + res_l_secondary?; + + Ok((self.zi_primary.clone(), self.zi_secondary.clone())) + } + + /// Get the outputs after the last step of computation. + pub fn outputs(&self) -> (&[E1::Scalar], &[ as Engine>::Scalar]) { + (&self.zi_primary, &self.zi_secondary) + } + + /// The number of steps which have been executed thus far. 
+ pub fn num_steps(&self) -> usize { self.i } +} + +/// A type that holds the prover key for `CompressedSNARK` +#[derive(Clone, Debug)] +pub struct ProverKey +where + E1: CurveCycleEquipped, + S1: RelaxedR1CSSNARKTrait, + S2: RelaxedR1CSSNARKTrait>, { + pub pk_primary: S1::ProverKey, + pub pk_secondary: S2::ProverKey, +} + +/// A type that holds the verifier key for `CompressedSNARK` +#[derive(Debug, Clone, Serialize)] +#[serde(bound = "")] +pub struct VerifierKey +where + E1: CurveCycleEquipped, + S1: RelaxedR1CSSNARKTrait, + S2: RelaxedR1CSSNARKTrait>, { + F_arity_primary: usize, + F_arity_secondary: usize, + ro_consts_primary: ROConstants, + ro_consts_secondary: ROConstants>, + pp_digest: E1::Scalar, + vk_primary: S1::VerifierKey, + vk_secondary: S2::VerifierKey, +} + +/// A SNARK that proves the knowledge of a valid `RecursiveSNARK` +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct CompressedSNARK +where + E1: CurveCycleEquipped, + S1: RelaxedR1CSSNARKTrait, + S2: RelaxedR1CSSNARKTrait>, { + r_U_primary: RelaxedR1CSInstance, + r_W_snark_primary: S1, + + r_U_secondary: RelaxedR1CSInstance>, + l_u_secondary: R1CSInstance>, + nifs_secondary: NIFS>, + f_W_snark_secondary: S2, + + zn_primary: Vec, + zn_secondary: Vec< as Engine>::Scalar>, +} + +impl CompressedSNARK +where + E1: CurveCycleEquipped, + S1: RelaxedR1CSSNARKTrait, + S2: RelaxedR1CSSNARKTrait>, +{ + /// Creates prover and verifier keys for `CompressedSNARK` + pub fn setup( + pp: &PublicParams, + ) -> Result<(ProverKey, VerifierKey), NovaError> { + let (pk_primary, vk_primary) = + S1::setup(pp.ck_primary.clone(), &pp.circuit_shape_primary.r1cs_shape)?; + let (pk_secondary, vk_secondary) = + S2::setup(pp.ck_secondary.clone(), &pp.circuit_shape_secondary.r1cs_shape)?; + + let pk = ProverKey { pk_primary, pk_secondary }; + + let vk = VerifierKey { + F_arity_primary: pp.F_arity_primary, + F_arity_secondary: pp.F_arity_secondary, + ro_consts_primary: 
pp.ro_consts_primary.clone(), + ro_consts_secondary: pp.ro_consts_secondary.clone(), + pp_digest: pp.digest(), + vk_primary, + vk_secondary, + }; + + Ok((pk, vk)) + } + + /// Create a new `CompressedSNARK` + pub fn prove( + pp: &PublicParams, + pk: &ProverKey, + recursive_snark: &RecursiveSNARK, + ) -> Result { + // fold the secondary circuit's instance with its running instance + let (nifs_secondary, (f_U_secondary, f_W_secondary), _) = NIFS::prove( + &*pp.ck_secondary, + &pp.ro_consts_secondary, + &scalar_as_base::(pp.digest()), + &pp.circuit_shape_secondary.r1cs_shape, + &recursive_snark.r_U_secondary, + &recursive_snark.r_W_secondary, + &recursive_snark.l_u_secondary, + &recursive_snark.l_w_secondary, + )?; + + // create SNARKs proving the knowledge of f_W_primary and f_W_secondary + let (r_W_snark_primary, f_W_snark_secondary) = rayon::join( + || { + S1::prove( + &pp.ck_primary, + &pk.pk_primary, + &pp.circuit_shape_primary.r1cs_shape, + &recursive_snark.r_U_primary, + &recursive_snark.r_W_primary, + ) + }, + || { + S2::prove( + &pp.ck_secondary, + &pk.pk_secondary, + &pp.circuit_shape_secondary.r1cs_shape, + &f_U_secondary, + &f_W_secondary, + ) + }, + ); + + Ok(Self { + r_U_primary: recursive_snark.r_U_primary.clone(), + r_W_snark_primary: r_W_snark_primary?, + + r_U_secondary: recursive_snark.r_U_secondary.clone(), + l_u_secondary: recursive_snark.l_u_secondary.clone(), + nifs_secondary, + f_W_snark_secondary: f_W_snark_secondary?, + + zn_primary: recursive_snark.zi_primary.clone(), + zn_secondary: recursive_snark.zi_secondary.clone(), + }) + } + + /// Verify the correctness of the `CompressedSNARK` + pub fn verify( + &self, + vk: &VerifierKey, + num_steps: usize, + z0_primary: &[E1::Scalar], + z0_secondary: &[ as Engine>::Scalar], + ) -> Result<(Vec, Vec< as Engine>::Scalar>), NovaError> { + // the number of steps cannot be zero + if num_steps == 0 { + return Err(NovaError::ProofVerifyError); + } + + // check if the (relaxed) R1CS instances have two public 
outputs + if self.l_u_secondary.X.len() != 2 + || self.r_U_primary.X.len() != 2 + || self.r_U_secondary.X.len() != 2 + { + return Err(NovaError::ProofVerifyError); + } + + // check if the output hashes in R1CS instances point to the right running + // instances + let (hash_primary, hash_secondary) = { + let mut hasher = as Engine>::RO::new( + vk.ro_consts_secondary.clone(), + NUM_FE_WITHOUT_IO_FOR_CRHF + 2 * vk.F_arity_primary, + ); + hasher.absorb(vk.pp_digest); + hasher.absorb(E1::Scalar::from(num_steps as u64)); + for e in z0_primary { + hasher.absorb(*e); + } + for e in &self.zn_primary { + hasher.absorb(*e); + } + self.r_U_secondary.absorb_in_ro(&mut hasher); + + let mut hasher2 = ::RO::new( + vk.ro_consts_primary.clone(), + NUM_FE_WITHOUT_IO_FOR_CRHF + 2 * vk.F_arity_secondary, + ); + hasher2.absorb(scalar_as_base::(vk.pp_digest)); + hasher2.absorb( as Engine>::Scalar::from(num_steps as u64)); + for e in z0_secondary { + hasher2.absorb(*e); + } + for e in &self.zn_secondary { + hasher2.absorb(*e); + } + self.r_U_primary.absorb_in_ro(&mut hasher2); + + (hasher.squeeze(NUM_HASH_BITS), hasher2.squeeze(NUM_HASH_BITS)) + }; + + if hash_primary != self.l_u_secondary.X[0] + || hash_secondary != scalar_as_base::>(self.l_u_secondary.X[1]) + { + return Err(NovaError::ProofVerifyError); + } + + // fold the secondary's running instance with the last instance to get a folded + // instance + let f_U_secondary = self.nifs_secondary.verify( + &vk.ro_consts_secondary, + &scalar_as_base::(vk.pp_digest), + &self.r_U_secondary, + &self.l_u_secondary, + )?; + + // check the satisfiability of the folded instances using + // SNARKs proving the knowledge of their satisfying witnesses + let (res_primary, res_secondary) = rayon::join( + || self.r_W_snark_primary.verify(&vk.vk_primary, &self.r_U_primary), + || self.f_W_snark_secondary.verify(&vk.vk_secondary, &f_U_secondary), + ); + + res_primary?; + res_secondary?; + + Ok((self.zn_primary.clone(), self.zn_secondary.clone())) + } +} + 
+/// Compute the circuit digest of a [`StepCircuit`]. +/// +/// Note for callers: This function should be called with its performance +/// characteristics in mind. It will synthesize and digest the full `circuit` +/// given. +pub fn circuit_digest>( + circuit: &C, +) -> E1::Scalar { + let augmented_circuit_params = NovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, true); + + // ro_consts_circuit are parameterized by G2 because the type alias uses + // G2::Base = G1::Scalar + let ro_consts_circuit: ROConstantsCircuit> = ROConstantsCircuit::>::default(); + + // Initialize ck for the primary + let augmented_circuit: NovaAugmentedCircuit<'_, Dual, C> = + NovaAugmentedCircuit::new(&augmented_circuit_params, None, circuit, ro_consts_circuit); + let mut cs: ShapeCS = ShapeCS::new(); + let _ = augmented_circuit.synthesize(&mut cs); + cs.r1cs_shape().digest() +} + +pub type CommitmentKey = <::CE as CommitmentEngineTrait>::CommitmentKey; +type Commitment = <::CE as CommitmentEngineTrait>::Commitment; +type CompressedCommitment = <<::CE as CommitmentEngineTrait>::Commitment as CommitmentTrait>::CompressedCommitment; +type CE = ::CE; + +// #[cfg(test)] +// mod tests { +// use core::{fmt::Write, marker::PhantomData}; + +// use ::bellpepper_core::{num::AllocatedNum, ConstraintSystem, +// SynthesisError}; use expect_test::{expect, Expect}; +// use ff::PrimeField; +// use halo2curves::bn256::Bn256; +// use traits::circuit::TrivialCircuit; + +// use self::traits::CurveCycleEquipped; +// use super::*; +// use crate::{ +// provider::{ +// non_hiding_zeromorph::ZMPCS, Bn256EngineIPA, Bn256EngineKZG, +// Bn256EngineZM, PallasEngine, Secp256k1Engine, +// }, +// traits::{evaluation::EvaluationEngineTrait, snark::default_ck_hint}, +// }; + +// type EE = provider::ipa_pc::EvaluationEngine; +// type S = spartan::snark::RelaxedR1CSSNARK; +// type SPrime = spartan::ppsnark::RelaxedR1CSSNARK; + +// #[derive(Clone, Debug, Default)] +// struct CubicCircuit { +// _p: PhantomData, +// } + 
+// impl StepCircuit for CubicCircuit { +// fn arity(&self) -> usize { +// 1 +// } + +// fn synthesize>( +// &self, +// cs: &mut CS, +// z: &[AllocatedNum], +// ) -> Result>, SynthesisError> { +// // Consider a cubic equation: `x^3 + x + 5 = y`, where `x` and +// `y` are // respectively the input and output. +// let x = &z[0]; +// let x_sq = x.square(cs.namespace(|| "x_sq"))?; +// let x_cu = x_sq.mul(cs.namespace(|| "x_cu"), x)?; +// let y = AllocatedNum::alloc(cs.namespace(|| "y"), || { +// Ok(x_cu.get_value().unwrap() + x.get_value().unwrap() + +// F::from(5u64)) })?; + +// cs.enforce( +// || "y = x^3 + x + 5", +// |lc| { +// lc + x_cu.get_variable() +// + x.get_variable() +// + CS::one() +// + CS::one() +// + CS::one() +// + CS::one() +// + CS::one() +// }, +// |lc| lc + CS::one(), +// |lc| lc + y.get_variable(), +// ); + +// Ok(vec![y]) +// } +// } + +// impl CubicCircuit { +// fn output(&self, z: &[F]) -> Vec { +// vec![z[0] * z[0] * z[0] + z[0] + F::from(5u64)] +// } +// } + +// fn test_pp_digest_with(circuit1: &T1, circuit2: +// &T2, expected: &Expect) where +// E1: CurveCycleEquipped, +// T1: StepCircuit, +// T2: StepCircuit< as Engine>::Scalar>, +// EE1: EvaluationEngineTrait, +// EE2: EvaluationEngineTrait>, +// // this is due to the reliance on Abomonation +// ::Repr: Abomonation, +// < as Engine>::Scalar as PrimeField>::Repr: Abomonation, +// { +// // this tests public parameters with a size specifically intended for +// a // spark-compressed SNARK +// let ck_hint1 = &*SPrime::::ck_floor(); +// let ck_hint2 = &*SPrime::, EE2>::ck_floor(); +// let pp = PublicParams::::setup(circuit1, circuit2, ck_hint1, +// ck_hint2).unwrap(); + +// let digest_str = +// pp.digest() +// .to_repr() +// .as_ref() +// .iter() +// .fold(String::new(), |mut output, b| { +// let _ = write!(output, "{b:02x}"); +// output +// }); + +// expected.assert_eq(&digest_str); +// } + +// #[test] +// fn test_pp_digest() { +// test_pp_digest_with::, EE<_>>( +// &TrivialCircuit::default(), 
+// &TrivialCircuit::default(), +// +// &expect!["e5a6a85b77f3fb958b69722a5a21bf656fd21a6b5a012708a4b086b6be6d2b03"], +// ); + +// test_pp_digest_with::, EE<_>>( +// &CubicCircuit::default(), +// &TrivialCircuit::default(), +// +// &expect!["ec707a8b822baebca114b6e61b238374f9ed358c542dd37ee73febb47832cd01"], +// ); + +// test_pp_digest_with::, EE<_>>( +// &TrivialCircuit::default(), +// &TrivialCircuit::default(), +// +// &expect!["df52de22456157eb056003d4dc580a167ab8ce40a151c9944ea09a6fd0028600"], +// ); + +// test_pp_digest_with::, EE<_>>( +// &CubicCircuit::default(), +// &TrivialCircuit::default(), +// +// &expect!["b3ad0f4b734c5bd2ab9e83be8ee0cbaaa120e5cd0270b51cb9d7778a33f0b801"], +// ); + +// test_pp_digest_with::, EE<_>>( +// &TrivialCircuit::default(), +// &TrivialCircuit::default(), +// +// &expect!["e1feca53664212ee750da857c726b2a09bb30b2964f22ea85a19b58c9eaf5701"], +// ); +// test_pp_digest_with::, EE<_>>( +// &CubicCircuit::default(), +// &TrivialCircuit::default(), +// +// &expect!["4ad6b10b6fd24fecba49f08d35bc874a6da9c77735bc0bcf4b78b1914a97e602"], +// ); +// } + +// fn test_ivc_trivial_with() +// where +// E1: CurveCycleEquipped, +// { +// let test_circuit1 = TrivialCircuit::<::Scalar>::default(); let test_circuit2 = +// TrivialCircuit::< as Engine>::Scalar>::default(); + +// // produce public parameters +// let pp = PublicParams::::setup( +// &test_circuit1, +// &test_circuit2, +// &*default_ck_hint(), +// &*default_ck_hint(), +// ) +// .unwrap(); +// let num_steps = 1; + +// // produce a recursive SNARK +// let mut recursive_snark = RecursiveSNARK::new( +// &pp, +// &test_circuit1, +// &test_circuit2, +// &[::Scalar::ZERO], +// &[ as Engine>::Scalar::ZERO], +// ) +// .unwrap(); + +// recursive_snark +// .prove_step(&pp, &test_circuit1, &test_circuit2) +// .unwrap(); + +// // verify the recursive SNARK +// recursive_snark +// .verify( +// &pp, +// num_steps, +// &[::Scalar::ZERO], +// &[ as Engine>::Scalar::ZERO], +// ) +// .unwrap(); +// } + +// 
#[test] +// fn test_ivc_trivial() { +// test_ivc_trivial_with::(); +// test_ivc_trivial_with::(); +// test_ivc_trivial_with::(); +// } + +// fn test_ivc_nontrivial_with() +// where +// E1: CurveCycleEquipped, +// { +// let circuit_primary = TrivialCircuit::default(); +// let circuit_secondary = CubicCircuit::default(); + +// // produce public parameters +// let pp = PublicParams::::setup( +// &circuit_primary, +// &circuit_secondary, +// &*default_ck_hint(), +// &*default_ck_hint(), +// ) +// .unwrap(); + +// let num_steps = 3; + +// // produce a recursive SNARK +// let mut recursive_snark = RecursiveSNARK::::new( +// &pp, +// &circuit_primary, +// &circuit_secondary, +// &[::Scalar::ONE], +// &[ as Engine>::Scalar::ZERO], +// ) +// .unwrap(); + +// for i in 0..num_steps { +// recursive_snark +// .prove_step(&pp, &circuit_primary, &circuit_secondary) +// .unwrap(); + +// // verify the recursive snark at each step of recursion +// recursive_snark +// .verify( +// &pp, +// i + 1, +// &[::Scalar::ONE], +// &[ as Engine>::Scalar::ZERO], +// ) +// .unwrap(); +// } + +// // verify the recursive SNARK +// let (zn_primary, zn_secondary) = recursive_snark +// .verify( +// &pp, +// num_steps, +// &[::Scalar::ONE], +// &[ as Engine>::Scalar::ZERO], +// ) +// .unwrap(); + +// // sanity: check the claimed output with a direct computation of the +// same assert_eq!(zn_primary, vec![::Scalar::ONE]); +// let mut zn_secondary_direct = vec![ as +// Engine>::Scalar::ZERO]; for _i in 0..num_steps { +// zn_secondary_direct = +// circuit_secondary.clone().output(&zn_secondary_direct); } +// assert_eq!(zn_secondary, zn_secondary_direct); +// assert_eq!( +// zn_secondary, +// vec![ as Engine>::Scalar::from(2460515u64)] +// ); +// } + +// #[test] +// fn test_ivc_nontrivial() { +// test_ivc_nontrivial_with::(); +// test_ivc_nontrivial_with::(); +// test_ivc_nontrivial_with::(); +// } + +// fn test_ivc_nontrivial_with_some_compression_with() +// where +// E1: CurveCycleEquipped, +// // this 
is due to the reliance on Abomonation +// ::Repr: Abomonation, +// < as Engine>::Scalar as PrimeField>::Repr: Abomonation, +// S1: RelaxedR1CSSNARKTrait, +// S2: RelaxedR1CSSNARKTrait>, +// { +// let circuit_primary = TrivialCircuit::default(); +// let circuit_secondary = CubicCircuit::default(); + +// // produce public parameters, which we'll maybe use with a +// preprocessing // compressed SNARK +// let pp = PublicParams::::setup( +// &circuit_primary, +// &circuit_secondary, +// &*S1::ck_floor(), +// &*S2::ck_floor(), +// ) +// .unwrap(); + +// let num_steps = 3; + +// // produce a recursive SNARK +// let mut recursive_snark = RecursiveSNARK::::new( +// &pp, +// &circuit_primary, +// &circuit_secondary, +// &[::Scalar::ONE], +// &[ as Engine>::Scalar::ZERO], +// ) +// .unwrap(); + +// for _i in 0..num_steps { +// recursive_snark +// .prove_step(&pp, &circuit_primary, &circuit_secondary) +// .unwrap(); +// } + +// // verify the recursive SNARK +// let (zn_primary, zn_secondary) = recursive_snark +// .verify( +// &pp, +// num_steps, +// &[::Scalar::ONE], +// &[ as Engine>::Scalar::ZERO], +// ) +// .unwrap(); + +// // sanity: check the claimed output with a direct computation of the +// same assert_eq!(zn_primary, vec![::Scalar::ONE]); +// let mut zn_secondary_direct = vec![ as +// Engine>::Scalar::ZERO]; for _i in 0..num_steps { +// zn_secondary_direct = +// circuit_secondary.clone().output(&zn_secondary_direct); } +// assert_eq!(zn_secondary, zn_secondary_direct); +// assert_eq!( +// zn_secondary, +// vec![ as Engine>::Scalar::from(2460515u64)] +// ); + +// // run the compressed snark +// // produce the prover and verifier keys for compressed snark +// let (pk, vk) = CompressedSNARK::<_, S1, S2>::setup(&pp).unwrap(); + +// // produce a compressed SNARK +// let compressed_snark = +// CompressedSNARK::<_, S1, S2>::prove(&pp, &pk, +// &recursive_snark).unwrap(); + +// // verify the compressed SNARK +// compressed_snark +// .verify( +// &vk, +// num_steps, +// 
&[::Scalar::ONE], +// &[ as Engine>::Scalar::ZERO], +// ) +// .unwrap(); +// } + +// fn test_ivc_nontrivial_with_compression_with() +// where +// E1: CurveCycleEquipped, +// EE1: EvaluationEngineTrait, +// EE2: EvaluationEngineTrait>, +// // this is due to the reliance on Abomonation +// ::Repr: Abomonation, +// < as Engine>::Scalar as PrimeField>::Repr: Abomonation, +// { +// test_ivc_nontrivial_with_some_compression_with::, S<_, +// EE2>>() } + +// #[test] +// fn test_ivc_nontrivial_with_compression() { +// test_ivc_nontrivial_with_compression_with::, +// EE<_>>(); test_ivc_nontrivial_with_compression_with::, EE<_>>(); +// test_ivc_nontrivial_with_compression_with::, +// EE<_>>(); test_ivc_nontrivial_with_compression_with::, EE<_>>(); +// test_ivc_nontrivial_with_compression_with::< +// Bn256EngineKZG, +// provider::hyperkzg::EvaluationEngine, +// EE<_>, +// >(); +// } + +// fn test_ivc_nontrivial_with_spark_compression_with() +// where +// E1: CurveCycleEquipped, +// EE1: EvaluationEngineTrait, +// EE2: EvaluationEngineTrait>, +// // this is due to the reliance on Abomonation +// ::Repr: Abomonation, +// < as Engine>::Scalar as PrimeField>::Repr: Abomonation, +// { +// test_ivc_nontrivial_with_some_compression_with::, +// SPrime<_, EE2>>() } + +// #[test] +// fn test_ivc_nontrivial_with_spark_compression() { +// test_ivc_nontrivial_with_spark_compression_with::, EE<_>>(); +// test_ivc_nontrivial_with_spark_compression_with::, EE<_>>(); +// test_ivc_nontrivial_with_spark_compression_with::, EE<_>>(); +// test_ivc_nontrivial_with_spark_compression_with::, EE<_>>(); +// test_ivc_nontrivial_with_spark_compression_with::< +// Bn256EngineKZG, +// provider::hyperkzg::EvaluationEngine, +// EE<_>, +// >(); +// } + +// type BatchedS = spartan::batched::BatchedRelaxedR1CSSNARK; +// type BatchedSPrime = spartan::batched::BatchedRelaxedR1CSSNARK; + +// fn test_ivc_nontrivial_with_batched_compression_with() +// where +// E1: CurveCycleEquipped, +// EE1: EvaluationEngineTrait, 
+// EE2: EvaluationEngineTrait>, +// // this is due to the reliance on Abomonation +// ::Repr: Abomonation, +// < as Engine>::Scalar as PrimeField>::Repr: Abomonation, +// { +// // this tests compatibility of the batched workflow with the +// non-batched one test_ivc_nontrivial_with_some_compression_with::, BatchedS<_, EE2>>() } + +// #[test] +// fn test_ivc_nontrivial_with_batched_compression() { +// test_ivc_nontrivial_with_batched_compression_with::, EE<_>>(); +// test_ivc_nontrivial_with_batched_compression_with::, EE<_>>(); +// test_ivc_nontrivial_with_batched_compression_with::, EE<_>>(); +// test_ivc_nontrivial_with_batched_compression_with::, EE<_>>( ); +// test_ivc_nontrivial_with_batched_compression_with::< +// Bn256EngineKZG, +// provider::hyperkzg::EvaluationEngine, +// EE<_>, +// >(); +// } + +// fn test_ivc_nontrivial_with_batched_spark_compression_with() where +// E1: CurveCycleEquipped, +// EE1: EvaluationEngineTrait, +// EE2: EvaluationEngineTrait>, +// // this is due to the reliance on Abomonation +// ::Repr: Abomonation, +// < as Engine>::Scalar as PrimeField>::Repr: Abomonation, +// { +// // this tests compatibility of the batched workflow with the +// non-batched one test_ivc_nontrivial_with_some_compression_with::< +// E1, +// BatchedSPrime<_, EE1>, +// BatchedSPrime<_, EE2>, +// >() +// } + +// #[test] +// fn test_ivc_nontrivial_with_batched_spark_compression() { +// test_ivc_nontrivial_with_batched_spark_compression_with::, EE<_>>(); +// test_ivc_nontrivial_with_batched_spark_compression_with::, +// EE<_>>(); test_ivc_nontrivial_with_batched_spark_compression_with::, EE<_>>(); test_ivc_nontrivial_with_batched_spark_compression_with::< +// Bn256EngineZM, +// ZMPCS, +// EE<_>, +// >(); +// test_ivc_nontrivial_with_batched_spark_compression_with::< +// Bn256EngineKZG, +// provider::hyperkzg::EvaluationEngine, +// EE<_>, +// >(); +// } + +// fn test_ivc_nondet_with_compression_with() +// where +// E1: CurveCycleEquipped, +// EE1: 
EvaluationEngineTrait, +// EE2: EvaluationEngineTrait>, +// // this is due to the reliance on Abomonation +// ::Repr: Abomonation, +// < as Engine>::Scalar as PrimeField>::Repr: Abomonation, +// { +// // y is a non-deterministic advice representing the fifth root of the +// input at a // step. +// #[derive(Clone, Debug)] +// struct FifthRootCheckingCircuit { +// y: F, +// } + +// impl FifthRootCheckingCircuit { +// fn new(num_steps: usize) -> (Vec, Vec) { +// let mut powers = Vec::new(); +// let rng = &mut rand::rngs::OsRng; +// let mut seed = F::random(rng); +// for _i in 0..num_steps + 1 { +// seed *= seed.clone().square().square(); + +// powers.push(Self { y: seed }); +// } + +// // reverse the powers to get roots +// let roots = powers.into_iter().rev().collect::>(); +// (vec![roots[0].y], roots[1..].to_vec()) +// } +// } + +// impl StepCircuit for FifthRootCheckingCircuit +// where +// F: PrimeField, +// { +// fn arity(&self) -> usize { +// 1 +// } + +// fn synthesize>( +// &self, +// cs: &mut CS, +// z: &[AllocatedNum], +// ) -> Result>, SynthesisError> { +// let x = &z[0]; + +// // we allocate a variable and set it to the provided +// non-deterministic advice. 
let y = +// AllocatedNum::alloc_infallible(cs.namespace(|| "y"), || self.y); + +// // We now check if y = x^{1/5} by checking if y^5 = x +// let y_sq = y.square(cs.namespace(|| "y_sq"))?; +// let y_quad = y_sq.square(cs.namespace(|| "y_quad"))?; +// let y_pow_5 = y_quad.mul(cs.namespace(|| "y_fifth"), &y)?; + +// cs.enforce( +// || "y^5 = x", +// |lc| lc + y_pow_5.get_variable(), +// |lc| lc + CS::one(), +// |lc| lc + x.get_variable(), +// ); + +// Ok(vec![y]) +// } +// } + +// let circuit_primary = FifthRootCheckingCircuit { +// y: ::Scalar::ZERO, +// }; + +// let circuit_secondary = TrivialCircuit::default(); + +// // produce public parameters +// let pp = PublicParams::::setup( +// &circuit_primary, +// &circuit_secondary, +// &*default_ck_hint(), +// &*default_ck_hint(), +// ) +// .unwrap(); + +// let num_steps = 3; + +// // produce non-deterministic advice +// let (z0_primary, roots) = FifthRootCheckingCircuit::new(num_steps); +// let z0_secondary = vec![ as Engine>::Scalar::ZERO]; + +// // produce a recursive SNARK +// let mut recursive_snark = RecursiveSNARK::::new( +// &pp, +// &roots[0], +// &circuit_secondary, +// &z0_primary, +// &z0_secondary, +// ) +// .unwrap(); + +// for circuit_primary in roots.iter().take(num_steps) { +// recursive_snark +// .prove_step(&pp, circuit_primary, &circuit_secondary) +// .unwrap(); +// } + +// // verify the recursive SNARK +// recursive_snark +// .verify(&pp, num_steps, &z0_primary, &z0_secondary) +// .unwrap(); + +// // produce the prover and verifier keys for compressed snark +// let (pk, vk) = CompressedSNARK::<_, S, S<_, +// EE2>>::setup(&pp).unwrap(); + +// // produce a compressed SNARK +// let compressed_snark = +// CompressedSNARK::<_, S, S<_, EE2>>::prove(&pp, &pk, +// &recursive_snark).unwrap(); + +// // verify the compressed SNARK +// compressed_snark +// .verify(&vk, num_steps, &z0_primary, &z0_secondary) +// .unwrap(); +// } + +// #[test] +// fn test_ivc_nondet_with_compression() { +// 
test_ivc_nondet_with_compression_with::, +// EE<_>>(); test_ivc_nondet_with_compression_with::, EE<_>>(); +// test_ivc_nondet_with_compression_with::, +// EE<_>>(); test_ivc_nondet_with_compression_with::, EE<_>>(); } + +// fn test_ivc_base_with() +// where +// E1: CurveCycleEquipped, +// { +// let test_circuit1 = TrivialCircuit::<::Scalar>::default(); let test_circuit2 = +// CubicCircuit::< as Engine>::Scalar>::default(); + +// // produce public parameters +// let pp = PublicParams::::setup( +// &test_circuit1, +// &test_circuit2, +// &*default_ck_hint(), +// &*default_ck_hint(), +// ) +// .unwrap(); + +// let num_steps = 1; + +// // produce a recursive SNARK +// let mut recursive_snark = RecursiveSNARK::::new( +// &pp, +// &test_circuit1, +// &test_circuit2, +// &[::Scalar::ONE], +// &[ as Engine>::Scalar::ZERO], +// ) +// .unwrap(); + +// // produce a recursive SNARK +// recursive_snark +// .prove_step(&pp, &test_circuit1, &test_circuit2) +// .unwrap(); + +// // verify the recursive SNARK +// let (zn_primary, zn_secondary) = recursive_snark +// .verify( +// &pp, +// num_steps, +// &[::Scalar::ONE], +// &[ as Engine>::Scalar::ZERO], +// ) +// .unwrap(); + +// assert_eq!(zn_primary, vec![::Scalar::ONE]); +// assert_eq!(zn_secondary, vec![ as +// Engine>::Scalar::from(5u64)]); } + +// #[test] +// fn test_ivc_base() { +// test_ivc_base_with::(); +// test_ivc_base_with::(); +// test_ivc_base_with::(); +// } + +// fn test_setup_with() { +// #[derive(Clone, Debug, Default)] +// struct CircuitWithInputize { +// _p: PhantomData, +// } + +// impl StepCircuit for CircuitWithInputize { +// fn arity(&self) -> usize { +// 1 +// } + +// fn synthesize>( +// &self, +// cs: &mut CS, +// z: &[AllocatedNum], +// ) -> Result>, SynthesisError> { +// let x = &z[0]; +// // a simplified version of this test would only have one +// input // but beside the Nova Public parameter requirement for +// a num_io = 2, being // probed in this test, we *also* require +// num_io to be even, so // 
negative testing requires at least 4 +// inputs let y = x.square(cs.namespace(|| "x_sq"))?; +// y.inputize(cs.namespace(|| "y"))?; // inputize y +// let y2 = x.square(cs.namespace(|| "x_sq2"))?; +// y2.inputize(cs.namespace(|| "y2"))?; // inputize y2 +// let y3 = x.square(cs.namespace(|| "x_sq3"))?; +// y3.inputize(cs.namespace(|| "y3"))?; // inputize y2 +// let y4 = x.square(cs.namespace(|| "x_sq4"))?; +// y4.inputize(cs.namespace(|| "y4"))?; // inputize y2 +// Ok(vec![y, y2, y3, y4]) +// } +// } + +// // produce public parameters with trivial secondary +// let circuit = CircuitWithInputize::<::Scalar>::default(); let pp = PublicParams::::setup( +// &circuit, +// &TrivialCircuit::default(), +// &*default_ck_hint(), +// &*default_ck_hint(), +// ); +// assert!(pp.is_err()); +// assert_eq!(pp.err(), Some(NovaError::InvalidStepCircuitIO)); + +// // produce public parameters with the trivial primary +// let circuit = CircuitWithInputize::< as +// Engine>::Scalar>::default(); let pp = PublicParams::::setup( +// &TrivialCircuit::default(), +// &circuit, +// &*default_ck_hint(), +// &*default_ck_hint(), +// ); +// assert!(pp.is_err()); +// assert_eq!(pp.err(), Some(NovaError::InvalidStepCircuitIO)); +// } + +// #[test] +// fn test_setup() { +// test_setup_with::(); +// } +// } diff --git a/prover/src/nifs.rs b/prover/src/nifs.rs new file mode 100644 index 0000000..9848bdf --- /dev/null +++ b/prover/src/nifs.rs @@ -0,0 +1,370 @@ +//! 
//! This module implements a non-interactive folding scheme
#![allow(non_snake_case)]

use serde::{Deserialize, Serialize};

use crate::{
  constants::{NUM_CHALLENGE_BITS, NUM_FE_FOR_RO, NUM_FE_WITHOUT_IO_FOR_NOVA_FOLD},
  errors::NovaError,
  r1cs::{
    R1CSInstance, R1CSResult, R1CSShape, R1CSWitness, RelaxedR1CSInstance, RelaxedR1CSWitness,
  },
  scalar_as_base,
  traits::{commitment::CommitmentTrait, AbsorbInROTrait, Engine, ROConstants, ROTrait},
  Commitment, CommitmentKey, CompressedCommitment,
};

/// A SNARK that holds the proof of a step of an incremental computation
#[allow(clippy::upper_case_acronyms)]
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(bound = "")]
pub struct NIFS<E: Engine> {
  // Compressed commitment to the cross-term T; this is the entire folding proof.
  pub(crate) comm_T: CompressedCommitment<E>,
}

impl<E: Engine> NIFS<E> {
  /// Takes as input a Relaxed R1CS instance-witness tuple `(U1, W1)` and
  /// an R1CS instance-witness tuple `(U2, W2)` with the same structure
  /// `shape` and defined with respect to the same `ck`, and outputs
  /// a folded Relaxed R1CS instance-witness tuple `(U, W)` of the same shape
  /// `shape`, with the guarantee that the folded witness `W` satisfies
  /// the folded instance `U` if and only if `W1` satisfies `U1` and `W2`
  /// satisfies `U2`.
  ///
  /// Note that this code is tailored for use with Nova's IVC scheme, which
  /// enforces certain requirements between the two instances that are
  /// folded. In particular, it requires that `U1` and `U2` are such that
  /// the hash of `U1` is stored in the public IO of `U2`.
  /// In this particular setting, this means that if `U2` is absorbed in the
  /// RO, it implicitly absorbs `U1` as well. So the code below avoids
  /// absorbing `U1` in the RO.
  #[allow(clippy::too_many_arguments)]
  #[tracing::instrument(skip_all, level = "trace", name = "NIFS::prove")]
  pub fn prove(
    ck: &CommitmentKey<E>,
    ro_consts: &ROConstants<E>,
    pp_digest: &E::Scalar,
    S: &R1CSShape<E>,
    U1: &RelaxedR1CSInstance<E>,
    W1: &RelaxedR1CSWitness<E>,
    U2: &R1CSInstance<E>,
    W2: &R1CSWitness<E>,
  ) -> Result<(Self, (RelaxedR1CSInstance<E>, RelaxedR1CSWitness<E>), E::Scalar), NovaError> {
    // Check `U1` and `U2` have the same arity
    let io_arity = U1.X.len();
    if io_arity != U2.X.len() {
      return Err(NovaError::InvalidInputLength);
    }

    // initialize a new RO
    let mut ro = E::RO::new(ro_consts.clone(), NUM_FE_WITHOUT_IO_FOR_NOVA_FOLD + io_arity);

    // append the digest of pp to the transcript
    ro.absorb(scalar_as_base::<E>(*pp_digest));

    // append U2 to transcript, U1 does not need to be absorbed since U2.X[0] =
    // Hash(params, U1, i, z0, zi)
    U2.absorb_in_ro(&mut ro);

    // compute a commitment to the cross-term
    let (T, comm_T) = S.commit_T(ck, U1, W1, U2, W2)?;

    // append `comm_T` to the transcript and obtain a challenge
    comm_T.absorb_in_ro(&mut ro);

    // compute a challenge from the RO
    let r = ro.squeeze(NUM_CHALLENGE_BITS);

    // fold the instance using `r` and `comm_T`
    let U = U1.fold(U2, &comm_T, &r);

    // fold the witness using `r` and `T`
    let W = W1.fold(W2, &T, &r)?;

    // return the folded instance and witness
    Ok((Self { comm_T: comm_T.compress() }, (U, W), r))
  }

  /// Takes as input a Relaxed R1CS instance-witness tuple `(U1, W1)` and
  /// an R1CS instance-witness tuple `(U2, W2)` with the same structure
  /// `shape` and defined with respect to the same `ck`, and updates `(U1,
  /// W1)` by folding `(U2, W2)` into it with the guarantee that the
  /// updated witness `W` satisfies the updated instance `U` if and only
  /// if `W1` satisfies `U1` and `W2` satisfies `U2`.
  ///
  /// NOTE(review): unlike `prove`, this variant does not check that `U1` and
  /// `U2` have matching IO arity and absorbs a fixed `NUM_FE_FOR_RO` count —
  /// confirm callers guarantee the arity invariant.
  #[allow(clippy::too_many_arguments)]
  #[tracing::instrument(skip_all, level = "trace", name = "NIFS::prove_mut")]
  pub fn prove_mut(
    ck: &CommitmentKey<E>,
    ro_consts: &ROConstants<E>,
    pp_digest: &E::Scalar,
    S: &R1CSShape<E>,
    U1: &mut RelaxedR1CSInstance<E>,
    W1: &mut RelaxedR1CSWitness<E>,
    U2: &R1CSInstance<E>,
    W2: &R1CSWitness<E>,
    // Scratch buffers passed to `commit_T_into` so repeated folds can reuse
    // their allocations instead of reallocating per step.
    T: &mut Vec<E::Scalar>,
    ABC_Z_1: &mut R1CSResult<E>,
    ABC_Z_2: &mut R1CSResult<E>,
  ) -> Result<(Self, E::Scalar), NovaError> {
    // initialize a new RO
    let mut ro = E::RO::new(ro_consts.clone(), NUM_FE_FOR_RO);

    // append the digest of pp to the transcript
    ro.absorb(scalar_as_base::<E>(*pp_digest));

    // append U2 to transcript, U1 does not need to be absorbed since U2.X[0] =
    // Hash(params, U1, i, z0, zi)
    U2.absorb_in_ro(&mut ro);

    // compute a commitment to the cross-term
    let comm_T = S.commit_T_into(ck, U1, W1, U2, W2, T, ABC_Z_1, ABC_Z_2)?;

    // append `comm_T` to the transcript and obtain a challenge
    comm_T.absorb_in_ro(&mut ro);

    // compute a challenge from the RO
    let r = ro.squeeze(NUM_CHALLENGE_BITS);

    // fold the instance using `r` and `comm_T`
    U1.fold_mut(U2, &comm_T, &r);

    // fold the witness using `r` and `T`
    W1.fold_mut(W2, T, &r)?;

    // return the commitment
    Ok((Self { comm_T: comm_T.compress() }, r))
  }

  /// Takes as input a relaxed R1CS instance `U1` and R1CS instance `U2`
  /// with the same shape and defined with respect to the same parameters,
  /// and outputs a folded instance `U` with the same shape,
  /// with the guarantee that the folded instance `U` is satisfiable
  /// if and only if `U1` and `U2` are satisfiable.
  pub fn verify(
    &self,
    ro_consts: &ROConstants<E>,
    pp_digest: &E::Scalar,
    U1: &RelaxedR1CSInstance<E>,
    U2: &R1CSInstance<E>,
  ) -> Result<RelaxedR1CSInstance<E>, NovaError> {
    // initialize a new RO
    let mut ro = E::RO::new(ro_consts.clone(), NUM_FE_FOR_RO);

    // append the digest of pp to the transcript
    ro.absorb(scalar_as_base::<E>(*pp_digest));

    // append U2 to transcript, U1 does not need to be absorbed since U2.X[0] =
    // Hash(params, U1, i, z0, zi)
    U2.absorb_in_ro(&mut ro);

    // append `comm_T` to the transcript and obtain a challenge
    let comm_T = Commitment::<E>::decompress(&self.comm_T)?;
    comm_T.absorb_in_ro(&mut ro);

    // compute a challenge from the RO
    let r = ro.squeeze(NUM_CHALLENGE_BITS);

    // fold the instance using `r` and `comm_T`
    let U = U1.fold(U2, &comm_T, &r);

    // return the folded instance
    Ok(U)
  }
}
#[cfg(test)]
mod tests {
  use ::bellpepper_core::{num::AllocatedNum, ConstraintSystem, SynthesisError};
  use ff::{Field, PrimeField};
  use rand::rngs::OsRng;

  use super::*;
  use crate::{
    bellpepper::{
      r1cs::{NovaShape, NovaWitness},
      solver::SatisfyingAssignment,
      test_shape_cs::TestShapeCS,
    },
    provider::Bn256EngineKZG,
    r1cs::commitment_key,
    traits::{snark::default_ck_hint, Engine},
  };

  // Synthesizes the toy cubic relation used throughout these tests; `x_val`
  // is `None` when only the shape is being extracted.
  fn synthesize_tiny_r1cs_bellpepper<Scalar: PrimeField, CS: ConstraintSystem<Scalar>>(
    cs: &mut CS,
    x_val: Option<Scalar>,
  ) -> Result<(), SynthesisError> {
    // Consider a cubic equation: `x^3 + x + 5 = y`, where `x` and `y` are
    // respectively the input and output.
    let x = AllocatedNum::alloc_infallible(cs.namespace(|| "x"), || x_val.unwrap());
    let _ = x.inputize(cs.namespace(|| "x is input"));

    let x_sq = x.square(cs.namespace(|| "x_sq"))?;
    let x_cu = x_sq.mul(cs.namespace(|| "x_cu"), &x)?;
    let y = AllocatedNum::alloc(cs.namespace(|| "y"), || {
      Ok(x_cu.get_value().unwrap() + x.get_value().unwrap() + Scalar::from(5u64))
    })?;
    let _ = y.inputize(cs.namespace(|| "y is output"));

    cs.enforce(
      || "y = x^3 + x + 5",
      |lc| {
        // the constant 5 is expressed as five copies of CS::one()
        lc + x_cu.get_variable()
          + x.get_variable()
          + CS::one()
          + CS::one()
          + CS::one()
          + CS::one()
          + CS::one()
      },
      |lc| lc + CS::one(),
      |lc| lc + y.get_variable(),
    );

    Ok(())
  }

  fn test_tiny_r1cs_bellpepper_with<E: Engine>() {
    // First create the shape
    let mut cs: TestShapeCS<E> = TestShapeCS::new();
    let _ = synthesize_tiny_r1cs_bellpepper(&mut cs, None);
    let (shape, ck) = cs.r1cs_shape_and_key(&*default_ck_hint());
    let ro_consts = <<E as Engine>::RO as ROTrait<
      <E as Engine>::Base,
      <E as Engine>::Scalar,
    >>::Constants::default();

    // Now get the instance and assignment for one instance
    let mut cs = SatisfyingAssignment::<E>::new();
    let _ = synthesize_tiny_r1cs_bellpepper(&mut cs, Some(E::Scalar::from(5)));
    let (U1, W1) = cs.r1cs_instance_and_witness(&shape, &ck).unwrap();

    // Make sure that the first instance is satisfiable
    shape.is_sat(&ck, &U1, &W1).unwrap();

    // Now get the instance and assignment for second instance
    let mut cs = SatisfyingAssignment::<E>::new();
    let _ = synthesize_tiny_r1cs_bellpepper(&mut cs, Some(E::Scalar::from(135)));
    let (U2, W2) = cs.r1cs_instance_and_witness(&shape, &ck).unwrap();

    // Make sure that the second instance is satisfiable
    shape.is_sat(&ck, &U2, &W2).unwrap();

    // execute a sequence of folds
    execute_sequence(
      &ck,
      &ro_consts,
      &<E as Engine>::Scalar::ZERO,
      &shape,
      &U1,
      &W1,
      &U2,
      &W2,
    );
  }

  #[test]
  fn test_tiny_r1cs_bellpepper() { test_tiny_r1cs_bellpepper_with::<Bn256EngineKZG>(); }

  // Folds two incoming instance-witness pairs into a default running
  // instance, verifying the NIFS proof after each fold and checking the
  // final running pair is satisfiable.
  #[allow(clippy::too_many_arguments)]
  fn execute_sequence<E: Engine>(
    ck: &CommitmentKey<E>,
    ro_consts: &<<E as Engine>::RO as ROTrait<
      <E as Engine>::Base,
      <E as Engine>::Scalar,
    >>::Constants,
    pp_digest: &<E as Engine>::Scalar,
    shape: &R1CSShape<E>,
    U1: &R1CSInstance<E>,
    W1: &R1CSWitness<E>,
    U2: &R1CSInstance<E>,
    W2: &R1CSWitness<E>,
  ) {
    // produce a default running instance
    let mut r_W = RelaxedR1CSWitness::default(shape);
    let mut r_U = RelaxedR1CSInstance::default(ck, shape);

    // produce a step SNARK with (W1, U1) as the first incoming witness-instance
    // pair
    let res = NIFS::prove(ck, ro_consts, pp_digest, shape, &r_U, &r_W, U1, W1);
    assert!(res.is_ok());
    let (nifs, (_U, W), _) = res.unwrap();

    // verify the step SNARK with U1 as the first incoming instance
    let res = nifs.verify(ro_consts, pp_digest, &r_U, U1);
    assert!(res.is_ok());
    let U = res.unwrap();

    assert_eq!(U, _U);

    // update the running witness and instance
    r_W = W;
    r_U = U;

    // produce a step SNARK with (W2, U2) as the second incoming witness-instance
    // pair
    let res = NIFS::prove(ck, ro_consts, pp_digest, shape, &r_U, &r_W, U2, W2);
    assert!(res.is_ok());
    let (nifs, (_U, W), _) = res.unwrap();

    // verify the step SNARK with U2 as the second incoming instance
    let res = nifs.verify(ro_consts, pp_digest, &r_U, U2);
    assert!(res.is_ok());
    let U = res.unwrap();

    assert_eq!(U, _U);

    // update the running witness and instance
    r_W = W;
    r_U = U;

    // check if the running instance is satisfiable
    shape.is_sat_relaxed(ck, &r_U, &r_W).unwrap();
  }

  fn test_tiny_r1cs_with<E: Engine>() {
    let num_vars = 3;
    let S = crate::r1cs::tests::tiny_r1cs::<E>(num_vars);
    let one = <E::Scalar as Field>::ONE;

    // generate generators and ro constants
    let ck = commitment_key(&S, &*default_ck_hint());
    let ro_consts = <<E as Engine>::RO as ROTrait<
      <E as Engine>::Base,
      <E as Engine>::Scalar,
    >>::Constants::default();

    let rand_inst_witness_generator =
      |ck: &CommitmentKey<E>, I: &E::Scalar| -> (E::Scalar, R1CSInstance<E>, R1CSWitness<E>) {
        let i0 = *I;

        // compute a satisfying (vars, X) tuple
        let (O, vars, X) = {
          let z0 = i0 * i0; // constraint 0
          let z1 = i0 * z0; // constraint 1
          let z2 = z1 + i0; // constraint 2
          let i1 = z2 + one + one + one + one + one; // constraint 3

          // store the witness and IO for the instance
          let W = vec![z0, z1, z2];
          let X = vec![i0, i1];
          (i1, W, X)
        };

        let W = {
          let res = R1CSWitness::new(&S, vars);
          assert!(res.is_ok());
          res.unwrap()
        };
        let U = {
          let comm_W = W.commit(ck);
          let res = R1CSInstance::new(&S, comm_W, X);
          assert!(res.is_ok());
          res.unwrap()
        };

        // check that generated instance is satisfiable
        S.is_sat(ck, &U, &W).unwrap();

        (O, U, W)
      };

    let mut csprng: OsRng = OsRng;
    let I = E::Scalar::random(&mut csprng); // the first input is picked randomly for the first instance
    let (O, U1, W1) = rand_inst_witness_generator(&ck, &I);
    // chain the second instance off the first instance's output
    let (_O, U2, W2) = rand_inst_witness_generator(&ck, &O);

    // execute a sequence of folds
    execute_sequence(
      &ck,
      &ro_consts,
      &<E as Engine>::Scalar::ZERO,
      &S,
      &U1,
      &W1,
      &U2,
      &W2,
    );
  }

  #[test]
  fn test_tiny_r1cs() { test_tiny_r1cs_with::<Bn256EngineKZG>(); }
}
//! This module implements the Nova traits for `bn256::Point`, `bn256::Scalar`,
//! `grumpkin::Point`, `grumpkin::Scalar`.
use std::io::Read;

use digest::{ExtendableOutput, Update};
use ff::{FromUniformBytes, PrimeField};
use group::{cofactor::CofactorCurveAffine, Curve, Group as AnotherGroup};
#[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
use grumpkin_msm::{bn256 as bn256_msm, grumpkin as grumpkin_msm};
// Remove this when https://github.com/zcash/pasta_curves/issues/41 resolves
use halo2curves::{bn256::G2Affine, CurveAffine, CurveExt};
use num_bigint::BigInt;
use num_traits::Num;
use rayon::prelude::*;
use sha3::Shake256;

use crate::{
  impl_traits,
  provider::{traits::DlogGroup, util::msm::cpu_best_msm},
  traits::{Group, PrimeFieldExt, TranscriptReprTrait},
};

// This compile-time assertion checks important assumptions in the memory
// representation of group data that supports the use of Abomonation.
static_assertions::assert_eq_size!(G2Affine, [u64; 16]);

/// Re-exports that give access to the standard aliases used in the code base,
/// for bn256
pub mod bn256 {
  pub use halo2curves::bn256::{
    Fq as Base, Fr as Scalar, G1Affine as Affine, G1Compressed as Compressed, G1 as Point,
  };
}

/// Re-exports that give access to the standard aliases used in the code base,
/// for grumpkin
pub mod grumpkin {
  pub use halo2curves::grumpkin::{
    Fq as Base, Fr as Scalar, G1Affine as Affine, G1Compressed as Compressed, G1 as Point,
  };
}

// The two hex strings are the curve's scalar- and base-field moduli
// (presumably in that order, matching `impl_traits`'s parameters — confirm
// against the macro definition); note they are swapped between bn256 and
// grumpkin, reflecting the 2-cycle relationship of the two curves.
// On x86_64/aarch64 an accelerated MSM backend is also wired in.
#[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
impl_traits!(
  bn256,
  "30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001",
  "30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd47",
  bn256_msm
);
#[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))]
impl_traits!(
  bn256,
  "30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001",
  "30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd47"
);

#[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
impl_traits!(
  grumpkin,
  "30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd47",
  "30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001",
  grumpkin_msm
);
#[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))]
impl_traits!(
  grumpkin,
  "30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd47",
  "30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001"
);

#[cfg(test)]
mod tests {
  use ff::Field;
  use rand::thread_rng;

  use crate::provider::{
    bn256_grumpkin::{bn256, grumpkin},
    traits::DlogGroup,
    util::msm::cpu_best_msm,
  };

  // Cross-checks the (possibly accelerated) MSM against the CPU reference.
  #[test]
  fn test_bn256_msm_correctness() {
    let npoints = 1usize << 16;
    let points = bn256::Point::from_label(b"test", npoints);

    let mut rng = thread_rng();
    let scalars = (0..npoints).map(|_| bn256::Scalar::random(&mut rng)).collect::<Vec<_>>();

    let cpu_msm = cpu_best_msm(&points, &scalars);
    let gpu_msm = bn256::Point::vartime_multiscalar_mul(&scalars, &points);

    assert_eq!(cpu_msm, gpu_msm);
  }

  #[test]
  fn test_grumpkin_msm_correctness() {
    let npoints = 1usize << 16;
    let points = grumpkin::Point::from_label(b"test", npoints);

    let mut rng = thread_rng();
    let scalars = (0..npoints).map(|_| grumpkin::Scalar::random(&mut rng)).collect::<Vec<_>>();

    let cpu_msm = cpu_best_msm(&points, &scalars);
    let gpu_msm = grumpkin::Point::vartime_multiscalar_mul(&scalars, &points);

    assert_eq!(cpu_msm, gpu_msm);
  }
}
//! This module implements Nova's evaluation engine using `HyperKZG`, a
//! KZG-based polynomial commitment for multilinear polynomials HyperKZG is
//! based on the transformation from univariate PCS to multilinear PCS in the
//! Gemini paper (section 2.4.2 in <https://eprint.iacr.org/2022/420>).
//! However, there are some key differences:
//! (1) HyperKZG works with multilinear polynomials represented in evaluation
//! form (rather than in coefficient form in Gemini's transformation).
//! This means that Spartan's polynomial IOP can commit to its polynomials
//! as-is without incurring any interpolations or FFTs. (2) HyperKZG is
//! specialized to use KZG as the univariate commitment scheme, so it includes
//! several optimizations (both during the transformation of
//! multilinear-to-univariate claims and within the KZG commitment scheme
//! implementation itself). (3) HyperKZG also includes optimisation based on so
//! called Shplonk/HaloInfinite technique (<https://eprint.iacr.org/2020/081>).
//! Compared to pure HyperKZG, this optimisation in theory improves prover (at
//! cost of using 1 fixed KZG opening) and verifier (at cost of eliminating MSM)
#![allow(non_snake_case)]
use core::marker::PhantomData;
use std::sync::Arc;

use ff::{Field, PrimeFieldBits};
use group::{prime::PrimeCurveAffine as _, Curve, Group as _};
use itertools::Itertools as _;
use pairing::{Engine, MillerLoopResult, MultiMillerLoop};
use rayon::{
  iter::{
    IndexedParallelIterator, IntoParallelIterator, IntoParallelRefMutIterator, ParallelIterator,
  },
  prelude::*,
};
use ref_cast::RefCast as _;
use serde::{de::DeserializeOwned, Deserialize, Serialize};

use crate::{
  errors::NovaError,
  provider::{
    kzg_commitment::{KZGCommitmentEngine, KZGProverKey, KZGVerifierKey, UniversalKZGParam},
    pedersen::Commitment,
    traits::DlogGroup,
    util::iterators::IndexedParallelIteratorExt as _,
  },
  spartan::{math::Math, polys::univariate::UniPoly},
  traits::{
    commitment::{CommitmentEngineTrait, Len},
    evaluation::EvaluationEngineTrait,
    Engine as NovaEngine, Group, TranscriptEngineTrait, TranscriptReprTrait,
  },
};

/// Provides an implementation of a polynomial evaluation argument
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(bound(
  serialize = "E::G1Affine: Serialize, E::Fr: Serialize",
  deserialize = "E::G1Affine: Deserialize<'de>, E::Fr: Deserialize<'de>"
))]
pub struct EvaluationArgument<E: Engine> {
  // Commitments to the folded polynomials P_1, ..., P_{ell-1}
  comms: Vec<E::G1Affine>,
  // Evaluations of each folded polynomial at the opening points
  evals: Vec<Vec<E::Fr>>,
  R_x: Vec<E::Fr>,
  // Shplonk-style batching commitments (quotient and opening helper)
  C_Q: E::G1Affine,
  C_H: E::G1Affine,
}

/// Provides an implementation of a polynomial evaluation engine using KZG
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct EvaluationEngine<E, NE> {
  _p: PhantomData<(E, NE)>,
}

// This impl block defines helper functions that are not a part of
// EvaluationEngineTrait, but that we will use to implement the trait methods.
impl<E, NE> EvaluationEngine<E, NE>
where
  E: Engine,
  NE: NovaEngine<GE = E::G1, Scalar = E::Fr, CE = KZGCommitmentEngine<E>>,
  E::G1: DlogGroup<ScalarExt = E::Fr, AffineExt = E::G1Affine>,
  // the following bounds repeat existing, satisfied bounds on associated types of the above
  // but are required since the equality constraints we use in the above do not transitively
  // carry bounds we should be able to remove most of those constraints when rust supports
  // associated_type_bounds
  E::Fr: Serialize + DeserializeOwned,
  E::G1Affine: Serialize + DeserializeOwned,
  E::G1Affine: TranscriptReprTrait<E::G1>, // TODO: this bound on DlogGroup is really unusable!
  E::G2Affine: Serialize + DeserializeOwned,
  E::Fr: PrimeFieldBits + TranscriptReprTrait<E::G1>,
  <E::G1 as Group>::Base: TranscriptReprTrait<E::G1>,
{
  // Absorbs the folded-polynomial commitments and squeezes the folding
  // challenge from the transcript.
  fn compute_challenge(
    com: &[E::G1Affine],
    transcript: &mut impl TranscriptEngineTrait<NE>,
  ) -> E::Fr {
    transcript.absorb(b"c", &com);
    transcript.squeeze(b"c").unwrap()
  }

  // Compute challenge q = Hash(vk, C0, ..., C_{k-1}, u0, ...., u_{t-1},
  // (f_i(u_j))_{i=0..k-1,j=0..t-1})
  // It is assumed that both 'C' and 'u' are already absorbed by the transcript
  fn get_batch_challenge(
    v: &[Vec<E::Fr>],
    transcript: &mut impl TranscriptEngineTrait<NE>,
  ) -> E::Fr {
    transcript.absorb(b"v", &v.iter().flatten().cloned().collect::<Vec<E::Fr>>().as_slice());

    transcript.squeeze(b"r").unwrap()
  }

  // Absorbs the Shplonk quotient commitment and squeezes the evaluation
  // point `a` for the batched opening.
  fn compute_a(c_q: &E::G1Affine, transcript: &mut impl TranscriptEngineTrait<NE>) -> E::Fr {
    transcript.absorb(b"C_Q", c_q);
    transcript.squeeze(b"a").unwrap()
  }

  // Builds the Gemini-style folded polynomials: P_0 = hat_P and
  // P_{i+1}(j) = (1 - x) * P_i(2j) + x * P_i(2j+1), where x is the matching
  // entry of `point` consumed from the back. The final (constant) fold is
  // omitted, so exactly log2(|hat_P|) polynomials are returned.
  fn compute_pi_polynomials(hat_P: &[E::Fr], point: &[E::Fr]) -> Vec<Vec<E::Fr>> {
    let mut polys: Vec<Vec<E::Fr>> = Vec::new();
    polys.push(hat_P.to_vec());

    for i in 0..point.len() - 1 {
      let Pi_len = polys[i].len() / 2;
      let mut Pi = vec![E::Fr::ZERO; Pi_len];

      (0..Pi_len)
        .into_par_iter()
        .map(|j| {
          // (1 - x) * even + x * odd, written as x * (odd - even) + even
          point[point.len() - i - 1] * (polys[i][2 * j + 1] - polys[i][2 * j]) + polys[i][2 * j]
        })
        .collect_into_vec(&mut Pi);

      polys.push(Pi);
    }

    assert_eq!(polys.len(), hat_P.len().log_2());

    polys
  }

  // Commits to every folded polynomial except P_0 (whose commitment `_C` the
  // caller already holds), returning the commitments in affine form.
  fn compute_commitments(
    ck: &UniversalKZGParam<E>,
    _C: &Commitment<NE>,
    polys: &[Vec<E::Fr>],
  ) -> Vec<E::G1Affine> {
    let comms: Vec<E::G1> = (1..polys.len())
      .into_par_iter()
      .map(|i| <NE::CE as CommitmentEngineTrait<NE>>::commit(ck, &polys[i]).comm)
      .collect();

    // batch-normalize the projective commitments into affine form
    let mut comms_affine: Vec<E::G1Affine> = vec![E::G1Affine::identity(); comms.len()];
    NE::GE::batch_normalize(&comms, &mut comms_affine);
    comms_affine
  }

  // Evaluates every folded polynomial (viewed as a univariate in coefficient
  // order via `UniPoly::ref_cast`) at each point of `u`; result is indexed
  // [point][poly].
  fn compute_evals(polys: &[Vec<E::Fr>], u: &[E::Fr]) -> Vec<Vec<E::Fr>> {
    let mut v = vec![vec!(E::Fr::ZERO; polys.len()); u.len()];
    v.par_iter_mut().enumerate().for_each(|(i, v_i)| {
      // for each point u
      v_i.par_iter_mut().zip_eq(polys).for_each(|(v_ij, f)| {
        // for each poly f (except the last one - since it is constant)
        *v_ij = UniPoly::ref_cast(f).evaluate(&u[i]);
      });
    });
    v
  }
}
v_i.par_iter_mut().zip_eq(polys).for_each(|(v_ij, f)| { + // for each poly f (except the last one - since it is constant) + *v_ij = UniPoly::ref_cast(f).evaluate(&u[i]); + }); + }); + v + } + + fn compute_k_polynomial( + batched_Pi: &UniPoly, + Q_x: &UniPoly, + D: &UniPoly, + R_x: &UniPoly, + a: E::Fr, + ) -> UniPoly { + let mut tmp = Q_x.clone(); + tmp *= &D.evaluate(&a); + tmp[0] += &R_x.evaluate(&a); + let mut K_x = batched_Pi.clone(); + K_x -= &tmp; + K_x + } +} + +impl EvaluationEngineTrait for EvaluationEngine +where + E: MultiMillerLoop, + NE: NovaEngine>, + E::Fr: Serialize + DeserializeOwned, + E::G1Affine: Serialize + DeserializeOwned, + E::G2Affine: Serialize + DeserializeOwned, + E::G1: DlogGroup, + ::Base: TranscriptReprTrait, /* Note: due to the move of the bound + * TranscriptReprTrait on G::Base + * from Group to Engine */ + E::Fr: PrimeFieldBits, // TODO due to use of gen_srs_for_testing, make optional + E::Fr: TranscriptReprTrait, + E::G1Affine: TranscriptReprTrait, +{ + type EvaluationArgument = EvaluationArgument; + type ProverKey = KZGProverKey; + type VerifierKey = KZGVerifierKey; + + fn setup(ck: Arc>) -> (Self::ProverKey, Self::VerifierKey) { + let len = ck.length() - 1; + UniversalKZGParam::trim(ck, len) + } + + fn prove( + ck: &UniversalKZGParam, + _pk: &Self::ProverKey, + transcript: &mut ::TE, + _C: &Commitment, + hat_P: &[E::Fr], + point: &[E::Fr], + _eval: &E::Fr, + ) -> Result { + let x: Vec = point.to_vec(); + let ell = x.len(); + let n = hat_P.len(); + assert_eq!(n, 1 << ell); // Below we assume that n is a power of two + + // Phase 1 -- create commitments com_1, ..., com_\ell + // We do not compute final Pi (and its commitment as well since it is already + // committed according to EvaluationEngineTrait API) as it is constant and + // equals to 'eval' also known to verifier, so can be derived on its + // side as well + let polys = Self::compute_pi_polynomials(hat_P, point); + let comms = Self::compute_commitments(ck, _C, &polys); + 
+ // Phase 2 + let r = Self::compute_challenge(&comms, transcript); + let u = vec![r, -r, r * r]; + let evals = Self::compute_evals(&polys, &u); + + // Phase 3 + // Compute B(x) = f_0(x) + q * f_1(x) + ... + q^(k-1) * f_{k-1}(x) + let q = Self::get_batch_challenge(&evals, transcript); + let batched_Pi: UniPoly = polys.into_par_iter().map(UniPoly::new).rlc(&q); + + // Q(x), R(x) = P(x) / D(x), where D(x) = (x - r) * (x + r) * (x - r^2) = 1 * + // x^3 - r^2 * x^2 - r^2 * x + r^4 + let D = UniPoly::new(vec![u[2] * u[2], -u[2], -u[2], E::Fr::from(1)]); + let (Q_x, R_x) = batched_Pi.divide_with_q_and_r(&D).unwrap(); + + let C_Q = >::commit(ck, &Q_x.coeffs).comm.to_affine(); + + let a = Self::compute_a(&C_Q, transcript); + + // K(x) = P(x) - Q(x) * D(a) - R(a), note that R(a) should be subtracted from a + // free term of polynomial + let K_x = Self::compute_k_polynomial(&batched_Pi, &Q_x, &D, &R_x, a); + + // TODO: since this is a usual KZG10 we should use it as utility instead + let h = K_x.divide_minus_u(a); + let C_H = >::commit(ck, &h.coeffs).comm.to_affine(); + + Ok(EvaluationArgument:: { comms, evals, R_x: R_x.coeffs, C_Q, C_H }) + } + + /// A method to verify purported evaluations of a batch of polynomials + fn verify( + vk: &Self::VerifierKey, + transcript: &mut ::TE, + C: &Commitment, + point: &[E::Fr], + P_of_x: &E::Fr, + pi: &Self::EvaluationArgument, + ) -> Result<(), NovaError> { + let r = Self::compute_challenge(&pi.comms, transcript); + let u = [r, -r, r * r]; + + if pi.evals.len() != u.len() { + return Err(NovaError::ProofVerifyError); + } + if pi.R_x.len() != u.len() { + return Err(NovaError::ProofVerifyError); + } + + let mut comms = pi.comms.to_vec(); + comms.insert(0, C.comm.to_affine()); + + let q = Self::get_batch_challenge(&pi.evals, transcript); + let R_x = UniPoly::new(pi.R_x.clone()); + + let verification_failed = pi.evals.iter().zip_eq(u.iter()).any(|(evals_i, u_i)| { + // here we check correlation between R polynomial and batched evals, e.g.: 
+ // 1) R(r) == eval at r + // 2) R(-r) == eval at -r + // 3) R(r^2) == eval at r^2 + let batched_eval = UniPoly::ref_cast(evals_i).evaluate(&q); + batched_eval != R_x.evaluate(u_i) + }); + if verification_failed { + return Err(NovaError::ProofVerifyError); + } + + // here we check that Pi polynomials were correctly constructed by the prover, + // using 'r' as a random point, e.g: P_i_even = P_i(r) + P_i(-r) * 1/2 + // P_i_odd = P_i(r) - P_i(-r) * 1/2*r + // P_i+1(r^2) == (1 - point_i) * P_i_even + point_i * P_i_odd -> should hold, + // according to Gemini transformation + let mut point = point.to_vec(); + point.reverse(); + + let r_mul_2 = E::Fr::from(2) * r; + #[allow(clippy::disallowed_methods)] + let verification_failed = pi.evals[0] + .par_iter() + .chain(&[*P_of_x]) + .zip_eq(pi.evals[1].par_iter().chain(&[*P_of_x])) + .zip(pi.evals[2][1..].par_iter().chain(&[*P_of_x])) + .enumerate() + .any(|(index, ((eval_r, eval_minus_r), eval_r_squared))| { + // some optimisation to avoid using expensive inversions: + // P_i+1(r^2) == (1 - point_i) * (P_i(r) + P_i(-r)) * 1/2 + point_i * (P_i(r) - + // P_i(-r)) * 1/2 * r is equivalent to: + // 2 * r * P_i+1(r^2) == r * (1 - point_i) * (P_i(r) + P_i(-r)) + point_i * + // (P_i(r) - P_i(-r)) + + let even = *eval_r + eval_minus_r; + let odd = *eval_r - eval_minus_r; + let right = r * ((E::Fr::ONE - point[index]) * even) + (point[index] * odd); + let left = *eval_r_squared * r_mul_2; + left != right + }); + + if verification_failed { + return Err(NovaError::ProofVerifyError); + } + + let C_P: E::G1 = comms.par_iter().map(|comm| comm.to_curve()).rlc(&q); + let C_Q = pi.C_Q; + let C_H = pi.C_H; + let r_squared = u[2]; + + // D = (x - r) * (x + r) * (x - r^2) = 1 * x^3 - r^2 * x^2 - r^2 * x + r^4 + let D = UniPoly::new(vec![r_squared * r_squared, -r_squared, -r_squared, E::Fr::from(1)]); + + let a = Self::compute_a(&C_Q, transcript); + + let C_K = C_P - (C_Q * D.evaluate(&a) + vk.g * R_x.evaluate(&a)); + + let pairing_inputs: 
Vec<(E::G1Affine, E::G2Prepared)> = + vec![(C_H, vk.beta_h.into()), ((C_H * (-a) - C_K).to_affine(), vk.h.into())]; + + #[allow(clippy::map_identity)] + let pairing_input_refs = pairing_inputs.iter().map(|(a, b)| (a, b)).collect::>(); + + let pairing_result = E::multi_miller_loop(pairing_input_refs.as_slice()).final_exponentiation(); + let successful: bool = pairing_result.is_identity().into(); + if !successful { + return Err(NovaError::ProofVerifyError); + } + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use bincode::Options; + use expect_test::expect; + use halo2curves::bn256::G1; + use itertools::Itertools; + + use super::*; + use crate::{ + provider::{ + keccak::Keccak256Transcript, + util::{iterators::DoubleEndedIteratorExt as _, test_utils::prove_verify_from_num_vars}, + }, + spartan::powers, + traits::TranscriptEngineTrait, + zip_with, CommitmentEngineTrait, CommitmentKey, + }; + + type E = halo2curves::bn256::Bn256; + type NE = crate::provider::Bn256EngineKZG; + type Fr = ::Scalar; + + fn test_commitment_to_k_polynomial_correctness( + ck: &CommitmentKey, + C: &Commitment, + poly: &[Fr], + point: &[Fr], + _eval: &Fr, + ) { + let polys = EvaluationEngine::::compute_pi_polynomials(poly, point); + let mut comms = EvaluationEngine::::compute_commitments(ck, C, &polys); + comms.insert(0, C.comm.to_affine()); + + let q = Fr::from(8165763); + let q_powers = batch_challenge_powers(q, polys.len()); + let batched_Pi: UniPoly = polys.clone().into_iter().map(UniPoly::new).rlc(&q); + + let r = Fr::from(1354678); + let r_squared = r * r; + + let divident = batched_Pi.clone(); + let D = UniPoly::new(vec![r_squared * r_squared, -r_squared, -r_squared, Fr::from(1)]); + let (Q_x, R_x) = divident.divide_with_q_and_r(&D).unwrap(); + + let a = Fr::from(938576); + + let K_x = EvaluationEngine::::compute_k_polynomial(&batched_Pi, &Q_x, &D, &R_x, a); + + let mut C_P = G1::identity(); + q_powers.iter().zip_eq(comms.iter()).for_each(|(q_i, C_i)| { + C_P += *C_i * q_i; + }); + + let 
C_Q = + <::CE as CommitmentEngineTrait>::commit( + ck, + &Q_x.coeffs, + ) + .comm + .to_affine(); + + // Check that Cp - Cq * D(a) - g1 * R(a) == MSM(ck, K(x)) + let C_K = C_P - C_Q * D.evaluate(&a) - ck.powers_of_g[0] * R_x.evaluate(&a); + + let C_K_expected = + <::CE as CommitmentEngineTrait>::commit( + ck, + &K_x.coeffs, + ) + .comm + .to_affine(); + + assert_eq!(C_K_expected, C_K.to_affine()); + } + + fn test_k_polynomial_correctness(poly: &[Fr], point: &[Fr], _eval: &Fr) { + let polys = EvaluationEngine::::compute_pi_polynomials(poly, point); + let q = Fr::from(8165763); + let batched_Pi: UniPoly = polys.clone().into_iter().map(UniPoly::new).rlc(&q); + + let r = Fr::from(56263); + let r_squared = r * r; + + let divident = batched_Pi.clone(); + let D = UniPoly::new(vec![r_squared * r_squared, -r_squared, -r_squared, Fr::from(1)]); + let (Q_x, R_x) = divident.divide_with_q_and_r(&D).unwrap(); + + let a = Fr::from(190837645); + + let K_x = EvaluationEngine::::compute_k_polynomial(&batched_Pi, &Q_x, &D, &R_x, a); + + assert_eq!(Fr::from(0), K_x.evaluate(&a)); + } + + fn test_d_polynomial_correctness(poly: &[Fr], point: &[Fr], _eval: &Fr) { + let polys = EvaluationEngine::::compute_pi_polynomials(poly, point); + let q = Fr::from(8165763); + let batched_Pi: UniPoly = polys.clone().into_iter().map(UniPoly::new).rlc(&q); + + let r = Fr::from(2895776832); + let r_squared = r * r; + + let divident = batched_Pi.clone(); + // D(x) = (x - r) * (x + r) * (x - r^2) + let D = UniPoly::new(vec![r_squared * r_squared, -r_squared, -r_squared, Fr::from(1)]); + let (Q_x, R_x) = divident.divide_with_q_and_r(&D).unwrap(); + + let evaluation_scalar = Fr::from(182746); + assert_eq!( + batched_Pi.evaluate(&evaluation_scalar), + D.evaluate(&evaluation_scalar) * Q_x.evaluate(&evaluation_scalar) + + R_x.evaluate(&evaluation_scalar) + ); + + // Check that Q(x) = (P(x) - R(x)) / D(x) + let mut P_x = batched_Pi.clone(); + let minus_R_x = + 
UniPoly::new(R_x.clone().coeffs.into_iter().map(|coeff| -coeff).collect::>()); + P_x += &minus_R_x; + + let divident = P_x.clone(); + let (Q_x_recomputed, _) = divident.divide_with_q_and_r(&D).unwrap(); + + assert_eq!(Q_x, Q_x_recomputed); + } + + fn test_batching_property_on_evaluation(poly: &[Fr], point: &[Fr], _eval: &Fr) { + let polys = EvaluationEngine::::compute_pi_polynomials(poly, point); + + let q = Fr::from(97652); + let u = [Fr::from(10), Fr::from(20), Fr::from(50)]; + + let batched_Pi: UniPoly = polys.clone().into_iter().map(UniPoly::new).rlc(&q); + + let q_powers = batch_challenge_powers(q, polys.len()); + for evaluation_scalar in u.iter() { + let evals = polys + .clone() + .into_iter() + .map(|poly| UniPoly::new(poly).evaluate(evaluation_scalar)) + .collect::>(); + + let expected = zip_with!((evals.iter(), q_powers.iter()), |eval, q| eval * q) + .collect::>() + .into_iter() + .sum::(); + + let actual = batched_Pi.evaluate(evaluation_scalar); + assert_eq!(expected, actual); + } + } + + #[test] + fn test_hyperkzg_shplonk_unit_tests() { + // poly = [1, 2, 1, 4, 1, 2, 1, 4] + let poly = vec![ + Fr::ONE, + Fr::from(2), + Fr::from(1), + Fr::from(4), + Fr::ONE, + Fr::from(2), + Fr::from(1), + Fr::from(4), + ]; + + // point = [4,3,8] + let point = vec![Fr::from(4), Fr::from(3), Fr::from(8)]; + + // eval = 57 + let eval = Fr::from(57); + + let ck: CommitmentKey = + as CommitmentEngineTrait>::setup(b"test", poly.len()); + + let ck = Arc::new(ck); + let C: Commitment = KZGCommitmentEngine::commit(&ck, &poly); + + test_batching_property_on_evaluation(&poly, &point, &eval); + test_d_polynomial_correctness(&poly, &point, &eval); + test_k_polynomial_correctness(&poly, &point, &eval); + test_commitment_to_k_polynomial_correctness(&ck, &C, &poly, &point, &eval); + } + + #[test] + fn test_hyperkzg_shplonk_pcs() { + let n = 8; + + // poly = [1, 2, 1, 4, 1, 2, 1, 4] + let poly = vec![ + Fr::ONE, + Fr::from(2), + Fr::from(1), + Fr::from(4), + Fr::ONE, + Fr::from(2), + 
Fr::from(1), + Fr::from(4), + ]; + + // point = [4,3,8] + let point = vec![Fr::from(4), Fr::from(3), Fr::from(8)]; + + // eval = 57 + let eval = Fr::from(57); + + let ck: CommitmentKey = + as CommitmentEngineTrait>::setup(b"test", n); + let ck = Arc::new(ck); + let (pk, vk): (KZGProverKey, KZGVerifierKey) = + EvaluationEngine::::setup(ck.clone()); + + // make a commitment + let C: Commitment = KZGCommitmentEngine::commit(&ck, &poly); + + let mut prover_transcript = Keccak256Transcript::new(b"TestEval"); + let proof = + EvaluationEngine::::prove(&ck, &pk, &mut prover_transcript, &C, &poly, &point, &eval) + .unwrap(); + + let mut verifier_transcript = Keccak256Transcript::::new(b"TestEval"); + EvaluationEngine::::verify(&vk, &mut verifier_transcript, &C, &point, &eval, &proof) + .unwrap(); + } + + #[test] + fn test_hyperkzg_shplonk_pcs_negative() { + let n = 8; + // poly = [1, 2, 1, 4, 1, 2, 1, 4] + let poly = vec![ + Fr::ONE, + Fr::from(2), + Fr::from(1), + Fr::from(4), + Fr::ONE, + Fr::from(2), + Fr::from(1), + Fr::from(4), + ]; + // point = [4,3,8] + let point = vec![Fr::from(4), Fr::from(3), Fr::from(8)]; + // eval = 57 + let eval = Fr::from(57); + + // eval = 57 + let eval1 = Fr::from(56); // wrong eval + test_negative_inner(n, &poly, &point, &eval1); + + // point = [4,3,8] + let point1 = vec![Fr::from(4), Fr::from(3), Fr::from(7)]; // wrong point + test_negative_inner(n, &poly, &point1, &eval); + + // poly = [1, 2, 1, 4, 1, 2, 1, 4] + let poly1 = vec![ + Fr::ONE, + Fr::from(2), + Fr::from(1), + Fr::from(4), + Fr::ONE, + Fr::from(2), + Fr::from(200), + Fr::from(100), + ]; // wrong poly + test_negative_inner(n, &poly1, &point, &eval); + } + + fn test_negative_inner(n: usize, poly: &[Fr], point: &[Fr], eval: &Fr) { + let ck: CommitmentKey = + as CommitmentEngineTrait>::setup(b"test", n); + let ck = Arc::new(ck); + let (pk, vk): (KZGProverKey, KZGVerifierKey) = + EvaluationEngine::::setup(ck.clone()); + + // make a commitment + let C: Commitment = 
KZGCommitmentEngine::commit(&ck, poly); + + let mut prover_transcript = Keccak256Transcript::new(b"TestEval"); + let mut verifier_transcript = Keccak256Transcript::::new(b"TestEval"); + + let proof = + EvaluationEngine::::prove(&ck, &pk, &mut prover_transcript, &C, poly, point, eval) + .unwrap(); + + assert!(EvaluationEngine::::verify( + &vk, + &mut verifier_transcript, + &C, + point, + eval, + &proof + ) + .is_err()); + } + + #[test] + fn test_hyperkzg_shplonk_pcs_negative_wrong_commitment() { + let n = 8; + // poly = [1, 2, 1, 4, 1, 2, 1, 4] + let poly = vec![ + Fr::ONE, + Fr::from(2), + Fr::from(1), + Fr::from(4), + Fr::ONE, + Fr::from(2), + Fr::from(1), + Fr::from(4), + ]; + // point = [4,3,8] + let point = vec![Fr::from(4), Fr::from(3), Fr::from(8)]; + // eval = 57 + let eval = Fr::from(57); + + // altered_poly = [85, 84, 83, 82, 81, 80, 79, 78] + let altered_poly = vec![ + Fr::from(85), + Fr::from(84), + Fr::from(83), + Fr::from(82), + Fr::from(81), + Fr::from(80), + Fr::from(79), + Fr::from(78), + ]; + + let ck: CommitmentKey = + as CommitmentEngineTrait>::setup(b"test", n); + + let C1: Commitment = KZGCommitmentEngine::commit(&ck, &poly); // correct commitment + let C2: Commitment = KZGCommitmentEngine::commit(&ck, &altered_poly); // wrong commitment + + test_negative_inner_commitment(&poly, &point, &eval, &ck, &C1, &C2); // here we check detection when proof and commitment do not correspond + test_negative_inner_commitment(&poly, &point, &eval, &ck, &C2, &C2); // here we check detection + // when proof was built + // with wrong commitment + } + + fn test_negative_inner_commitment( + poly: &[Fr], + point: &[Fr], + eval: &Fr, + ck: &CommitmentKey, + C_prover: &Commitment, + C_verifier: &Commitment, + ) { + let ck = Arc::new(ck.clone()); + let (pk, vk): (KZGProverKey, KZGVerifierKey) = + EvaluationEngine::::setup(ck.clone()); + + let mut prover_transcript = Keccak256Transcript::new(b"TestEval"); + let mut verifier_transcript = 
Keccak256Transcript::::new(b"TestEval"); + + let proof = EvaluationEngine::::prove( + &ck, + &pk, + &mut prover_transcript, + C_prover, + poly, + point, + eval, + ) + .unwrap(); + + assert!(EvaluationEngine::::verify( + &vk, + &mut verifier_transcript, + C_verifier, + point, + eval, + &proof + ) + .is_err()); + } + + #[test] + fn test_hyperkzg_shplonk_eval() { + // Test with poly(X1, X2) = 1 + X1 + X2 + X1*X2 + let n = 4; + let ck: CommitmentKey = + as CommitmentEngineTrait>::setup(b"test", n); + let ck = Arc::new(ck); + let (pk, vk): (KZGProverKey, KZGVerifierKey) = + EvaluationEngine::::setup(ck.clone()); + + // poly is in eval. representation; evaluated at [(0,0), (0,1), (1,0), (1,1)] + let poly = vec![Fr::from(1), Fr::from(2), Fr::from(2), Fr::from(4)]; + + let C = as CommitmentEngineTrait>::commit(&ck, &poly); + + let test_inner = |point: Vec, eval: Fr| -> Result<(), NovaError> { + let mut tr = Keccak256Transcript::::new(b"TestEval"); + let proof = + EvaluationEngine::::prove(&ck, &pk, &mut tr, &C, &poly, &point, &eval).unwrap(); + let mut tr = Keccak256Transcript::new(b"TestEval"); + EvaluationEngine::::verify(&vk, &mut tr, &C, &point, &eval, &proof) + }; + + // Call the prover with a (point, eval) pair. 
+ // The prover does not recompute so it may produce a proof, but it should not + // verify + let point = vec![Fr::from(0), Fr::from(0)]; + let eval = Fr::ONE; + test_inner(point, eval).unwrap(); + + let point = vec![Fr::from(0), Fr::from(1)]; + let eval = Fr::from(2); + test_inner(point, eval).unwrap(); + + let point = vec![Fr::from(1), Fr::from(1)]; + let eval = Fr::from(4); + test_inner(point, eval).unwrap(); + + let point = vec![Fr::from(0), Fr::from(2)]; + let eval = Fr::from(3); + test_inner(point, eval).unwrap(); + + let point = vec![Fr::from(2), Fr::from(2)]; + let eval = Fr::from(9); + test_inner(point, eval).unwrap(); + + // Try a couple incorrect evaluations and expect failure + let point = vec![Fr::from(2), Fr::from(2)]; + let eval = Fr::from(50); + assert!(test_inner(point, eval).is_err()); + + let point = vec![Fr::from(0), Fr::from(2)]; + let eval = Fr::from(4); + assert!(test_inner(point, eval).is_err()); + } + + #[test] + fn test_hyperkzg_shplonk_transcript_correctness() { + let n = 4; + + // poly = [1, 2, 1, 4] + let poly = vec![Fr::ONE, Fr::from(2), Fr::from(1), Fr::from(4)]; + + // point = [4,3] + let point = vec![Fr::from(4), Fr::from(3)]; + + // eval = 28 + let eval = Fr::from(28); + + let ck: CommitmentKey = + as CommitmentEngineTrait>::setup(b"test", n); + let ck = Arc::new(ck); + let (pk, vk): (KZGProverKey, KZGVerifierKey) = + EvaluationEngine::::setup(ck.clone()); + + // make a commitment + let C = KZGCommitmentEngine::commit(&ck, &poly); + + // prove an evaluation + let mut prover_transcript = Keccak256Transcript::new(b"TestEval"); + let proof = + EvaluationEngine::::prove(&ck, &pk, &mut prover_transcript, &C, &poly, &point, &eval) + .unwrap(); + let post_c_p = prover_transcript.squeeze(b"c").unwrap(); + + // verify the evaluation + let mut verifier_transcript = Keccak256Transcript::::new(b"TestEval"); + EvaluationEngine::::verify(&vk, &mut verifier_transcript, &C, &point, &eval, &proof) + .unwrap(); + let post_c_v = 
verifier_transcript.squeeze(b"c").unwrap(); + + // check if the prover transcript and verifier transcript are kept in the + // same state + assert_eq!(post_c_p, post_c_v); + + let proof_bytes = bincode::DefaultOptions::new() + .with_big_endian() + .with_fixint_encoding() + .serialize(&proof) + .unwrap(); + expect!["432"].assert_eq(&proof_bytes.len().to_string()); + + // Change the proof and expect verification to fail + let mut bad_proof = proof.clone(); + bad_proof.comms[0] = (bad_proof.comms[0] + bad_proof.comms[0] * Fr::from(123)).to_affine(); + let mut verifier_transcript2 = Keccak256Transcript::::new(b"TestEval"); + assert!(EvaluationEngine::::verify( + &vk, + &mut verifier_transcript2, + &C, + &point, + &eval, + &bad_proof + ) + .is_err()); + } + + #[test] + fn test_hyperkzg_shplonk_more() { + // test the hyperkzg prover and verifier with random instances (derived from a + // seed) + for num_vars in [4, 5, 6] { + prove_verify_from_num_vars::<_, EvaluationEngine>(num_vars); + } + } + + /// Compute powers of q : (1, q, q^2, ..., q^(k-1)) + fn batch_challenge_powers(q: Fr, k: usize) -> Vec { powers(&q, k) } +} diff --git a/prover/src/provider/ipa_pc.rs b/prover/src/provider/ipa_pc.rs new file mode 100644 index 0000000..116e979 --- /dev/null +++ b/prover/src/provider/ipa_pc.rs @@ -0,0 +1,360 @@ +//! This module implements `EvaluationEngine` using an IPA-based polynomial +//! 
commitment scheme +use core::iter; +use std::{marker::PhantomData, sync::Arc}; + +use ff::Field; +use rayon::prelude::*; +use serde::{Deserialize, Serialize}; + +use crate::{ + digest::SimpleDigestible, + errors::{NovaError, PCSError}, + provider::{pedersen::CommitmentKeyExtTrait, traits::DlogGroup, util::field::batch_invert}, + spartan::polys::eq::EqPolynomial, + traits::{ + commitment::{CommitmentEngineTrait, CommitmentTrait}, + evaluation::EvaluationEngineTrait, + Engine, TranscriptEngineTrait, TranscriptReprTrait, + }, + zip_with, Commitment, CommitmentKey, CompressedCommitment, CE, +}; + +/// Provides an implementation of the prover key +#[derive(Debug, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct ProverKey { + pub ck_s: CommitmentKey, +} + +/// Provides an implementation of the verifier key +#[derive(Debug, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct VerifierKey { + pub ck_v: Arc>, + pub ck_s: CommitmentKey, +} + +impl SimpleDigestible for VerifierKey {} + +/// Provides an implementation of a polynomial evaluation engine using IPA +#[derive(Clone, Debug)] +pub struct EvaluationEngine { + _p: PhantomData, +} + +impl EvaluationEngineTrait for EvaluationEngine +where + E: Engine, + E::GE: DlogGroup, + CommitmentKey: CommitmentKeyExtTrait, +{ + type EvaluationArgument = InnerProductArgument; + type ProverKey = ProverKey; + type VerifierKey = VerifierKey; + + fn setup( + ck: Arc<<::CE as CommitmentEngineTrait>::CommitmentKey>, + ) -> (Self::ProverKey, Self::VerifierKey) { + let ck_c = E::CE::setup(b"ipa", 1); + + let pk = ProverKey { ck_s: ck_c.clone() }; + let vk = VerifierKey { ck_v: ck.clone(), ck_s: ck_c }; + + (pk, vk) + } + + fn prove( + ck: &CommitmentKey, + pk: &Self::ProverKey, + transcript: &mut E::TE, + comm: &Commitment, + poly: &[E::Scalar], + point: &[E::Scalar], + eval: &E::Scalar, + ) -> Result { + let u = InnerProductInstance::new(comm, &EqPolynomial::evals_from_points(point), eval); + let w = 
InnerProductWitness::new(poly); + + InnerProductArgument::prove(ck.clone(), pk.ck_s.clone(), &u, &w, transcript) + } + + /// A method to verify purported evaluations of a batch of polynomials + fn verify( + vk: &Self::VerifierKey, + transcript: &mut E::TE, + comm: &Commitment, + point: &[E::Scalar], + eval: &E::Scalar, + arg: &Self::EvaluationArgument, + ) -> Result<(), NovaError> { + let u = InnerProductInstance::new(comm, &EqPolynomial::evals_from_points(point), eval); + + arg.verify(&vk.ck_v, vk.ck_s.clone(), 1 << point.len(), &u, transcript)?; + + Ok(()) + } +} + +fn inner_product(a: &[T], b: &[T]) -> T { + zip_with!(par_iter, (a, b), |x, y| *x * y).sum() +} + +/// An inner product instance consists of a commitment to a vector `a` and +/// another vector `b` and the claim that c = . +struct InnerProductInstance { + comm_a_vec: Commitment, + b_vec: Vec, + c: E::Scalar, +} + +impl InnerProductInstance +where + E: Engine, + E::GE: DlogGroup, +{ + fn new(comm_a_vec: &Commitment, b_vec: &[E::Scalar], c: &E::Scalar) -> Self { + Self { comm_a_vec: *comm_a_vec, b_vec: b_vec.to_vec(), c: *c } + } +} + +impl TranscriptReprTrait for InnerProductInstance { + fn to_transcript_bytes(&self) -> Vec { + // we do not need to include self.b_vec as in our context it is produced from + // the transcript + [self.comm_a_vec.to_transcript_bytes(), self.c.to_transcript_bytes()].concat() + } +} + +struct InnerProductWitness { + a_vec: Vec, +} + +impl InnerProductWitness { + fn new(a_vec: &[E::Scalar]) -> Self { Self { a_vec: a_vec.to_vec() } } +} + +/// An inner product argument +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct InnerProductArgument { + pub(in crate::provider) L_vec: Vec>, + pub(in crate::provider) R_vec: Vec>, + pub(in crate::provider) a_hat: E::Scalar, +} + +impl InnerProductArgument +where + E: Engine, + E::GE: DlogGroup, + CommitmentKey: CommitmentKeyExtTrait, +{ + const fn protocol_name() -> &'static [u8] { b"IPA" } + + fn prove( + 
ck: CommitmentKey, + mut ck_c: CommitmentKey, + U: &InnerProductInstance, + W: &InnerProductWitness, + transcript: &mut E::TE, + ) -> Result { + transcript.dom_sep(Self::protocol_name()); + + let (ck, _) = ck.split_at(U.b_vec.len()); + + if U.b_vec.len() != W.a_vec.len() { + return Err(NovaError::InvalidInputLength); + } + + // absorb the instance in the transcript + transcript.absorb(b"U", U); + + // sample a random base for committing to the inner product + let r = transcript.squeeze(b"r")?; + ck_c.scale(&r); + + // a closure that executes a step of the recursive inner product argument + let prove_inner = |a_vec: &[E::Scalar], + b_vec: &[E::Scalar], + ck: CommitmentKey, + transcript: &mut E::TE| + -> Result< + ( + CompressedCommitment, + CompressedCommitment, + Vec, + Vec, + CommitmentKey, + ), + NovaError, + > { + let n = a_vec.len(); + let (ck_L, ck_R) = ck.split_at(n / 2); + + let c_L = inner_product(&a_vec[0..n / 2], &b_vec[n / 2..n]); + let c_R = inner_product(&a_vec[n / 2..n], &b_vec[0..n / 2]); + + let L = CE::::commit( + &ck_R.combine(&ck_c), + &a_vec[0..n / 2].iter().chain(iter::once(&c_L)).copied().collect::>(), + ) + .compress(); + let R = CE::::commit( + &ck_L.combine(&ck_c), + &a_vec[n / 2..n].iter().chain(iter::once(&c_R)).copied().collect::>(), + ) + .compress(); + + transcript.absorb(b"L", &L); + transcript.absorb(b"R", &R); + + let r = transcript.squeeze(b"r")?; + let r_inverse = r.invert().unwrap(); + + // fold the left half and the right half + let a_vec_folded = + zip_with!((a_vec[0..n / 2].par_iter(), a_vec[n / 2..n].par_iter()), |a_L, a_R| *a_L * r + + r_inverse * *a_R) + .collect::>(); + + let b_vec_folded = + zip_with!((b_vec[0..n / 2].par_iter(), b_vec[n / 2..n].par_iter()), |b_L, b_R| *b_L + * r_inverse + + r * *b_R) + .collect::>(); + + let ck_folded = CommitmentKeyExtTrait::fold(&ck_L, &ck_R, &r_inverse, &r); + + Ok((L, R, a_vec_folded, b_vec_folded, ck_folded)) + }; + + // two vectors to hold the logarithmic number of group elements + 
let mut L_vec: Vec> = Vec::new(); + let mut R_vec: Vec> = Vec::new(); + + // we create mutable copies of vectors and generators + let mut a_vec = W.a_vec.to_vec(); + let mut b_vec = U.b_vec.to_vec(); + let mut ck = ck; + for _i in 0..usize::try_from(U.b_vec.len().ilog2()).unwrap() { + let (L, R, a_vec_folded, b_vec_folded, ck_folded) = + prove_inner(&a_vec, &b_vec, ck, transcript)?; + L_vec.push(L); + R_vec.push(R); + + a_vec = a_vec_folded; + b_vec = b_vec_folded; + ck = ck_folded; + } + + Ok(Self { L_vec, R_vec, a_hat: a_vec[0] }) + } + + fn verify( + &self, + ck: &CommitmentKey, + mut ck_c: CommitmentKey, + n: usize, + U: &InnerProductInstance, + transcript: &mut E::TE, + ) -> Result<(), NovaError> { + let (ck, _) = ck.clone().split_at(U.b_vec.len()); + + transcript.dom_sep(Self::protocol_name()); + if U.b_vec.len() != n + || n != (1 << self.L_vec.len()) + || self.L_vec.len() != self.R_vec.len() + || self.L_vec.len() >= 32 + { + return Err(NovaError::InvalidInputLength); + } + + // absorb the instance in the transcript + transcript.absorb(b"U", U); + + // sample a random base for committing to the inner product + let r = transcript.squeeze(b"r")?; + ck_c.scale(&r); + + let P = U.comm_a_vec + CE::::commit(&ck_c, &[U.c]); + + // compute a vector of public coins using self.L_vec and self.R_vec + let r = (0..self.L_vec.len()) + .map(|i| { + transcript.absorb(b"L", &self.L_vec[i]); + transcript.absorb(b"R", &self.R_vec[i]); + transcript.squeeze(b"r") + }) + .collect::, NovaError>>()?; + + // precompute scalars necessary for verification + let r_square: Vec = + (0..self.L_vec.len()).into_par_iter().map(|i| r[i] * r[i]).collect(); + let r_inverse = batch_invert(r.clone())?; + let r_inverse_square: Vec = + (0..self.L_vec.len()).into_par_iter().map(|i| r_inverse[i] * r_inverse[i]).collect(); + + // compute the vector with the tensor structure + let s = { + let mut s = vec![E::Scalar::ZERO; n]; + s[0] = { + let mut v = E::Scalar::ONE; + for r_inverse_i in r_inverse { + v 
*= r_inverse_i; + } + v + }; + for i in 1..n { + let pos_in_r = (31 - (i as u32).leading_zeros()) as usize; + s[i] = s[i - (1 << pos_in_r)] * r_square[(self.L_vec.len() - 1) - pos_in_r]; + } + s + }; + + let ck_hat = { + let c = CE::::commit(&ck, &s).compress(); + CommitmentKey::::reinterpret_commitments_as_ck(&[c])? + }; + + let b_hat = inner_product(&U.b_vec, &s); + + let P_hat = { + let ck_folded = { + let ck_L = CommitmentKey::::reinterpret_commitments_as_ck(&self.L_vec)?; + let ck_R = CommitmentKey::::reinterpret_commitments_as_ck(&self.R_vec)?; + let ck_P = CommitmentKey::::reinterpret_commitments_as_ck(&[P.compress()])?; + ck_L.combine(&ck_R).combine(&ck_P) + }; + + CE::::commit( + &ck_folded, + &r_square + .iter() + .chain(r_inverse_square.iter()) + .chain(iter::once(&E::Scalar::ONE)) + .copied() + .collect::>(), + ) + }; + + if P_hat == CE::::commit(&ck_hat.combine(&ck_c), &[self.a_hat, self.a_hat * b_hat]) { + Ok(()) + } else { + Err(NovaError::PCSError(PCSError::InvalidPCS)) + } + } +} + +#[cfg(test)] +mod test { + use crate::provider::{ + ipa_pc::EvaluationEngine, util::test_utils::prove_verify_from_num_vars, GrumpkinEngine, + }; + + #[test] + fn test_multiple_polynomial_size() { + for num_vars in [4, 5, 6] { + prove_verify_from_num_vars::<_, EvaluationEngine>(num_vars); + } + } +} diff --git a/prover/src/provider/keccak.rs b/prover/src/provider/keccak.rs new file mode 100644 index 0000000..76ea95c --- /dev/null +++ b/prover/src/provider/keccak.rs @@ -0,0 +1,219 @@ +//! This module provides an implementation of `TranscriptEngineTrait` using +//! 
keccak256 +use core::marker::PhantomData; + +use sha3::{Digest, Keccak256}; + +use crate::{ + errors::NovaError, + traits::{Engine, PrimeFieldExt, TranscriptEngineTrait, TranscriptReprTrait}, +}; + +const PERSONA_TAG: &[u8] = b"NoTR"; +const DOM_SEP_TAG: &[u8] = b"NoDS"; +const KECCAK256_STATE_SIZE: usize = 64; +const KECCAK256_PREFIX_CHALLENGE_LO: u8 = 0; +const KECCAK256_PREFIX_CHALLENGE_HI: u8 = 1; + +/// Provides an implementation of `TranscriptEngine` +#[derive(Debug)] +pub struct Keccak256Transcript { + round: u16, + state: [u8; KECCAK256_STATE_SIZE], + transcript: Keccak256, + _p: PhantomData, +} + +fn compute_updated_state(keccak_instance: Keccak256, input: &[u8]) -> [u8; KECCAK256_STATE_SIZE] { + let mut updated_instance = keccak_instance; + updated_instance.update(input); + + let input_lo = &[KECCAK256_PREFIX_CHALLENGE_LO]; + let input_hi = &[KECCAK256_PREFIX_CHALLENGE_HI]; + + let mut hasher_lo = updated_instance.clone(); + let mut hasher_hi = updated_instance; + + hasher_lo.update(input_lo); + hasher_hi.update(input_hi); + + let output_lo = hasher_lo.finalize(); + let output_hi = hasher_hi.finalize(); + + [output_lo, output_hi].concat().as_slice().try_into().unwrap() +} + +impl TranscriptEngineTrait for Keccak256Transcript { + fn new(label: &'static [u8]) -> Self { + let keccak_instance = Keccak256::new(); + let input = [PERSONA_TAG, label].concat(); + let output = compute_updated_state(keccak_instance.clone(), &input); + + Self { + round: 0u16, + state: output, + transcript: keccak_instance, + _p: PhantomData, + } + } + + fn squeeze(&mut self, label: &'static [u8]) -> Result { + // we gather the full input from the round, preceded by the current state of the + // transcript + let input = + [DOM_SEP_TAG, self.round.to_le_bytes().as_ref(), self.state.as_ref(), label].concat(); + let output = compute_updated_state(self.transcript.clone(), &input); + + // update state + self.round = { self.round.checked_add(1).ok_or(NovaError::InternalTranscriptError)? 
}; + self.state.copy_from_slice(&output); + self.transcript = Keccak256::new(); + + // squeeze out a challenge + Ok(E::Scalar::from_uniform(&output)) + } + + fn absorb>(&mut self, label: &'static [u8], o: &T) { + self.transcript.update(label); + self.transcript.update(&o.to_transcript_bytes()); + } + + fn dom_sep(&mut self, bytes: &'static [u8]) { + self.transcript.update(DOM_SEP_TAG); + self.transcript.update(bytes); + } +} + +#[cfg(test)] +mod tests { + use ff::PrimeField; + use rand::Rng; + use sha3::{Digest, Keccak256}; + + use crate::{ + provider::{keccak::Keccak256Transcript, Bn256EngineKZG, GrumpkinEngine}, + traits::{Engine, PrimeFieldExt, TranscriptEngineTrait, TranscriptReprTrait}, + }; + + fn test_keccak_transcript_with(expected_h1: &'static str, expected_h2: &'static str) { + let mut transcript: Keccak256Transcript = Keccak256Transcript::new(b"test"); + + // two scalars + let s1 = ::Scalar::from(2u64); + let s2 = ::Scalar::from(5u64); + + // add the scalars to the transcript + transcript.absorb(b"s1", &s1); + transcript.absorb(b"s2", &s2); + + // make a challenge + let c1: ::Scalar = transcript.squeeze(b"c1").unwrap(); + assert_eq!(hex::encode(c1.to_repr().as_ref()), expected_h1); + + // a scalar + let s3 = ::Scalar::from(128u64); + + // add the scalar to the transcript + transcript.absorb(b"s3", &s3); + + // make a challenge + let c2: ::Scalar = transcript.squeeze(b"c2").unwrap(); + assert_eq!(hex::encode(c2.to_repr().as_ref()), expected_h2); + } + + #[test] + fn test_keccak_transcript() { + test_keccak_transcript_with::( + "9fb71e3b74bfd0b60d97349849b895595779a240b92a6fae86bd2812692b6b0e", + "bfd4c50b7d6317e9267d5d65c985eb455a3561129c0b3beef79bfc8461a84f18", + ); + } + + #[test] + fn test_keccak_example() { + let mut hasher = Keccak256::new(); + hasher.update(0xffffffff_u32.to_le_bytes()); + let output: [u8; 32] = hasher.finalize().into(); + assert_eq!( + hex::encode(output), + "29045a592007d0c246ef02c2223570da9522d0cf0f73282c79a1bc8f0bb2c238" + ); + 
} + + use super::{ + DOM_SEP_TAG, KECCAK256_PREFIX_CHALLENGE_HI, KECCAK256_PREFIX_CHALLENGE_LO, + KECCAK256_STATE_SIZE, PERSONA_TAG, + }; + + fn compute_updated_state_for_testing(input: &[u8]) -> [u8; KECCAK256_STATE_SIZE] { + let input_lo = [input, &[KECCAK256_PREFIX_CHALLENGE_LO]].concat(); + let input_hi = [input, &[KECCAK256_PREFIX_CHALLENGE_HI]].concat(); + + let mut hasher_lo = Keccak256::new(); + let mut hasher_hi = Keccak256::new(); + + hasher_lo.update(&input_lo); + hasher_hi.update(&input_hi); + + let output_lo = hasher_lo.finalize(); + let output_hi = hasher_hi.finalize(); + + [output_lo, output_hi].concat().as_slice().try_into().unwrap() + } + + fn squeeze_for_testing( + transcript: &[u8], + round: u16, + state: [u8; KECCAK256_STATE_SIZE], + label: &'static [u8], + ) -> [u8; 64] { + let input = + [transcript, DOM_SEP_TAG, round.to_le_bytes().as_ref(), state.as_ref(), label].concat(); + compute_updated_state_for_testing(&input) + } + + // This test is meant to ensure compatibility between the incremental way of + // computing the transcript above, and the former, which materialized the + // entirety of the input vector before calling Keccak256 on it. 
+ fn test_keccak_transcript_incremental_vs_explicit_with() { + let test_label = b"test"; + let mut transcript: Keccak256Transcript = Keccak256Transcript::new(test_label); + let mut rng = rand::thread_rng(); + + // ten scalars + let scalars = std::iter::from_fn(|| Some(::Scalar::from(rng.gen::()))) + .take(10) + .collect::>(); + + // add the scalars to the transcripts, + let mut manual_transcript: Vec = vec![]; + let labels = [b"s1", b"s2", b"s3", b"s4", b"s5", b"s6", b"s7", b"s8", b"s9", b"s0"]; + + for i in 0..10 { + transcript.absorb(&labels[i][..], &scalars[i]); + manual_transcript.extend(labels[i]); + manual_transcript.extend(scalars[i].to_transcript_bytes()); + } + + // compute the initial state + let input = [PERSONA_TAG, test_label].concat(); + let initial_state = compute_updated_state_for_testing(&input); + + // make a challenge + let c1: ::Scalar = transcript.squeeze(b"c1").unwrap(); + + let c1_bytes = squeeze_for_testing(&manual_transcript[..], 0u16, initial_state, b"c1"); + let to_hex = |g: E::Scalar| hex::encode(g.to_repr().as_ref()); + assert_eq!(to_hex(c1), to_hex(E::Scalar::from_uniform(&c1_bytes))); + } + + #[test] + fn test_keccak_transcript_incremental_vs_explicit() { + // test_keccak_transcript_incremental_vs_explicit_with::(); + // test_keccak_transcript_incremental_vs_explicit_with::(); + test_keccak_transcript_incremental_vs_explicit_with::(); + test_keccak_transcript_incremental_vs_explicit_with::(); + // test_keccak_transcript_incremental_vs_explicit_with::(); + // test_keccak_transcript_incremental_vs_explicit_with::(); + } +} diff --git a/prover/src/provider/kzg_commitment.rs b/prover/src/provider/kzg_commitment.rs new file mode 100644 index 0000000..30940e0 --- /dev/null +++ b/prover/src/provider/kzg_commitment.rs @@ -0,0 +1,301 @@ +//! 
Commitment engine for KZG commitments + +use std::{io::Cursor, marker::PhantomData, sync::Arc}; + +use ff::{Field, PrimeField, PrimeFieldBits}; +use group::{prime::PrimeCurveAffine, Curve, Group as _}; +use halo2curves::serde::SerdeObject; +use pairing::Engine; +use rand::rngs::StdRng; +use rand_core::{CryptoRng, RngCore, SeedableRng}; +use serde::{Deserialize, Serialize}; + +use crate::{ + digest::SimpleDigestible, + fast_serde, + fast_serde::{FastSerde, SerdeByteError, SerdeByteTypes}, + provider::{pedersen::Commitment, traits::DlogGroup, util::fb_msm}, + traits::{ + commitment::{CommitmentEngineTrait, Len}, + Engine as NovaEngine, Group, TranscriptReprTrait, + }, +}; + +/// `UniversalParams` are the universal parameters for the KZG10 scheme. +#[derive(Debug, Clone, Eq, Serialize, Deserialize)] +#[serde(bound( + serialize = "E::G1Affine: Serialize, E::G2Affine: Serialize", + deserialize = "E::G1Affine: Deserialize<'de>, E::G2Affine: Deserialize<'de>" +))] +pub struct UniversalKZGParam { + /// Group elements of the form `{ β^i G }`, where `i` ranges from 0 to + /// `degree`. + pub powers_of_g: Vec, + /// Group elements of the form `{ β^i H }`, where `i` ranges from 0 to + /// `degree`. + pub powers_of_h: Vec, +} + +impl PartialEq for UniversalKZGParam { + fn eq(&self, other: &Self) -> bool { + self.powers_of_g == other.powers_of_g && self.powers_of_h == other.powers_of_h + } +} +// for the purpose of the Len trait, we count commitment bases, i.e. 
G1 elements +impl Len for UniversalKZGParam { + fn length(&self) -> usize { self.powers_of_g.len() } +} + +/// `UnivariateProverKey` is used to generate a proof +#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] +#[serde(bound( + serialize = "E::G1Affine: Serialize, E::G2Affine: Serialize", + deserialize = "E::G1Affine: Deserialize<'de>, E::G2Affine: Deserialize<'de>" +))] +pub struct KZGProverKey { + /// generators from the universal parameters + uv_params: Arc>, + /// offset at which we start reading into the SRS + offset: usize, + /// maximum supported size + supported_size: usize, +} + +impl KZGProverKey { + pub(in crate::provider) fn new( + uv_params: Arc>, + offset: usize, + supported_size: usize, + ) -> Self { + assert!( + uv_params.max_degree() >= offset + supported_size, + "not enough bases (req: {} from offset {}) in the UVKZGParams (length: {})", + supported_size, + offset, + uv_params.max_degree() + ); + Self { uv_params, offset, supported_size } + } + + pub fn powers_of_g(&self) -> &[E::G1Affine] { + &self.uv_params.powers_of_g[self.offset..self.offset + self.supported_size] + } +} + +/// `UVKZGVerifierKey` is used to check evaluation proofs for a given +/// commitment. +#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] +#[serde(bound(serialize = "E::G1Affine: Serialize, E::G2Affine: Serialize",))] +pub struct KZGVerifierKey { + /// The generator of G1. + pub g: E::G1Affine, + /// The generator of G2. + pub h: E::G2Affine, + /// β times the above generator of G2. + pub beta_h: E::G2Affine, +} + +impl SimpleDigestible for KZGVerifierKey +where + E::G1Affine: Serialize, + E::G2Affine: Serialize, +{ +} + +impl UniversalKZGParam { + /// Returns the maximum supported degree + pub fn max_degree(&self) -> usize { self.powers_of_g.len() } + + /// Trim the universal parameters to specialize the public parameters + /// for univariate polynomials to the given `supported_size`, and + /// returns prover key and verifier key. 
`supported_size` should + /// be in range `1..params.len()` + /// + /// # Panics + /// If `supported_size` is greater than `self.max_degree()`, or + /// `self.max_degree()` is zero. + pub fn trim(ukzg: Arc, supported_size: usize) -> (KZGProverKey, KZGVerifierKey) { + assert!(ukzg.max_degree() > 0, "max_degree is zero"); + let g = ukzg.powers_of_g[0]; + let h = ukzg.powers_of_h[0]; + let beta_h = ukzg.powers_of_h[1]; + let pk = KZGProverKey::new(ukzg, 0, supported_size + 1); + let vk = KZGVerifierKey { g, h, beta_h }; + (pk, vk) + } +} + +impl FastSerde for UniversalKZGParam +where + E::G1Affine: SerdeObject, + E::G2Affine: SerdeObject, +{ + /// Byte format: + /// + /// [0..4] - Magic number (4 bytes) + /// [4] - Serde type: UniversalKZGParam (u8) + /// [5] - Number of sections (u8 = 2) + /// [6] - Section 1 type: powers_of_g (u8) + /// [7..11] - Section 1 size (u32) + /// [11..] - Section 1 data + /// [...+1] - Section 2 type: powers_of_h (u8) + /// [...+5] - Section 2 size (u32) + /// [...end] - Section 2 data + fn to_bytes(&self) -> Vec { + let mut out = Vec::new(); + + out.extend_from_slice(&fast_serde::MAGIC_NUMBER); + out.push(fast_serde::SerdeByteTypes::UniversalKZGParam as u8); + out.push(2); // num_sections + + Self::write_section_bytes( + &mut out, + 1, + &self.powers_of_g.iter().flat_map(|p| p.to_raw_bytes()).collect::>(), + ); + + Self::write_section_bytes( + &mut out, + 2, + &self.powers_of_h.iter().flat_map(|p| p.to_raw_bytes()).collect::>(), + ); + + out + } + + fn from_bytes(bytes: &Vec) -> Result { + let mut cursor = Cursor::new(bytes); + + Self::validate_header(&mut cursor, SerdeByteTypes::UniversalKZGParam, 2)?; + + // Read sections of points + let powers_of_g = Self::read_section_bytes(&mut cursor, 1)? + .chunks(E::G1Affine::identity().to_raw_bytes().len()) + .map(|bytes| E::G1Affine::from_raw_bytes(bytes).ok_or(SerdeByteError::G1DecodeError)) + .collect::, _>>()?; + + let powers_of_h = Self::read_section_bytes(&mut cursor, 2)? 
+ .chunks(E::G2Affine::identity().to_raw_bytes().len()) + .map(|bytes| E::G2Affine::from_raw_bytes(bytes).ok_or(SerdeByteError::G2DecodeError)) + .collect::, _>>()?; + + Ok(Self { powers_of_g, powers_of_h }) + } +} + +impl UniversalKZGParam +where E::Fr: PrimeFieldBits +{ + /// Build SRS for testing. + /// WARNING: THIS FUNCTION IS FOR TESTING PURPOSE ONLY. + /// THE OUTPUT SRS SHOULD NOT BE USED IN PRODUCTION. + pub fn gen_srs_for_testing(mut rng: &mut R, max_degree: usize) -> Self { + let beta = E::Fr::random(&mut rng); + let g = E::G1::random(&mut rng); + let h = E::G2::random(rng); + + let nz_powers_of_beta = (0..=max_degree) + .scan(beta, |acc, _| { + let val = *acc; + *acc *= beta; + Some(val) + }) + .collect::>(); + + let window_size = fb_msm::get_mul_window_size(max_degree); + let scalar_bits = E::Fr::NUM_BITS as usize; + + let (powers_of_g_projective, powers_of_h_projective) = rayon::join( + || { + let g_table = fb_msm::get_window_table(scalar_bits, window_size, g); + fb_msm::multi_scalar_mul::(scalar_bits, window_size, &g_table, &nz_powers_of_beta) + }, + || { + let h_table = fb_msm::get_window_table(scalar_bits, window_size, h); + fb_msm::multi_scalar_mul::(scalar_bits, window_size, &h_table, &nz_powers_of_beta) + }, + ); + + let mut powers_of_g = vec![E::G1Affine::identity(); powers_of_g_projective.len()]; + let mut powers_of_h = vec![E::G2Affine::identity(); powers_of_h_projective.len()]; + + rayon::join( + || E::G1::batch_normalize(&powers_of_g_projective, &mut powers_of_g), + || E::G2::batch_normalize(&powers_of_h_projective, &mut powers_of_h), + ); + + Self { powers_of_g, powers_of_h } + } +} + +/// Commitments +#[derive(Debug, Clone, Copy, Eq, PartialEq, Default, Serialize, Deserialize)] +#[serde(bound( + serialize = "E::G1Affine: Serialize", + deserialize = "E::G1Affine: Deserialize<'de>" +))] +pub struct UVKZGCommitment( + /// the actual commitment is an affine point. 
+ pub E::G1Affine, +); + +impl TranscriptReprTrait for UVKZGCommitment +where + E::G1: DlogGroup, + // Note: due to the move of the bound TranscriptReprTrait on G::Base from Group to Engine + ::Base: TranscriptReprTrait, +{ + fn to_transcript_bytes(&self) -> Vec { + // TODO: avoid the round-trip through the group (to_curve .. to_coordinates) + let (x, y, is_infinity) = self.0.to_curve().to_coordinates(); + let is_infinity_byte = (!is_infinity).into(); + [x.to_transcript_bytes(), y.to_transcript_bytes(), [is_infinity_byte].to_vec()].concat() + } +} + +/// Provides a commitment engine +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct KZGCommitmentEngine { + _p: PhantomData, +} + +impl> CommitmentEngineTrait + for KZGCommitmentEngine +where + E::G1: DlogGroup, + E::G1Affine: Serialize + for<'de> Deserialize<'de>, + E::G2Affine: Serialize + for<'de> Deserialize<'de>, + E::Fr: PrimeFieldBits, // TODO due to use of gen_srs_for_testing, make optional +{ + type Commitment = Commitment; + type CommitmentKey = UniversalKZGParam; + + fn setup(label: &'static [u8], n: usize) -> Self::CommitmentKey { + // TODO: this is just for testing, replace by grabbing from a real setup for + // production + let mut bytes = [0u8; 32]; + let len = label.len().min(32); + bytes[..len].copy_from_slice(&label[..len]); + let rng = &mut StdRng::from_seed(bytes); + UniversalKZGParam::gen_srs_for_testing(rng, n.next_power_of_two()) + } + + fn commit(ck: &Self::CommitmentKey, v: &[::Scalar]) -> Self::Commitment { + assert!(ck.length() >= v.len()); + Commitment { comm: E::G1::vartime_multiscalar_mul(v, &ck.powers_of_g[..v.len()]) } + } +} + +impl> From> + for UVKZGCommitment +where E::G1: Group +{ + fn from(c: Commitment) -> Self { Self(c.comm.to_affine()) } +} + +impl> From> + for Commitment +where E::G1: Group +{ + fn from(c: UVKZGCommitment) -> Self { Self { comm: c.0.to_curve() } } +} diff --git a/prover/src/provider/mod.rs b/prover/src/provider/mod.rs new file mode 100644 index 0000000..5b8f369 
--- /dev/null +++ b/prover/src/provider/mod.rs @@ -0,0 +1,170 @@ +//! This module implements Nova's traits using the following several different +//! combinations + +// public modules to be used as an evaluation engine with Spartan +pub mod hyperkzg; +pub mod ipa_pc; + +// crate-public modules, made crate-public mostly for tests +pub(crate) mod bn256_grumpkin; +mod pedersen; +pub(crate) mod poseidon; +pub(crate) mod traits; +// a non-hiding variant of kzg +mod kzg_commitment; +pub(crate) mod util; + +// crate-private modules +mod keccak; +mod tests; + +use halo2curves::bn256::Bn256; + +use self::kzg_commitment::KZGCommitmentEngine; +use crate::{ + provider::{ + bn256_grumpkin::{bn256, grumpkin}, + keccak::Keccak256Transcript, + pedersen::CommitmentEngine as PedersenCommitmentEngine, + poseidon::{PoseidonRO, PoseidonROCircuit}, + }, + traits::{CurveCycleEquipped, Engine}, +}; + +/// An implementation of the Nova `Engine` trait with Grumpkin curve and +/// Pedersen commitment scheme +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub struct GrumpkinEngine; + +/// An implementation of the Nova `Engine` trait with BN254 curve and Pedersen +/// commitment scheme +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub struct Bn256EngineIPA; + +impl Engine for Bn256EngineIPA { + type Base = bn256::Base; + type CE = PedersenCommitmentEngine; + type GE = bn256::Point; + type RO = PoseidonRO; + type ROCircuit = PoseidonROCircuit; + type Scalar = bn256::Scalar; + type TE = Keccak256Transcript; +} + +impl Engine for GrumpkinEngine { + type Base = grumpkin::Base; + type CE = PedersenCommitmentEngine; + type GE = grumpkin::Point; + type RO = PoseidonRO; + type ROCircuit = PoseidonROCircuit; + type Scalar = grumpkin::Scalar; + type TE = Keccak256Transcript; +} + +/// An implementation of the Nova `Engine` trait with BN254 curve and Zeromorph +/// commitment scheme +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub struct Bn256EngineZM; + +impl Engine for Bn256EngineZM { + type Base = 
bn256::Base; + type CE = KZGCommitmentEngine; + type GE = bn256::Point; + type RO = PoseidonRO; + type ROCircuit = PoseidonROCircuit; + type Scalar = bn256::Scalar; + type TE = Keccak256Transcript; +} +/// An implementation of Nova traits with HyperKZG over the BN256 curve +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub struct Bn256EngineKZG; + +impl Engine for Bn256EngineKZG { + type Base = bn256::Base; + type CE = KZGCommitmentEngine; + type GE = bn256::Point; + type RO = PoseidonRO; + type ROCircuit = PoseidonROCircuit; + type Scalar = bn256::Scalar; + type TE = Keccak256Transcript; +} + +impl CurveCycleEquipped for Bn256EngineIPA { + type Secondary = GrumpkinEngine; +} + +impl CurveCycleEquipped for Bn256EngineKZG { + type Secondary = GrumpkinEngine; +} + +impl CurveCycleEquipped for Bn256EngineZM { + type Secondary = GrumpkinEngine; +} + +#[cfg(test)] +mod test { + use std::io::Read; + + use digest::{ExtendableOutput, Update}; + use group::{ff::Field, Curve, Group}; + use halo2curves::{CurveAffine, CurveExt}; + use itertools::Itertools as _; + use rand_core::OsRng; + use sha3::Shake256; + + use crate::provider::{ + bn256_grumpkin::{bn256, grumpkin}, + traits::DlogGroup, + util::msm::cpu_best_msm, + }; + + macro_rules! 
impl_cycle_pair_test { + ($curve:ident) => { + fn from_label_serial(label: &'static [u8], n: usize) -> Vec<$curve::Affine> { + let mut shake = Shake256::default(); + shake.update(label); + let mut reader = shake.finalize_xof(); + (0..n) + .map(|_| { + let mut uniform_bytes = [0u8; 32]; + reader.read_exact(&mut uniform_bytes).unwrap(); + let hash = $curve::Point::hash_to_curve("from_uniform_bytes"); + hash(&uniform_bytes).to_affine() + }) + .collect() + } + + let label = b"test_from_label"; + for n in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 1021] { + let ck_par = <$curve::Point as DlogGroup>::from_label(label, n); + let ck_ser = from_label_serial(label, n); + assert_eq!(ck_par.len(), n); + assert_eq!(ck_ser.len(), n); + assert_eq!(ck_par, ck_ser); + } + }; + } + + fn test_msm_with>() { + let n = 8; + let coeffs = (0..n).map(|_| F::random(OsRng)).collect::>(); + let bases = (0..n).map(|_| A::from(A::generator() * F::random(OsRng))).collect::>(); + let naive = coeffs + .iter() + .zip_eq(bases.iter()) + .fold(A::CurveExt::identity(), |acc, (coeff, base)| acc + *base * coeff); + + assert_eq!(naive, cpu_best_msm(&bases, &coeffs)) + } + + #[test] + fn test_msm() { + test_msm_with::(); + test_msm_with::(); + } + + #[test] + fn test_bn256_from_label() { + impl_cycle_pair_test!(bn256); + } +} diff --git a/prover/src/provider/pedersen.rs b/prover/src/provider/pedersen.rs new file mode 100644 index 0000000..47e0b09 --- /dev/null +++ b/prover/src/provider/pedersen.rs @@ -0,0 +1,310 @@ +//! 
This module provides an implementation of a commitment engine +use core::{ + fmt::Debug, + marker::PhantomData, + ops::{Add, Mul, MulAssign}, +}; +use std::io::Cursor; + +use ff::Field; +use group::{ + prime::{PrimeCurve, PrimeCurveAffine}, + Curve, Group, GroupEncoding, +}; +use halo2curves::serde::SerdeObject; +use rayon::prelude::*; +use serde::{Deserialize, Serialize}; + +use crate::{ + errors::NovaError, + fast_serde, + fast_serde::{FastSerde, SerdeByteError, SerdeByteTypes}, + provider::traits::DlogGroup, + traits::{ + commitment::{CommitmentEngineTrait, CommitmentTrait, Len}, + AbsorbInROTrait, Engine, ROTrait, TranscriptReprTrait, + }, + zip_with, +}; + +/// A type that holds commitment generators +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct CommitmentKey +where + E: Engine, + E::GE: DlogGroup, { + pub ck: Vec<::Affine>, +} + +impl Len for CommitmentKey +where + E: Engine, + E::GE: DlogGroup, +{ + fn length(&self) -> usize { self.ck.len() } +} + +impl FastSerde for CommitmentKey +where + ::Affine: SerdeObject, + E::GE: DlogGroup, +{ + /// Byte format: + /// + /// [0..4] - Magic number (4 bytes) + /// [4] - Serde type: CommitmentKey (u8) + /// [5] - Number of sections (u8 = 1) + /// [6] - Section 1 type: ck (u8) + /// [7..11] - Section 1 size (u32) + /// [11..] - Section 1 data + fn to_bytes(&self) -> Vec { + let mut out = Vec::new(); + + out.extend_from_slice(&fast_serde::MAGIC_NUMBER); + out.push(fast_serde::SerdeByteTypes::CommitmentKey as u8); + out.push(1); // num_sections + + Self::write_section_bytes( + &mut out, + 1, + &self.ck.iter().flat_map(|p| p.to_raw_bytes()).collect::>(), + ); + + out + } + + fn from_bytes(bytes: &Vec) -> Result { + let mut cursor = Cursor::new(bytes); + + // Validate header + Self::validate_header(&mut cursor, SerdeByteTypes::CommitmentKey, 1)?; + + // Read ck section + let ck = Self::read_section_bytes(&mut cursor, 1)? 
+ .chunks(::Affine::identity().to_raw_bytes().len()) + .map(|bytes| { + ::Affine::from_raw_bytes(bytes).ok_or(SerdeByteError::G1DecodeError) + }) + .collect::, _>>()?; + + Ok(Self { ck }) + } +} + +/// A type that holds a commitment +#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct Commitment { + pub(crate) comm: E::GE, +} + +/// A type that holds a compressed commitment +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct CompressedCommitment +where + E: Engine, + E::GE: DlogGroup, { + pub(crate) comm: ::Compressed, +} + +impl CommitmentTrait for Commitment +where + E: Engine, + E::GE: DlogGroup, +{ + type CompressedCommitment = CompressedCommitment; + + fn compress(&self) -> Self::CompressedCommitment { + CompressedCommitment { comm: ::to_bytes(&self.comm).into() } + } + + fn to_coordinates(&self) -> (E::Base, E::Base, bool) { self.comm.to_coordinates() } + + fn decompress(c: &Self::CompressedCommitment) -> Result { + let opt_comm = <::GE as GroupEncoding>::from_bytes(&c.comm.clone().into()); + let Some(comm) = Option::from(opt_comm) else { + return Err(NovaError::DecompressionError); + }; + Ok(Self { comm }) + } +} + +impl Default for Commitment +where + E: Engine, + E::GE: DlogGroup, +{ + fn default() -> Self { Self { comm: E::GE::identity() } } +} + +impl TranscriptReprTrait for Commitment +where + E: Engine, + E::GE: DlogGroup, +{ + fn to_transcript_bytes(&self) -> Vec { + let (x, y, is_infinity) = self.comm.to_coordinates(); + let is_infinity_byte = (!is_infinity).into(); + [x.to_transcript_bytes(), y.to_transcript_bytes(), [is_infinity_byte].to_vec()].concat() + } +} + +impl AbsorbInROTrait for Commitment +where + E: Engine, + E::GE: DlogGroup, +{ + fn absorb_in_ro(&self, ro: &mut E::RO) { + let (x, y, is_infinity) = self.comm.to_coordinates(); + ro.absorb(x); + ro.absorb(y); + ro.absorb(if is_infinity { E::Base::ONE } else { E::Base::ZERO }); + } +} + +impl 
TranscriptReprTrait for CompressedCommitment +where + E: Engine, + E::GE: DlogGroup, +{ + fn to_transcript_bytes(&self) -> Vec { self.comm.to_transcript_bytes() } +} + +impl MulAssign for Commitment +where + E: Engine, + E::GE: DlogGroup, +{ + fn mul_assign(&mut self, scalar: E::Scalar) { *self = Self { comm: self.comm * scalar }; } +} + +impl<'a, 'b, E> Mul<&'b E::Scalar> for &'a Commitment +where + E: Engine, + E::GE: DlogGroup, +{ + type Output = Commitment; + + fn mul(self, scalar: &'b E::Scalar) -> Commitment { Commitment { comm: self.comm * scalar } } +} + +impl Mul for Commitment +where + E: Engine, + E::GE: DlogGroup, +{ + type Output = Self; + + fn mul(self, scalar: E::Scalar) -> Self { Self { comm: self.comm * scalar } } +} + +impl Add for Commitment +where + E: Engine, + E::GE: DlogGroup, +{ + type Output = Self; + + fn add(self, other: Self) -> Self { Self { comm: self.comm + other.comm } } +} + +/// Provides a commitment engine +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct CommitmentEngine { + _p: PhantomData, +} + +impl CommitmentEngineTrait for CommitmentEngine +where + E: Engine, + E::GE: DlogGroup, +{ + type Commitment = Commitment; + type CommitmentKey = CommitmentKey; + + fn setup(label: &'static [u8], n: usize) -> Self::CommitmentKey { + Self::CommitmentKey { ck: E::GE::from_label(label, n.next_power_of_two()) } + } + + fn commit(ck: &Self::CommitmentKey, v: &[E::Scalar]) -> Self::Commitment { + assert!(ck.ck.len() >= v.len()); + Commitment { comm: E::GE::vartime_multiscalar_mul(v, &ck.ck[..v.len()]) } + } +} + +/// A trait listing properties of a commitment key that can be managed in a +/// divide-and-conquer fashion +pub trait CommitmentKeyExtTrait +where + E: Engine, + E::GE: DlogGroup, { + /// Splits the commitment key into two pieces at a specified point + fn split_at(self, n: usize) -> (Self, Self) + where Self: Sized; + + /// Combines two commitment keys into one + fn combine(&self, other: &Self) -> Self; + + /// Folds the two 
commitment keys into one using the provided weights + fn fold(L: &Self, R: &Self, w1: &E::Scalar, w2: &E::Scalar) -> Self; + + /// Scales the commitment key using the provided scalar + fn scale(&mut self, r: &E::Scalar); + + /// Reinterprets commitments as commitment keys + fn reinterpret_commitments_as_ck( + c: &[<<::CE as CommitmentEngineTrait>::Commitment as CommitmentTrait< + E, + >>::CompressedCommitment], + ) -> Result + where + Self: Sized; +} + +impl CommitmentKeyExtTrait for CommitmentKey +where + E: Engine>, + E::GE: DlogGroup, +{ + fn split_at(mut self, n: usize) -> (Self, Self) { + let right = self.ck.split_off(n); + (self, Self { ck: right }) + } + + fn combine(&self, other: &Self) -> Self { + let ck = { self.ck.iter().cloned().chain(other.ck.iter().cloned()).collect::>() }; + Self { ck } + } + + // combines the left and right halves of `self` using `w1` and `w2` as the + // weights + fn fold(L: &Self, R: &Self, w1: &E::Scalar, w2: &E::Scalar) -> Self { + debug_assert!(L.ck.len() == R.ck.len()); + let ck_curve: Vec = zip_with!(par_iter, (L.ck, R.ck), |l, r| { + E::GE::vartime_multiscalar_mul(&[*w1, *w2], &[*l, *r]) + }) + .collect(); + let mut ck_affine = vec![::Affine::identity(); L.ck.len()]; + E::GE::batch_normalize(&ck_curve, &mut ck_affine); + + Self { ck: ck_affine } + } + + /// Scales each element in `self` by `r` + fn scale(&mut self, r: &E::Scalar) { + let ck_scaled: Vec = self.ck.par_iter().map(|g| *g * r).collect(); + E::GE::batch_normalize(&ck_scaled, &mut self.ck); + } + + /// reinterprets a vector of commitments as a set of generators + fn reinterpret_commitments_as_ck(c: &[CompressedCommitment]) -> Result { + let d = c + .par_iter() + .map(|c| Commitment::::decompress(c).map(|c| c.comm)) + .collect::, NovaError>>()?; + let mut ck = vec![::Affine::identity(); d.len()]; + E::GE::batch_normalize(&d, &mut ck); + Ok(Self { ck }) + } +} diff --git a/prover/src/provider/poseidon.rs b/prover/src/provider/poseidon.rs new file mode 100644 index 
0000000..8452bae --- /dev/null +++ b/prover/src/provider/poseidon.rs @@ -0,0 +1,222 @@ +//! Poseidon Constants and Poseidon-based RO used in Nova +use core::marker::PhantomData; + +use bellpepper_core::{ + boolean::{AllocatedBit, Boolean}, + num::AllocatedNum, + ConstraintSystem, SynthesisError, +}; +use ff::{PrimeField, PrimeFieldBits}; +use generic_array::typenum::U24; +use neptune::{ + circuit2::Elt, + poseidon::PoseidonConstants, + sponge::{ + api::{IOPattern, SpongeAPI, SpongeOp}, + circuit::SpongeCircuit, + vanilla::{Mode::Simplex, Sponge, SpongeTrait}, + }, + Strength, +}; +use serde::{Deserialize, Serialize}; + +use crate::traits::{ROCircuitTrait, ROTrait}; + +/// All Poseidon Constants that are used in Nova +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct PoseidonConstantsCircuit(PoseidonConstants); + +impl Default for PoseidonConstantsCircuit { + /// Generate Poseidon constants + fn default() -> Self { Self(Sponge::::api_constants(Strength::Standard)) } +} + +/// A Poseidon-based RO to use outside circuits +#[derive(Debug)] +pub struct PoseidonRO +where + Base: PrimeField, + Scalar: PrimeField, { + state: Vec, + constants: PoseidonConstantsCircuit, + num_absorbs: usize, + squeezed: bool, + _p: PhantomData, +} + +impl ROTrait for PoseidonRO +where + Base: PrimeField + PrimeFieldBits + Serialize + for<'de> Deserialize<'de>, + Scalar: PrimeField, +{ + type CircuitRO = PoseidonROCircuit; + type Constants = PoseidonConstantsCircuit; + + fn new(constants: PoseidonConstantsCircuit, num_absorbs: usize) -> Self { + Self { state: Vec::new(), constants, num_absorbs, squeezed: false, _p: PhantomData } + } + + /// Absorb a new number into the state of the oracle + fn absorb(&mut self, e: Base) { + assert!(!self.squeezed, "Cannot absorb after squeezing"); + self.state.push(e); + } + + /// Compute a challenge by hashing the current state + fn squeeze(&mut self, num_bits: usize) -> Scalar { + // check if we have squeezed already + 
assert!(!self.squeezed, "Cannot squeeze again after squeezing"); + self.squeezed = true; + + let mut sponge = Sponge::new_with_constants(&self.constants.0, Simplex); + let acc = &mut (); + let parameter = + IOPattern(vec![SpongeOp::Absorb(self.num_absorbs as u32), SpongeOp::Squeeze(1u32)]); + + sponge.start(parameter, None, acc); + assert_eq!(self.num_absorbs, self.state.len()); + SpongeAPI::absorb(&mut sponge, self.num_absorbs as u32, &self.state, acc); + let hash = SpongeAPI::squeeze(&mut sponge, 1, acc); + sponge.finish(acc).unwrap(); + + // Only return `num_bits` + let bits = hash[0].to_le_bits(); + let mut res = Scalar::ZERO; + let mut coeff = Scalar::ONE; + for bit in bits[..num_bits].into_iter() { + if *bit { + res += coeff; + } + coeff += coeff; + } + res + } +} + +/// A Poseidon-based RO gadget to use inside the verifier circuit. +#[derive(Debug)] +pub struct PoseidonROCircuit { + // Internal state + state: Vec>, + constants: PoseidonConstantsCircuit, + num_absorbs: usize, + squeezed: bool, +} + +impl ROCircuitTrait for PoseidonROCircuit +where Scalar: PrimeField + PrimeFieldBits + Serialize + for<'de> Deserialize<'de> +{ + type Constants = PoseidonConstantsCircuit; + type NativeRO = PoseidonRO; + + /// Initialize the internal state and set the poseidon constants + fn new(constants: PoseidonConstantsCircuit, num_absorbs: usize) -> Self { + Self { state: Vec::new(), constants, num_absorbs, squeezed: false } + } + + /// Absorb a new number into the state of the oracle + fn absorb(&mut self, e: &AllocatedNum) { + assert!(!self.squeezed, "Cannot absorb after squeezing"); + self.state.push(e.clone()); + } + + /// Compute a challenge by hashing the current state + fn squeeze>( + &mut self, + mut cs: CS, + num_bits: usize, + ) -> Result, SynthesisError> { + // check if we have squeezed already + assert!(!self.squeezed, "Cannot squeeze again after squeezing"); + self.squeezed = true; + let parameter = + IOPattern(vec![SpongeOp::Absorb(self.num_absorbs as u32), 
SpongeOp::Squeeze(1u32)]); + let mut ns = cs.namespace(|| "ns"); + + let hash = { + let mut sponge = SpongeCircuit::new_with_constants(&self.constants.0, Simplex); + let acc = &mut ns; + assert_eq!(self.num_absorbs, self.state.len()); + + sponge.start(parameter, None, acc); + SpongeAPI::absorb( + &mut sponge, + self.num_absorbs as u32, + &(0..self.state.len()) + .map(|i| Elt::Allocated(self.state[i].clone())) + .collect::>>(), + acc, + ); + + let output = SpongeAPI::squeeze(&mut sponge, 1, acc); + sponge.finish(acc).unwrap(); + output + }; + + let hash = Elt::ensure_allocated(&hash[0], &mut ns.namespace(|| "ensure allocated"), true)?; + + // return the hash as a vector of bits, truncated + Ok( + hash + .to_bits_le_strict(ns.namespace(|| "poseidon hash to boolean"))? + .iter() + .map(|boolean| match boolean { + Boolean::Is(ref x) => x.clone(), + _ => panic!("Wrong type of input. We should have never reached there"), + }) + .collect::>()[..num_bits] + .into(), + ) + } +} + +#[cfg(test)] +mod tests { + use ff::Field; + use rand::rngs::OsRng; + + use super::*; + use crate::{ + bellpepper::solver::SatisfyingAssignment, + constants::NUM_CHALLENGE_BITS, + gadgets::le_bits_to_num, + provider::{Bn256EngineKZG, GrumpkinEngine}, + traits::Engine, + }; + + fn test_poseidon_ro_with() + where + // we can print the field elements we get from E's Base & Scalar fields, + // and compare their byte representations + <::Base as PrimeField>::Repr: std::fmt::Debug, + <::Scalar as PrimeField>::Repr: std::fmt::Debug, + <::Base as PrimeField>::Repr: + PartialEq<<::Scalar as PrimeField>::Repr>, { + // Check that the number computed inside the circuit is equal to the number + // computed outside the circuit + let mut csprng: OsRng = OsRng; + let constants = PoseidonConstantsCircuit::::default(); + let num_absorbs = 32; + let mut ro: PoseidonRO = PoseidonRO::new(constants.clone(), num_absorbs); + let mut ro_gadget: PoseidonROCircuit = + PoseidonROCircuit::new(constants, num_absorbs); + let 
mut cs = SatisfyingAssignment::::new(); + for i in 0..num_absorbs { + let num = E::Scalar::random(&mut csprng); + ro.absorb(num); + let num_gadget = AllocatedNum::alloc_infallible(cs.namespace(|| format!("data {i}")), || num); + num_gadget.inputize(&mut cs.namespace(|| format!("input {i}"))).unwrap(); + ro_gadget.absorb(&num_gadget); + } + let num = ro.squeeze(NUM_CHALLENGE_BITS); + let num2_bits = ro_gadget.squeeze(&mut cs, NUM_CHALLENGE_BITS).unwrap(); + let num2 = le_bits_to_num(&mut cs, &num2_bits).unwrap(); + assert_eq!(num.to_repr(), num2.get_value().unwrap().to_repr()); + } + + #[test] + fn test_poseidon_ro() { + test_poseidon_ro_with::(); + test_poseidon_ro_with::(); + } +} diff --git a/prover/src/provider/tests/ipa_pc.rs b/prover/src/provider/tests/ipa_pc.rs new file mode 100644 index 0000000..586cee6 --- /dev/null +++ b/prover/src/provider/tests/ipa_pc.rs @@ -0,0 +1,128 @@ +#[cfg(test)] +mod test { + use group::Curve; + use handlebars::Handlebars; + use serde_json::{json, Map, Value}; + + use crate::provider::{ + ipa_pc::EvaluationEngine, + pedersen::{CommitmentKey, CommitmentKeyExtTrait}, + tests::solidity_compatibility_utils::{ + compressed_commitment_to_json, ec_points_to_json, field_elements_to_json, + generate_pcs_solidity_unit_test_data, + }, + GrumpkinEngine, + }; + + static IPA_COMPATIBILITY_UNIT_TESTING_TEMPLATE: &str = " +// SPDX-License-Identifier: Apache-2.0 +pragma solidity ^0.8.16; +import \"@std/Test.sol\"; +import \"src/blocks/grumpkin/Grumpkin.sol\"; +import \"src/blocks/EqPolynomial.sol\"; +import \"src/Utilities.sol\"; +import \"src/blocks/IpaPcs.sol\"; + +contract IpaTest is Test { +function composeIpaInput() public pure returns (InnerProductArgument.IpaInputGrumpkin memory) { +Grumpkin.GrumpkinAffinePoint[] memory ck_v = new Grumpkin.GrumpkinAffinePoint[]({{ len ck_v }}); +{{ #each ck_v }} ck_v[{{ i }}]=Grumpkin.GrumpkinAffinePoint({{ x }}, {{y}});\n {{ /each }} + +Grumpkin.GrumpkinAffinePoint[] memory ck_s = new 
Grumpkin.GrumpkinAffinePoint[]({{ len ck_s }}); +{{ #each ck_s }} ck_s[{{ i }}]=Grumpkin.GrumpkinAffinePoint({{ x }}, {{y}});\n {{ /each }} + +uint256[] memory point = new uint256[]({{ len point }}); +{{ #each point }} point[{{ i }}]={{ val }};\n {{ /each }} + +uint256[] memory L_vec = new uint256[]({{ len L_vec }}); +{{ #each L_vec }} L_vec[{{ i }}]={{ compressed }};\n {{ /each }} + +uint256[] memory R_vec = new uint256[]({{ len R_vec }}); +{{ #each R_vec }} R_vec[{{ i }}]={{ compressed }};\n {{ /each }} + +uint256 a_hat = {{ a_hat }}; + +// InnerProductInstance +Grumpkin.GrumpkinAffinePoint memory commitment = Grumpkin.GrumpkinAffinePoint({{ commitment_x }}, \ + {{ commitment_y }}); + +uint256 eval = {{ eval }}; + +return InnerProductArgument.IpaInputGrumpkin(ck_v, ck_s, point, L_vec, R_vec, commitment, eval, \ + a_hat); +} + +function testIpaGrumpkinVerification_{{ num_vars }}_Variables() public { +InnerProductArgument.IpaInputGrumpkin memory input = composeIpaInput(); +assertTrue(InnerProductArgument.verifyGrumpkin(input, getTranscript())); +} + +function getTranscript() public pure returns (KeccakTranscriptLib.KeccakTranscript memory) { +// b\"TestEval\" in Rust +uint8[] memory label = new uint8[](8); +label[0] = 0x54; +label[1] = 0x65; +label[2] = 0x73; +label[3] = 0x74; +label[4] = 0x45; +label[5] = 0x76; +label[6] = 0x61; +label[7] = 0x6c; + +KeccakTranscriptLib.KeccakTranscript memory keccak_transcript = \ + KeccakTranscriptLib.instantiate(label); +return keccak_transcript; +} +} +"; + + // To generate Solidity unit-test: + // cargo test test_solidity_compatibility_ipa --release -- --ignored --nocapture + // > ipa.t.sol + #[test] + #[ignore] + fn test_solidity_compatibility_ipa() { + let num_vars = 2; + + // Secondary part of verification is IPA over Grumpkin + let (commitment, point, eval, proof, vk) = + generate_pcs_solidity_unit_test_data::<_, EvaluationEngine>(num_vars); + + let num_vars_string = format!("{}", num_vars); + let eval_string = 
format!("{:?}", eval); + let commitment_x_string = format!("{:?}", commitment.comm.to_affine().x); + let commitment_y_string = format!("{:?}", commitment.comm.to_affine().y); + let proof_a_hat_string = format!("{:?}", proof.a_hat); + + let r_vec = CommitmentKey::::reinterpret_commitments_as_ck(&proof.R_vec) + .expect("can't reinterpred R_vec"); + let l_vec = CommitmentKey::::reinterpret_commitments_as_ck(&proof.L_vec) + .expect("can't reinterpred L_vec"); + + let r_vec_array = compressed_commitment_to_json::(&r_vec.ck); + let l_vec_array = compressed_commitment_to_json::(&l_vec.ck); + let point_array = field_elements_to_json::(&point); + let ckv_array = ec_points_to_json::(&vk.ck_v.ck); + let cks_array = ec_points_to_json::(&vk.ck_s.ck); + + let mut map = Map::new(); + map.insert("num_vars".to_string(), Value::String(num_vars_string)); + map.insert("eval".to_string(), Value::String(eval_string)); + map.insert("commitment_x".to_string(), Value::String(commitment_x_string)); + map.insert("commitment_y".to_string(), Value::String(commitment_y_string)); + map.insert("R_vec".to_string(), Value::Array(r_vec_array)); + map.insert("L_vec".to_string(), Value::Array(l_vec_array)); + map.insert("a_hat".to_string(), Value::String(proof_a_hat_string)); + map.insert("point".to_string(), Value::Array(point_array)); + map.insert("ck_v".to_string(), Value::Array(ckv_array)); + map.insert("ck_s".to_string(), Value::Array(cks_array)); + + let mut reg = Handlebars::new(); + reg + .register_template_string("ipa.t.sol", IPA_COMPATIBILITY_UNIT_TESTING_TEMPLATE) + .expect("can't register template"); + + let solidity_unit_test_source = reg.render("ipa.t.sol", &json!(map)).expect("can't render"); + println!("{}", solidity_unit_test_source); + } +} diff --git a/prover/src/provider/tests/mod.rs b/prover/src/provider/tests/mod.rs new file mode 100644 index 0000000..cb37ee8 --- /dev/null +++ b/prover/src/provider/tests/mod.rs @@ -0,0 +1,134 @@ +mod ipa_pc; + +#[cfg(test)] +pub mod 
solidity_compatibility_utils { + use std::sync::Arc; + + use group::{ + prime::{PrimeCurve, PrimeCurveAffine}, + GroupEncoding, + }; + use rand::rngs::StdRng; + use serde_json::{Map, Value}; + + use crate::{ + provider::traits::DlogGroup, + spartan::polys::multilinear::MultilinearPolynomial, + traits::{commitment::CommitmentEngineTrait, evaluation::EvaluationEngineTrait, Engine}, + }; + + pub(crate) fn generate_pcs_solidity_unit_test_data>( + num_vars: usize, + ) -> ( + >::Commitment, + Vec, + E::Scalar, + EE::EvaluationArgument, + EE::VerifierKey, + ) { + use rand_core::SeedableRng; + + let mut rng = StdRng::seed_from_u64(num_vars as u64); + + let (poly, point, eval) = + crate::provider::util::test_utils::random_poly_with_eval::(num_vars, &mut rng); + + // Mock commitment key. + let ck = E::CE::setup(b"test", 1 << num_vars); + let ck_arc = Arc::new(ck.clone()); + // Commits to the provided vector using the provided generators. + let commitment = E::CE::commit(&ck_arc, poly.evaluations()); + + let (proof, vk) = prove_verify_solidity::(ck_arc, &commitment, &poly, &point, &eval); + + (commitment, point, eval, proof, vk) + } + + fn prove_verify_solidity>( + ck: Arc<<::CE as CommitmentEngineTrait>::CommitmentKey>, + commitment: &<::CE as CommitmentEngineTrait>::Commitment, + poly: &MultilinearPolynomial<::Scalar>, + point: &[::Scalar], + eval: &::Scalar, + ) -> (EE::EvaluationArgument, EE::VerifierKey) { + use crate::traits::TranscriptEngineTrait; + + // Generate Prover and verifier key for given commitment key. + let ock = ck.clone(); + let (prover_key, verifier_key) = EE::setup(ck); + + // Generate proof. + let mut prover_transcript = E::TE::new(b"TestEval"); + let proof: EE::EvaluationArgument = EE::prove( + &*ock, + &prover_key, + &mut prover_transcript, + commitment, + poly.evaluations(), + point, + eval, + ) + .unwrap(); + let pcp = prover_transcript.squeeze(b"c").unwrap(); + + // Verify proof. 
+ let mut verifier_transcript = E::TE::new(b"TestEval"); + EE::verify(&verifier_key, &mut verifier_transcript, commitment, point, eval, &proof).unwrap(); + let pcv = verifier_transcript.squeeze(b"c").unwrap(); + + // Check if the prover transcript and verifier transcript are kept in the same + // state. + assert_eq!(pcp, pcv); + + (proof, verifier_key) + } + + pub(crate) fn field_elements_to_json(field_elements: &[E::Scalar]) -> Vec { + let mut value_vector = vec![]; + field_elements.iter().enumerate().for_each(|(i, fe)| { + let mut value = Map::new(); + value.insert("i".to_string(), Value::String(i.to_string())); + value.insert("val".to_string(), Value::String(format!("{:?}", fe))); + value_vector.push(Value::Object(value)); + }); + value_vector + } + + pub(crate) fn ec_points_to_json(ec_points: &[::Affine]) -> Vec + where + E: Engine, + E::GE: DlogGroup, { + let mut value_vector = vec![]; + ec_points.iter().enumerate().for_each(|(i, ec_point)| { + let mut value = Map::new(); + let coordinates_info = ec_point.to_curve().to_coordinates(); + let not_infinity = !coordinates_info.2; + assert!(not_infinity); + value.insert("i".to_string(), Value::String(i.to_string())); + value.insert("x".to_string(), Value::String(format!("{:?}", coordinates_info.0))); + value.insert("y".to_string(), Value::String(format!("{:?}", coordinates_info.1))); + value_vector.push(Value::Object(value)); + }); + value_vector + } + + pub(crate) fn compressed_commitment_to_json( + ec_points: &[::Affine], + ) -> Vec + where + E: Engine, + E::GE: DlogGroup, { + let mut value_vector = vec![]; + ec_points.iter().enumerate().for_each(|(i, ec_point)| { + let mut value = Map::new(); + let compressed_commitment_info = ec_point.to_curve().to_bytes(); + let mut data = compressed_commitment_info.as_ref().to_vec(); + data.reverse(); + + value.insert("i".to_string(), Value::String(i.to_string())); + value.insert("compressed".to_string(), Value::String(format!("0x{}", hex::encode(data)))); + 
value_vector.push(Value::Object(value)); + }); + value_vector + } +} diff --git a/prover/src/provider/traits.rs b/prover/src/provider/traits.rs new file mode 100644 index 0000000..c5068ec --- /dev/null +++ b/prover/src/provider/traits.rs @@ -0,0 +1,189 @@ +use std::{fmt::Debug, ops::Mul}; + +use group::{ + prime::{PrimeCurve, PrimeCurveAffine}, + GroupEncoding, +}; +use serde::{Deserialize, Serialize}; + +use crate::traits::{Group, TranscriptReprTrait}; + +/// A trait that defines extensions to the Group trait +pub trait DlogGroup: + Group::ScalarExt> + + Serialize + + for<'de> Deserialize<'de> + + PrimeCurve::ScalarExt, Affine = ::AffineExt> { + type ScalarExt; + type AffineExt: Clone + + Debug + + Eq + + Serialize + + for<'de> Deserialize<'de> + + Sync + + Send + // technical bounds, should disappear when associated_type_bounds stabilizes + + Mul + + PrimeCurveAffine; + type Compressed: Clone + + Debug + + Eq + + From<::Repr> + + Into<::Repr> + + Serialize + + for<'de> Deserialize<'de> + + Sync + + Send + + TranscriptReprTrait; + + /// A method to compute a multiexponentation + fn vartime_multiscalar_mul(scalars: &[Self::ScalarExt], bases: &[Self::AffineExt]) -> Self; + + /// Produce a vector of group elements using a static label + fn from_label(label: &'static [u8], n: usize) -> Vec; + + /// Returns the affine coordinates (x, y, infinity) for the point + fn to_coordinates(&self) -> (::Base, ::Base, bool); +} + +/// This implementation behaves in ways specific to the halo2curves suite of +/// curves in: +// - to_coordinates, +// - vartime_multiscalar_mul, where it does not call into accelerated implementations. +// A specific reimplementation exists for the pasta curves in their own module. +#[macro_export] +macro_rules! 
impl_traits { + ($name:ident, $order_str:literal, $base_str:literal) => { + $crate::impl_traits!($name, $order_str, $base_str, cpu_best_msm); + }; + ($name:ident, $order_str:literal, $base_str:literal, $large_msm_method:ident) => { + // These compile-time assertions check important assumptions in the memory + // representation of group data that supports the use of Abomonation. + static_assertions::assert_eq_size!($name::Affine, [u64; 8]); + static_assertions::assert_eq_size!($name::Point, [u64; 12]); + + impl Group for $name::Point { + type Base = $name::Base; + type Scalar = $name::Scalar; + + fn group_params() -> (Self::Base, Self::Base, BigInt, BigInt) { + let A = $name::Point::a(); + let B = $name::Point::b(); + let order = BigInt::from_str_radix($order_str, 16).unwrap(); + let base = BigInt::from_str_radix($base_str, 16).unwrap(); + + (A, B, order, base) + } + } + + impl DlogGroup for $name::Point { + type AffineExt = $name::Affine; + // note: for halo2curves implementations, $name::Compressed == <$name::Point as + // GroupEncoding>::Repr so the blanket impl From for T and impl + // Into apply. 
+ type Compressed = $name::Compressed; + type ScalarExt = $name::Scalar; + + fn vartime_multiscalar_mul(scalars: &[Self::ScalarExt], bases: &[Self::AffineExt]) -> Self { + #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))] + if scalars.len() >= 128 { + $large_msm_method(bases, scalars) + } else { + cpu_best_msm(bases, scalars) + } + #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))] + cpu_best_msm(bases, scalars) + } + + fn from_label(label: &'static [u8], n: usize) -> Vec { + let mut shake = Shake256::default(); + shake.update(label); + let mut reader = shake.finalize_xof(); + let mut uniform_bytes_vec = Vec::new(); + for _ in 0..n { + let mut uniform_bytes = [0u8; 32]; + reader.read_exact(&mut uniform_bytes).unwrap(); + uniform_bytes_vec.push(uniform_bytes); + } + let gens_proj: Vec<$name::Point> = (0..n) + .into_par_iter() + .map(|i| { + let hash = $name::Point::hash_to_curve("from_uniform_bytes"); + hash(&uniform_bytes_vec[i]) + }) + .collect(); + + let num_threads = rayon::current_num_threads(); + if gens_proj.len() > num_threads { + let chunk = (gens_proj.len() as f64 / num_threads as f64).ceil() as usize; + (0..num_threads) + .into_par_iter() + .flat_map(|i| { + let start = i * chunk; + let end = if i == num_threads - 1 { + gens_proj.len() + } else { + core::cmp::min((i + 1) * chunk, gens_proj.len()) + }; + if end > start { + let mut gens = vec![$name::Affine::identity(); end - start]; + ::batch_normalize(&gens_proj[start..end], &mut gens); + gens + } else { + vec![] + } + }) + .collect() + } else { + let mut gens = vec![$name::Affine::identity(); n]; + ::batch_normalize(&gens_proj, &mut gens); + gens + } + } + + fn to_coordinates(&self) -> (Self::Base, Self::Base, bool) { + let coordinates = self.to_affine().coordinates(); + if coordinates.is_some().unwrap_u8() == 1 && ($name::Point::identity() != *self) { + (*coordinates.unwrap().x(), *coordinates.unwrap().y(), false) + } else { + (Self::Base::zero(), Self::Base::zero(), true) + } 
+ } + } + + impl PrimeFieldExt for $name::Scalar { + fn from_uniform(bytes: &[u8]) -> Self { + let bytes_arr: [u8; 64] = bytes.try_into().unwrap(); + $name::Scalar::from_uniform_bytes(&bytes_arr) + } + } + + impl TranscriptReprTrait for $name::Compressed { + fn to_transcript_bytes(&self) -> Vec { self.as_ref().to_vec() } + } + + impl TranscriptReprTrait for $name::Scalar { + fn to_transcript_bytes(&self) -> Vec { self.to_repr().to_vec() } + } + + impl TranscriptReprTrait for $name::Affine { + fn to_transcript_bytes(&self) -> Vec { + let (x, y, is_infinity_byte) = { + let coordinates = self.coordinates(); + if coordinates.is_some().unwrap_u8() == 1 && ($name::Affine::identity() != *self) { + let c = coordinates.unwrap(); + (*c.x(), *c.y(), u8::from(false)) + } else { + ($name::Base::zero(), $name::Base::zero(), u8::from(false)) + } + }; + + x.to_repr() + .into_iter() + .chain(y.to_repr().into_iter()) + .chain(std::iter::once(is_infinity_byte)) + .collect() + } + } + }; +} diff --git a/prover/src/provider/util/fb_msm.rs b/prover/src/provider/util/fb_msm.rs new file mode 100644 index 0000000..9513c94 --- /dev/null +++ b/prover/src/provider/util/fb_msm.rs @@ -0,0 +1,126 @@ +/// # Fixed-base Scalar Multiplication +/// +/// This module provides an implementation of fixed-base scalar multiplication +/// on elliptic curves. +/// +/// The multiplication is optimized through a windowed method, where scalars are +/// broken into fixed-size windows, pre-computation tables are generated, and +/// results are efficiently combined. +use ff::{PrimeField, PrimeFieldBits}; +use group::{prime::PrimeCurve, Curve}; +use rayon::prelude::*; + +/// Determines the window size for scalar multiplication based on the number of +/// scalars. +/// +/// This is used to balance between pre-computation and number of point +/// additions. 
+pub(crate) fn get_mul_window_size(num_scalars: usize) -> usize { + if num_scalars < 32 { + 3 + } else { + (num_scalars as f64).ln().ceil() as usize + } +} + +/// Generates a table of multiples of a base point `g` for use in windowed +/// scalar multiplication. +/// +/// This pre-computes multiples of a base point for each window and organizes +/// them into a table for quick lookup during the scalar multiplication process. +/// The table is a vector of vectors, each inner vector corresponding to a +/// window and containing the multiples of `g` for that window. +pub(crate) fn get_window_table( + scalar_size: usize, + window: usize, + g: T, +) -> Vec> +where + T: Curve, + T::AffineRepr: Send, +{ + let in_window = 1 << window; + // Number of outer iterations needed to cover the entire scalar + let outerc = (scalar_size + window - 1) / window; + + // Number of multiples of the window's "outer point" needed for each window + // (fewer for the last window) + let last_in_window = 1 << (scalar_size - (outerc - 1) * window); + + let mut multiples_of_g = vec![vec![T::identity(); in_window]; outerc]; + + // Compute the multiples of g for each window + // g_outers = [ 2^{k*window}*g for k in 0..outerc] + let mut g_outer = g; + let mut g_outers = Vec::with_capacity(outerc); + for _ in 0..outerc { + g_outers.push(g_outer); + for _ in 0..window { + g_outer = g_outer.double(); + } + } + multiples_of_g.par_iter_mut().enumerate().zip_eq(g_outers).for_each( + |((outer, multiples_of_g), g_outer)| { + let cur_in_window = if outer == outerc - 1 { last_in_window } else { in_window }; + + // multiples_of_g = [id, g_outer, 2*g_outer, 3*g_outer, ...], + // where g_outer = 2^{outer*window}*g + let mut g_inner = T::identity(); + for inner in multiples_of_g.iter_mut().take(cur_in_window) { + *inner = g_inner; + g_inner.add_assign(&g_outer); + } + }, + ); + multiples_of_g.par_iter().map(|s| s.iter().map(|s| s.to_affine()).collect()).collect() +} + +/// Performs the actual windowed scalar 
multiplication using a pre-computed +/// table of points. +/// +/// Given a scalar and a table of pre-computed multiples of a base point, this +/// function efficiently computes the scalar multiplication by breaking the +/// scalar into windows and adding the corresponding multiples from the table. +fn windowed_mul( + outerc: usize, + window: usize, + multiples_of_g: &[Vec], + scalar: &T::Scalar, +) -> T +where + T: PrimeCurve, + T::Scalar: PrimeFieldBits, +{ + let modulus_size = ::NUM_BITS as usize; + let scalar_val: Vec = scalar.to_le_bits().into_iter().collect(); + + let mut res = T::identity(); + for outer in 0..outerc { + let mut inner = 0usize; + for i in 0..window { + if outer * window + i < modulus_size && scalar_val[outer * window + i] { + inner |= 1 << i; + } + } + res.add_assign(&multiples_of_g[outer][inner]); + } + res +} + +/// Computes multiple scalar multiplications simultaneously using the windowed +/// method. +pub(crate) fn multi_scalar_mul( + scalar_size: usize, + window: usize, + table: &[Vec], + v: &[T::Scalar], +) -> Vec +where + T: PrimeCurve, + T::Scalar: PrimeFieldBits, +{ + let outerc = (scalar_size + window - 1) / window; + assert!(outerc <= table.len()); + + v.par_iter().map(|e| windowed_mul::(outerc, window, table, e)).collect::>() +} diff --git a/prover/src/provider/util/mod.rs b/prover/src/provider/util/mod.rs new file mode 100644 index 0000000..b1d8c99 --- /dev/null +++ b/prover/src/provider/util/mod.rs @@ -0,0 +1,213 @@ +//! Utilities for provider module. 
+pub(in crate::provider) mod fb_msm; +pub mod msm { + use halo2curves::{msm::best_multiexp, CurveAffine}; + + // this argument swap is useful until Rust gets named arguments + // and saves significant complexity in macro code + pub fn cpu_best_msm(bases: &[C], scalars: &[C::Scalar]) -> C::Curve { + best_multiexp(scalars, bases) + } +} + +pub mod field { + use ff::{BatchInverter, Field}; + + use crate::errors::NovaError; + + #[inline] + pub fn batch_invert(mut v: Vec) -> Result, NovaError> { + // we only allocate the scratch space if every element of v is nonzero + let mut scratch_space = v + .iter() + .map(|x| if !x.is_zero_vartime() { Ok(*x) } else { Err(NovaError::InternalError) }) + .collect::, _>>()?; + let _ = BatchInverter::invert_with_external_scratch(&mut v, &mut scratch_space[..]); + Ok(v) + } +} + +pub mod iterators { + use std::{ + borrow::Borrow, + iter::DoubleEndedIterator, + ops::{AddAssign, MulAssign}, + }; + + use ff::Field; + use rayon::iter::{IndexedParallelIterator, IntoParallelIterator, ParallelIterator}; + use rayon_scan::ScanParallelIterator; + + pub trait DoubleEndedIteratorExt: DoubleEndedIterator { + /// This function employs Horner's scheme and core traits to create a + /// combination of an iterator input with the powers + /// of a provided coefficient. + fn rlc(&mut self, coefficient: &F) -> T + where + T: Clone + for<'a> MulAssign<&'a F> + for<'r> AddAssign<&'r T>, + Self::Item: Borrow, { + let mut iter = self.rev(); + let Some(fst) = iter.next() else { panic!("input iterator should not be empty") }; + + iter.fold(fst.borrow().clone(), |mut acc, item| { + acc *= coefficient; + acc += item.borrow(); + acc + }) + } + } + + impl DoubleEndedIteratorExt for I {} + + pub trait IndexedParallelIteratorExt: IndexedParallelIterator { + /// This function core traits to create a combination of an iterator + /// input with the powers of a provided coefficient. 
+ fn rlc(self, coefficient: &F) -> T + where + F: Field, + Self::Item: Borrow, + T: Clone + for<'a> MulAssign<&'a F> + for<'r> AddAssign<&'r T> + Send + Sync, { + debug_assert!(self.len() > 0); + // generate an iterator of powers of the right length + let v = { + let mut v = vec![*coefficient; self.len()]; + v[0] = F::ONE; + v + }; + // the collect is due to Scan being unindexed + let powers: Vec<_> = v.into_par_iter().scan(|a, b| *a * *b, F::ONE).collect(); + + self + .zip_eq(powers.into_par_iter()) + .map(|(pt, val)| { + let mut pt = pt.borrow().clone(); + pt *= &val; + pt + }) + .reduce_with(|mut a, b| { + a += &b; + a + }) + .unwrap() + } + } + + impl IndexedParallelIteratorExt for I {} +} + +#[cfg(test)] +pub mod test_utils { + //! Contains utilities for testing and benchmarking. + use std::sync::Arc; + + use ff::Field; + use rand::rngs::StdRng; + use rand_core::{CryptoRng, RngCore}; + + use crate::{ + spartan::polys::multilinear::MultilinearPolynomial, + traits::{commitment::CommitmentEngineTrait, evaluation::EvaluationEngineTrait, Engine}, + }; + + /// Returns a random polynomial, a point and calculate its evaluation. + pub(crate) fn random_poly_with_eval( + num_vars: usize, + mut rng: &mut R, + ) -> ( + MultilinearPolynomial<::Scalar>, + Vec<::Scalar>, + ::Scalar, + ) { + // Generate random polynomial and point. + let poly = MultilinearPolynomial::random(num_vars, &mut rng); + let point = (0..num_vars).map(|_| ::Scalar::random(&mut rng)).collect::>(); + + // Calculation evaluation of point over polynomial. + let eval = poly.evaluate(&point); + + (poly, point, eval) + } + + /// Methods used to test the prove and verify flow of + /// [`MultilinearPolynomial`] Commitment Schemes (PCS). + /// + /// Generates a random polynomial and point from a seed to test a + /// proving/verifying flow of one of our [`EvaluationEngine`]. 
+ pub(crate) fn prove_verify_from_num_vars>( + num_vars: usize, + ) { + use rand_core::SeedableRng; + + let mut rng = StdRng::seed_from_u64(num_vars as u64); + + let (poly, point, eval) = random_poly_with_eval::(num_vars, &mut rng); + + // Mock commitment key. + let ck = E::CE::setup(b"test", 1 << num_vars); + let ck = Arc::new(ck); + // Commits to the provided vector using the provided generators. + let commitment = E::CE::commit(&ck, poly.evaluations()); + + prove_verify_with::(ck, &commitment, &poly, &point, &eval, true) + } + + fn prove_verify_with>( + ck: Arc<<::CE as CommitmentEngineTrait>::CommitmentKey>, + commitment: &<::CE as CommitmentEngineTrait>::Commitment, + poly: &MultilinearPolynomial<::Scalar>, + point: &[::Scalar], + eval: &::Scalar, + evaluate_bad_proof: bool, + ) { + use std::ops::Add; + + use crate::traits::TranscriptEngineTrait; + + // Generate Prover and verifier key for given commitment key. + let ock = ck.clone(); + let (prover_key, verifier_key) = EE::setup(ck); + + // Generate proof. + let mut prover_transcript = E::TE::new(b"TestEval"); + let proof = EE::prove( + &*ock, + &prover_key, + &mut prover_transcript, + commitment, + poly.evaluations(), + point, + eval, + ) + .unwrap(); + let pcp = prover_transcript.squeeze(b"c").unwrap(); + + // Verify proof. + let mut verifier_transcript = E::TE::new(b"TestEval"); + EE::verify(&verifier_key, &mut verifier_transcript, commitment, point, eval, &proof).unwrap(); + let pcv = verifier_transcript.squeeze(b"c").unwrap(); + + // Check if the prover transcript and verifier transcript are kept in the same + // state. + assert_eq!(pcp, pcv); + + if evaluate_bad_proof { + // Generate another point to verify proof. Also produce eval. + let altered_verifier_point = + point.iter().map(|s| s.add(::Scalar::ONE)).collect::>(); + let altered_verifier_eval = + MultilinearPolynomial::evaluate_with(poly.evaluations(), &altered_verifier_point); + + // Verify proof, should fail. 
+ let mut verifier_transcript = E::TE::new(b"TestEval"); + assert!(EE::verify( + &verifier_key, + &mut verifier_transcript, + commitment, + &altered_verifier_point, + &altered_verifier_eval, + &proof, + ) + .is_err()); + } + } +} diff --git a/prover/src/r1cs/mod.rs b/prover/src/r1cs/mod.rs new file mode 100644 index 0000000..ef5e3f4 --- /dev/null +++ b/prover/src/r1cs/mod.rs @@ -0,0 +1,833 @@ +//! This module defines R1CS related types and a folding scheme for Relaxed R1CS +mod sparse; +pub(crate) mod util; + +use core::cmp::max; + +use ff::Field; +use once_cell::sync::OnceCell; +use rand_core::{CryptoRng, RngCore}; +use rayon::prelude::*; +use serde::{Deserialize, Serialize}; +pub(crate) use sparse::SparseMatrix; + +use crate::{ + constants::{BN_LIMB_WIDTH, BN_N_LIMBS}, + digest::{DigestComputer, SimpleDigestible}, + errors::NovaError, + gadgets::{f_to_nat, nat_to_limbs, scalar_as_base}, + traits::{ + commitment::CommitmentEngineTrait, AbsorbInROTrait, Engine, ROTrait, TranscriptReprTrait, + }, + zip_with, Commitment, CommitmentKey, CE, +}; + +/// A type that holds the shape of the R1CS matrices +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct R1CSShape { + pub(crate) num_cons: usize, + pub(crate) num_vars: usize, + pub(crate) num_io: usize, + pub(crate) A: SparseMatrix, + pub(crate) B: SparseMatrix, + pub(crate) C: SparseMatrix, + #[serde(skip, default = "OnceCell::new")] + pub(crate) digest: OnceCell, +} + +impl SimpleDigestible for R1CSShape {} + +/// A type that holds the result of a R1CS multiplication +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct R1CSResult { + pub(crate) AZ: Vec, + pub(crate) BZ: Vec, + pub(crate) CZ: Vec, +} + +/// A type that holds a witness for a given R1CS instance +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct R1CSWitness { + W: Vec, +} + +/// A type that holds an R1CS instance +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] 
+#[serde(bound = "")] +pub struct R1CSInstance { + pub(crate) comm_W: Commitment, + pub(crate) X: Vec, +} + +/// A type that holds a witness for a given Relaxed R1CS instance +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct RelaxedR1CSWitness { + pub(crate) W: Vec, + pub(crate) E: Vec, +} + +/// A type that holds a Relaxed R1CS instance +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct RelaxedR1CSInstance { + pub(crate) comm_W: Commitment, + pub(crate) comm_E: Commitment, + pub(crate) X: Vec, + pub(crate) u: E::Scalar, +} + +/// A type for functions that hints commitment key sizing by returning the floor +/// of the number of required generators. +pub type CommitmentKeyHint = dyn Fn(&R1CSShape) -> usize; + +/// Generates public parameters for a Rank-1 Constraint System (R1CS). +/// +/// This function takes into consideration the shape of the R1CS matrices and a +/// hint function for the number of generators. It returns a `CommitmentKey`. +/// +/// # Arguments +/// +/// * `S`: The shape of the R1CS matrices. +/// * `ck_floor`: A function that provides a floor for the number of generators. A good function to +/// provide is the `commitment_key_floor` field in the trait `RelaxedR1CSSNARKTrait`. +pub fn commitment_key( + S: &R1CSShape, + ck_floor: &CommitmentKeyHint, +) -> CommitmentKey { + let size = commitment_key_size(S, ck_floor); + E::CE::setup(b"ck", size) +} + +/// Computes the number of generators required for the commitment key +/// corresponding to shape `S`. 
+pub fn commitment_key_size(S: &R1CSShape, ck_floor: &CommitmentKeyHint) -> usize { + let num_cons = S.num_cons; + let num_vars = S.num_vars; + let ck_hint = ck_floor(S); + max(max(num_cons, num_vars), ck_hint) +} + +impl R1CSShape { + /// Create an object of type `R1CSShape` from the explicitly specified R1CS + /// matrices + pub fn new( + num_cons: usize, + num_vars: usize, + num_io: usize, + A: SparseMatrix, + B: SparseMatrix, + C: SparseMatrix, + ) -> Result { + let is_valid = |num_cons: usize, + num_vars: usize, + num_io: usize, + M: &SparseMatrix| + -> Result, NovaError> { + M.iter() + .map(|(row, col, _val)| { + if row >= num_cons || col > num_io + num_vars { + Err(NovaError::InvalidIndex) + } else { + Ok(()) + } + }) + .collect::, NovaError>>() + }; + + is_valid(num_cons, num_vars, num_io, &A)?; + is_valid(num_cons, num_vars, num_io, &B)?; + is_valid(num_cons, num_vars, num_io, &C)?; + + // We require the number of public inputs/outputs to be even + if num_io % 2 != 0 { + return Err(NovaError::InvalidStepCircuitIO); + } + + Ok(Self { num_cons, num_vars, num_io, A, B, C, digest: OnceCell::new() }) + } + + /// Generate a random [`R1CSShape`] with the specified number of + /// constraints, variables, and public inputs/outputs. 
+ pub fn random( + num_cons: usize, + num_vars: usize, + num_io: usize, + num_entries: usize, + mut rng: &mut R, + ) -> Self { + assert!(num_cons.is_power_of_two()); + assert!(num_vars.is_power_of_two()); + assert!(num_entries.is_power_of_two()); + assert!(num_io < num_vars); + + let num_rows = num_cons; + let num_cols = num_vars + 1 + num_io; + + let (NA, NB, NC) = { + let N_div_3 = num_entries / 3; + let NC = num_entries - (2 * N_div_3); + (N_div_3, N_div_3, NC) + }; + + let A = SparseMatrix::random(num_rows, num_cols, NA, &mut rng); + let B = SparseMatrix::random(num_rows, num_cols, NB, &mut rng); + let C = SparseMatrix::random(num_rows, num_cols, NC, &mut rng); + + Self { num_cons, num_vars, num_io, A, B, C, digest: Default::default() } + } + + /// Generate a satisfying [`RelaxedR1CSWitness`] and [`RelaxedR1CSInstance`] + /// for this [`R1CSShape`]. + pub fn random_witness_instance( + &self, + commitment_key: &CommitmentKey, + mut rng: &mut R, + ) -> (RelaxedR1CSWitness, RelaxedR1CSInstance) { + // Sample a random witness and compute the error term + let W = (0..self.num_vars).map(|_| E::Scalar::random(&mut rng)).collect::>(); + let u = E::Scalar::random(&mut rng); + let X = (0..self.num_io).map(|_| E::Scalar::random(&mut rng)).collect::>(); + + let E = self.compute_E(&W, &u, &X).unwrap(); + + let (comm_W, comm_E) = + rayon::join(|| CE::::commit(commitment_key, &W), || CE::::commit(commitment_key, &E)); + + let witness = RelaxedR1CSWitness { W, E }; + let instance = RelaxedR1CSInstance { comm_W, comm_E, u, X }; + + (witness, instance) + } + + /// returned the digest of the `R1CSShape` + pub fn digest(&self) -> E::Scalar { + self + .digest + .get_or_try_init(|| DigestComputer::new(self).digest()) + .cloned() + .expect("Failure retrieving digest") + } + + // Checks regularity conditions on the R1CSShape, required in Spartan-class + // SNARKs Returns false if num_cons or num_vars are not powers of two, or if + // num_io > num_vars + #[inline] + pub(crate) fn 
is_regular_shape(&self) -> bool { + let cons_valid = self.num_cons.next_power_of_two() == self.num_cons; + let vars_valid = self.num_vars.next_power_of_two() == self.num_vars; + let io_lt_vars = self.num_io < self.num_vars; + cons_valid && vars_valid && io_lt_vars + } + + pub(crate) fn multiply_vec( + &self, + z: &[E::Scalar], + ) -> Result<(Vec, Vec, Vec), NovaError> { + if z.len() != self.num_io + self.num_vars + 1 { + return Err(NovaError::InvalidWitnessLength); + } + + let (Az, (Bz, Cz)) = rayon::join( + || self.A.multiply_vec(z), + || rayon::join(|| self.B.multiply_vec(z), || self.C.multiply_vec(z)), + ); + + Ok((Az, Bz, Cz)) + } + + pub(crate) fn multiply_witness( + &self, + W: &[E::Scalar], + u: &E::Scalar, + X: &[E::Scalar], + ) -> Result<(Vec, Vec, Vec), NovaError> { + if X.len() != self.num_io || W.len() != self.num_vars { + return Err(NovaError::InvalidWitnessLength); + } + + let (Az, (Bz, Cz)) = rayon::join( + || self.A.multiply_witness(W, u, X), + || rayon::join(|| self.B.multiply_witness(W, u, X), || self.C.multiply_witness(W, u, X)), + ); + + Ok((Az, Bz, Cz)) + } + + pub(crate) fn multiply_witness_into( + &self, + W: &[E::Scalar], + u: &E::Scalar, + X: &[E::Scalar], + ABC_Z: &mut R1CSResult, + ) -> Result<(), NovaError> { + if X.len() != self.num_io || W.len() != self.num_vars { + return Err(NovaError::InvalidWitnessLength); + } + + let R1CSResult { AZ, BZ, CZ } = ABC_Z; + + rayon::join( + || self.A.multiply_witness_into(W, u, X, AZ), + || { + rayon::join( + || self.B.multiply_witness_into(W, u, X, BZ), + || self.C.multiply_witness_into(W, u, X, CZ), + ) + }, + ); + + Ok(()) + } + + /// Computes the error term E = Az * Bz - u*Cz. 
+ fn compute_E( + &self, + W: &[E::Scalar], + u: &E::Scalar, + X: &[E::Scalar], + ) -> Result, NovaError> { + if X.len() != self.num_io || W.len() != self.num_vars { + return Err(NovaError::InvalidWitnessLength); + } + + let (Az, (Bz, Cz)) = rayon::join( + || self.A.multiply_witness(W, u, X), + || rayon::join(|| self.B.multiply_witness(W, u, X), || self.C.multiply_witness(W, u, X)), + ); + + let E = zip_with!((Az.into_par_iter(), Bz.into_par_iter(), Cz.into_par_iter()), |a, b, c| a + * b + - c * u) + .collect::>(); + + Ok(E) + } + + /// Checks if the Relaxed R1CS instance is satisfiable given a witness and + /// its shape + pub fn is_sat_relaxed( + &self, + ck: &CommitmentKey, + U: &RelaxedR1CSInstance, + W: &RelaxedR1CSWitness, + ) -> Result<(), NovaError> { + assert_eq!(W.W.len(), self.num_vars); + assert_eq!(W.E.len(), self.num_cons); + assert_eq!(U.X.len(), self.num_io); + + // verify if Az * Bz - u*Cz = E + let E = self.compute_E(&W.W, &U.u, &U.X)?; + W.E.par_iter().zip_eq(E.into_par_iter()).enumerate().try_for_each(|(i, (we, e))| { + if *we != e { + // constraint failed, retrieve constraint name + Err(NovaError::UnSatIndex(i)) + } else { + Ok(()) + } + })?; + + // verify if comm_E and comm_W are commitments to E and W + let res_comm = { + let (comm_W, comm_E) = + rayon::join(|| CE::::commit(ck, &W.W), || CE::::commit(ck, &W.E)); + U.comm_W == comm_W && U.comm_E == comm_E + }; + + if !res_comm { + return Err(NovaError::UnSat); + } + Ok(()) + } + + /// Checks if the R1CS instance is satisfiable given a witness and its shape + pub fn is_sat( + &self, + ck: &CommitmentKey, + U: &R1CSInstance, + W: &R1CSWitness, + ) -> Result<(), NovaError> { + assert_eq!(W.W.len(), self.num_vars); + assert_eq!(U.X.len(), self.num_io); + + // verify if Az * Bz - u*Cz = 0 + let E = self.compute_E(&W.W, &E::Scalar::ONE, &U.X)?; + E.into_par_iter().enumerate().try_for_each(|(i, e)| { + if e != E::Scalar::ZERO { + Err(NovaError::UnSatIndex(i)) + } else { + Ok(()) + } + })?; + + // 
verify if comm_W is a commitment to W + if U.comm_W != CE::::commit(ck, &W.W) { + return Err(NovaError::UnSat); + } + Ok(()) + } + + /// A method to compute a commitment to the cross-term `T` given a + /// Relaxed R1CS instance-witness pair and an R1CS instance-witness pair + pub fn commit_T( + &self, + ck: &CommitmentKey, + U1: &RelaxedR1CSInstance, + W1: &RelaxedR1CSWitness, + U2: &R1CSInstance, + W2: &R1CSWitness, + ) -> Result<(Vec, Commitment), NovaError> { + let (AZ_1, BZ_1, CZ_1) = tracing::trace_span!("AZ_1, BZ_1, CZ_1") + .in_scope(|| self.multiply_witness(&W1.W, &U1.u, &U1.X))?; + + let (AZ_2, BZ_2, CZ_2) = tracing::trace_span!("AZ_2, BZ_2, CZ_2") + .in_scope(|| self.multiply_witness(&W2.W, &E::Scalar::ONE, &U2.X))?; + + let (AZ_1_circ_BZ_2, AZ_2_circ_BZ_1, u_1_cdot_CZ_2, u_2_cdot_CZ_1) = + tracing::trace_span!("cross terms").in_scope(|| { + let AZ_1_circ_BZ_2 = + (0..AZ_1.len()).into_par_iter().map(|i| AZ_1[i] * BZ_2[i]).collect::>(); + let AZ_2_circ_BZ_1 = + (0..AZ_2.len()).into_par_iter().map(|i| AZ_2[i] * BZ_1[i]).collect::>(); + let u_1_cdot_CZ_2 = + (0..CZ_2.len()).into_par_iter().map(|i| U1.u * CZ_2[i]).collect::>(); + let u_2_cdot_CZ_1 = + (0..CZ_1.len()).into_par_iter().map(|i| CZ_1[i]).collect::>(); + (AZ_1_circ_BZ_2, AZ_2_circ_BZ_1, u_1_cdot_CZ_2, u_2_cdot_CZ_1) + }); + + let T = tracing::trace_span!("T").in_scope(|| { + AZ_1_circ_BZ_2 + .par_iter() + .zip_eq(&AZ_2_circ_BZ_1) + .zip_eq(&u_1_cdot_CZ_2) + .zip_eq(&u_2_cdot_CZ_1) + .map(|(((a, b), c), d)| *a + *b - *c - *d) + .collect::>() + }); + + let comm_T = CE::::commit(ck, &T); + + Ok((T, comm_T)) + } + + /// A method to compute a commitment to the cross-term `T` given a + /// Relaxed R1CS instance-witness pair and an R1CS instance-witness pair + /// + /// This is [`R1CSShape::commit_T`] but into a buffer. 
+ pub fn commit_T_into( + &self, + ck: &CommitmentKey, + U1: &RelaxedR1CSInstance, + W1: &RelaxedR1CSWitness, + U2: &R1CSInstance, + W2: &R1CSWitness, + T: &mut Vec, + ABC_Z_1: &mut R1CSResult, + ABC_Z_2: &mut R1CSResult, + ) -> Result, NovaError> { + tracing::info_span!("AZ_1, BZ_1, CZ_1") + .in_scope(|| self.multiply_witness_into(&W1.W, &U1.u, &U1.X, ABC_Z_1))?; + + let R1CSResult { AZ: AZ_1, BZ: BZ_1, CZ: CZ_1 } = ABC_Z_1; + + tracing::info_span!("AZ_2, BZ_2, CZ_2") + .in_scope(|| self.multiply_witness_into(&W2.W, &E::Scalar::ONE, &U2.X, ABC_Z_2))?; + + let R1CSResult { AZ: AZ_2, BZ: BZ_2, CZ: CZ_2 } = ABC_Z_2; + + // this doesn't allocate memory but has bad temporal cache locality -- should + // test to see which is faster + T.clear(); + tracing::info_span!("T").in_scope(|| { + (0..AZ_1.len()) + .into_par_iter() + .map(|i| { + let AZ_1_circ_BZ_2 = AZ_1[i] * BZ_2[i]; + let AZ_2_circ_BZ_1 = AZ_2[i] * BZ_1[i]; + let u_1_cdot_Cz_2_plus_Cz_1 = U1.u * CZ_2[i] + CZ_1[i]; + AZ_1_circ_BZ_2 + AZ_2_circ_BZ_1 - u_1_cdot_Cz_2_plus_Cz_1 + }) + .collect_into_vec(T) + }); + + Ok(CE::::commit(ck, T)) + } + + /// Pads the `R1CSShape` so that the shape passes `is_regular_shape` + /// Renumbers variables to accommodate padded variables + pub fn pad(&self) -> Self { + // check if the provided R1CSShape is already as required + if self.is_regular_shape() { + return self.clone(); + } + + // equalize the number of variables, constraints, and public IO + let m = max(max(self.num_vars, self.num_cons), self.num_io).next_power_of_two(); + + // check if the number of variables are as expected, then + // we simply set the number of constraints to the next power of two + if self.num_vars == m { + return Self { + num_cons: m, + num_vars: m, + num_io: self.num_io, + A: self.A.clone(), + B: self.B.clone(), + C: self.C.clone(), + digest: OnceCell::new(), + }; + } + + // otherwise, we need to pad the number of variables and renumber variable + // accesses + let num_vars_padded = m; + let 
num_cons_padded = m; + + let apply_pad = |mut M: SparseMatrix| -> SparseMatrix { + M.indices.par_iter_mut().for_each(|c| { + if *c >= self.num_vars { + *c += num_vars_padded - self.num_vars + } + }); + + M.cols += num_vars_padded - self.num_vars; + + let ex = { + let nnz = M.indptr.last().unwrap(); + vec![*nnz; num_cons_padded - self.num_cons] + }; + M.indptr.extend(ex); + M + }; + + let A_padded = apply_pad(self.A.clone()); + let B_padded = apply_pad(self.B.clone()); + let C_padded = apply_pad(self.C.clone()); + + Self { + num_cons: num_cons_padded, + num_vars: num_vars_padded, + num_io: self.num_io, + A: A_padded, + B: B_padded, + C: C_padded, + digest: OnceCell::new(), + } + } +} + +impl R1CSResult { + /// Produces a default `R1CSResult` given an `R1CSShape` + pub fn default(num_cons: usize) -> Self { + Self { + AZ: vec![E::Scalar::ZERO; num_cons], + BZ: vec![E::Scalar::ZERO; num_cons], + CZ: vec![E::Scalar::ZERO; num_cons], + } + } +} + +impl R1CSWitness { + /// A method to create a witness object using a vector of scalars + pub fn new(S: &R1CSShape, W: Vec) -> Result { + if S.num_vars != W.len() { + Err(NovaError::InvalidWitnessLength) + } else { + Ok(Self { W }) + } + } + + /// Commits to the witness using the supplied generators + pub fn commit(&self, ck: &CommitmentKey) -> Commitment { CE::::commit(ck, &self.W) } +} + +impl R1CSInstance { + /// A method to create an instance object using constituent elements + pub fn new( + S: &R1CSShape, + comm_W: Commitment, + X: Vec, + ) -> Result { + if S.num_io != X.len() { + Err(NovaError::InvalidInputLength) + } else { + Ok(Self { comm_W, X }) + } + } +} + +impl AbsorbInROTrait for R1CSInstance { + fn absorb_in_ro(&self, ro: &mut E::RO) { + self.comm_W.absorb_in_ro(ro); + for x in &self.X { + ro.absorb(scalar_as_base::(*x)); + } + } +} + +impl RelaxedR1CSWitness { + /// Produces a default `RelaxedR1CSWitness` given an `R1CSShape` + pub fn default(S: &R1CSShape) -> Self { + Self { W: vec![E::Scalar::ZERO; S.num_vars], 
E: vec![E::Scalar::ZERO; S.num_cons] } + } + + /// Initializes a new `RelaxedR1CSWitness` from an `R1CSWitness` + pub fn from_r1cs_witness(S: &R1CSShape, witness: R1CSWitness) -> Self { + Self { W: witness.W, E: vec![E::Scalar::ZERO; S.num_cons] } + } + + /// Commits to the witness using the supplied generators + pub fn commit(&self, ck: &CommitmentKey) -> (Commitment, Commitment) { + (CE::::commit(ck, &self.W), CE::::commit(ck, &self.E)) + } + + /// Folds an incoming `R1CSWitness` into the current one + pub fn fold( + &self, + W2: &R1CSWitness, + T: &[E::Scalar], + r: &E::Scalar, + ) -> Result { + let (W1, E1) = (&self.W, &self.E); + let W2 = &W2.W; + + if W1.len() != W2.len() { + return Err(NovaError::InvalidWitnessLength); + } + + let W = zip_with!((W1.par_iter(), W2), |a, b| *a + *r * *b).collect::>(); + let E = zip_with!((E1.par_iter(), T), |a, b| *a + *r * *b).collect::>(); + Ok(Self { W, E }) + } + + /// Mutably folds an incoming `R1CSWitness` into the current one + pub fn fold_mut( + &mut self, + W2: &R1CSWitness, + T: &[E::Scalar], + r: &E::Scalar, + ) -> Result<(), NovaError> { + if self.W.len() != W2.W.len() { + return Err(NovaError::InvalidWitnessLength); + } + + self.W.par_iter_mut().zip_eq(&W2.W).for_each(|(a, b)| *a += *r * *b); + self.E.par_iter_mut().zip_eq(T).for_each(|(a, b)| *a += *r * *b); + + Ok(()) + } + + /// Pads the provided witness to the correct length + pub fn pad(&self, S: &R1CSShape) -> Self { + let mut W = self.W.clone(); + W.extend(vec![E::Scalar::ZERO; S.num_vars - W.len()]); + + let mut E = self.E.clone(); + E.extend(vec![E::Scalar::ZERO; S.num_cons - E.len()]); + + Self { W, E } + } +} + +impl RelaxedR1CSInstance { + /// Produces a default `RelaxedR1CSInstance` given `R1CSGens` and + /// `R1CSShape` + pub fn default(_ck: &CommitmentKey, S: &R1CSShape) -> Self { + let (comm_W, comm_E) = (Commitment::::default(), Commitment::::default()); + Self { comm_W, comm_E, u: E::Scalar::ZERO, X: vec![E::Scalar::ZERO; S.num_io] } + } + + /// 
Initializes a new `RelaxedR1CSInstance` from an `R1CSInstance` + pub fn from_r1cs_instance( + _ck: &CommitmentKey, + S: &R1CSShape, + instance: R1CSInstance, + ) -> Self { + assert_eq!(S.num_io, instance.X.len()); + + Self { + comm_W: instance.comm_W, + comm_E: Commitment::::default(), + u: E::Scalar::ONE, + X: instance.X, + } + } + + /// Initializes a new `RelaxedR1CSInstance` from an `R1CSInstance` + pub fn from_r1cs_instance_unchecked(comm_W: &Commitment, X: &[E::Scalar]) -> Self { + Self { + comm_W: *comm_W, + comm_E: Commitment::::default(), + u: E::Scalar::ONE, + X: X.to_vec(), + } + } + + /// Folds an incoming `RelaxedR1CSInstance` into the current one + pub fn fold(&self, U2: &R1CSInstance, comm_T: &Commitment, r: &E::Scalar) -> Self { + let (X1, u1, comm_W_1, comm_E_1) = + (&self.X, &self.u, &self.comm_W.clone(), &self.comm_E.clone()); + let (X2, comm_W_2) = (&U2.X, &U2.comm_W); + + // weighted sum of X, comm_W, comm_E, and u + let X = zip_with!((X1.par_iter(), X2), |a, b| *a + *r * *b).collect::>(); + let comm_W = *comm_W_1 + *comm_W_2 * *r; + let comm_E = *comm_E_1 + *comm_T * *r; + let u = *u1 + *r; + + Self { comm_W, comm_E, X, u } + } + + /// Mutably folds an incoming `RelaxedR1CSInstance` into the current one + pub fn fold_mut(&mut self, U2: &R1CSInstance, comm_T: &Commitment, r: &E::Scalar) { + let (X2, comm_W_2) = (&U2.X, &U2.comm_W); + + // weighted sum of X, comm_W, comm_E, and u + self.X.par_iter_mut().zip_eq(X2).for_each(|(a, b)| { + *a += *r * *b; + }); + self.comm_W = self.comm_W + *comm_W_2 * *r; + self.comm_E = self.comm_E + *comm_T * *r; + self.u += *r; + } +} + +impl TranscriptReprTrait for RelaxedR1CSInstance { + fn to_transcript_bytes(&self) -> Vec { + [ + self.comm_W.to_transcript_bytes(), + self.comm_E.to_transcript_bytes(), + self.u.to_transcript_bytes(), + self.X.as_slice().to_transcript_bytes(), + ] + .concat() + } +} + +impl AbsorbInROTrait for RelaxedR1CSInstance { + fn absorb_in_ro(&self, ro: &mut E::RO) { + 
self.comm_W.absorb_in_ro(ro); + self.comm_E.absorb_in_ro(ro); + ro.absorb(scalar_as_base::(self.u)); + + // absorb each element of self.X in bignum format + for x in &self.X { + let limbs: Vec = nat_to_limbs(&f_to_nat(x), BN_LIMB_WIDTH, BN_N_LIMBS).unwrap(); + for limb in limbs { + ro.absorb(scalar_as_base::(limb)); + } + } + } +} + +/// Empty buffer for `commit_T_into` +pub fn default_T(num_cons: usize) -> Vec { Vec::with_capacity(num_cons) } + +#[cfg(test)] +pub(crate) mod tests { + use ff::Field; + use rand_chacha::ChaCha20Rng; + use rand_core::SeedableRng; + + use super::*; + use crate::{ + provider::{Bn256EngineIPA, Bn256EngineKZG}, + r1cs::sparse::SparseMatrix, + traits::Engine, + }; + + pub(crate) fn tiny_r1cs(num_vars: usize) -> R1CSShape { + let one = ::ONE; + let (num_cons, num_vars, num_io, A, B, C) = { + let num_cons = 4; + let num_io = 2; + + // Consider a cubic equation: `x^3 + x + 5 = y`, where `x` and `y` are + // respectively the input and output. The R1CS for this problem + // consists of the following constraints: `I0 * I0 - Z0 = 0` + // `Z0 * I0 - Z1 = 0` + // `(Z1 + I0) * 1 - Z2 = 0` + // `(Z2 + 5) * 1 - I1 = 0` + + // Relaxed R1CS is a set of three sparse matrices (A B C), where there is a row + // for every constraint and a column for every entry in z = (vars, + // u, inputs) An R1CS instance is satisfiable iff: + // Az \circ Bz = u \cdot Cz + E, where z = (vars, 1, inputs) + let mut A: Vec<(usize, usize, E::Scalar)> = Vec::new(); + let mut B: Vec<(usize, usize, E::Scalar)> = Vec::new(); + let mut C: Vec<(usize, usize, E::Scalar)> = Vec::new(); + + // constraint 0 entries in (A,B,C) + // `I0 * I0 - Z0 = 0` + A.push((0, num_vars + 1, one)); + B.push((0, num_vars + 1, one)); + C.push((0, 0, one)); + + // constraint 1 entries in (A,B,C) + // `Z0 * I0 - Z1 = 0` + A.push((1, 0, one)); + B.push((1, num_vars + 1, one)); + C.push((1, 1, one)); + + // constraint 2 entries in (A,B,C) + // `(Z1 + I0) * 1 - Z2 = 0` + A.push((2, 1, one)); + A.push((2, 
num_vars + 1, one)); + B.push((2, num_vars, one)); + C.push((2, 2, one)); + + // constraint 3 entries in (A,B,C) + // `(Z2 + 5) * 1 - I1 = 0` + A.push((3, 2, one)); + A.push((3, num_vars, one + one + one + one + one)); + B.push((3, num_vars, one)); + C.push((3, num_vars + 2, one)); + + (num_cons, num_vars, num_io, A, B, C) + }; + + // create a shape object + let rows = num_cons; + let cols = num_vars + num_io + 1; + + R1CSShape::new( + num_cons, + num_vars, + num_io, + SparseMatrix::new(&A, rows, cols), + SparseMatrix::new(&B, rows, cols), + SparseMatrix::new(&C, rows, cols), + ) + .unwrap() + } + + fn test_pad_tiny_r1cs_with() { + let padded_r1cs = tiny_r1cs::(3).pad(); + assert!(padded_r1cs.is_regular_shape()); + + let expected_r1cs = tiny_r1cs::(4); + + assert_eq!(padded_r1cs, expected_r1cs); + } + + #[test] + fn test_pad_tiny_r1cs() { test_pad_tiny_r1cs_with::(); } + + fn test_random_r1cs_with() { + let mut rng = ChaCha20Rng::from_seed([0u8; 32]); + + let ck_size: usize = 16_384; + let ck = E::CE::setup(b"ipa", ck_size); + + let cases = [(16, 16, 2, 16), (16, 32, 12, 8), (256, 256, 2, 1024)]; + + for (num_cons, num_vars, num_io, num_entries) in cases { + let S = R1CSShape::::random(num_cons, num_vars, num_io, num_entries, &mut rng); + let (W, U) = S.random_witness_instance(&ck, &mut rng); + S.is_sat_relaxed(&ck, &U, &W).unwrap(); + } + } + + #[test] + fn test_random_r1cs() { test_random_r1cs_with::(); } +} diff --git a/prover/src/r1cs/sparse.rs b/prover/src/r1cs/sparse.rs new file mode 100644 index 0000000..c0c3e2d --- /dev/null +++ b/prover/src/r1cs/sparse.rs @@ -0,0 +1,333 @@ +//! # Sparse Matrices +//! +//! This module defines a custom implementation of CSR/CSC sparse matrices. +//! Specifically, we implement sparse matrix / dense vector multiplication +//! to compute the `A z`, `B z`, and `C z` in Nova. 
+ +use std::{cmp::Ordering, collections::BTreeSet}; + +use ff::PrimeField; +use itertools::Itertools as _; +use rand_core::{CryptoRng, RngCore}; +use rayon::prelude::*; +use ref_cast::RefCast; +use serde::{Deserialize, Serialize}; + +/// CSR format sparse matrix, We follow the names used by scipy. +/// Detailed explanation here: +#[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct SparseMatrix { + /// all non-zero values in the matrix + pub data: Vec, + /// column indices + pub indices: Vec, + /// row information + pub indptr: Vec, + /// number of columns + pub cols: usize, +} + +/// Wrapper type for encode rows of [`SparseMatrix`] +#[derive(Debug, Clone, RefCast)] +#[repr(transparent)] +pub struct RowData([usize; 2]); + +/// [`SparseMatrix`]s are often large, and this helps with cloning bottlenecks +impl Clone for SparseMatrix { + fn clone(&self) -> Self { + Self { + data: self.data.par_iter().cloned().collect(), + indices: self.indices.par_iter().cloned().collect(), + indptr: self.indptr.par_iter().cloned().collect(), + cols: self.cols, + } + } +} + +impl SparseMatrix { + /// 0x0 empty matrix + pub fn empty() -> Self { Self { data: vec![], indices: vec![], indptr: vec![0], cols: 0 } } + + /// Construct from the COO representation; Vec. + /// We assume that the rows are sorted during construction. 
+ pub fn new(matrix: &[(usize, usize, F)], rows: usize, cols: usize) -> Self { + let mut new_matrix = vec![vec![]; rows]; + for (row, col, val) in matrix { + new_matrix[*row].push((*col, *val)); + } + + for row in new_matrix.iter() { + assert!(row.windows(2).all(|w| w[0].0 < w[1].0)); + } + + let mut indptr = vec![0; rows + 1]; + for (i, col) in new_matrix.iter().enumerate() { + indptr[i + 1] = indptr[i] + col.len(); + } + + let mut indices = vec![]; + let mut data = vec![]; + for col in new_matrix { + let (idx, val): (Vec<_>, Vec<_>) = col.into_iter().unzip(); + indices.extend(idx); + data.extend(val); + } + + Self { data, indices, indptr, cols } + } + + /// Samples a new random matrix of size `rows` x `cols` with `num_entries` + /// non-zero entries. + pub fn random( + rows: usize, + cols: usize, + num_entries: usize, + mut rng: &mut R, + ) -> Self { + assert!(num_entries <= rows * cols); + + let mut indices = BTreeSet::<(usize, usize)>::new(); + while indices.len() < num_entries { + let row = rng.next_u32() as usize % rows; + let col = rng.next_u32() as usize % cols; + indices.insert((row, col)); + } + + let matrix = + indices.into_iter().map(|(row, col)| (row, col, F::random(&mut rng))).collect::>(); + + Self::new(&matrix, rows, cols) + } + + /// Returns an iterator into the rows + pub fn iter_rows(&self) -> impl Iterator { + self.indptr.windows(2).map(|ptrs| RowData::ref_cast(ptrs.try_into().unwrap())) + } + + /// Returns a parallel iterator into the rows + pub fn par_iter_rows(&self) -> impl IndexedParallelIterator { + self.indptr.par_windows(2).map(|ptrs| RowData::ref_cast(ptrs.try_into().unwrap())) + } + + /// Retrieves the data for row slice [i..j] from `row`. + /// [`RowData`] **must** be created from unmodified `self` previously to + /// guarentee safety. 
+ pub fn get_row(&self, row: &RowData) -> impl Iterator { + self.data[row.0[0]..row.0[1]].iter().zip_eq(&self.indices[row.0[0]..row.0[1]]) + } + + /// Retrieves the data for row slice [i..j] from `ptrs`. + /// We assume that `ptrs` is indexed from `indptrs` and do not check if the + /// returned slice is actually a valid row. + pub fn get_row_unchecked(&self, ptrs: &[usize; 2]) -> impl Iterator { + self.data[ptrs[0]..ptrs[1]].iter().zip_eq(&self.indices[ptrs[0]..ptrs[1]]) + } + + /// Multiply by a dense vector; uses rayon to parallelize. + pub fn multiply_vec(&self, vector: &[F]) -> Vec { + assert_eq!(self.cols, vector.len(), "invalid shape"); + + self.multiply_vec_unchecked(vector) + } + + /// Multiply by a dense vector; uses rayon to parallelize. + /// This does not check that the shape of the matrix/vector are compatible. + #[tracing::instrument(skip_all, level = "trace", name = "SparseMatrix::multiply_vec_unchecked")] + fn multiply_vec_unchecked(&self, vector: &[F]) -> Vec { + let mut sink: Vec = Vec::with_capacity(self.indptr.len() - 1); + self.multiply_vec_into_unchecked(vector, &mut sink); + sink + } + + fn multiply_vec_into_unchecked(&self, vector: &[F], sink: &mut Vec) { + self + .indptr + .par_windows(2) + .map(|ptrs| { + self + .get_row_unchecked(ptrs.try_into().unwrap()) + .map(|(val, col_idx)| *val * vector[*col_idx]) + .sum() + }) + .collect_into_vec(sink); + } + + /// Multiply by a witness representing a dense vector; uses rayon to + /// parallelize. + pub fn multiply_witness(&self, W: &[F], u: &F, X: &[F]) -> Vec { + assert_eq!(self.cols, W.len() + X.len() + 1, "invalid shape"); + + self.multiply_witness_unchecked(W, u, X) + } + + /// Multiply by a witness representing a dense vector; uses rayon to + /// parallelize. This does not check that the shape of the matrix/vector + /// are compatible. 
+ #[tracing::instrument(skip_all, level = "trace", name = "SparseMatrix::multiply_vec_unchecked")] + fn multiply_witness_unchecked(&self, W: &[F], u: &F, X: &[F]) -> Vec { + // preallocate the result vector + let mut sink = Vec::with_capacity(self.indptr.len() - 1); + self.multiply_witness_into_unchecked(W, u, X, &mut sink); + sink + } + + /// Multiply by a witness representing a dense vector; uses rayon to + /// parallelize. + pub fn multiply_witness_into(&self, W: &[F], u: &F, X: &[F], sink: &mut Vec) { + assert_eq!(self.cols, W.len() + X.len() + 1, "invalid shape"); + + self.multiply_witness_into_unchecked(W, u, X, sink); + } + + /// Multiply by a witness representing a dense vector; uses rayon to + /// parallelize. This does not check that the shape of the matrix/vector + /// are compatible. + fn multiply_witness_into_unchecked(&self, W: &[F], u: &F, X: &[F], sink: &mut Vec) { + let num_vars = W.len(); + self + .indptr + .par_windows(2) + .map(|ptrs| { + self.get_row_unchecked(ptrs.try_into().unwrap()).fold(F::ZERO, |acc, (val, col_idx)| { + let val = match col_idx.cmp(&num_vars) { + Ordering::Less => *val * W[*col_idx], + Ordering::Equal => *val * *u, + Ordering::Greater => *val * X[*col_idx - num_vars - 1], + }; + acc + val + }) + }) + .collect_into_vec(sink); + } + + /// number of non-zero entries + pub fn len(&self) -> usize { *self.indptr.last().unwrap() } + + /// empty matrix + pub fn is_empty(&self) -> bool { self.len() == 0 } + + /// returns a custom iterator + pub fn iter(&self) -> Iter<'_, F> { + let mut row = 0; + while self.indptr[row + 1] == 0 { + row += 1; + } + Iter { matrix: self, row, i: 0, nnz: *self.indptr.last().unwrap() } + } + + pub fn num_rows(&self) -> usize { self.indptr.len() - 1 } + + pub fn num_cols(&self) -> usize { self.cols } +} + +/// Iterator for sparse matrix +#[derive(Debug)] +pub struct Iter<'a, F: PrimeField> { + matrix: &'a SparseMatrix, + row: usize, + i: usize, + nnz: usize, +} + +impl<'a, F: PrimeField> Iterator for 
Iter<'a, F> { + type Item = (usize, usize, F); + + fn next(&mut self) -> Option { + // are we at the end? + if self.i == self.nnz { + return None; + } + + // compute current item + let curr_item = (self.row, self.matrix.indices[self.i], self.matrix.data[self.i]); + + // advance the iterator + self.i += 1; + // edge case at the end + if self.i == self.nnz { + return Some(curr_item); + } + // if `i` has moved to next row + while self.i >= self.matrix.indptr[self.row + 1] { + self.row += 1; + } + + Some(curr_item) + } +} + +// #[cfg(test)] +// mod tests { +// #[cfg(not(target_arch = "wasm32"))] +// use proptest::{ +// prelude::*, +// strategy::{BoxedStrategy, Just, Strategy}, +// }; + +// use super::SparseMatrix; +// #[cfg(not(target_arch = "wasm32"))] +// use crate::r1cs::util::FWrap; +// use crate::{ +// provider::PallasEngine, +// traits::{Engine, Group}, +// }; + +// type G = ::GE; +// type Fr = ::Scalar; + +// #[test] +// fn test_matrix_creation() { +// let matrix_data = vec![ +// (0, 1, Fr::from(2)), +// (1, 2, Fr::from(3)), +// (2, 0, Fr::from(4)), +// ]; +// let sparse_matrix = SparseMatrix::::new(&matrix_data, 3, 3); + +// assert_eq!( +// sparse_matrix.data, +// vec![Fr::from(2), Fr::from(3), Fr::from(4)] +// ); +// assert_eq!(sparse_matrix.indices, vec![1, 2, 0]); +// assert_eq!(sparse_matrix.indptr, vec![0, 1, 2, 3]); +// } + +// #[test] +// fn test_matrix_vector_multiplication() { +// let matrix_data = vec![ +// (0, 1, Fr::from(2)), +// (0, 2, Fr::from(7)), +// (1, 2, Fr::from(3)), +// (2, 0, Fr::from(4)), +// ]; +// let sparse_matrix = SparseMatrix::::new(&matrix_data, 3, 3); +// let vector = vec![Fr::from(1), Fr::from(2), Fr::from(3)]; + +// let result = sparse_matrix.multiply_vec(&vector); + +// assert_eq!(result, vec![Fr::from(25), Fr::from(9), Fr::from(4)]); +// } + +// #[cfg(not(target_arch = "wasm32"))] +// fn coo_strategy() -> BoxedStrategy)>> { +// let coo_strategy = +// any::>().prop_flat_map(|f| (0usize..100, 0usize..100, +// Just(f))); 
proptest::collection::vec(coo_strategy, 10).boxed() +// } + +// #[cfg(not(target_arch = "wasm32"))] +// proptest! { +// #[test] +// fn test_matrix_iter(mut coo_matrix in coo_strategy()) { +// // process the randomly generated coo matrix +// coo_matrix.sort_by_key(|(row, col, _val)| (*row, *col)); +// coo_matrix.dedup_by_key(|(row, col, _val)| (*row, *col)); +// let coo_matrix = coo_matrix.into_iter().map(|(row, col, val)| { +// (row, col, val.0) }).collect::>(); + +// let matrix = SparseMatrix::new(&coo_matrix, 100, 100); + +// prop_assert_eq!(coo_matrix, matrix.iter().collect::>()); +// } +// } +// } diff --git a/src/r1cs/util.rs b/prover/src/r1cs/util.rs similarity index 51% rename from src/r1cs/util.rs rename to prover/src/r1cs/util.rs index e81c85d..e8438f1 100644 --- a/src/r1cs/util.rs +++ b/prover/src/r1cs/util.rs @@ -12,18 +12,17 @@ impl Copy for FWrap {} #[cfg(not(target_arch = "wasm32"))] /// Trait implementation for generating `FWrap` instances with proptest impl Arbitrary for FWrap { - type Parameters = (); - type Strategy = BoxedStrategy; - - fn arbitrary_with(_args: Self::Parameters) -> Self::Strategy { - use rand::rngs::StdRng; - use rand_core::SeedableRng; - - let strategy = any::<[u8; 32]>() - .prop_map(|seed| Self(F::random(StdRng::from_seed(seed)))) - .no_shrink(); - strategy.boxed() - } + type Parameters = (); + type Strategy = BoxedStrategy; + + fn arbitrary_with(_args: Self::Parameters) -> Self::Strategy { + use rand::rngs::StdRng; + use rand_core::SeedableRng; + + let strategy = + any::<[u8; 32]>().prop_map(|seed| Self(F::random(StdRng::from_seed(seed)))).no_shrink(); + strategy.boxed() + } } /// Wrapper struct around a Group element that implements additional traits @@ -35,16 +34,15 @@ impl Copy for GWrap {} #[cfg(not(target_arch = "wasm32"))] /// Trait implementation for generating `GWrap` instances with proptest impl Arbitrary for GWrap { - type Parameters = (); - type Strategy = BoxedStrategy; - - fn arbitrary_with(_args: Self::Parameters) 
-> Self::Strategy { - use rand::rngs::StdRng; - use rand_core::SeedableRng; - - let strategy = any::<[u8; 32]>() - .prop_map(|seed| Self(G::random(StdRng::from_seed(seed)))) - .no_shrink(); - strategy.boxed() - } + type Parameters = (); + type Strategy = BoxedStrategy; + + fn arbitrary_with(_args: Self::Parameters) -> Self::Strategy { + use rand::rngs::StdRng; + use rand_core::SeedableRng; + + let strategy = + any::<[u8; 32]>().prop_map(|seed| Self(G::random(StdRng::from_seed(seed)))).no_shrink(); + strategy.boxed() + } } diff --git a/prover/src/spartan/batched.rs b/prover/src/spartan/batched.rs new file mode 100644 index 0000000..ab63abd --- /dev/null +++ b/prover/src/spartan/batched.rs @@ -0,0 +1,581 @@ +//! This module implements `BatchedRelaxedR1CSSNARKTrait` using Spartan that is +//! generic over the polynomial commitment and evaluation argument (i.e., a PCS) +//! This version of Spartan does not use preprocessing so the verifier keeps the +//! entire description of R1CS matrices. This is essentially optimal for the +//! verifier when using an IPA-based polynomial commitment scheme. This batched +//! implementation batches the outer and inner sumchecks of the Spartan SNARK. 
+ +use core::slice; +use std::{iter, sync::Arc}; + +use ff::Field; +use itertools::Itertools; +use once_cell::sync::OnceCell; +use rayon::prelude::*; +use serde::{Deserialize, Serialize}; + +use super::{ + compute_eval_table_sparse, + math::Math, + polys::{eq::EqPolynomial, multilinear::MultilinearPolynomial}, + powers, + snark::batch_eval_reduce, + sumcheck::SumcheckProof, + PolyEvalInstance, PolyEvalWitness, +}; +use crate::{ + digest::{DigestComputer, SimpleDigestible}, + errors::NovaError, + r1cs::{R1CSShape, RelaxedR1CSInstance, RelaxedR1CSWitness, SparseMatrix}, + spartan::{ + polys::{multilinear::SparsePolynomial, power::PowPolynomial}, + snark::batch_eval_verify, + }, + traits::{ + evaluation::EvaluationEngineTrait, + snark::{BatchedRelaxedR1CSSNARKTrait, DigestHelperTrait, RelaxedR1CSSNARKTrait}, + Engine, TranscriptEngineTrait, + }, + zip_with, CommitmentKey, +}; + +/// A succinct proof of knowledge of a witness to a batch of relaxed R1CS +/// instances The proof is produced using Spartan's combination of the sum-check +/// and the commitment to a vector viewed as a polynomial commitment +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct BatchedRelaxedR1CSSNARK> { + sc_proof_outer: SumcheckProof, + // Claims ([Azᵢ(τᵢ)], [Bzᵢ(τᵢ)], [Czᵢ(τᵢ)]) + claims_outer: Vec<(E::Scalar, E::Scalar, E::Scalar)>, + // [Eᵢ(r_x)] + evals_E: Vec, + sc_proof_inner: SumcheckProof, + // [Wᵢ(r_y[1..])] + evals_W: Vec, + sc_proof_batch: SumcheckProof, + // [Wᵢ(r_z), Eᵢ(r_z)] + evals_batch: Vec, + eval_arg: EE::EvaluationArgument, +} + +/// A type that represents the prover's key +#[derive(Debug, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct ProverKey> { + pub pk_ee: EE::ProverKey, + pub vk_digest: E::Scalar, // digest of the verifier's key +} + +/// A type that represents the verifier's key +#[derive(Debug, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct VerifierKey> { + pub vk_ee: EE::VerifierKey, + S: Vec>, + #[serde(skip, 
default = "OnceCell::new")] + pub digest: OnceCell, +} + +impl> VerifierKey { + fn new(shapes: Vec>, vk_ee: EE::VerifierKey) -> Self { + Self { vk_ee, S: shapes, digest: OnceCell::new() } + } +} + +impl> SimpleDigestible for VerifierKey {} + +impl> DigestHelperTrait for VerifierKey { + /// Returns the digest of the verifier's key. + fn digest(&self) -> E::Scalar { + self + .digest + .get_or_try_init(|| { + let dc = DigestComputer::::new(self); + dc.digest() + }) + .cloned() + .expect("Failure to retrieve digest!") + } +} + +impl> BatchedRelaxedR1CSSNARKTrait + for BatchedRelaxedR1CSSNARK +{ + type ProverKey = ProverKey; + type VerifierKey = VerifierKey; + + fn initialize_pk( + ck: Arc>, + vk_digest: E::Scalar, + ) -> Result { + // NOTE: We do not use the verifier key in this context + // TODO: This currently samples a `ck_c` element, does this need to + // be truly secret, if so, retrieve from an SRS. + let (pk_ee, _vk) = EE::setup(ck); + + Ok(ProverKey { pk_ee, vk_digest }) + } + + fn setup( + ck: Arc>, + S: Vec<&R1CSShape>, + ) -> Result<(Self::ProverKey, Self::VerifierKey), NovaError> { + let (pk_ee, vk_ee) = EE::setup(ck); + + let S = S.iter().map(|s| s.pad()).collect(); + + let vk = VerifierKey::new(S, vk_ee); + + let pk = ProverKey { pk_ee, vk_digest: vk.digest() }; + + Ok((pk, vk)) + } + + fn prove( + ck: &CommitmentKey, + pk: &Self::ProverKey, + S: Vec<&R1CSShape>, + U: &[RelaxedR1CSInstance], + W: &[RelaxedR1CSWitness], + ) -> Result { + let num_instances = U.len(); + // Pad shapes and ensure their sizes are correct + let S = S.iter().map(|s| s.pad()).collect::>(); + + // Pad (W,E) for each instance + let W = zip_with!(iter, (W, S), |w, s| w.pad(s)).collect::>>(); + + let mut transcript = E::TE::new(b"BatchedRelaxedR1CSSNARK"); + + transcript.absorb(b"vk", &pk.vk_digest); + if num_instances > 1 { + let num_instances_field = E::Scalar::from(num_instances as u64); + transcript.absorb(b"n", &num_instances_field); + } + transcript.absorb(b"U", &U); + + let 
(polys_W, polys_E): (Vec<_>, Vec<_>) = W.into_iter().map(|w| (w.W, w.E)).unzip(); + + // Append public inputs to W: Z = [W, u, X] + let polys_Z = zip_with!(iter, (polys_W, U), |w, u| [w.clone(), vec![u.u], u.X.clone()] + .concat()) + .collect::>>(); + + let (num_rounds_x, num_rounds_y): (Vec<_>, Vec<_>) = + S.iter().map(|s| (s.num_cons.log_2(), s.num_vars.log_2() + 1)).unzip(); + let num_rounds_x_max = *num_rounds_x.iter().max().unwrap(); + let num_rounds_y_max = *num_rounds_y.iter().max().unwrap(); + + // Generate tau polynomial corresponding to eq(τ, τ², τ⁴ , …) + // for a random challenge τ + let tau = transcript.squeeze(b"t")?; + let all_taus = PowPolynomial::squares(&tau, num_rounds_x_max); + + let polys_tau = num_rounds_x + .iter() + .map(|&num_rounds_x| PowPolynomial::evals_with_powers(&all_taus, num_rounds_x)) + .map(MultilinearPolynomial::new) + .collect::>(); + + // Compute MLEs of Az, Bz, Cz, uCz + E + let (polys_Az, polys_Bz, polys_Cz): (Vec<_>, Vec<_>, Vec<_>) = + zip_with!(par_iter, (S, polys_Z), |s, poly_Z| { + let (poly_Az, poly_Bz, poly_Cz) = s.multiply_vec(poly_Z)?; + Ok((poly_Az, poly_Bz, poly_Cz)) + }) + .collect::, NovaError>>()? 
+ .into_iter() + .multiunzip(); + + let polys_uCz_E = zip_with!(par_iter, (U, polys_E, polys_Cz), |u, poly_E, poly_Cz| { + zip_with!(par_iter, (poly_Cz, poly_E), |cz, e| u.u * cz + e).collect::>() + }) + .collect::>(); + + let comb_func_outer = + |poly_A_comp: &E::Scalar, + poly_B_comp: &E::Scalar, + poly_C_comp: &E::Scalar, + poly_D_comp: &E::Scalar| + -> E::Scalar { *poly_A_comp * (*poly_B_comp * *poly_C_comp - *poly_D_comp) }; + + // Sample challenge for random linear-combination of outer claims + let outer_r = transcript.squeeze(b"out_r")?; + let outer_r_powers = powers(&outer_r, num_instances); + + // Verify outer sumcheck: Az * Bz - uCz_E for each instance + let (sc_proof_outer, r_x, claims_outer) = SumcheckProof::prove_cubic_with_additive_term_batch( + &vec![E::Scalar::ZERO; num_instances], + &num_rounds_x, + polys_tau, + polys_Az.into_iter().map(MultilinearPolynomial::new).collect(), + polys_Bz.into_iter().map(MultilinearPolynomial::new).collect(), + polys_uCz_E.into_iter().map(MultilinearPolynomial::new).collect(), + &outer_r_powers, + comb_func_outer, + &mut transcript, + )?; + + let r_x = num_rounds_x + .iter() + .map(|&num_rounds| r_x[(num_rounds_x_max - num_rounds)..].to_vec()) + .collect::>(); + + // Extract evaluations of Az, Bz from Sumcheck and Cz, E at r_x + let (evals_Az_Bz_Cz, evals_E): (Vec<_>, Vec<_>) = zip_with!( + par_iter, + (claims_outer[1], claims_outer[2], polys_Cz, polys_E, r_x), + |eval_Az, eval_Bz, poly_Cz, poly_E, r_x| { + let (eval_Cz, eval_E) = rayon::join( + || MultilinearPolynomial::evaluate_with(poly_Cz, r_x), + || MultilinearPolynomial::evaluate_with(poly_E, r_x), + ); + ((*eval_Az, *eval_Bz, eval_Cz), eval_E) + } + ) + .unzip(); + + evals_Az_Bz_Cz.iter().zip_eq(evals_E.iter()).for_each( + |(&(eval_Az, eval_Bz, eval_Cz), &eval_E)| { + transcript.absorb(b"claims_outer", &[eval_Az, eval_Bz, eval_Cz, eval_E].as_slice()) + }, + ); + + let inner_r = transcript.squeeze(b"in_r")?; + let inner_r_square = inner_r.square(); + let 
inner_r_cube = inner_r_square * inner_r; + let inner_r_powers = powers(&inner_r_cube, num_instances); + + let claims_inner_joint = evals_Az_Bz_Cz + .iter() + .map(|(eval_Az, eval_Bz, eval_Cz)| *eval_Az + inner_r * eval_Bz + inner_r_square * eval_Cz) + .collect::>(); + + let polys_ABCs = { + let inner = |M_evals_As: Vec, + M_evals_Bs: Vec, + M_evals_Cs: Vec| + -> Vec { + zip_with!(into_par_iter, (M_evals_As, M_evals_Bs, M_evals_Cs), |eval_A, eval_B, eval_C| { + eval_A + inner_r * eval_B + inner_r_square * eval_C + }) + .collect::>() + }; + + zip_with!(par_iter, (S, r_x), |s, r_x| { + let evals_rx = EqPolynomial::evals_from_points(r_x); + let (eval_A, eval_B, eval_C) = compute_eval_table_sparse(s, &evals_rx); + MultilinearPolynomial::new(inner(eval_A, eval_B, eval_C)) + }) + .collect::>() + }; + + let polys_Z = polys_Z + .into_iter() + .zip_eq(num_rounds_y.iter()) + .map(|(mut z, &num_rounds_y)| { + z.resize(1 << num_rounds_y, E::Scalar::ZERO); + MultilinearPolynomial::new(z) + }) + .collect::>(); + + let comb_func = |poly_A_comp: &E::Scalar, poly_B_comp: &E::Scalar| -> E::Scalar { + *poly_A_comp * *poly_B_comp + }; + + let (sc_proof_inner, r_y, _claims_inner): (SumcheckProof, Vec, (Vec<_>, Vec<_>)) = + SumcheckProof::prove_quad_batch( + &claims_inner_joint, + &num_rounds_y, + polys_ABCs, + polys_Z, + &inner_r_powers, + comb_func, + &mut transcript, + )?; + + let r_y = num_rounds_y + .iter() + .map(|num_rounds| { + let (_, r_y_hi) = r_y.split_at(num_rounds_y_max - num_rounds); + r_y_hi + }) + .collect::>(); + + let evals_W = zip_with!(par_iter, (polys_W, r_y), |poly, r_y| { + MultilinearPolynomial::evaluate_with(poly, &r_y[1..]) + }) + .collect::>(); + + // Create evaluation instances for W(r_y[1..]) and E(r_x) + let (w_vec, u_vec) = + { + let mut w_vec = Vec::with_capacity(2 * num_instances); + let mut u_vec = Vec::with_capacity(2 * num_instances); + w_vec.extend(polys_W.into_iter().map(|poly| PolyEvalWitness { p: poly })); + u_vec.extend(zip_with!(iter, (evals_W, 
U, r_y), |eval, u, r_y| { + PolyEvalInstance { c: u.comm_W, x: r_y[1..].to_vec(), e: *eval } + })); + + w_vec.extend(polys_E.into_iter().map(|poly| PolyEvalWitness { p: poly })); + u_vec.extend(zip_with!((evals_E.iter(), U.iter(), r_x), |eval_E, u, r_x| { + PolyEvalInstance { c: u.comm_E, x: r_x, e: *eval_E } + })); + (w_vec, u_vec) + }; + + let (batched_u, batched_w, sc_proof_batch, claims_batch_left) = + batch_eval_reduce(u_vec, &w_vec, &mut transcript)?; + + let eval_arg = EE::prove( + ck, + &pk.pk_ee, + &mut transcript, + &batched_u.c, + &batched_w.p, + &batched_u.x, + &batched_u.e, + )?; + + Ok(Self { + sc_proof_outer, + claims_outer: evals_Az_Bz_Cz, + evals_E, + sc_proof_inner, + evals_W, + sc_proof_batch, + evals_batch: claims_batch_left, + eval_arg, + }) + } + + fn verify(&self, vk: &Self::VerifierKey, U: &[RelaxedR1CSInstance]) -> Result<(), NovaError> { + let num_instances = U.len(); + let mut transcript = E::TE::new(b"BatchedRelaxedR1CSSNARK"); + + transcript.absorb(b"vk", &vk.digest()); + if num_instances > 1 { + let num_instances_field = E::Scalar::from(num_instances as u64); + transcript.absorb(b"n", &num_instances_field); + } + transcript.absorb(b"U", &U); + + let num_instances = U.len(); + + let (num_rounds_x, num_rounds_y): (Vec<_>, Vec<_>) = + vk.S.iter().map(|s| (s.num_cons.log_2(), s.num_vars.log_2() + 1)).unzip(); + let num_rounds_x_max = *num_rounds_x.iter().max().unwrap(); + let num_rounds_y_max = *num_rounds_y.iter().max().unwrap(); + + // Define τ polynomials of the appropriate size for each instance + let tau = transcript.squeeze(b"t")?; + let all_taus = PowPolynomial::squares(&tau, num_rounds_x_max); + + let polys_tau = num_rounds_x + .iter() + .map(|&num_rounds_x| PowPolynomial::evals_with_powers(&all_taus, num_rounds_x)) + .map(MultilinearPolynomial::new) + .collect::>(); + + // Sample challenge for random linear-combination of outer claims + let outer_r = transcript.squeeze(b"out_r")?; + let outer_r_powers = powers(&outer_r, 
num_instances); + + let (claim_outer_final, r_x) = self.sc_proof_outer.verify_batch( + &vec![E::Scalar::ZERO; num_instances], + &num_rounds_x, + &outer_r_powers, + 3, + &mut transcript, + )?; + + // Since each instance has a different number of rounds, the Sumcheck + // prover skips the first num_rounds_x_max - num_rounds_x rounds. + // The evaluation point for each instance is therefore r_x[num_rounds_x_max - + // num_rounds_x..] + let r_x = num_rounds_x + .iter() + .map(|num_rounds| r_x[(num_rounds_x_max - num_rounds)..].to_vec()) + .collect::>(); + + // Extract evaluations into a vector [(Azᵢ, Bzᵢ, Czᵢ, Eᵢ)] + let ABCE_evals = || self.claims_outer.iter().zip_eq(self.evals_E.iter()); + + // Add evaluations of Az, Bz, Cz, E to transcript + for ((claim_Az, claim_Bz, claim_Cz), eval_E) in ABCE_evals() { + transcript.absorb(b"claims_outer", &[*claim_Az, *claim_Bz, *claim_Cz, *eval_E].as_slice()) + } + + let chis_r_x = + r_x.par_iter().map(|r_x| EqPolynomial::evals_from_points(r_x)).collect::>(); + + // Evaluate τ(rₓ) for each instance + let evals_tau = zip_with!(iter, (polys_tau, chis_r_x), |poly_tau, er_x| { + MultilinearPolynomial::evaluate_with_chis(poly_tau.evaluations(), er_x) + }); + + // Compute expected claim for all instances ∑ᵢ rⁱ⋅τ(rₓ)⋅(Azᵢ⋅Bzᵢ − uᵢ⋅Czᵢ − Eᵢ) + let claim_outer_final_expected = zip_with!( + (ABCE_evals(), U.iter(), evals_tau, outer_r_powers.iter()), + |ABCE_eval, u, eval_tau, r| { + let ((claim_Az, claim_Bz, claim_Cz), eval_E) = ABCE_eval; + *r * eval_tau * (*claim_Az * claim_Bz - u.u * claim_Cz - eval_E) + } + ) + .sum::(); + + if claim_outer_final != claim_outer_final_expected { + return Err(NovaError::InvalidSumcheckProof); + } + + let inner_r = transcript.squeeze(b"in_r")?; + let inner_r_square = inner_r.square(); + let inner_r_cube = inner_r_square * inner_r; + let inner_r_powers = powers(&inner_r_cube, num_instances); + + // Compute inner claims Mzᵢ = (Azᵢ + r⋅Bzᵢ + r²⋅Czᵢ), + // which are batched by Sumcheck into one claim: ∑ᵢ 
r³ⁱ⋅Mzᵢ + let claims_inner = self + .claims_outer + .iter() + .map(|(claim_Az, claim_Bz, claim_Cz)| { + *claim_Az + inner_r * claim_Bz + inner_r_square * claim_Cz + }) + .collect::>(); + + let (claim_inner_final, r_y) = self.sc_proof_inner.verify_batch( + &claims_inner, + &num_rounds_y, + &inner_r_powers, + 2, + &mut transcript, + )?; + let r_y: Vec> = num_rounds_y + .iter() + .map(|num_rounds| r_y[(num_rounds_y_max - num_rounds)..].to_vec()) + .collect(); + + // Compute evaluations of Zᵢ = [Wᵢ, uᵢ, Xᵢ] at r_y + // Zᵢ(r_y) = (1−r_y[0])⋅W(r_y[1..]) + r_y[0]⋅MLE([uᵢ, Xᵢ])(r_y[1..]) + let evals_Z = zip_with!(iter, (self.evals_W, U, r_y), |eval_W, U, r_y| { + let eval_X = { + // constant term + let poly_X = iter::once(U.u).chain(U.X.iter().cloned()).collect(); + SparsePolynomial::new(r_y.len() - 1, poly_X).evaluate(&r_y[1..]) + }; + (E::Scalar::ONE - r_y[0]) * eval_W + r_y[0] * eval_X + }) + .collect::>(); + + // compute evaluations of R1CS matrices M(r_x, r_y) = eq(r_y)ᵀ⋅M⋅eq(r_x) + let multi_evaluate = |M_vec: &[&SparseMatrix], + chi_r_x: &[E::Scalar], + r_y: &[E::Scalar]| + -> Vec { + let evaluate_with_table = + |M: &SparseMatrix, T_x: &[E::Scalar], T_y: &[E::Scalar]| -> E::Scalar { + M.par_iter_rows() + .enumerate() + .map(|(row_idx, row)| { + M.get_row(row) + .map(|(val, col_idx)| T_x[row_idx] * T_y[*col_idx] * val) + .sum::() + }) + .sum() + }; + + let T_x = chi_r_x; + let T_y = EqPolynomial::evals_from_points(r_y); + + M_vec.par_iter().map(|&M_vec| evaluate_with_table(M_vec, T_x, &T_y)).collect() + }; + + // Compute inner claim ∑ᵢ r³ⁱ⋅(Aᵢ(r_x, r_y) + r⋅Bᵢ(r_x, r_y) + r²⋅Cᵢ(r_x, + // r_y))⋅Zᵢ(r_y) + let claim_inner_final_expected = zip_with!( + iter, + (vk.S, chis_r_x, r_y, evals_Z, inner_r_powers), + |S, r_x, r_y, eval_Z, r_i| { + let evals = multi_evaluate(&[&S.A, &S.B, &S.C], r_x, r_y); + let eval = evals[0] + inner_r * evals[1] + inner_r_square * evals[2]; + eval * r_i * eval_Z + } + ) + .sum::(); + + if claim_inner_final != claim_inner_final_expected { + 
return Err(NovaError::InvalidSumcheckProof); + } + + // Create evaluation instances for W(r_y[1..]) and E(r_x) + let u_vec = { + let mut u_vec = Vec::with_capacity(2 * num_instances); + u_vec.extend(zip_with!(iter, (self.evals_W, U, r_y), |eval, u, r_y| { + PolyEvalInstance { c: u.comm_W, x: r_y[1..].to_vec(), e: *eval } + })); + + u_vec.extend(zip_with!(iter, (self.evals_E, U, r_x), |eval, u, r_x| { + PolyEvalInstance { c: u.comm_E, x: r_x.to_vec(), e: *eval } + })); + u_vec + }; + + let batched_u = + batch_eval_verify(u_vec, &mut transcript, &self.sc_proof_batch, &self.evals_batch)?; + + // verify + EE::verify( + &vk.vk_ee, + &mut transcript, + &batched_u.c, + &batched_u.x, + &batched_u.e, + &self.eval_arg, + )?; + + Ok(()) + } +} + +impl> RelaxedR1CSSNARKTrait + for BatchedRelaxedR1CSSNARK +{ + type ProverKey = ProverKey; + type VerifierKey = VerifierKey; + + fn ck_floor() -> Box Fn(&'a R1CSShape) -> usize> { + >::ck_floor() + } + + fn initialize_pk( + ck: Arc>, + vk_digest: E::Scalar, + ) -> Result { + >::initialize_pk(ck, vk_digest) + } + + fn setup( + ck: Arc>, + S: &R1CSShape, + ) -> Result<(Self::ProverKey, Self::VerifierKey), NovaError> { + >::setup(ck, vec![S]) + } + + fn prove( + ck: &CommitmentKey, + pk: &Self::ProverKey, + S: &R1CSShape, + U: &RelaxedR1CSInstance, + W: &RelaxedR1CSWitness, + ) -> Result { + let slice_U = slice::from_ref(U); + let slice_W = slice::from_ref(W); + >::prove(ck, pk, vec![S], slice_U, slice_W) + } + + fn verify(&self, vk: &Self::VerifierKey, U: &RelaxedR1CSInstance) -> Result<(), NovaError> { + let slice = slice::from_ref(U); + >::verify(self, vk, slice) + } +} diff --git a/prover/src/spartan/batched_ppsnark.rs b/prover/src/spartan/batched_ppsnark.rs new file mode 100644 index 0000000..1fe8d3b --- /dev/null +++ b/prover/src/spartan/batched_ppsnark.rs @@ -0,0 +1,1261 @@ +//! 
batched pp snark + +use core::slice; +use std::sync::Arc; + +use ff::Field; +use itertools::{chain, Itertools as _}; +use once_cell::sync::*; +use rayon::prelude::*; +use serde::{Deserialize, Serialize}; + +use crate::{ + digest::{DigestComputer, SimpleDigestible}, + errors::NovaError, + r1cs::{R1CSShape, RelaxedR1CSInstance, RelaxedR1CSWitness}, + spartan::{ + math::Math, + polys::{ + eq::EqPolynomial, + identity::IdentityPolynomial, + masked_eq::MaskedEqPolynomial, + multilinear::{MultilinearPolynomial, SparsePolynomial}, + power::PowPolynomial, + univariate::{CompressedUniPoly, UniPoly}, + }, + powers, + ppsnark::{R1CSShapeSparkCommitment, R1CSShapeSparkRepr}, + sumcheck::{ + engine::{ + InnerSumcheckInstance, MemorySumcheckInstance, OuterSumcheckInstance, SumcheckEngine, + WitnessBoundSumcheck, + }, + SumcheckProof, + }, + PolyEvalInstance, PolyEvalWitness, + }, + traits::{ + commitment::{CommitmentEngineTrait, CommitmentTrait, Len}, + evaluation::EvaluationEngineTrait, + snark::{BatchedRelaxedR1CSSNARKTrait, DigestHelperTrait, RelaxedR1CSSNARKTrait}, + Engine, TranscriptEngineTrait, + }, + zip_with, zip_with_for_each, Commitment, CommitmentKey, CompressedCommitment, +}; + +/// A type that represents the prover's key +#[derive(Debug, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct ProverKey> { + pk_ee: EE::ProverKey, + S_repr: Vec>, + S_comm: Vec>, + vk_digest: E::Scalar, // digest of verifier's key +} + +/// A type that represents the verifier's key +#[derive(Debug, Deserialize, Serialize)] +#[serde(bound = "")] +pub struct VerifierKey> { + vk_ee: EE::VerifierKey, + S_comm: Vec>, + num_vars: Vec, + #[serde(skip, default = "OnceCell::new")] + digest: OnceCell, +} +impl> VerifierKey { + fn new( + num_vars: Vec, + S_comm: Vec>, + vk_ee: EE::VerifierKey, + ) -> Self { + Self { num_vars, S_comm, vk_ee, digest: Default::default() } + } +} + +impl> SimpleDigestible for VerifierKey {} + +impl> DigestHelperTrait for VerifierKey { + /// Returns the digest of 
the verifier's key + fn digest(&self) -> E::Scalar { + self + .digest + .get_or_try_init(|| { + let dc = DigestComputer::new(self); + dc.digest() + }) + .cloned() + .expect("Failure to retrieve digest!") + } +} + +/// A succinct proof of knowledge of a witness to a relaxed R1CS instance +/// The proof is produced using Spartan's combination of the sum-check and +/// the commitment to a vector viewed as a polynomial commitment +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct BatchedRelaxedR1CSSNARK> { + // commitment to oracles: the first three are for Az, Bz, Cz, + // and the last two are for memory reads + comms_Az_Bz_Cz: Vec<[CompressedCommitment; 3]>, + comms_L_row_col: Vec<[CompressedCommitment; 2]>, + // commitments to aid the memory checks + // [t_plus_r_inv_row, w_plus_r_inv_row, t_plus_r_inv_col, w_plus_r_inv_col] + comms_mem_oracles: Vec<[CompressedCommitment; 4]>, + + // claims about Az, Bz, and Cz polynomials + evals_Az_Bz_Cz_at_tau: Vec<[E::Scalar; 3]>, + + // sum-check + sc: SumcheckProof, + + // claims from the end of sum-check + evals_Az_Bz_Cz_W_E: Vec<[E::Scalar; 5]>, + evals_L_row_col: Vec<[E::Scalar; 2]>, + // [t_plus_r_inv_row, w_plus_r_inv_row, t_plus_r_inv_col, w_plus_r_inv_col] + evals_mem_oracle: Vec<[E::Scalar; 4]>, + // [val_A, val_B, val_C, row, col, ts_row, ts_col] + evals_mem_preprocessed: Vec<[E::Scalar; 7]>, + + // a PCS evaluation argument + eval_arg: EE::EvaluationArgument, +} + +impl> BatchedRelaxedR1CSSNARKTrait + for BatchedRelaxedR1CSSNARK +{ + type ProverKey = ProverKey; + type VerifierKey = VerifierKey; + + fn ck_floor() -> Box Fn(&'a R1CSShape) -> usize> { + Box::new(|shape: &R1CSShape| -> usize { + // the commitment key should be large enough to commit to the R1CS matrices + std::cmp::max( + shape.A.len() + shape.B.len() + shape.C.len(), + std::cmp::max(shape.num_cons, 2 * shape.num_vars), + ) + }) + } + + fn initialize_pk( + ck: Arc>, + vk_digest: E::Scalar, + ) -> Result { + 
todo!("unimplemented for batched_ppsnark"); + } + + fn setup( + ck: Arc>, + S: Vec<&R1CSShape>, + ) -> Result<(Self::ProverKey, Self::VerifierKey), NovaError> { + for s in S.iter() { + // check the provided commitment key meets minimal requirements + if ck.length() < >::ck_floor()(s) { + // return Err(NovaError::InvalidCommitmentKeyLength); + return Err(NovaError::InternalError); + } + } + let (pk_ee, vk_ee) = EE::setup(ck.clone()); + + let S = S.iter().map(|s| s.pad()).collect::>(); + let S_repr = S.iter().map(R1CSShapeSparkRepr::new).collect::>(); + let S_comm = S_repr.iter().map(|s_repr| s_repr.commit(&*ck)).collect::>(); + let num_vars = S.iter().map(|s| s.num_vars).collect::>(); + let vk = VerifierKey::new(num_vars, S_comm.clone(), vk_ee); + let pk = ProverKey { pk_ee, S_repr, S_comm, vk_digest: vk.digest() }; + Ok((pk, vk)) + } + + fn prove( + ck: &CommitmentKey, + pk: &Self::ProverKey, + S: Vec<&R1CSShape>, + U: &[RelaxedR1CSInstance], + W: &[RelaxedR1CSWitness], + ) -> Result { + // Pad shapes so that num_vars = num_cons = Nᵢ and check the sizes are correct + let S = S.par_iter().map(|s| s.pad()).collect::>(); + + // N[i] = max(|Aᵢ|+|Bᵢ|+|Cᵢ|, 2*num_varsᵢ, num_consᵢ) + let Nis = pk.S_repr.iter().map(|s| s.N).collect::>(); + assert!(Nis.iter().all(|&Ni| Ni.is_power_of_two())); + let N_max = *Nis.iter().max().unwrap(); + + let num_instances = U.len(); + + // Pad [(Wᵢ,Eᵢ)] to the next power of 2 (not to Ni) + let W = zip_with!(par_iter, (W, S), |w, s| w.pad(s)).collect::>>(); + + // number of rounds of sum-check + let num_rounds_sc = N_max.log_2(); + + // Initialize transcript with vk || [Uᵢ] + let mut transcript = E::TE::new(b"BatchedRelaxedR1CSSNARK"); + transcript.absorb(b"vk", &pk.vk_digest); + if num_instances > 1 { + let num_instances_field = E::Scalar::from(num_instances as u64); + transcript.absorb(b"n", &num_instances_field); + } + transcript.absorb(b"U", &U); + + // Append public inputs to Wᵢ: Zᵢ = [Wᵢ, uᵢ, Xᵢ] + let polys_Z = zip_with!(par_iter, (W, 
U, Nis), |W, U, Ni| { + // poly_Z will be resized later, so we preallocate the correct capacity + let mut poly_Z = Vec::with_capacity(*Ni); + poly_Z.extend(W.W.iter().chain([&U.u]).chain(U.X.iter())); + poly_Z + }) + .collect::>>(); + + // Move polys_W and polys_E, as well as U.u out of U + let (comms_W_E, us): (Vec<_>, Vec<_>) = U.iter().map(|U| ([U.comm_W, U.comm_E], U.u)).unzip(); + let (polys_W, polys_E): (Vec<_>, Vec<_>) = W.into_iter().map(|w| (w.W, w.E)).unzip(); + + // Compute [Az, Bz, Cz] + let mut polys_Az_Bz_Cz = zip_with!(par_iter, (polys_Z, S), |z, s| { + let (Az, Bz, Cz) = s.multiply_vec(z)?; + Ok([Az, Bz, Cz]) + }) + .collect::, NovaError>>()?; + + // Commit to [Az, Bz, Cz] and add to transcript + let comms_Az_Bz_Cz = polys_Az_Bz_Cz + .par_iter() + .map(|[Az, Bz, Cz]| { + let (comm_Az, (comm_Bz, comm_Cz)) = rayon::join( + || E::CE::commit(ck, Az), + || rayon::join(|| E::CE::commit(ck, Bz), || E::CE::commit(ck, Cz)), + ); + [comm_Az, comm_Bz, comm_Cz] + }) + .collect::>(); + comms_Az_Bz_Cz.iter().for_each(|comms| transcript.absorb(b"c", &comms.as_slice())); + + // Compute eq(tau) for each instance in log2(Ni) variables + let tau = transcript.squeeze(b"t")?; + let all_taus = PowPolynomial::squares(&tau, N_max.log_2()); + + let (polys_tau, coords_tau): (Vec<_>, Vec<_>) = Nis + .par_iter() + .map(|&N_i| { + let log_Ni = N_i.log_2(); + let eqp: EqPolynomial<_> = all_taus[..log_Ni].iter().cloned().collect(); + let evals = eqp.evals(); + let coords = eqp.r; + (evals, coords) + }) + .unzip(); + + // Pad [Az, Bz, Cz] to Ni + polys_Az_Bz_Cz.par_iter_mut().zip_eq(Nis.par_iter()).for_each(|(az_bz_cz, &Ni)| { + az_bz_cz.par_iter_mut().for_each(|mz| mz.resize(Ni, E::Scalar::ZERO)) + }); + + // Evaluate and commit to [Az(tau), Bz(tau), Cz(tau)] + let evals_Az_Bz_Cz_at_tau = + zip_with!(par_iter, (polys_Az_Bz_Cz, coords_tau), |ABCs, tau_coords| { + let [Az, Bz, Cz] = ABCs; + let (eval_Az, (eval_Bz, eval_Cz)) = rayon::join( + || 
MultilinearPolynomial::evaluate_with(Az, tau_coords), + || { + rayon::join( + || MultilinearPolynomial::evaluate_with(Bz, tau_coords), + || MultilinearPolynomial::evaluate_with(Cz, tau_coords), + ) + }, + ); + [eval_Az, eval_Bz, eval_Cz] + }) + .collect::>(); + + // absorb the claimed evaluations into the transcript + for evals in evals_Az_Bz_Cz_at_tau.iter() { + transcript.absorb(b"e", &evals.as_slice()); + } + + // Pad Zᵢ, E to Nᵢ + let polys_Z = polys_Z + .into_par_iter() + .zip_eq(Nis.par_iter()) + .map(|(mut poly_Z, &Ni)| { + poly_Z.resize(Ni, E::Scalar::ZERO); + poly_Z + }) + .collect::>(); + + // Pad both W,E to have the same size. This is inefficient for W since the + // second half is empty, but it makes it easier to handle the batching + // at the end. + let polys_E = polys_E + .into_par_iter() + .zip_eq(Nis.par_iter()) + .map(|(mut poly_E, &Ni)| { + poly_E.resize(Ni, E::Scalar::ZERO); + poly_E + }) + .collect::>(); + + let polys_W = polys_W + .into_par_iter() + .zip_eq(Nis.par_iter()) + .map(|(mut poly_W, &Ni)| { + poly_W.resize(Ni, E::Scalar::ZERO); + poly_W + }) + .collect::>(); + + // (2) send commitments to the following two oracles + // L_row(i) = eq(tau, row(i)) for all i in [0..Nᵢ] + // L_col(i) = z(col(i)) for all i in [0..Nᵢ] + let polys_L_row_col = + zip_with!(par_iter, (S, Nis, polys_Z, polys_tau), |S, Ni, poly_Z, poly_tau| { + let mut L_row = vec![poly_tau[0]; *Ni]; // we place mem_row[0] since resized row is appended with 0s + let mut L_col = vec![poly_Z[Ni - 1]; *Ni]; // we place mem_col[Ni-1] since resized col is appended with Ni-1 + + for (i, (val_r, val_c)) in S + .A + .iter() + .chain(S.B.iter()) + .chain(S.C.iter()) + .map(|(r, c, _)| (poly_tau[r], poly_Z[c])) + .enumerate() + { + L_row[i] = val_r; + L_col[i] = val_c; + } + + [L_row, L_col] + }) + .collect::>(); + + let comms_L_row_col = polys_L_row_col + .par_iter() + .map(|[L_row, L_col]| { + let (comm_L_row, comm_L_col) = + rayon::join(|| E::CE::commit(ck, L_row), || 
E::CE::commit(ck, L_col)); + [comm_L_row, comm_L_col] + }) + .collect::>(); + + // absorb commitments to L_row and L_col in the transcript + for comms in comms_L_row_col.iter() { + transcript.absorb(b"e", &comms.as_slice()); + } + + // For each instance, batch Mz = Az + c*Bz + c^2*Cz + let c = transcript.squeeze(b"c")?; + + let polys_Mz: Vec<_> = polys_Az_Bz_Cz + .par_iter() + .map(|polys_Az_Bz_Cz| { + let poly_vec: Vec<&Vec<_>> = polys_Az_Bz_Cz.iter().collect(); + let w = PolyEvalWitness::::batch(&poly_vec[..], &c); + w.p + }) + .collect(); + + let evals_Mz: Vec<_> = zip_with!( + iter, + (comms_Az_Bz_Cz, evals_Az_Bz_Cz_at_tau), + |comm_Az_Bz_Cz, evals_Az_Bz_Cz_at_tau| { + let u = PolyEvalInstance::::batch( + comm_Az_Bz_Cz.as_slice(), + vec![], // ignored by the function + evals_Az_Bz_Cz_at_tau.as_slice(), + &c, + ); + u.e + } + ) + .collect(); + + // we now need to prove three claims for each instance + // (outer) + // 0 = \sum_x poly_tau(x) * (poly_Az(x) * poly_Bz(x) - poly_uCz_E(x)) + // eval_Az_at_tau + c * eval_Bz_at_tau + c^2 * eval_Cz_at_tau = + // (Az+c*Bz+c^2*Cz)(tau) (inner) + // eval_Az_at_tau + c * eval_Bz_at_tau + c^2 * eval_Cz_at_tau = \sum_y + // L_row(y) * (val_A(y) + c * val_B(y) + c^2 * val_C(y)) * L_col(y) + // (mem) + // L_row(i) = eq(tau, row(i)) + // L_col(i) = z(col(i)) + let outer_sc_inst = zip_with!( + ( + polys_Az_Bz_Cz.par_iter(), + polys_E.par_iter(), + polys_Mz.into_par_iter(), + polys_tau.par_iter(), + evals_Mz.par_iter(), + us.par_iter() + ), + |poly_ABC, poly_E, poly_Mz, poly_tau, eval_Mz, u| { + let [poly_Az, poly_Bz, poly_Cz] = poly_ABC; + let poly_uCz_E = zip_with!(par_iter, (poly_Cz, poly_E), |cz, e| *u * cz + e).collect(); + OuterSumcheckInstance::new( + poly_tau.clone(), + poly_Az.clone(), + poly_Bz.clone(), + poly_uCz_E, + poly_Mz, // Mz = Az + c * Bz + c^2 * Cz + eval_Mz, // eval_Az_at_tau + c * eval_Az_at_tau + c^2 * eval_Cz_at_tau + ) + } + ) + .collect::>(); + + let inner_sc_inst = + zip_with!(par_iter, (pk.S_repr, 
evals_Mz, polys_L_row_col), |s_repr, eval_Mz, poly_L| { + let [poly_L_row, poly_L_col] = poly_L; + let c_square = c.square(); + let val = + zip_with!(par_iter, (s_repr.val_A, s_repr.val_B, s_repr.val_C), |v_a, v_b, v_c| *v_a + + c * *v_b + + c_square * *v_c) + .collect::>(); + + InnerSumcheckInstance::new( + *eval_Mz, + MultilinearPolynomial::new(poly_L_row.clone()), + MultilinearPolynomial::new(poly_L_col.clone()), + MultilinearPolynomial::new(val), + ) + }) + .collect::>(); + + // a third sum-check instance to prove the read-only memory claim + // we now need to prove that L_row and L_col are well-formed + let (mem_sc_inst, comms_mem_oracles, polys_mem_oracles) = { + let gamma = transcript.squeeze(b"g")?; + let r = transcript.squeeze(b"r")?; + + // We start by computing oracles and auxiliary polynomials to help prove the + // claim oracles correspond to [t_plus_r_inv_row, w_plus_r_inv_row, + // t_plus_r_inv_col, w_plus_r_inv_col] + let (comms_mem_oracles, polys_mem_oracles, mem_aux) = pk + .S_repr + .iter() + .zip_eq(polys_tau.iter()) + .zip_eq(polys_Z.iter()) + .zip_eq(polys_L_row_col.iter()) + .try_fold( + (Vec::new(), Vec::new(), Vec::new()), + |(mut comms, mut polys, mut aux), (((s_repr, poly_tau), poly_Z), [L_row, L_col])| { + let (comm, poly, a) = MemorySumcheckInstance::::compute_oracles( + ck, + &r, + &gamma, + poly_tau, + &s_repr.row, + L_row, + &s_repr.ts_row, + poly_Z, + &s_repr.col, + L_col, + &s_repr.ts_col, + )?; + + comms.push(comm); + polys.push(poly); + aux.push(a); + + Ok::<_, NovaError>((comms, polys, aux)) + }, + )?; + + // Commit to oracles + for comms in comms_mem_oracles.iter() { + transcript.absorb(b"l", &comms.as_slice()); + } + + // Sample new random variable for eq polynomial + let rho = transcript.squeeze(b"r")?; + let all_rhos = PowPolynomial::squares(&rho, N_max.log_2()); + + let instances = zip_with!( + ( + pk.S_repr.par_iter(), + Nis.par_iter(), + polys_mem_oracles.par_iter(), + mem_aux.into_par_iter() + ), + |s_repr, Ni, 
polys_mem_oracles, polys_aux| { + MemorySumcheckInstance::::new( + polys_mem_oracles.clone(), + polys_aux, + PowPolynomial::evals_with_powers(&all_rhos, Ni.log_2()), + s_repr.ts_row.clone(), + s_repr.ts_col.clone(), + ) + } + ) + .collect::>(); + (instances, comms_mem_oracles, polys_mem_oracles) + }; + + let witness_sc_inst = zip_with!(par_iter, (polys_W, S), |poly_W, S| { + WitnessBoundSumcheck::new(tau, poly_W.clone(), S.num_vars) + }) + .collect::>(); + + // Run batched Sumcheck for the 3 claims for all instances. + // Note that the polynomials for claims relating to instance i have size Ni. + let (sc, rand_sc, claims_outer, claims_inner, claims_mem, claims_witness) = Self::prove_helper( + num_rounds_sc, + mem_sc_inst, + outer_sc_inst, + inner_sc_inst, + witness_sc_inst, + &mut transcript, + )?; + + let (evals_Az_Bz_Cz_W_E, evals_L_row_col, evals_mem_oracle, evals_mem_preprocessed) = { + let evals_Az_Bz = + claims_outer.into_iter().map(|claims| [claims[0][0], claims[0][1]]).collect::>(); + + let evals_L_row_col = claims_inner + .into_iter() + .map(|claims| { + // [L_row, L_col] + [claims[0][0], claims[0][1]] + }) + .collect::>(); + + let (evals_mem_oracle, evals_mem_ts): (Vec<_>, Vec<_>) = claims_mem + .into_iter() + .map(|claims| { + ( + // [t_plus_r_inv_row, w_plus_r_inv_row, t_plus_r_inv_col, w_plus_r_inv_col] + [claims[0][0], claims[0][1], claims[1][0], claims[1][1]], + // [ts_row, ts_col] + [claims[0][2], claims[1][2]], + ) + }) + .unzip(); + + let evals_W = claims_witness.into_iter().map(|claims| claims[0][0]).collect::>(); + + let (evals_Cz_E, evals_mem_val_row_col): (Vec<_>, Vec<_>) = + zip_with!(iter, (polys_Az_Bz_Cz, polys_E, pk.S_repr), |ABCzs, poly_E, s_repr| { + let [_, _, Cz] = ABCzs; + let log_Ni = s_repr.N.log_2(); + let (_, rand_sc) = rand_sc.split_at(num_rounds_sc - log_Ni); + let rand_sc_evals = EqPolynomial::evals_from_points(rand_sc); + let e = + [Cz, poly_E, &s_repr.val_A, &s_repr.val_B, &s_repr.val_C, &s_repr.row, &s_repr.col] + 
.into_iter() + .map(|p| { + // Manually compute evaluation to avoid recomputing rand_sc_evals + zip_with!(par_iter, (p, rand_sc_evals), |p, eq| *p * eq).sum() + }) + .collect::>(); + ([e[0], e[1]], [e[2], e[3], e[4], e[5], e[6]]) + }) + .unzip(); + + let evals_Az_Bz_Cz_W_E = + zip_with!((evals_Az_Bz.into_iter(), evals_Cz_E.into_iter(), evals_W), |Az_Bz, Cz_E, W| { + let [Az, Bz] = Az_Bz; + let [Cz, E] = Cz_E; + [Az, Bz, Cz, W, E] + }) + .collect::>(); + + // [val_A, val_B, val_C, row, col, ts_row, ts_col] + let evals_mem_preprocessed = zip_with!( + (evals_mem_val_row_col.into_iter(), evals_mem_ts), + |eval_mem_val_row_col, eval_mem_ts| { + let [val_A, val_B, val_C, row, col] = eval_mem_val_row_col; + let [ts_row, ts_col] = eval_mem_ts; + [val_A, val_B, val_C, row, col, ts_row, ts_col] + } + ) + .collect::>(); + (evals_Az_Bz_Cz_W_E, evals_L_row_col, evals_mem_oracle, evals_mem_preprocessed) + }; + + let evals_vec = zip_with!( + iter, + (evals_Az_Bz_Cz_W_E, evals_L_row_col, evals_mem_oracle, evals_mem_preprocessed), + |Az_Bz_Cz_W_E, L_row_col, mem_oracles, mem_preprocessed| { + chain![Az_Bz_Cz_W_E, L_row_col, mem_oracles, mem_preprocessed].cloned().collect::>() + } + ) + .collect::>(); + + let comms_vec = zip_with!( + iter, + (comms_Az_Bz_Cz, comms_W_E, comms_L_row_col, comms_mem_oracles, pk.S_comm), + |Az_Bz_Cz, comms_W_E, L_row_col, mem_oracles, S_comm| { + chain![Az_Bz_Cz, comms_W_E, L_row_col, mem_oracles, [ + &S_comm.comm_val_A, + &S_comm.comm_val_B, + &S_comm.comm_val_C, + &S_comm.comm_row, + &S_comm.comm_col, + &S_comm.comm_ts_row, + &S_comm.comm_ts_col, + ]] + } + ) + .flatten() + .cloned() + .collect::>(); + + let w_vec = zip_with!( + ( + polys_Az_Bz_Cz.into_iter(), + polys_W.into_iter(), + polys_E.into_iter(), + polys_L_row_col.into_iter(), + polys_mem_oracles.into_iter(), + pk.S_repr.iter() + ), + |Az_Bz_Cz, W, E, L_row_col, mem_oracles, S_repr| { + chain![Az_Bz_Cz, [W, E], L_row_col, mem_oracles, [ + S_repr.val_A.clone(), + S_repr.val_B.clone(), + 
S_repr.val_C.clone(), + S_repr.row.clone(), + S_repr.col.clone(), + S_repr.ts_row.clone(), + S_repr.ts_col.clone(), + ]] + } + ) + .flatten() + .map(|p| PolyEvalWitness:: { p }) + .collect::>(); + + for evals in evals_vec.iter() { + transcript.absorb(b"e", &evals.as_slice()); // comm_vec is already + // in the transcript + } + let evals_vec = evals_vec.into_iter().flatten().collect::>(); + + let c = transcript.squeeze(b"c")?; + + // Compute number of variables for each polynomial + let num_vars_u = w_vec.iter().map(|w| w.p.len().log_2()).collect::>(); + let u_batch = + PolyEvalInstance::::batch_diff_size(&comms_vec, &evals_vec, &num_vars_u, rand_sc, c); + let w_batch = + PolyEvalWitness::::batch_diff_size(&w_vec.iter().by_ref().collect::>(), c); + + let eval_arg = + EE::prove(ck, &pk.pk_ee, &mut transcript, &u_batch.c, &w_batch.p, &u_batch.x, &u_batch.e)?; + + let comms_Az_Bz_Cz = + comms_Az_Bz_Cz.into_iter().map(|comms| comms.map(|comm| comm.compress())).collect(); + let comms_L_row_col = + comms_L_row_col.into_iter().map(|comms| comms.map(|comm| comm.compress())).collect(); + let comms_mem_oracles = + comms_mem_oracles.into_iter().map(|comms| comms.map(|comm| comm.compress())).collect(); + + Ok(Self { + comms_Az_Bz_Cz, + comms_L_row_col, + comms_mem_oracles, + evals_Az_Bz_Cz_at_tau, + sc, + evals_Az_Bz_Cz_W_E, + evals_L_row_col, + evals_mem_oracle, + evals_mem_preprocessed, + eval_arg, + }) + } + + fn verify(&self, vk: &Self::VerifierKey, U: &[RelaxedR1CSInstance]) -> Result<(), NovaError> { + let num_instances = U.len(); + let num_claims_per_instance = 10; + + // number of rounds of sum-check + let num_rounds = vk.S_comm.iter().map(|s| s.N.log_2()).collect::>(); + let num_rounds_max = *num_rounds.iter().max().unwrap(); + + let mut transcript = E::TE::new(b"BatchedRelaxedR1CSSNARK"); + + transcript.absorb(b"vk", &vk.digest()); + if num_instances > 1 { + let num_instances_field = E::Scalar::from(num_instances as u64); + transcript.absorb(b"n", 
&num_instances_field); + } + transcript.absorb(b"U", &U); + + // Decompress commitments + let comms_Az_Bz_Cz = self + .comms_Az_Bz_Cz + .iter() + .map(|comms| comms.iter().map(Commitment::::decompress).collect::, _>>()) + .collect::, _>>()?; + + let comms_L_row_col = self + .comms_L_row_col + .iter() + .map(|comms| comms.iter().map(Commitment::::decompress).collect::, _>>()) + .collect::, _>>()?; + + let comms_mem_oracles = self + .comms_mem_oracles + .iter() + .map(|comms| comms.iter().map(Commitment::::decompress).collect::, _>>()) + .collect::, _>>()?; + + // Add commitments [Az, Bz, Cz] to the transcript + comms_Az_Bz_Cz.iter().for_each(|comms| transcript.absorb(b"c", &comms.as_slice())); + + let tau = transcript.squeeze(b"t")?; + let tau_coords = PowPolynomial::new(&tau, num_rounds_max).coordinates(); + + // absorb the claimed evaluations into the transcript + self.evals_Az_Bz_Cz_at_tau.iter().for_each(|evals| { + transcript.absorb(b"e", &evals.as_slice()); + }); + + // absorb commitments to L_row and L_col in the transcript + for comms in comms_L_row_col.iter() { + transcript.absorb(b"e", &comms.as_slice()); + } + + // Batch at tau for each instance + let c = transcript.squeeze(b"c")?; + + // Compute eval_Mz = eval_Az_at_tau + c * eval_Bz_at_tau + c^2 * eval_Cz_at_tau + let evals_Mz: Vec<_> = zip_with!( + iter, + (comms_Az_Bz_Cz, self.evals_Az_Bz_Cz_at_tau), + |comm_Az_Bz_Cz, evals_Az_Bz_Cz_at_tau| { + let u = PolyEvalInstance::::batch( + comm_Az_Bz_Cz.as_slice(), + tau_coords.clone(), + evals_Az_Bz_Cz_at_tau.as_slice(), + &c, + ); + u.e + } + ) + .collect(); + + let gamma = transcript.squeeze(b"g")?; + let r = transcript.squeeze(b"r")?; + + for comms in comms_mem_oracles.iter() { + transcript.absorb(b"l", &comms.as_slice()); + } + + let rho = transcript.squeeze(b"r")?; + + let s = transcript.squeeze(b"r")?; + let s_powers = powers(&s, num_instances * num_claims_per_instance); + + let (claim_sc_final, rand_sc) = { + // Gather all claims into a single vector + 
let claims = evals_Mz + .iter() + .flat_map(|&eval_Mz| { + let mut claims = vec![E::Scalar::ZERO; num_claims_per_instance]; + claims[7] = eval_Mz; + claims[8] = eval_Mz; + claims.into_iter() + }) + .collect::>(); + + // Number of rounds for each claim + let num_rounds_by_claim = num_rounds + .iter() + .flat_map(|num_rounds_i| vec![*num_rounds_i; num_claims_per_instance].into_iter()) + .collect::>(); + + self.sc.verify_batch(&claims, &num_rounds_by_claim, &s_powers, 3, &mut transcript)? + }; + + // Truncated sumcheck randomness for each instance + let rand_sc_i = num_rounds + .iter() + .map(|num_rounds| rand_sc[(num_rounds_max - num_rounds)..].to_vec()) + .collect::>(); + + let claim_sc_final_expected = zip_with!( + ( + vk.num_vars.iter(), + rand_sc_i.iter(), + U.iter(), + self.evals_Az_Bz_Cz_W_E.iter().cloned(), + self.evals_L_row_col.iter().cloned(), + self.evals_mem_oracle.iter().cloned(), + self.evals_mem_preprocessed.iter().cloned() + ), + |num_vars, + rand_sc, + U, + evals_Az_Bz_Cz_W_E, + evals_L_row_col, + eval_mem_oracle, + eval_mem_preprocessed| { + let [Az, Bz, Cz, W, E] = evals_Az_Bz_Cz_W_E; + let [L_row, L_col] = evals_L_row_col; + let [t_plus_r_inv_row, w_plus_r_inv_row, t_plus_r_inv_col, w_plus_r_inv_col] = + eval_mem_oracle; + let [val_A, val_B, val_C, row, col, ts_row, ts_col] = eval_mem_preprocessed; + + let num_rounds_i = rand_sc.len(); + let num_vars_log = num_vars.log_2(); + + let eq_rho = PowPolynomial::new(&rho, num_rounds_i).evaluate(rand_sc); + + let (eq_tau, eq_masked_tau) = { + let eq_tau: EqPolynomial<_> = PowPolynomial::new(&tau, num_rounds_i).into(); + + let eq_tau_at_rand = eq_tau.evaluate(rand_sc); + let eq_masked_tau = MaskedEqPolynomial::new(&eq_tau, num_vars_log).evaluate(rand_sc); + + (eq_tau_at_rand, eq_masked_tau) + }; + + // Evaluate identity polynomial + let id = IdentityPolynomial::new(num_rounds_i).evaluate(rand_sc); + + let Z = { + // rand_sc was padded, so we now remove the padding + let (factor, rand_sc_unpad) = { + let l 
= num_rounds_i - (num_vars_log + 1); + + let (rand_sc_lo, rand_sc_hi) = rand_sc.split_at(l); + + let factor = + rand_sc_lo.iter().fold(E::Scalar::ONE, |acc, r_p| acc * (E::Scalar::ONE - r_p)); + + (factor, rand_sc_hi) + }; + + let X = { + // constant term + let poly_X = std::iter::once(U.u).chain(U.X.iter().cloned()).collect(); + SparsePolynomial::new(num_vars_log, poly_X).evaluate(&rand_sc_unpad[1..]) + }; + + // W was evaluated as if it was padded to logNi variables, + // so we don't multiply it by (1-rand_sc_unpad[0]) + W + factor * rand_sc_unpad[0] * X + }; + + let t_plus_r_row = { + let addr_row = id; + let val_row = eq_tau; + let t = addr_row + gamma * val_row; + t + r + }; + + let w_plus_r_row = { + let addr_row = row; + let val_row = L_row; + let w = addr_row + gamma * val_row; + w + r + }; + + let t_plus_r_col = { + let addr_col = id; + let val_col = Z; + let t = addr_col + gamma * val_col; + t + r + }; + + let w_plus_r_col = { + let addr_col = col; + let val_col = L_col; + let w = addr_col + gamma * val_col; + w + r + }; + + let claims_mem = [ + t_plus_r_inv_row - w_plus_r_inv_row, + t_plus_r_inv_col - w_plus_r_inv_col, + eq_rho * (t_plus_r_inv_row * t_plus_r_row - ts_row), + eq_rho * (w_plus_r_inv_row * w_plus_r_row - E::Scalar::ONE), + eq_rho * (t_plus_r_inv_col * t_plus_r_col - ts_col), + eq_rho * (w_plus_r_inv_col * w_plus_r_col - E::Scalar::ONE), + ]; + + let claims_outer = [eq_tau * (Az * Bz - U.u * Cz - E), eq_tau * (Az + c * Bz + c * c * Cz)]; + let claims_inner = [L_row * L_col * (val_A + c * val_B + c * c * val_C)]; + + let claims_witness = [eq_masked_tau * W]; + + chain![claims_mem, claims_outer, claims_inner, claims_witness] + } + ) + .flatten() + .zip_eq(s_powers) + .fold(E::Scalar::ZERO, |acc, (claim, s)| acc + s * claim); + + if claim_sc_final_expected != claim_sc_final { + return Err(NovaError::InvalidSumcheckProof); + } + + let evals_vec = zip_with!( + iter, + ( + self.evals_Az_Bz_Cz_W_E, + self.evals_L_row_col, + self.evals_mem_oracle, + 
self.evals_mem_preprocessed + ), + |Az_Bz_Cz_W_E, L_row_col, mem_oracles, mem_preprocessed| { + chain![Az_Bz_Cz_W_E, L_row_col, mem_oracles, mem_preprocessed].cloned().collect::>() + } + ) + .collect::>(); + + // Add all Sumcheck evaluations to the transcript + for evals in evals_vec.iter() { + transcript.absorb(b"e", &evals.as_slice()); // comm_vec is already + // in the transcript + } + + let c = transcript.squeeze(b"c")?; + + // Compute batched polynomial evaluation instance at rand_sc + let u = { + let num_evals = evals_vec[0].len(); + + let evals_vec = evals_vec.into_iter().flatten().collect::>(); + + let num_vars = num_rounds + .iter() + .flat_map(|num_rounds| vec![*num_rounds; num_evals].into_iter()) + .collect::>(); + + let comms_vec = zip_with!( + ( + comms_Az_Bz_Cz.into_iter(), + U.iter(), + comms_L_row_col.into_iter(), + comms_mem_oracles.into_iter(), + vk.S_comm.iter() + ), + |Az_Bz_Cz, U, L_row_col, mem_oracles, S_comm| { + chain![Az_Bz_Cz, [U.comm_W, U.comm_E], L_row_col, mem_oracles, [ + S_comm.comm_val_A, + S_comm.comm_val_B, + S_comm.comm_val_C, + S_comm.comm_row, + S_comm.comm_col, + S_comm.comm_ts_row, + S_comm.comm_ts_col, + ]] + } + ) + .flatten() + .collect::>(); + + PolyEvalInstance::::batch_diff_size(&comms_vec, &evals_vec, &num_vars, rand_sc, c) + }; + + // verify + EE::verify(&vk.vk_ee, &mut transcript, &u.c, &u.x, &u.e, &self.eval_arg)?; + + Ok(()) + } +} + +impl> BatchedRelaxedR1CSSNARK { + /// Runs the batched Sumcheck protocol for the claims of multiple instance + /// of possibly different sizes. + /// + /// # Details + /// + /// In order to avoid padding all polynomials to the same maximum size, we + /// adopt the following strategy. + /// + /// Let n be the number of variables for the largest instance, + /// and let m be the number of variables for a shorter one. 
+ /// Let P(X_{0},...,X_{m-1}) be one of the MLEs of the short instance, which + /// has been committed to by taking the MSM of its evaluations with the + /// first 2^m basis points of the commitment key. + /// + /// This Sumcheck prover will interpret it as the polynomial + /// P'(X_{0},...,X_{n-1}) = P(X_{n-m},...,X_{n-1}), + /// whose MLE evaluations over {0,1}^m is equal to 2^{n-m} repetitions of + /// the evaluations of P. + /// + /// In order to account for these "imagined" repetitions, the initial claims + /// for this short instances are scaled by 2^{n-m}. + /// + /// For the first n-m rounds, the univariate polynomials relating to this + /// shorter claim will be constant, and equal to the initial claims, + /// scaled by 2^{n-m-i}, where i is the round number. By definition, P' + /// does not depend on X_i, so binding P' to r_i has no effect on the + /// evaluations. The Sumcheck prover will then interpret the polynomial + /// P' as having half as many repetitions in the next round. + /// + /// When we get to round n-m, the Sumcheck proceeds as usual since the + /// polynomials are the expected size for the round. + /// + /// Note that at the end of the protocol, the prover returns the evaluation + /// u' = P'(r_{0},...,r_{n-1}) = P(r_{n-m},...,r_{n-1}) + /// However, the polynomial we actually committed to over {0,1}^n is + /// P''(X_{0},...,X_{n-1}) = L_0(X_{0},...,X_{n-m-1}) * + /// P(X_{n-m},...,X_{n-1}) The SNARK prover/verifier will need to + /// rescale the evaluation by the first Lagrange polynomial + /// u'' = L_0(r_{0},...,r_{n-m-1}) * u' + /// in order batch all evaluations with a single PCS call. 
+ fn prove_helper( + num_rounds: usize, + mut mem: Vec, + mut outer: Vec, + mut inner: Vec, + mut witness: Vec, + transcript: &mut E::TE, + ) -> Result< + ( + SumcheckProof, + Vec, + Vec>>, + Vec>>, + Vec>>, + Vec>>, + ), + NovaError, + > + where + T1: SumcheckEngine, + T2: SumcheckEngine, + T3: SumcheckEngine, + T4: SumcheckEngine, + { + // sanity checks + let num_instances = mem.len(); + assert_eq!(outer.len(), num_instances); + assert_eq!(inner.len(), num_instances); + assert_eq!(witness.len(), num_instances); + + for inst in mem.iter_mut() { + assert!(inst.size().is_power_of_two()); + } + for inst in outer.iter() { + assert!(inst.size().is_power_of_two()); + } + for inst in inner.iter() { + assert!(inst.size().is_power_of_two()); + } + for inst in witness.iter() { + assert!(inst.size().is_power_of_two()); + } + + let degree = mem[0].degree(); + assert!(mem.iter().all(|inst| inst.degree() == degree)); + assert!(outer.iter().all(|inst| inst.degree() == degree)); + assert!(inner.iter().all(|inst| inst.degree() == degree)); + assert!(witness.iter().all(|inst| inst.degree() == degree)); + + // Collect all claims from the instances. If the instances is defined over `m` + // variables, which is less that the total number of rounds `n`, + // the individual claims σ are scaled by 2^{n-m}. + let claims = zip_with!(iter, (mem, outer, inner, witness), |mem, outer, inner, witness| { + Self::scaled_claims(mem, num_rounds) + .into_iter() + .chain(Self::scaled_claims(outer, num_rounds)) + .chain(Self::scaled_claims(inner, num_rounds)) + .chain(Self::scaled_claims(witness, num_rounds)) + }) + .flatten() + .collect::>(); + + // Sample a challenge for the random linear combination of all scaled claims + let s = transcript.squeeze(b"r")?; + let coeffs = powers(&s, claims.len()); + + // At the start of each round, the running claim is equal to the random linear + // combination of the Sumcheck claims, evaluated over the bound + // polynomials. 
Initially, it is equal to the random linear combination + // of the scaled input claims. + let mut running_claim = zip_with!(iter, (claims, coeffs), |c_1, c_2| *c_1 * c_2).sum(); + + // Keep track of the verifier challenges r, and the univariate polynomials sent + // by the prover in each round + let mut r: Vec = Vec::new(); + let mut cubic_polys: Vec> = Vec::new(); + + for i in 0..num_rounds { + // At the start of round i, there input polynomials are defined over at most n-i + // variables. + let remaining_variables = num_rounds - i; + + // For each claim j, compute the evaluations of its univariate polynomial + // S_j(X_i) at X = 0, 2, 3. The polynomial is such that + // S_{j-1}(r_{j-1}) = S_j(0) + S_j(1). If the number of variable m + // of the claim is m < n-i, then the polynomial is constants and + // equal to the initial claim σ_j scaled by 2^{n-m-i-1}. + let evals = + zip_with!(par_iter, (mem, outer, inner, witness), |mem, outer, inner, witness| { + let ((evals_mem, evals_outer), (evals_inner, evals_witness)) = rayon::join( + || { + rayon::join( + || Self::get_evals(mem, remaining_variables), + || Self::get_evals(outer, remaining_variables), + ) + }, + || { + rayon::join( + || Self::get_evals(inner, remaining_variables), + || Self::get_evals(witness, remaining_variables), + ) + }, + ); + evals_mem + .into_par_iter() + .chain(evals_outer.into_par_iter()) + .chain(evals_inner.into_par_iter()) + .chain(evals_witness.into_par_iter()) + }) + .flatten() + .collect::>(); + + assert_eq!(evals.len(), claims.len()); + + // Random linear combination of the univariate evaluations at X_i = 0, 2, 3 + let evals_combined_0 = (0..evals.len()).map(|i| evals[i][0] * coeffs[i]).sum(); + let evals_combined_2 = (0..evals.len()).map(|i| evals[i][1] * coeffs[i]).sum(); + let evals_combined_3 = (0..evals.len()).map(|i| evals[i][2] * coeffs[i]).sum(); + + let evals = vec![ + evals_combined_0, + running_claim - evals_combined_0, + evals_combined_2, + evals_combined_3, + ]; + // 
Coefficient representation of S(X_i) + let poly = UniPoly::from_evals(&evals); + + // append the prover's message to the transcript + transcript.absorb(b"p", &poly); + + // derive the verifier's challenge for the next round + let r_i = transcript.squeeze(b"c")?; + r.push(r_i); + + // Bind the variable X_i of polynomials across all claims to r_i. + // If the claim is defined over m variables and m < n-i, then + // binding has no effect on the polynomial. + zip_with_for_each!( + par_iter_mut, + (mem, outer, inner, witness), + |mem, outer, inner, witness| { + rayon::join( + || { + rayon::join( + || Self::bind(mem, remaining_variables, &r_i), + || Self::bind(outer, remaining_variables, &r_i), + ) + }, + || { + rayon::join( + || Self::bind(inner, remaining_variables, &r_i), + || Self::bind(witness, remaining_variables, &r_i), + ) + }, + ); + } + ); + + running_claim = poly.evaluate(&r_i); + cubic_polys.push(poly.compress()); + } + + // Collect evaluations at (r_{n-m}, ..., r_{n-1}) of polynomials over all + // claims, where m is the initial number of variables the individual + // claims are defined over. + let claims_outer = outer.into_iter().map(|inst| inst.final_claims()).collect(); + let claims_inner = inner.into_iter().map(|inst| inst.final_claims()).collect(); + let claims_mem = mem.into_iter().map(|inst| inst.final_claims()).collect(); + let claims_witness = witness.into_iter().map(|inst| inst.final_claims()).collect(); + + Ok((SumcheckProof::new(cubic_polys), r, claims_outer, claims_inner, claims_mem, claims_witness)) + } + + /// In round i, computes the evaluations at X_i = 0, 2, 3 of the univariate + /// polynomials S(X_i) for each claim in the instance. + /// Let `n` be the total number of Sumcheck rounds, and assume the instance + /// is defined over `m` variables. We define `remaining_variables` as + /// n-i. 
If m < n-i, then the polynomials in the instance are not + /// defined over X_i, so the univariate polynomial is constant and equal + /// to 2^{n-m-i-1}*σ, where σ is the initial claim. + fn get_evals>(inst: &T, remaining_variables: usize) -> Vec> { + let num_instance_variables = inst.size().log_2(); // m + if num_instance_variables < remaining_variables { + let deg = inst.degree(); + + // The evaluations at X_i = 0, 2, 3 are all equal to the scaled claim + Self::scaled_claims(inst, remaining_variables - 1) + .into_iter() + .map(|scaled_claim| vec![scaled_claim; deg]) + .collect() + } else { + inst.evaluation_points() + } + } + + /// In round i after receiving challenge r_i, we partially evaluate all + /// polynomials in the instance at X_i = r_i. If the instance is defined + /// over m variables m which is less than n-i, then the polynomials do + /// not depend on X_i, so binding them to r_i has no effect. + fn bind>(inst: &mut T, remaining_variables: usize, r: &E::Scalar) { + let num_instance_variables = inst.size().log_2(); // m + if remaining_variables <= num_instance_variables { + inst.bound(r) + } + } + + /// Given an instance defined over m variables, the sum over n = + /// `remaining_variables` is equal to the initial claim scaled by + /// 2^{n-m}, when m ≤ n. 
+ fn scaled_claims>(inst: &T, remaining_variables: usize) -> Vec { + let num_instance_variables = inst.size().log_2(); // m + let num_repetitions = 1 << (remaining_variables - num_instance_variables); + let scaling = E::Scalar::from(num_repetitions as u64); + inst.initial_claims().iter().map(|claim| scaling * claim).collect() + } +} + +impl> RelaxedR1CSSNARKTrait + for BatchedRelaxedR1CSSNARK +{ + type ProverKey = ProverKey; + type VerifierKey = VerifierKey; + + fn ck_floor() -> Box Fn(&'a R1CSShape) -> usize> { + >::ck_floor() + } + + fn initialize_pk( + ck: Arc>, + vk_digest: E::Scalar, + ) -> Result { + >::initialize_pk(ck, vk_digest) + } + + fn setup( + ck: Arc>, + S: &R1CSShape, + ) -> Result<(Self::ProverKey, Self::VerifierKey), NovaError> { + >::setup(ck, vec![S]) + } + + fn prove( + ck: &CommitmentKey, + pk: &Self::ProverKey, + S: &R1CSShape, + U: &RelaxedR1CSInstance, + W: &RelaxedR1CSWitness, + ) -> Result { + let slice_U = slice::from_ref(U); + let slice_W = slice::from_ref(W); + + >::prove(ck, pk, vec![S], slice_U, slice_W) + } + + fn verify(&self, vk: &Self::VerifierKey, U: &RelaxedR1CSInstance) -> Result<(), NovaError> { + let slice = slice::from_ref(U); + >::verify(self, vk, slice) + } +} diff --git a/src/spartan/macros.rs b/prover/src/spartan/macros.rs similarity index 100% rename from src/spartan/macros.rs rename to prover/src/spartan/macros.rs diff --git a/prover/src/spartan/math.rs b/prover/src/spartan/math.rs new file mode 100644 index 0000000..94b35c5 --- /dev/null +++ b/prover/src/spartan/math.rs @@ -0,0 +1,15 @@ +pub trait Math { + fn log_2(self) -> usize; +} + +impl Math for usize { + fn log_2(self) -> usize { + assert_ne!(self, 0); + + if self.is_power_of_two() { + (1usize.leading_zeros() - self.leading_zeros()) as Self + } else { + (0usize.leading_zeros() - self.leading_zeros()) as Self + } + } +} diff --git a/src/spartan/mod.rs b/prover/src/spartan/mod.rs similarity index 55% rename from src/spartan/mod.rs rename to 
prover/src/spartan/mod.rs index 2e0a078..84aeeca 100644 --- a/src/spartan/mod.rs +++ b/prover/src/spartan/mod.rs @@ -26,196 +26,167 @@ use rayon_scan::ScanParallelIterator as _; use ref_cast::RefCast; use crate::{ - r1cs::{R1CSShape, SparseMatrix}, - traits::Engine, - Commitment, + r1cs::{R1CSShape, SparseMatrix}, + traits::Engine, + Commitment, }; // Creates a vector of the first `n` powers of `s`. /// Creates a vector of the first `n` powers of `s`. pub fn powers(s: &F, n: usize) -> Vec { - assert!(n >= 1); - let mut v = vec![*s; n]; - v[0] = F::ONE; - v.into_par_iter().scan(|a, b| *a * *b, F::ONE).collect() + assert!(n >= 1); + let mut v = vec![*s; n]; + v[0] = F::ONE; + v.into_par_iter().scan(|a, b| *a * *b, F::ONE).collect() } /// A type that holds a witness to a polynomial evaluation instance #[repr(transparent)] #[derive(Debug, RefCast)] struct PolyEvalWitness { - p: Vec, // polynomial + p: Vec, // polynomial } impl PolyEvalWitness { - /// Given [Pᵢ] and s, compute P = ∑ᵢ sⁱ⋅Pᵢ - /// - /// # Details - /// - /// We allow the input polynomials to have different sizes, and interpret - /// smaller ones as being padded with 0 to the maximum size of all - /// polynomials. 
- fn batch_diff_size(W: &[&Self], s: E::Scalar) -> Self { - let powers = powers(&s, W.len()); - - let size_max = W.iter().map(|w| w.p.len()).max().unwrap(); - let p_vec = W.par_iter().map(|w| &w.p); - // Scale the input polynomials by the power of s - let p = zip_with!((p_vec, powers.par_iter()), |v, weight| { - // compute the weighted sum for each vector - v.iter() - .map(|&x| { - if *weight != E::Scalar::ONE { - x * *weight - } else { - x - } - }) - .collect::>() - }) - .reduce( - || vec![E::Scalar::ZERO; size_max], - |left, right| { - // Sum into the largest polynomial - let (mut big, small) = if left.len() > right.len() { - (left, right) - } else { - (right, left) - }; - - #[allow(clippy::disallowed_methods)] - big.par_iter_mut() - .zip(small.par_iter()) - .for_each(|(b, s)| *b += s); - - big - }, - ); - - Self { p } - } + /// Given [Pᵢ] and s, compute P = ∑ᵢ sⁱ⋅Pᵢ + /// + /// # Details + /// + /// We allow the input polynomials to have different sizes, and interpret + /// smaller ones as being padded with 0 to the maximum size of all + /// polynomials. 
+ fn batch_diff_size(W: &[&Self], s: E::Scalar) -> Self { + let powers = powers(&s, W.len()); + + let size_max = W.iter().map(|w| w.p.len()).max().unwrap(); + let p_vec = W.par_iter().map(|w| &w.p); + // Scale the input polynomials by the power of s + let p = zip_with!((p_vec, powers.par_iter()), |v, weight| { + // compute the weighted sum for each vector + v.iter().map(|&x| if *weight != E::Scalar::ONE { x * *weight } else { x }).collect::>() + }) + .reduce( + || vec![E::Scalar::ZERO; size_max], + |left, right| { + // Sum into the largest polynomial + let (mut big, small) = if left.len() > right.len() { (left, right) } else { (right, left) }; + + #[allow(clippy::disallowed_methods)] + big.par_iter_mut().zip(small.par_iter()).for_each(|(b, s)| *b += s); + + big + }, + ); - /// Given a set of polynomials \[Pᵢ\] and a scalar `s`, this method computes - /// the weighted sum of the polynomials, where each polynomial Pᵢ is - /// scaled by sⁱ. - /// - /// # Panics - /// - /// This method panics if the polynomials in `p_vec` are not all of the same - /// length. - fn batch(p_vec: &[&Vec], s: &E::Scalar) -> Self { - p_vec - .iter() - .skip(1) - .for_each(|p| assert_eq!(p.len(), p_vec[0].len())); - let instances = p_vec.iter().map(|p| Self::ref_cast(p)).collect::>(); - Self::batch_diff_size(&instances, *s) - } + Self { p } + } + + /// Given a set of polynomials \[Pᵢ\] and a scalar `s`, this method computes + /// the weighted sum of the polynomials, where each polynomial Pᵢ is + /// scaled by sⁱ. + /// + /// # Panics + /// + /// This method panics if the polynomials in `p_vec` are not all of the same + /// length. 
+ fn batch(p_vec: &[&Vec], s: &E::Scalar) -> Self { + p_vec.iter().skip(1).for_each(|p| assert_eq!(p.len(), p_vec[0].len())); + let instances = p_vec.iter().map(|p| Self::ref_cast(p)).collect::>(); + Self::batch_diff_size(&instances, *s) + } } /// A type that holds a polynomial evaluation instance #[derive(Debug)] struct PolyEvalInstance { - c: Commitment, // commitment to the polynomial - x: Vec, // evaluation point - e: E::Scalar, // claimed evaluation + c: Commitment, // commitment to the polynomial + x: Vec, // evaluation point + e: E::Scalar, // claimed evaluation } impl PolyEvalInstance { - fn batch_diff_size( - c_vec: &[Commitment], - e_vec: &[E::Scalar], - num_vars: &[usize], - x: Vec, - s: E::Scalar, - ) -> Self { - let num_instances = num_vars.len(); - assert_eq!(c_vec.len(), num_instances); - assert_eq!(e_vec.len(), num_instances); - - let num_vars_max = x.len(); - let powers: Vec = powers(&s, num_instances); - // Rescale evaluations by the first Lagrange polynomial, - // so that we can check its evaluation against x - let evals_scaled = zip_with!(iter, (e_vec, num_vars), |eval, num_rounds| { - // x_lo = [ x[0] , ..., x[n-nᵢ-1] ] - // x_hi = [ x[n-nᵢ], ..., x[n] ] - let (r_lo, _r_hi) = x.split_at(num_vars_max - num_rounds); - // Compute L₀(x_lo) - let lagrange_eval = r_lo - .iter() - .map(|r| E::Scalar::ONE - r) - .product::(); - - // vᵢ = L₀(x_lo)⋅Pᵢ(x_hi) - lagrange_eval * eval - }); - - // C = ∑ᵢ γⁱ⋅Cᵢ - let comm_joint = zip_with!(iter, (c_vec, powers), |c, g_i| *c * *g_i) - .fold(Commitment::::default(), |acc, item| acc + item); - - // v = ∑ᵢ γⁱ⋅vᵢ - let eval_joint = zip_with!((evals_scaled, powers.iter()), |e, g_i| e * g_i).sum(); - - Self { - c: comm_joint, - x, - e: eval_joint, - } - } - - fn batch( - c_vec: &[Commitment], - x: Vec, - e_vec: &[E::Scalar], - s: &E::Scalar, - ) -> Self { - let sizes = vec![x.len(); e_vec.len()]; - Self::batch_diff_size(c_vec, e_vec, &sizes, x, *s) - } + fn batch_diff_size( + c_vec: &[Commitment], + e_vec: 
&[E::Scalar], + num_vars: &[usize], + x: Vec, + s: E::Scalar, + ) -> Self { + let num_instances = num_vars.len(); + assert_eq!(c_vec.len(), num_instances); + assert_eq!(e_vec.len(), num_instances); + + let num_vars_max = x.len(); + let powers: Vec = powers(&s, num_instances); + // Rescale evaluations by the first Lagrange polynomial, + // so that we can check its evaluation against x + let evals_scaled = zip_with!(iter, (e_vec, num_vars), |eval, num_rounds| { + // x_lo = [ x[0] , ..., x[n-nᵢ-1] ] + // x_hi = [ x[n-nᵢ], ..., x[n] ] + let (r_lo, _r_hi) = x.split_at(num_vars_max - num_rounds); + // Compute L₀(x_lo) + let lagrange_eval = r_lo.iter().map(|r| E::Scalar::ONE - r).product::(); + + // vᵢ = L₀(x_lo)⋅Pᵢ(x_hi) + lagrange_eval * eval + }); + + // C = ∑ᵢ γⁱ⋅Cᵢ + let comm_joint = zip_with!(iter, (c_vec, powers), |c, g_i| *c * *g_i) + .fold(Commitment::::default(), |acc, item| acc + item); + + // v = ∑ᵢ γⁱ⋅vᵢ + let eval_joint = zip_with!((evals_scaled, powers.iter()), |e, g_i| e * g_i).sum(); + + Self { c: comm_joint, x, e: eval_joint } + } + + fn batch(c_vec: &[Commitment], x: Vec, e_vec: &[E::Scalar], s: &E::Scalar) -> Self { + let sizes = vec![x.len(); e_vec.len()]; + Self::batch_diff_size(c_vec, e_vec, &sizes, x, *s) + } } /// Binds "row" variables of (A, B, C) matrices viewed as 2d multilinear /// polynomials fn compute_eval_table_sparse( - S: &R1CSShape, - rx: &[E::Scalar], + S: &R1CSShape, + rx: &[E::Scalar], ) -> (Vec, Vec, Vec) { - assert_eq!(rx.len(), S.num_cons); - - let inner = |M: &SparseMatrix, M_evals: &mut Vec| { - for (row_idx, row) in M.iter_rows().enumerate() { - for (val, col_idx) in M.get_row(row) { - // TODO(@winston-h-zhang): Parallelize? 
Will need more complicated locking - M_evals[*col_idx] += rx[row_idx] * val; - } - } - }; - - let (A_evals, (B_evals, C_evals)) = rayon::join( + assert_eq!(rx.len(), S.num_cons); + + let inner = |M: &SparseMatrix, M_evals: &mut Vec| { + for (row_idx, row) in M.iter_rows().enumerate() { + for (val, col_idx) in M.get_row(row) { + // TODO(@winston-h-zhang): Parallelize? Will need more complicated locking + M_evals[*col_idx] += rx[row_idx] * val; + } + } + }; + + let (A_evals, (B_evals, C_evals)) = rayon::join( + || { + let mut A_evals: Vec = vec![E::Scalar::ZERO; 2 * S.num_vars]; + inner(&S.A, &mut A_evals); + A_evals + }, + || { + rayon::join( || { - let mut A_evals: Vec = vec![E::Scalar::ZERO; 2 * S.num_vars]; - inner(&S.A, &mut A_evals); - A_evals + let mut B_evals: Vec = vec![E::Scalar::ZERO; 2 * S.num_vars]; + inner(&S.B, &mut B_evals); + B_evals }, || { - rayon::join( - || { - let mut B_evals: Vec = vec![E::Scalar::ZERO; 2 * S.num_vars]; - inner(&S.B, &mut B_evals); - B_evals - }, - || { - let mut C_evals: Vec = vec![E::Scalar::ZERO; 2 * S.num_vars]; - inner(&S.C, &mut C_evals); - C_evals - }, - ) + let mut C_evals: Vec = vec![E::Scalar::ZERO; 2 * S.num_vars]; + inner(&S.C, &mut C_evals); + C_evals }, - ); + ) + }, + ); - (A_evals, B_evals, C_evals) + (A_evals, B_evals, C_evals) } // #[cfg(all(test, not(target_arch = "wasm32")))] diff --git a/prover/src/spartan/polys/eq.rs b/prover/src/spartan/polys/eq.rs new file mode 100644 index 0000000..70abe99 --- /dev/null +++ b/prover/src/spartan/polys/eq.rs @@ -0,0 +1,115 @@ +//! `EqPolynomial`: Represents multilinear extension of equality polynomials, +//! evaluated based on binary input values. + +use ff::PrimeField; +use rayon::prelude::{IndexedParallelIterator, IntoParallelRefMutIterator, ParallelIterator}; + +/// Represents the multilinear extension polynomial (MLE) of the equality +/// polynomial $eq(x,e)$, denoted as $\tilde{eq}(x, e)$. 
+/// +/// The polynomial is defined by the formula: +/// $$ +/// \tilde{eq}(x, e) = \prod_{i=1}^m(e_i * x_i + (1 - e_i) * (1 - x_i)) +/// $$ +/// +/// Each element in the vector `r` corresponds to a component $e_i$, +/// representing a bit from the binary representation of an input value $e$. +/// This polynomial evaluates to 1 if every component $x_i$ equals its +/// corresponding $e_i$, and 0 otherwise. +/// +/// For instance, for e = 6 (with a binary representation of 0b110), the vector +/// r would be [1, 1, 0]. +#[derive(Debug)] +pub struct EqPolynomial { + pub(in crate::spartan) r: Vec, +} + +impl EqPolynomial { + /// Creates a new `EqPolynomial` from a vector of Scalars `r`. + /// + /// Each Scalar in `r` corresponds to a bit from the binary representation + /// of an input value `e`. + pub const fn new(r: Vec) -> Self { Self { r } } + + /// Evaluates the `EqPolynomial` at a given point `rx`. + /// + /// This function computes the value of the polynomial at the point + /// specified by `rx`. It expects `rx` to have the same length as the + /// internal vector `r`. + /// + /// Panics if `rx` and `r` have different lengths. + pub fn evaluate(&self, rx: &[Scalar]) -> Scalar { + assert_eq!(self.r.len(), rx.len()); + (0..rx.len()) + .map(|i| self.r[i] * rx[i] + (Scalar::ONE - self.r[i]) * (Scalar::ONE - rx[i])) + .product() + } + + /// Evaluates the `EqPolynomial` at all the `2^|r|` points in its domain. + /// + /// Returns a vector of Scalars, each corresponding to the polynomial + /// evaluation at a specific point. + #[must_use = "this returns an expensive vector and leaves self unchanged"] + pub fn evals(&self) -> Vec { Self::evals_from_points(&self.r) } + + /// Evaluates the `EqPolynomial` from the `2^|r|` points in its domain, + /// without creating an intermediate polynomial representation. + /// + /// Returns a vector of Scalars, each corresponding to the polynomial + /// evaluation at a specific point. 
+ pub fn evals_from_points(r: &[Scalar]) -> Vec { + let ell = r.len(); + let mut evals: Vec = vec![Scalar::ZERO; (2_usize).pow(ell as u32)]; + let mut size = 1; + evals[0] = Scalar::ONE; + + for r in r.iter().rev() { + let (evals_left, evals_right) = evals.split_at_mut(size); + let (evals_right, _) = evals_right.split_at_mut(size); + + evals_left.par_iter_mut().zip_eq(evals_right.par_iter_mut()).for_each(|(x, y)| { + *y = *x * r; + *x -= &*y; + }); + + size *= 2; + } + + evals + } +} + +impl FromIterator for EqPolynomial { + fn from_iter>(iter: I) -> Self { + let r: Vec<_> = iter.into_iter().collect(); + Self { r } + } +} + +#[cfg(test)] +mod tests { + + use super::*; + use crate::provider; + + fn test_eq_polynomial_with() { + let eq_poly = EqPolynomial::::new(vec![F::ONE, F::ZERO, F::ONE]); + let y = eq_poly.evaluate(vec![F::ONE, F::ONE, F::ONE].as_slice()); + assert_eq!(y, F::ZERO); + + let y = eq_poly.evaluate(vec![F::ONE, F::ZERO, F::ONE].as_slice()); + assert_eq!(y, F::ONE); + + let eval_list = eq_poly.evals(); + for (i, &coeff) in eval_list.iter().enumerate().take((2_usize).pow(3)) { + if i == 5 { + assert_eq!(coeff, F::ONE); + } else { + assert_eq!(coeff, F::ZERO); + } + } + } + + #[test] + fn test_eq_polynomial() { test_eq_polynomial_with::(); } +} diff --git a/prover/src/spartan/polys/identity.rs b/prover/src/spartan/polys/identity.rs new file mode 100644 index 0000000..ce7db47 --- /dev/null +++ b/prover/src/spartan/polys/identity.rs @@ -0,0 +1,25 @@ +use core::marker::PhantomData; + +use ff::PrimeField; + +pub struct IdentityPolynomial { + ell: usize, + _p: PhantomData, +} + +impl IdentityPolynomial { + pub fn new(ell: usize) -> Self { Self { ell, _p: PhantomData } } + + pub fn evaluate(&self, r: &[Scalar]) -> Scalar { + assert_eq!(self.ell, r.len()); + let mut power_of_two = 1_u64; + (0..self.ell) + .rev() + .map(|i| { + let result = Scalar::from(power_of_two) * r[i]; + power_of_two *= 2; + result + }) + .sum() + } +} diff --git 
a/prover/src/spartan/polys/masked_eq.rs b/prover/src/spartan/polys/masked_eq.rs new file mode 100644 index 0000000..bca43b3 --- /dev/null +++ b/prover/src/spartan/polys/masked_eq.rs @@ -0,0 +1,131 @@ +//! `MaskedEqPolynomial`: Represents the `eq` polynomial over n variables, where +//! the first 2^m entries are 0. + +use ff::PrimeField; +use itertools::zip_eq; + +use crate::spartan::polys::eq::EqPolynomial; + +/// Represents the multilinear extension polynomial (MLE) of the equality +/// polynomial $eqₘ(x,r)$ over n variables, where the first 2^m evaluations are +/// 0. +/// +/// The polynomial is defined by the formula: +/// eqₘ(x,r) = eq(x,r) - ( ∏_{0 ≤ i < n-m} (1−rᵢ)(1−xᵢ) )⋅( ∏_{n-m ≤ i < n} +/// (1−rᵢ)(1−xᵢ) + rᵢ⋅xᵢ ) +#[derive(Debug)] +pub struct MaskedEqPolynomial<'a, Scalar> { + eq: &'a EqPolynomial, + num_masked_vars: usize, +} + +impl<'a, Scalar: PrimeField> MaskedEqPolynomial<'a, Scalar> { + /// Creates a new `MaskedEqPolynomial` from a vector of Scalars `r` of size + /// n, with the number of masked variables m = `num_masked_vars`. + pub const fn new(eq: &'a EqPolynomial, num_masked_vars: usize) -> Self { + MaskedEqPolynomial { eq, num_masked_vars } + } + + /// Evaluates the `MaskedEqPolynomial` at a given point `rx`. + /// + /// This function computes the value of the polynomial at the point + /// specified by `rx`. It expects `rx` to have the same length as the + /// internal vector `r`. + /// + /// Panics if `rx` and `r` have different lengths. 
+ pub fn evaluate(&self, rx: &[Scalar]) -> Scalar { + let r = &self.eq.r; + assert_eq!(r.len(), rx.len()); + let split_idx = r.len() - self.num_masked_vars; + + let (r_lo, r_hi) = r.split_at(split_idx); + let (rx_lo, rx_hi) = rx.split_at(split_idx); + let eq_lo = zip_eq(r_lo, rx_lo) + .map(|(r, rx)| *r * rx + (Scalar::ONE - r) * (Scalar::ONE - rx)) + .product::(); + let eq_hi = zip_eq(r_hi, rx_hi) + .map(|(r, rx)| *r * rx + (Scalar::ONE - r) * (Scalar::ONE - rx)) + .product::(); + let mask_lo = + zip_eq(r_lo, rx_lo).map(|(r, rx)| (Scalar::ONE - r) * (Scalar::ONE - rx)).product::(); + + (eq_lo - mask_lo) * eq_hi + } + + /// Evaluates the `MaskedEqPolynomial` at all the `2^|r|` points in its + /// domain. + /// + /// Returns a vector of Scalars, each corresponding to the polynomial + /// evaluation at a specific point. + pub fn evals(&self) -> Vec { Self::evals_from_points(&self.eq.r, self.num_masked_vars) } + + /// Evaluates the `MaskedEqPolynomial` from the `2^|r|` points in its + /// domain, without creating an intermediate polynomial representation. + /// + /// Returns a vector of Scalars, each corresponding to the polynomial + /// evaluation at a specific point. 
+ fn evals_from_points(r: &[Scalar], num_masked_vars: usize) -> Vec { + let mut evals = EqPolynomial::evals_from_points(r); + + // replace the first 2^m evaluations with 0 + let num_masked_evals = 1 << num_masked_vars; + evals[..num_masked_evals].iter_mut().for_each(|e| *e = Scalar::ZERO); + + evals + } +} + +#[cfg(test)] +mod tests { + use rand_chacha::ChaCha20Rng; + use rand_core::{CryptoRng, RngCore, SeedableRng}; + + use super::*; + use crate::{provider, spartan::polys::eq::EqPolynomial}; + + fn test_masked_eq_polynomial_with( + num_vars: usize, + num_masked_vars: usize, + mut rng: &mut R, + ) { + let num_masked_evals = 1 << num_masked_vars; + + // random point + let r = std::iter::from_fn(|| Some(F::random(&mut rng))).take(num_vars).collect::>(); + // evaluation point + let rx = std::iter::from_fn(|| Some(F::random(&mut rng))).take(num_vars).collect::>(); + + let poly_eq = EqPolynomial::new(r); + let poly_eq_evals = poly_eq.evals(); + + let masked_eq_poly = MaskedEqPolynomial::new(&poly_eq, num_masked_vars); + let masked_eq_poly_evals = masked_eq_poly.evals(); + + // ensure the first 2^m entries are 0 + assert_eq!(masked_eq_poly_evals[..num_masked_evals], vec![F::ZERO; num_masked_evals]); + // ensure the remaining evaluations match eq(r) + assert_eq!(masked_eq_poly_evals[num_masked_evals..], poly_eq_evals[num_masked_evals..]); + + // compute the evaluation at rx succinctly + let masked_eq_eval = masked_eq_poly.evaluate(&rx); + + // compute the evaluation as a MLE + let rx_evals = EqPolynomial::evals_from_points(&rx); + let expected_masked_eq_eval = + zip_eq(rx_evals, masked_eq_poly_evals).map(|(rx, r)| rx * r).sum(); + + assert_eq!(masked_eq_eval, expected_masked_eq_eval); + } + + #[test] + fn test_masked_eq_polynomial() { + let mut rng = ChaCha20Rng::from_seed([0u8; 32]); + let num_vars = 5; + let num_masked_vars = 2; + test_masked_eq_polynomial_with::( + num_vars, + num_masked_vars, + &mut rng, + ); + } +} diff --git a/src/spartan/polys/mod.rs 
b/prover/src/spartan/polys/mod.rs similarity index 100% rename from src/spartan/polys/mod.rs rename to prover/src/spartan/polys/mod.rs diff --git a/prover/src/spartan/polys/multilinear.rs b/prover/src/spartan/polys/multilinear.rs new file mode 100644 index 0000000..1eb5176 --- /dev/null +++ b/prover/src/spartan/polys/multilinear.rs @@ -0,0 +1,295 @@ +//! Main components: +//! - `MultilinearPolynomial`: Dense representation of multilinear polynomials, represented by +//! evaluations over all possible binary inputs. +//! - `SparsePolynomial`: Efficient representation of sparse multilinear polynomials, storing only +//! non-zero evaluations. + +use std::ops::{Add, Index}; + +use ff::PrimeField; +use itertools::Itertools as _; +use rand_core::{CryptoRng, RngCore}; +use rayon::prelude::{ + IndexedParallelIterator, IntoParallelRefIterator, IntoParallelRefMutIterator, ParallelIterator, +}; +use serde::{Deserialize, Serialize}; + +use crate::spartan::{math::Math, polys::eq::EqPolynomial}; + +/// A multilinear extension of a polynomial $Z(\cdot)$, denote it as +/// $\tilde{Z}(x_1, ..., x_m)$ where the degree of each variable is at most one. +/// +/// This is the dense representation of a multilinear poynomial. +/// Let it be $\mathbb{G}(\cdot): \mathbb{F}^m \rightarrow \mathbb{F}$, it can +/// be represented uniquely by the list of evaluations of $\mathbb{G}(\cdot)$ +/// over the Boolean hypercube $\{0, 1\}^m$. +/// +/// For example, a 3 variables multilinear polynomial can be represented by +/// evaluation at points $[0, 2^3-1]$. +/// +/// The implementation follows +/// $$ +/// \tilde{Z}(x_1, ..., x_m) = \sum_{e\in {0,1}^m}Z(e) \cdot \prod_{i=1}^m(x_i +/// \cdot e_i + (1-x_i) \cdot (1-e_i)) $$ +/// +/// Vector $Z$ indicates $Z(e)$ where $e$ ranges from $0$ to $2^m-1$. 
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct MultilinearPolynomial { + num_vars: usize, // the number of variables in the multilinear polynomial + pub(crate) Z: Vec, /* evaluations of the polynomial in all the 2^num_vars Boolean + * inputs */ +} + +impl MultilinearPolynomial { + /// Creates a new `MultilinearPolynomial` from the given evaluations. + /// + /// # Panics + /// The number of evaluations must be a power of two. + pub fn new(Z: Vec) -> Self { + let num_vars = Z.len().log_2(); + assert_eq!(Z.len(), 1 << num_vars); + Self { num_vars, Z } + } + + /// evaluations of the polynomial in all the 2^num_vars Boolean inputs + pub fn evaluations(&self) -> &[Scalar] { &self.Z[..] } + + /// Returns the number of variables in the multilinear polynomial + pub const fn get_num_vars(&self) -> usize { self.num_vars } + + /// Returns the total number of evaluations. + pub fn len(&self) -> usize { self.Z.len() } + + /// Returns true if no evaluations. + pub fn is_empty(&self) -> bool { self.Z.len() == 0 } + + /// Returns a random polynomial + pub fn random(num_vars: usize, mut rng: &mut R) -> Self { + Self::new(std::iter::from_fn(|| Some(Scalar::random(&mut rng))).take(1 << num_vars).collect()) + } + + /// Binds the polynomial's top variable using the given scalar. + /// + /// This operation modifies the polynomial in-place. + pub fn bind_poly_var_top(&mut self, r: &Scalar) { + assert!(self.num_vars > 0); + + let n = self.len() / 2; + + let (left, right) = self.Z.split_at_mut(n); + + left.par_iter_mut().zip_eq(right.par_iter()).for_each(|(a, b)| { + *a += *r * (*b - *a); + }); + + self.Z.resize(n, Scalar::ZERO); + self.num_vars -= 1; + } + + /// Evaluates the polynomial at the given point. + /// Returns Z(r) in O(n) time. + /// + /// The point must have a value for each variable. 
+ pub fn evaluate(&self, r: &[Scalar]) -> Scalar { + // r must have a value for each variable + assert_eq!(r.len(), self.get_num_vars()); + Self::evaluate_with(&self.Z, r) + } + + /// Evaluates the polynomial with the given evaluations and point. + pub fn evaluate_with(Z: &[Scalar], r: &[Scalar]) -> Scalar { + let chis = EqPolynomial::evals_from_points(r); + Self::evaluate_with_chis(Z, &chis) + } + + /// Evaluates the polynomial with the given evaluations and chi coefficients + pub fn evaluate_with_chis(Z: &[Scalar], chis: &[Scalar]) -> Scalar { + zip_with!(par_iter, (chis, Z), |a, b| *a * b).sum() + } +} + +impl Index for MultilinearPolynomial { + type Output = Scalar; + + #[inline(always)] + fn index(&self, _index: usize) -> &Scalar { &(self.Z[_index]) } +} + +/// Sparse multilinear polynomial, which means the $Z(\cdot)$ is zero at most +/// points. In our context, sparse polynomials are non-zeros over the hypercube +/// at locations that map to "small" integers We exploit this property to +/// implement a time-optimal algorithm +pub(crate) struct SparsePolynomial { + num_vars: usize, + Z: Vec, +} + +impl SparsePolynomial { + pub fn new(num_vars: usize, Z: Vec) -> Self { Self { num_vars, Z } } + + // a time-optimal algorithm to evaluate sparse polynomials + pub fn evaluate(&self, r: &[Scalar]) -> Scalar { + assert_eq!(self.num_vars, r.len()); + + let num_vars_z = self.Z.len().next_power_of_two().log_2(); + let chis = EqPolynomial::evals_from_points(&r[self.num_vars - 1 - num_vars_z..]); + #[allow(clippy::disallowed_methods)] + let eval_partial: Scalar = self.Z.iter().zip(chis.iter()).map(|(z, chi)| *z * *chi).sum(); + + let common = + (0..self.num_vars - 1 - num_vars_z).map(|i| (Scalar::ONE - r[i])).product::(); + + common * eval_partial + } +} + +/// Adds another multilinear polynomial to `self`. +/// Assumes the two polynomials have the same number of variables. 
+impl Add for MultilinearPolynomial { + type Output = Result; + + fn add(self, other: Self) -> Self::Output { + if self.get_num_vars() != other.get_num_vars() { + return Err("The two polynomials must have the same number of variables"); + } + + let sum: Vec = zip_with!(into_iter, (self.Z, other.Z), |a, b| a + b).collect(); + + Ok(Self::new(sum)) + } +} + +#[cfg(test)] +mod tests { + use rand_chacha::ChaCha20Rng; + use rand_core::SeedableRng; + + use super::*; + use crate::provider::bn256_grumpkin::bn256; + + fn make_mlp(len: usize, value: F) -> MultilinearPolynomial { + MultilinearPolynomial { num_vars: len.count_ones() as usize, Z: vec![value; len] } + } + + // fn test_multilinear_polynomial_with() { + // // Let the polynomial has 3 variables, p(x_1, x_2, x_3) = (x_1 + x_2) * + // x_3 // Evaluations of the polynomial at boolean cube are [0, 0, 0, 1, + // 0, 1, 0, 2]. + + // let TWO = F::from(2); + + // let Z = vec![ + // F::ZERO, + // F::ZERO, + // F::ZERO, + // F::ONE, + // F::ZERO, + // F::ONE, + // F::ZERO, + // TWO, + // ]; + // let m_poly = MultilinearPolynomial::::new(Z.clone()); + // assert_eq!(m_poly.get_num_vars(), 3); + + // let x = vec![F::ONE, F::ONE, F::ONE]; + // assert_eq!(m_poly.evaluate(x.as_slice()), TWO); + + // let y = MultilinearPolynomial::::evaluate_with(Z.as_slice(), + // x.as_slice()); assert_eq!(y, TWO); + // } + + // fn test_sparse_polynomial_with() { + // // Let the polynomial have 4 variables, but is non-zero at only 3 + // locations (out // of 2^4 = 16) over the hypercube + // let mut Z = vec![F::ONE, F::ONE, F::from(2)]; + // let m_poly = SparsePolynomial::::new(4, Z.clone()); + + // Z.resize(16, F::ZERO); // append with zeros to make it a dense polynomial + // let m_poly_dense = MultilinearPolynomial::new(Z); + + // // evaluation point + // let x = vec![F::from(5), F::from(8), F::from(5), F::from(3)]; + + // // check evaluations + // assert_eq!( + // m_poly.evaluate(x.as_slice()), + // m_poly_dense.evaluate(x.as_slice()) + // ); + 
// } + + fn test_mlp_add_with() { + let mlp1 = make_mlp(4, F::from(3)); + let mlp2 = make_mlp(4, F::from(7)); + + let mlp3 = mlp1.add(mlp2).unwrap(); + + assert_eq!(mlp3.Z, vec![F::from(10); 4]); + } + + #[test] + fn test_mlp_add() { test_mlp_add_with::(); } + + fn test_evaluation_with() { + let num_evals = 4; + let mut evals: Vec = Vec::with_capacity(num_evals); + for _ in 0..num_evals { + evals.push(F::from(8)); + } + let dense_poly: MultilinearPolynomial = MultilinearPolynomial::new(evals.clone()); + + // Evaluate at 3: + // (0, 0) = 1 + // (0, 1) = 1 + // (1, 0) = 1 + // (1, 1) = 1 + // g(x_0,x_1) => c_0*(1 - x_0)(1 - x_1) + c_1*(1-x_0)(x_1) + c_2*(x_0)(1-x_1) + + // c_3*(x_0)(x_1) g(3, 4) = 8*(1 - 3)(1 - 4) + 8*(1-3)(4) + 8*(3)(1-4) + + // 8*(3)(4) = 48 + -64 + -72 + 96 = 8 g(5, 10) = 8*(1 - 5)(1 - 10) + + // 8*(1 - 5)(10) + 8*(5)(1-10) + 8*(5)(10) = 96 + -16 + -72 + 96 = 8 + assert_eq!(dense_poly.evaluate(vec![F::from(3), F::from(4)].as_slice()), F::from(8)); + assert_eq!(dense_poly.evaluate(vec![F::from(5), F::from(10)].as_slice()), F::from(8)); + } + + #[test] + fn test_evaluation() { test_evaluation_with::(); } + + /// This binds the variables of a multilinear polynomial to a provided + /// sequence of values. + /// + /// Assuming `bind_poly_var_top` defines the "top" variable of the + /// polynomial, this aims to test whether variables should be provided + /// to the `evaluate` function in topmost-first (big endian) of + /// topmost-last (lower endian) order. 
+ fn bind_sequence( + poly: &MultilinearPolynomial, + values: &[F], + ) -> MultilinearPolynomial { + // Assert that the size of the polynomial being evaluated is a power of 2 + // greater than (1 << values.len()) + assert!(poly.Z.len().is_power_of_two()); + assert!(poly.Z.len() >= 1 << values.len()); + + let mut tmp = poly.clone(); + for v in values.iter() { + tmp.bind_poly_var_top(v); + } + tmp + } + + fn bind_and_evaluate_with() { + for i in 0..50 { + // Initialize a random polynomial + let n = 7; + let mut rng = ChaCha20Rng::from_seed([i as u8; 32]); + let poly = MultilinearPolynomial::random(n, &mut rng); + + // draw a random point + let pt: Vec<_> = std::iter::from_fn(|| Some(F::random(&mut rng))).take(n).collect(); + // this shows the order in which coordinates are evaluated + assert_eq!(poly.evaluate(&pt), bind_sequence(&poly, &pt).Z[0]) + } + } + + #[test] + fn test_bind_and_evaluate() { bind_and_evaluate_with::(); } +} diff --git a/prover/src/spartan/polys/power.rs b/prover/src/spartan/polys/power.rs new file mode 100644 index 0000000..6e79700 --- /dev/null +++ b/prover/src/spartan/polys/power.rs @@ -0,0 +1,63 @@ +//! `PowPolynomial`: Represents multilinear extension of power polynomials + +use std::iter::successors; + +use ff::PrimeField; + +use crate::spartan::polys::eq::EqPolynomial; + +/// Represents the multilinear extension polynomial (MLE) of the equality +/// polynomial $pow(x,t)$, denoted as $\tilde{pow}(x, t)$. +/// +/// The polynomial is defined by the formula: +/// $$ +/// \tilde{power}(x, t) = \prod_{i=1}^m(1 + (t^{2^i} - 1) * x_i) +/// $$ +pub struct PowPolynomial { + eq: EqPolynomial, +} + +impl PowPolynomial { + /// Creates a new `PowPolynomial` from a Scalars `t`. 
+ pub fn new(t: &Scalar, ell: usize) -> Self { + // t_pow = [t^{2^0}, t^{2^1}, ..., t^{2^{ell-1}}] + let t_pow = Self::squares(t, ell); + + Self { eq: EqPolynomial::new(t_pow) } + } + + /// Create powers the following powers of `t`: + /// [t^{2^0}, t^{2^1}, ..., t^{2^{ell-1}}] + pub fn squares(t: &Scalar, ell: usize) -> Vec { + successors(Some(*t), |p: &Scalar| Some(p.square())).take(ell).collect::>() + } + + /// Creates the evals corresponding to a `PowPolynomial` from an + /// already-existing vector of powers. `t_pow.len() > ell` must be true. + pub(crate) fn evals_with_powers(powers: &[Scalar], ell: usize) -> Vec { + let t_pow = powers[..ell].to_vec(); + EqPolynomial::evals_from_points(&t_pow) + } + + /// Evaluates the `PowPolynomial` at a given point `rx`. + /// + /// This function computes the value of the polynomial at the point + /// specified by `rx`. It expects `rx` to have the same length as the + /// internal vector `t_pow`. + /// + /// Panics if `rx` and `t_pow` have different lengths. + pub fn evaluate(&self, rx: &[Scalar]) -> Scalar { self.eq.evaluate(rx) } + + pub fn coordinates(self) -> Vec { self.eq.r } + + /// Evaluates the `PowPolynomial` at all the `2^|t_pow|` points in its + /// domain. + /// + /// Returns a vector of Scalars, each corresponding to the polynomial + /// evaluation at a specific point. + pub fn evals(&self) -> Vec { self.eq.evals() } +} + +impl From> for EqPolynomial { + fn from(polynomial: PowPolynomial) -> Self { polynomial.eq } +} diff --git a/prover/src/spartan/polys/univariate.rs b/prover/src/spartan/polys/univariate.rs new file mode 100644 index 0000000..7e4d687 --- /dev/null +++ b/prover/src/spartan/polys/univariate.rs @@ -0,0 +1,373 @@ +//! Main components: +//! - `UniPoly`: an univariate dense polynomial in coefficient form (big endian), +//! - `CompressedUniPoly`: a univariate dense polynomial, compressed (omitted linear term), in +//! 
coefficient form (little endian), +use std::{ + cmp::Ordering, + ops::{AddAssign, Index, IndexMut, MulAssign, SubAssign}, +}; + +use ff::PrimeField; +use rayon::prelude::{IntoParallelIterator, IntoParallelRefMutIterator, ParallelIterator}; +use ref_cast::RefCast; +use serde::{Deserialize, Serialize}; + +use crate::{ + provider::util::iterators::DoubleEndedIteratorExt as _, + traits::{Group, TranscriptReprTrait}, +}; + +// ax^2 + bx + c stored as vec![c, b, a] +// ax^3 + bx^2 + cx + d stored as vec![d, c, b, a] +#[derive(Debug, Clone, PartialEq, Eq, RefCast)] +#[repr(transparent)] +pub struct UniPoly { + pub coeffs: Vec, +} + +// ax^2 + bx + c stored as vec![c, a] +// ax^3 + bx^2 + cx + d stored as vec![d, c, a] +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct CompressedUniPoly { + coeffs_except_linear_term: Vec, +} + +impl UniPoly { + pub fn new(coeffs: Vec) -> Self { + let mut res = Self { coeffs }; + res.truncate_leading_zeros(); + res + } + + fn zero() -> Self { Self::new(Vec::new()) } + + /// Divide self by another polynomial, and returns the + /// quotient and remainder. + pub fn divide_with_q_and_r(&self, divisor: &Self) -> Option<(Self, Self)> { + if self.is_zero() { + Some((Self::zero(), Self::zero())) + } else if divisor.is_zero() { + None + } else if self.degree() < divisor.degree() { + Some((Self::zero(), self.clone())) + } else { + // Now we know that self.degree() >= divisor.degree(); + let mut quotient = vec![Scalar::ZERO; self.degree() - divisor.degree() + 1]; + let mut remainder: Self = self.clone(); + // Can unwrap here because we know self is not zero. 
+ let divisor_leading_inv = divisor.leading_coefficient().unwrap().invert().unwrap(); + while !remainder.is_zero() && remainder.degree() >= divisor.degree() { + let cur_q_coeff = *remainder.leading_coefficient().unwrap() * divisor_leading_inv; + let cur_q_degree = remainder.degree() - divisor.degree(); + quotient[cur_q_degree] = cur_q_coeff; + + for (i, div_coeff) in divisor.coeffs.iter().enumerate() { + remainder.coeffs[cur_q_degree + i] -= &(cur_q_coeff * div_coeff); + } + while let Some(true) = remainder.coeffs.last().map(|c| c == &Scalar::ZERO) { + remainder.coeffs.pop(); + } + } + Some((Self::new(quotient), remainder)) + } + } + + /// Divides f(x) by x-a and returns quotient polynomial with no reminder + /// This is a common use case for polynomial divisions in KZG-based PCS. + pub fn divide_minus_u(&self, u: Scalar) -> Self { + if self.is_zero() { + Self::zero() + } else { + // On input f(x) and u compute the witness polynomial used to prove + // that f(u) = v. The main part of this is to compute the + // division (f(x) - f(u)) / (x - u), but we don't use a general + // division algorithm, we make use of the fact that the division + // never has a remainder, and that the denominator is always a linear + // polynomial. The cost is (d-1) mults + (d-1) adds in E::Scalar, where + // d is the degree of f. + // + // We use the fact that if we compute the quotient of f(x)/(x-u), + // there will be a remainder, but it'll be v = f(u). Put another way + // the quotient of f(x)/(x-u) and (f(x) - f(v))/(x-u) is the + // same. One advantage is that computing f(u) could be decoupled + // from kzg_open, it could be done later or separate from computing W. 
+ + let d = self.coeffs.len(); + + // Compute h(x) = f(x)/(x - u) + let mut h = vec![Scalar::ZERO; d]; + for i in (1..d).rev() { + h[i - 1] = self.coeffs[i] + h[i] * u; + } + Self::new(h) + } + } + + fn is_zero(&self) -> bool { + self.coeffs.is_empty() || self.coeffs.iter().all(|c| c == &Scalar::ZERO) + } + + fn truncate_leading_zeros(&mut self) { + while self.coeffs.last().map_or(false, |c| c == &Scalar::ZERO) { + self.coeffs.pop(); + } + } + + fn leading_coefficient(&self) -> Option<&Scalar> { self.coeffs.last() } + + pub fn from_evals(evals: &[Scalar]) -> Self { + // we only support degree-2 or degree-3 univariate polynomials + assert!(evals.len() == 3 || evals.len() == 4); + let two_inv = Scalar::from(2).invert().unwrap(); + let coeffs = if evals.len() == 3 { + // ax^2 + bx + c + let c = evals[0]; + let a = two_inv * (evals[2] - evals[1] - evals[1] + c); + let b = evals[1] - c - a; + vec![c, b, a] + } else { + // ax^3 + bx^2 + cx + d + let six_inv = Scalar::from(6).invert().unwrap(); + + let d = evals[0]; + let a = six_inv + * (evals[3] - evals[2] - evals[2] - evals[2] + evals[1] + evals[1] + evals[1] - evals[0]); + let b = two_inv + * (evals[0] + evals[0] - evals[1] - evals[1] - evals[1] - evals[1] - evals[1] + + evals[2] + + evals[2] + + evals[2] + + evals[2] + - evals[3]); + let c = evals[1] - d - a - b; + vec![d, c, b, a] + }; + + Self { coeffs } + } + + pub fn degree(&self) -> usize { self.coeffs.len() - 1 } + + pub fn eval_at_zero(&self) -> Scalar { self.coeffs[0] } + + pub fn eval_at_one(&self) -> Scalar { + (0..self.coeffs.len()).into_par_iter().map(|i| self.coeffs[i]).sum() + } + + pub fn evaluate(&self, r: &Scalar) -> Scalar { self.coeffs.iter().rlc(r) } + + pub fn compress(&self) -> CompressedUniPoly { + let coeffs_except_linear_term = [&self.coeffs[..1], &self.coeffs[2..]].concat(); + assert_eq!(coeffs_except_linear_term.len() + 1, self.coeffs.len()); + CompressedUniPoly { coeffs_except_linear_term } + } + + #[cfg(test)] + /// Returns a random 
polynomial + pub fn random(num_vars: usize, mut rng: &mut R) -> Self { + Self::new(std::iter::from_fn(|| Some(Scalar::random(&mut rng))).take(num_vars).collect()) + } +} + +impl CompressedUniPoly { + // we require eval(0) + eval(1) = hint, so we can solve for the linear term as: + // linear_term = hint - 2 * constant_term - deg2 term - deg3 term + pub fn decompress(&self, hint: &Scalar) -> UniPoly { + let mut linear_term = + *hint - self.coeffs_except_linear_term[0] - self.coeffs_except_linear_term[0]; + for i in 1..self.coeffs_except_linear_term.len() { + linear_term -= self.coeffs_except_linear_term[i]; + } + + let mut coeffs: Vec = Vec::new(); + coeffs.push(self.coeffs_except_linear_term[0]); + coeffs.push(linear_term); + coeffs.extend(&self.coeffs_except_linear_term[1..]); + assert_eq!(self.coeffs_except_linear_term.len() + 1, coeffs.len()); + UniPoly { coeffs } + } +} + +impl TranscriptReprTrait for UniPoly { + fn to_transcript_bytes(&self) -> Vec { + let coeffs = self.compress().coeffs_except_linear_term; + coeffs.iter().flat_map(|&t| t.to_repr().as_ref().to_vec()).collect::>() + } +} + +impl Index for UniPoly { + type Output = Scalar; + + fn index(&self, index: usize) -> &Self::Output { &self.coeffs[index] } +} + +impl IndexMut for UniPoly { + fn index_mut(&mut self, index: usize) -> &mut Self::Output { &mut self.coeffs[index] } +} + +impl AddAssign<&Scalar> for UniPoly { + fn add_assign(&mut self, rhs: &Scalar) { self.coeffs.par_iter_mut().for_each(|c| *c += rhs); } +} + +impl MulAssign<&Scalar> for UniPoly { + fn mul_assign(&mut self, rhs: &Scalar) { self.coeffs.par_iter_mut().for_each(|c| *c *= rhs); } +} + +impl AddAssign<&Self> for UniPoly { + fn add_assign(&mut self, rhs: &Self) { + let ordering = self.coeffs.len().cmp(&rhs.coeffs.len()); + #[allow(clippy::disallowed_methods)] + for (lhs, rhs) in self.coeffs.iter_mut().zip(&rhs.coeffs) { + *lhs += rhs; + } + if matches!(ordering, Ordering::Less) { + 
self.coeffs.extend(rhs.coeffs[self.coeffs.len()..].iter().cloned()); + } + if matches!(ordering, Ordering::Equal) { + self.truncate_leading_zeros(); + } + } +} + +impl SubAssign<&Self> for UniPoly { + fn sub_assign(&mut self, rhs: &Self) { + let ordering = self.coeffs.len().cmp(&rhs.coeffs.len()); + #[allow(clippy::disallowed_methods)] + for (lhs, rhs) in self.coeffs.iter_mut().zip(&rhs.coeffs) { + *lhs -= rhs; + } + if matches!(ordering, Ordering::Less) { + self.coeffs.extend(rhs.coeffs[self.coeffs.len()..].iter().cloned()); + } + if matches!(ordering, Ordering::Equal) { + self.truncate_leading_zeros(); + } + } +} + +impl AsRef> for UniPoly { + fn as_ref(&self) -> &Vec { &self.coeffs } +} + +#[cfg(test)] +mod tests { + use rand::SeedableRng; + use rand_chacha::ChaCha20Rng; + + use super::*; + use crate::provider::bn256_grumpkin; + + fn test_from_evals_quad_with() { + // polynomial is 2x^2 + 3x + 1 + let e0 = F::ONE; + let e1 = F::from(6); + let e2 = F::from(15); + let evals = vec![e0, e1, e2]; + let poly = UniPoly::from_evals(&evals); + + assert_eq!(poly.eval_at_zero(), e0); + assert_eq!(poly.eval_at_one(), e1); + assert_eq!(poly.coeffs.len(), 3); + assert_eq!(poly.coeffs[0], F::ONE); + assert_eq!(poly.coeffs[1], F::from(3)); + assert_eq!(poly.coeffs[2], F::from(2)); + + let hint = e0 + e1; + let compressed_poly = poly.compress(); + let decompressed_poly = compressed_poly.decompress(&hint); + for i in 0..decompressed_poly.coeffs.len() { + assert_eq!(decompressed_poly.coeffs[i], poly.coeffs[i]); + } + + let e3 = F::from(28); + assert_eq!(poly.evaluate(&F::from(3)), e3); + } + + #[test] + fn test_from_evals_quad() { test_from_evals_quad_with::(); } + + fn test_from_evals_cubic_with() { + // polynomial is x^3 + 2x^2 + 3x + 1 + let e0 = F::ONE; + let e1 = F::from(7); + let e2 = F::from(23); + let e3 = F::from(55); + let evals = vec![e0, e1, e2, e3]; + let poly = UniPoly::from_evals(&evals); + + assert_eq!(poly.eval_at_zero(), e0); + assert_eq!(poly.eval_at_one(), e1); 
+ assert_eq!(poly.coeffs.len(), 4); + + assert_eq!(poly.coeffs[1], F::from(3)); + assert_eq!(poly.coeffs[2], F::from(2)); + assert_eq!(poly.coeffs[3], F::from(1)); + + let hint = e0 + e1; + let compressed_poly = poly.compress(); + let decompressed_poly = compressed_poly.decompress(&hint); + for i in 0..decompressed_poly.coeffs.len() { + assert_eq!(decompressed_poly.coeffs[i], poly.coeffs[i]); + } + + let e4 = F::from(109); + assert_eq!(poly.evaluate(&F::from(4)), e4); + } + + #[test] + fn test_from_evals_cubic() { test_from_evals_cubic_with::(); } + + /// Perform a naive n^2 multiplication of `self` by `other`. + pub fn naive_mul(ours: &UniPoly, other: &UniPoly) -> UniPoly { + if ours.is_zero() || other.is_zero() { + UniPoly::zero() + } else { + let mut result = vec![F::ZERO; ours.degree() + other.degree() + 1]; + for (i, self_coeff) in ours.coeffs.iter().enumerate() { + for (j, other_coeff) in other.coeffs.iter().enumerate() { + result[i + j] += &(*self_coeff * other_coeff); + } + } + UniPoly::new(result) + } + } + + fn divide_polynomials_random() { + let rng = &mut ChaCha20Rng::from_seed([0u8; 32]); + + for a_degree in 0..50 { + for b_degree in 0..50 { + let dividend = UniPoly::::random(a_degree, rng); + let divisor = UniPoly::::random(b_degree, rng); + + if let Some((quotient, remainder)) = UniPoly::divide_with_q_and_r(&dividend, &divisor) { + let mut prod = naive_mul(&divisor, &quotient); + prod += &remainder; + assert_eq!(dividend, prod) + } + } + } + } + + #[test] + fn test_divide_minus_u() { + fn test_inner() { + let rng = &mut ChaCha20Rng::from_seed([0u8; 32]); + let dividend = UniPoly::::random(50, rng); + let u = Fr::random(rng); + let divisor = UniPoly::new(vec![-u, Fr::ONE]); + + let (q1, _) = dividend.divide_with_q_and_r(&divisor).unwrap(); + let q2 = dividend.divide_minus_u(u); + + assert_eq!(q1, q2); + } + + test_inner::(); + } + + #[test] + fn test_divide_polynomials_random() { + divide_polynomials_random::(); + } +} diff --git a/prover/src/spartan/ppsnark.rs
b/prover/src/spartan/ppsnark.rs new file mode 100644 index 0000000..485011d --- /dev/null +++ b/prover/src/spartan/ppsnark.rs @@ -0,0 +1,1036 @@ +//! This module implements `RelaxedR1CSSNARK` traits using a spark-based +//! approach to prove evaluations of sparse multilinear polynomials involved in +//! Spartan's sum-check protocol, thereby providing a preprocessing SNARK +//! The verifier in this preprocessing SNARK maintains a commitment to R1CS +//! matrices. This is beneficial when using a polynomial commitment scheme in +//! which the verifier's costs is succinct. This code includes experimental +//! optimizations to reduce runtimes and proof sizes. +use core::cmp::max; +use std::sync::Arc; + +use ff::Field; +use itertools::Itertools as _; +use once_cell::sync::OnceCell; +use rayon::prelude::*; +use serde::{Deserialize, Serialize}; + +use super::polys::{masked_eq::MaskedEqPolynomial, multilinear::SparsePolynomial}; +use crate::{ + digest::{DigestComputer, SimpleDigestible}, + errors::NovaError, + r1cs::{R1CSShape, RelaxedR1CSInstance, RelaxedR1CSWitness}, + spartan::{ + math::Math, + polys::{ + eq::EqPolynomial, + identity::IdentityPolynomial, + multilinear::MultilinearPolynomial, + power::PowPolynomial, + univariate::{CompressedUniPoly, UniPoly}, + }, + powers, + sumcheck::{ + engine::{ + InnerSumcheckInstance, MemorySumcheckInstance, OuterSumcheckInstance, SumcheckEngine, + WitnessBoundSumcheck, + }, + SumcheckProof, + }, + PolyEvalInstance, PolyEvalWitness, + }, + traits::{ + commitment::{CommitmentEngineTrait, CommitmentTrait, Len}, + evaluation::EvaluationEngineTrait, + snark::{DigestHelperTrait, RelaxedR1CSSNARKTrait}, + Engine, TranscriptEngineTrait, TranscriptReprTrait, + }, + zip_with, Commitment, CommitmentKey, CompressedCommitment, +}; + +fn padded(v: &[E::Scalar], n: usize, e: &E::Scalar) -> Vec { + let mut v_padded = vec![*e; n]; + v_padded[..v.len()].copy_from_slice(v); + v_padded +} + +/// A type that holds `R1CSShape` in a form amenable to 
memory checking +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct R1CSShapeSparkRepr { + pub(in crate::spartan) N: usize, // size of the vectors + + // dense representation + pub(in crate::spartan) row: Vec, + pub(in crate::spartan) col: Vec, + pub(in crate::spartan) val_A: Vec, + pub(in crate::spartan) val_B: Vec, + pub(in crate::spartan) val_C: Vec, + + // timestamp polynomials + pub(in crate::spartan) ts_row: Vec, + pub(in crate::spartan) ts_col: Vec, +} + +/// A type that holds a commitment to a sparse polynomial +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct R1CSShapeSparkCommitment { + pub(in crate::spartan) N: usize, // size of each vector + + // commitments to the dense representation + pub(in crate::spartan) comm_row: Commitment, + pub(in crate::spartan) comm_col: Commitment, + pub(in crate::spartan) comm_val_A: Commitment, + pub(in crate::spartan) comm_val_B: Commitment, + pub(in crate::spartan) comm_val_C: Commitment, + + // commitments to the timestamp polynomials + pub(in crate::spartan) comm_ts_row: Commitment, + pub(in crate::spartan) comm_ts_col: Commitment, +} + +impl TranscriptReprTrait for R1CSShapeSparkCommitment { + fn to_transcript_bytes(&self) -> Vec { + [ + self.comm_row, + self.comm_col, + self.comm_val_A, + self.comm_val_B, + self.comm_val_C, + self.comm_ts_row, + self.comm_ts_col, + ] + .as_slice() + .to_transcript_bytes() + } +} + +impl R1CSShapeSparkRepr { + /// represents `R1CSShape` in a Spark-friendly format amenable to memory + /// checking + pub fn new(S: &R1CSShape) -> Self { + let N = { + let total_nz = S.A.len() + S.B.len() + S.C.len(); + max(total_nz, max(2 * S.num_vars, S.num_cons)).next_power_of_two() + }; + + // we make col lookup into the last entry of z, so we commit to zeros + let (mut row, mut col, mut val_A, mut val_B, mut val_C) = ( + vec![0; N], + vec![N - 1; N], + vec![E::Scalar::ZERO; N], + vec![E::Scalar::ZERO; N], + vec![E::Scalar::ZERO; N], + ); 
+ + for (i, entry) in S.A.iter().enumerate() { + let (r, c, v) = entry; + row[i] = r; + col[i] = c; + val_A[i] = v; + } + + let b_offset = S.A.len(); + for (i, entry) in S.B.iter().enumerate() { + let (r, c, v) = entry; + row[b_offset + i] = r; + col[b_offset + i] = c; + val_B[b_offset + i] = v; + } + + let c_offset = S.A.len() + S.B.len(); + for (i, entry) in S.C.iter().enumerate() { + let (r, c, v) = entry; + row[c_offset + i] = r; + col[c_offset + i] = c; + val_C[c_offset + i] = v; + } + + // timestamp calculation routine + let timestamp_calc = |num_ops: usize, num_cells: usize, addr_trace: &[usize]| -> Vec { + let mut ts = vec![0usize; num_cells]; + + assert!(num_ops >= addr_trace.len()); + for addr in addr_trace { + assert!(*addr < num_cells); + ts[*addr] += 1; + } + ts + }; + + // timestamp polynomials for row + let (ts_row, ts_col) = + rayon::join(|| timestamp_calc(N, N, &row), || timestamp_calc(N, N, &col)); + + // a routine to turn a vector of usize into a vector scalars + let to_vec_scalar = |v: &[usize]| -> Vec { + v.iter().map(|x| E::Scalar::from(*x as u64)).collect::>() + }; + + Self { + N, + + // dense representation + row: to_vec_scalar(&row), + col: to_vec_scalar(&col), + val_A, + val_B, + val_C, + + // timestamp polynomials + ts_row: to_vec_scalar(&ts_row), + ts_col: to_vec_scalar(&ts_col), + } + } + + pub(in crate::spartan) fn commit(&self, ck: &CommitmentKey) -> R1CSShapeSparkCommitment { + let comm_vec: Vec> = + [&self.row, &self.col, &self.val_A, &self.val_B, &self.val_C, &self.ts_row, &self.ts_col] + .par_iter() + .map(|v| E::CE::commit(ck, v)) + .collect(); + + R1CSShapeSparkCommitment { + N: self.row.len(), + comm_row: comm_vec[0], + comm_col: comm_vec[1], + comm_val_A: comm_vec[2], + comm_val_B: comm_vec[3], + comm_val_C: comm_vec[4], + comm_ts_row: comm_vec[5], + comm_ts_col: comm_vec[6], + } + } + + // computes evaluation oracles + fn evaluation_oracles( + &self, + S: &R1CSShape, + r_x: &E::Scalar, + z: &[E::Scalar], + ) -> (Vec, Vec, 
Vec, Vec) { + let mem_row = PowPolynomial::new(r_x, self.N.log_2()).evals(); + let mem_col = padded::(z, self.N, &E::Scalar::ZERO); + + let (L_row, L_col) = { + let mut L_row = vec![mem_row[0]; self.N]; // we place mem_row[0] since resized row is appended with 0s + let mut L_col = vec![mem_col[self.N - 1]; self.N]; // we place mem_col[N-1] since resized col is appended with N-1 + + for (i, (val_r, val_c)) in S + .A + .iter() + .chain(S.B.iter()) + .chain(S.C.iter()) + .map(|(r, c, _)| (mem_row[r], mem_col[c])) + .enumerate() + { + L_row[i] = val_r; + L_col[i] = val_c; + } + (L_row, L_col) + }; + + (mem_row, mem_col, L_row, L_col) + } +} + +/// A type that represents the prover's key +#[derive(Debug, Clone)] +pub struct ProverKey> { + pk_ee: EE::ProverKey, + S_repr: R1CSShapeSparkRepr, + S_comm: R1CSShapeSparkCommitment, + vk_digest: E::Scalar, // digest of verifier's key +} + +/// A type that represents the verifier's key +#[derive(Debug, Clone, Serialize)] +#[serde(bound = "EE::VerifierKey: Serialize")] +pub struct VerifierKey> { + num_cons: usize, + num_vars: usize, + vk_ee: EE::VerifierKey, + S_comm: R1CSShapeSparkCommitment, + #[serde(skip, default = "OnceCell::new")] + digest: OnceCell, +} + +impl> SimpleDigestible for VerifierKey where EE::VerifierKey: Serialize {} + +/// A succinct proof of knowledge of a witness to a relaxed R1CS instance +/// The proof is produced using Spartan's combination of the sum-check and +/// the commitment to a vector viewed as a polynomial commitment +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct RelaxedR1CSSNARK> { + // commitment to oracles: the first three are for Az, Bz, Cz, + // and the last two are for memory reads + comm_Az: CompressedCommitment, + comm_Bz: CompressedCommitment, + comm_Cz: CompressedCommitment, + comm_L_row: CompressedCommitment, + comm_L_col: CompressedCommitment, + + // commitments to aid the memory checks + comm_t_plus_r_inv_row: CompressedCommitment, + 
comm_w_plus_r_inv_row: CompressedCommitment, + comm_t_plus_r_inv_col: CompressedCommitment, + comm_w_plus_r_inv_col: CompressedCommitment, + + // claims about Az, Bz, and Cz polynomials + eval_Az_at_tau: E::Scalar, + eval_Bz_at_tau: E::Scalar, + eval_Cz_at_tau: E::Scalar, + + // sum-check + sc: SumcheckProof, + + // claims from the end of sum-check + eval_Az: E::Scalar, + eval_Bz: E::Scalar, + eval_Cz: E::Scalar, + eval_E: E::Scalar, + eval_L_row: E::Scalar, + eval_L_col: E::Scalar, + eval_val_A: E::Scalar, + eval_val_B: E::Scalar, + eval_val_C: E::Scalar, + + eval_W: E::Scalar, + + eval_t_plus_r_inv_row: E::Scalar, + eval_row: E::Scalar, // address + eval_w_plus_r_inv_row: E::Scalar, + eval_ts_row: E::Scalar, + + eval_t_plus_r_inv_col: E::Scalar, + eval_col: E::Scalar, // address + eval_w_plus_r_inv_col: E::Scalar, + eval_ts_col: E::Scalar, + + // a PCS evaluation argument + eval_arg: EE::EvaluationArgument, +} + +impl> RelaxedR1CSSNARK { + fn prove_helper( + mem: &mut T1, + outer: &mut T2, + inner: &mut T3, + witness: &mut T4, + transcript: &mut E::TE, + ) -> Result< + ( + SumcheckProof, + Vec, + Vec>, + Vec>, + Vec>, + Vec>, + ), + NovaError, + > + where + T1: SumcheckEngine, + T2: SumcheckEngine, + T3: SumcheckEngine, + T4: SumcheckEngine, + { + // sanity checks + assert_eq!(mem.size(), outer.size()); + assert_eq!(mem.size(), inner.size()); + assert_eq!(mem.size(), witness.size()); + assert_eq!(mem.degree(), outer.degree()); + assert_eq!(mem.degree(), inner.degree()); + assert_eq!(mem.degree(), witness.degree()); + + // these claims are already added to the transcript, so we do not need to add + let claims = mem + .initial_claims() + .into_iter() + .chain(outer.initial_claims()) + .chain(inner.initial_claims()) + .chain(witness.initial_claims()) + .collect::>(); + + let s = transcript.squeeze(b"r")?; + let coeffs = powers(&s, claims.len()); + + // compute the joint claim + let claim = zip_with!(iter, (claims, coeffs), |c_1, c_2| *c_1 * c_2).sum(); + + let mut e 
= claim; + let mut r: Vec = Vec::new(); + let mut cubic_polys: Vec> = Vec::new(); + let num_rounds = mem.size().log_2(); + for _ in 0..num_rounds { + let ((evals_mem, evals_outer), (evals_inner, evals_witness)) = rayon::join( + || rayon::join(|| mem.evaluation_points(), || outer.evaluation_points()), + || rayon::join(|| inner.evaluation_points(), || witness.evaluation_points()), + ); + + let evals: Vec> = evals_mem + .into_iter() + .chain(evals_outer.into_iter()) + .chain(evals_inner.into_iter()) + .chain(evals_witness.into_iter()) + .collect::>>(); + assert_eq!(evals.len(), claims.len()); + + let evals_combined_0 = (0..evals.len()).map(|i| evals[i][0] * coeffs[i]).sum(); + let evals_combined_2 = (0..evals.len()).map(|i| evals[i][1] * coeffs[i]).sum(); + let evals_combined_3 = (0..evals.len()).map(|i| evals[i][2] * coeffs[i]).sum(); + + let evals = vec![evals_combined_0, e - evals_combined_0, evals_combined_2, evals_combined_3]; + let poly = UniPoly::from_evals(&evals); + + // append the prover's message to the transcript + transcript.absorb(b"p", &poly); + + // derive the verifier's challenge for the next round + let r_i = transcript.squeeze(b"c")?; + r.push(r_i); + + let _ = rayon::join( + || rayon::join(|| mem.bound(&r_i), || outer.bound(&r_i)), + || rayon::join(|| inner.bound(&r_i), || witness.bound(&r_i)), + ); + + e = poly.evaluate(&r_i); + cubic_polys.push(poly.compress()); + } + + let mem_claims = mem.final_claims(); + let outer_claims = outer.final_claims(); + let inner_claims = inner.final_claims(); + let witness_claims = witness.final_claims(); + + Ok((SumcheckProof::new(cubic_polys), r, mem_claims, outer_claims, inner_claims, witness_claims)) + } +} + +impl> VerifierKey { + fn new( + num_cons: usize, + num_vars: usize, + S_comm: R1CSShapeSparkCommitment, + vk_ee: EE::VerifierKey, + ) -> Self { + Self { num_cons, num_vars, S_comm, vk_ee, digest: Default::default() } + } +} +impl> DigestHelperTrait for VerifierKey { + /// Returns the digest of the 
verifier's key + fn digest(&self) -> E::Scalar { + self + .digest + .get_or_try_init(|| { + let dc = DigestComputer::new(self); + dc.digest() + }) + .cloned() + .expect("Failure to retrieve digest!") + } +} + +impl> RelaxedR1CSSNARKTrait for RelaxedR1CSSNARK { + type ProverKey = ProverKey; + type VerifierKey = VerifierKey; + + fn ck_floor() -> Box Fn(&'a R1CSShape) -> usize> { + Box::new(|shape: &R1CSShape| -> usize { + // the commitment key should be large enough to commit to the R1CS matrices + shape.A.len() + shape.B.len() + shape.C.len() + }) + } + + fn initialize_pk( + _ck: Arc>, + _vk_digest: ::Scalar, + ) -> Result { + todo!("not implemented for nova snarks"); + } + + fn setup( + ck: Arc>, + S: &R1CSShape, + ) -> Result<(Self::ProverKey, Self::VerifierKey), NovaError> { + // check the provided commitment key meets minimal requirements + if ck.length() < Self::ck_floor()(S) { + return Err(NovaError::InvalidCommitmentKeyLength); + } + let (pk_ee, vk_ee) = EE::setup(ck.clone()); + + // pad the R1CS matrices + let S = S.pad(); + + let S_repr = R1CSShapeSparkRepr::new(&S); + let S_comm = S_repr.commit(&*ck); + + let vk = VerifierKey::new(S.num_cons, S.num_vars, S_comm.clone(), vk_ee); + + let pk = ProverKey { pk_ee, S_repr, S_comm, vk_digest: vk.digest() }; + + Ok((pk, vk)) + } + + /// produces a succinct proof of satisfiability of a `RelaxedR1CS` instance + #[tracing::instrument(skip_all, name = "PPSNARK::prove")] + fn prove( + ck: &CommitmentKey, + pk: &Self::ProverKey, + S: &R1CSShape, + U: &RelaxedR1CSInstance, + W: &RelaxedR1CSWitness, + ) -> Result { + // pad the R1CSShape + let S = S.pad(); + // sanity check that R1CSShape has all required size characteristics + assert!(S.is_regular_shape()); + + let W = W.pad(&S); // pad the witness + let mut transcript = E::TE::new(b"RelaxedR1CSSNARK"); + + // append the verifier key (which includes commitment to R1CS matrices) and the + // RelaxedR1CSInstance to the transcript + transcript.absorb(b"vk", &pk.vk_digest); 
+ transcript.absorb(b"U", U); + + // compute the full satisfying assignment by concatenating W.W, U.u, and U.X + let z = [W.W.clone(), vec![U.u], U.X.clone()].concat(); + + // compute Az, Bz, Cz + let (mut Az, mut Bz, mut Cz) = S.multiply_vec(&z)?; + + // commit to Az, Bz, Cz + let (comm_Az, (comm_Bz, comm_Cz)) = rayon::join( + || E::CE::commit(ck, &Az), + || rayon::join(|| E::CE::commit(ck, &Bz), || E::CE::commit(ck, &Cz)), + ); + + transcript.absorb(b"c", &[comm_Az, comm_Bz, comm_Cz].as_slice()); + + // number of rounds of sum-check + let num_rounds_sc = pk.S_repr.N.log_2(); + let tau = transcript.squeeze(b"t")?; + let tau_coords = PowPolynomial::new(&tau, num_rounds_sc).coordinates(); + + // (1) send commitments to Az, Bz, and Cz along with their evaluations at tau + let (Az, Bz, Cz, W, E) = { + Az.resize(pk.S_repr.N, E::Scalar::ZERO); + Bz.resize(pk.S_repr.N, E::Scalar::ZERO); + Cz.resize(pk.S_repr.N, E::Scalar::ZERO); + let E = padded::(&W.E, pk.S_repr.N, &E::Scalar::ZERO); + let W = padded::(&W.W, pk.S_repr.N, &E::Scalar::ZERO); + + (Az, Bz, Cz, W, E) + }; + let chis_taus = EqPolynomial::evals_from_points(&tau_coords); + let (eval_Az_at_tau, eval_Bz_at_tau, eval_Cz_at_tau) = { + let evals_at_tau = [&Az, &Bz, &Cz] + .into_par_iter() + .map(|p| MultilinearPolynomial::evaluate_with_chis(p, &chis_taus)) + .collect::>(); + (evals_at_tau[0], evals_at_tau[1], evals_at_tau[2]) + }; + + // (2) send commitments to the following two oracles + // L_row(i) = eq(tau, row(i)) for all i + // L_col(i) = z(col(i)) for all i + let (mem_row, mem_col, L_row, L_col) = pk.S_repr.evaluation_oracles(&S, &tau, &z); + let (comm_L_row, comm_L_col) = + rayon::join(|| E::CE::commit(ck, &L_row), || E::CE::commit(ck, &L_col)); + + // since all the three polynomials are opened at tau, + // we can combine them into a single polynomial opened at tau + let eval_vec = vec![eval_Az_at_tau, eval_Bz_at_tau, eval_Cz_at_tau]; + + // absorb the claimed evaluations into the transcript + 
transcript.absorb(b"e", &eval_vec.as_slice()); + // absorb commitments to L_row and L_col in the transcript + transcript.absorb(b"e", &vec![comm_L_row, comm_L_col].as_slice()); + let comm_vec = vec![comm_Az, comm_Bz, comm_Cz]; + let poly_vec = vec![&Az, &Bz, &Cz]; + let c = transcript.squeeze(b"c")?; + let w: PolyEvalWitness = PolyEvalWitness::batch(&poly_vec, &c); + let u: PolyEvalInstance = + PolyEvalInstance::batch(&comm_vec, tau_coords.clone(), &eval_vec, &c); + + // we now need to prove four claims + // (1) 0 = \sum_x poly_tau(x) * (poly_Az(x) * poly_Bz(x) - poly_uCz_E(x)), and + // eval_Az_at_tau + r * eval_Bz_at_tau + r^2 * eval_Cz_at_tau = + // (Az+r*Bz+r^2*Cz)(tau) (2) eval_Az_at_tau + c * eval_Bz_at_tau + c^2 * + // eval_Cz_at_tau = \sum_y L_row(y) * (val_A(y) + c * val_B(y) + c^2 * val_C(y)) + // * L_col(y) (3) L_row(i) = eq(tau, row(i)) and L_col(i) = z(col(i)) + // (4) Check that the witness polynomial W is well-formed e.g., it is padded + // with only zeros + let gamma = transcript.squeeze(b"g")?; + let r = transcript.squeeze(b"r")?; + + let ((mut outer_sc_inst, mut inner_sc_inst), mem_res) = rayon::join( + || { + // a sum-check instance to prove the first claim + let outer_sc_inst = OuterSumcheckInstance::new( + PowPolynomial::new(&tau, num_rounds_sc).evals(), + Az.clone(), + Bz.clone(), + (0..Cz.len()).map(|i| U.u * Cz[i] + E[i]).collect::>(), + w.p.clone(), // Mz = Az + r * Bz + r^2 * Cz + &u.e, // eval_Az_at_tau + r * eval_Az_at_tau + r^2 * eval_Cz_at_tau + ); + + // a sum-check instance to prove the second claim + let val = zip_with!( + par_iter, + (pk.S_repr.val_A, pk.S_repr.val_B, pk.S_repr.val_C), + |v_a, v_b, v_c| *v_a + c * *v_b + c * c * *v_c + ) + .collect::>(); + let inner_sc_inst = InnerSumcheckInstance { + claim: eval_Az_at_tau + c * eval_Bz_at_tau + c * c * eval_Cz_at_tau, + poly_L_row: MultilinearPolynomial::new(L_row.clone()), + poly_L_col: MultilinearPolynomial::new(L_col.clone()), + poly_val: MultilinearPolynomial::new(val), + }; + 
+ (outer_sc_inst, inner_sc_inst) + }, + || { + // a third sum-check instance to prove the read-only memory claim + // we now need to prove that L_row and L_col are well-formed + + // hash the tuples of (addr,val) memory contents and read responses into a + // single field element using `hash_func` + + let (comm_mem_oracles, mem_oracles, mem_aux) = + MemorySumcheckInstance::::compute_oracles( + ck, + &r, + &gamma, + &mem_row, + &pk.S_repr.row, + &L_row, + &pk.S_repr.ts_row, + &mem_col, + &pk.S_repr.col, + &L_col, + &pk.S_repr.ts_col, + )?; + // absorb the commitments + transcript.absorb(b"l", &comm_mem_oracles.as_slice()); + + let rho = transcript.squeeze(b"r")?; + let poly_eq = MultilinearPolynomial::new(PowPolynomial::new(&rho, num_rounds_sc).evals()); + + Ok::<_, NovaError>(( + MemorySumcheckInstance::new( + mem_oracles.clone(), + mem_aux, + poly_eq.Z, + pk.S_repr.ts_row.clone(), + pk.S_repr.ts_col.clone(), + ), + comm_mem_oracles, + mem_oracles, + )) + }, + ); + + let (mut mem_sc_inst, comm_mem_oracles, mem_oracles) = mem_res?; + + let mut witness_sc_inst = WitnessBoundSumcheck::new(tau, W.clone(), S.num_vars); + + let (sc, rand_sc, claims_mem, claims_outer, claims_inner, claims_witness) = Self::prove_helper( + &mut mem_sc_inst, + &mut outer_sc_inst, + &mut inner_sc_inst, + &mut witness_sc_inst, + &mut transcript, + )?; + + // claims from the end of the sum-check + let eval_Az = claims_outer[0][0]; + let eval_Bz = claims_outer[0][1]; + + let eval_L_row = claims_inner[0][0]; + let eval_L_col = claims_inner[0][1]; + + let eval_t_plus_r_inv_row = claims_mem[0][0]; + let eval_w_plus_r_inv_row = claims_mem[0][1]; + let eval_ts_row = claims_mem[0][2]; + + let eval_t_plus_r_inv_col = claims_mem[1][0]; + let eval_w_plus_r_inv_col = claims_mem[1][1]; + let eval_ts_col = claims_mem[1][2]; + let eval_W = claims_witness[0][0]; + + // compute the remaining claims that did not come for free from the sum-check + // prover + let (eval_Cz, eval_E, eval_val_A, eval_val_B, 
eval_val_C, eval_row, eval_col) = { + let e = [ + &Cz, + &E, + &pk.S_repr.val_A, + &pk.S_repr.val_B, + &pk.S_repr.val_C, + &pk.S_repr.row, + &pk.S_repr.col, + ] + .into_par_iter() + .map(|p| MultilinearPolynomial::evaluate_with(p, &rand_sc)) + .collect::>(); + (e[0], e[1], e[2], e[3], e[4], e[5], e[6]) + }; + + // all the evaluations are at rand_sc, we can fold them into one claim + let eval_vec = vec![ + eval_W, + eval_Az, + eval_Bz, + eval_Cz, + eval_E, + eval_L_row, + eval_L_col, + eval_val_A, + eval_val_B, + eval_val_C, + eval_t_plus_r_inv_row, + eval_row, + eval_w_plus_r_inv_row, + eval_ts_row, + eval_t_plus_r_inv_col, + eval_col, + eval_w_plus_r_inv_col, + eval_ts_col, + ]; + + let comm_vec = [ + U.comm_W, + comm_Az, + comm_Bz, + comm_Cz, + U.comm_E, + comm_L_row, + comm_L_col, + pk.S_comm.comm_val_A, + pk.S_comm.comm_val_B, + pk.S_comm.comm_val_C, + comm_mem_oracles[0], + pk.S_comm.comm_row, + comm_mem_oracles[1], + pk.S_comm.comm_ts_row, + comm_mem_oracles[2], + pk.S_comm.comm_col, + comm_mem_oracles[3], + pk.S_comm.comm_ts_col, + ]; + let poly_vec = [ + &W, + &Az, + &Bz, + &Cz, + &E, + &L_row, + &L_col, + &pk.S_repr.val_A, + &pk.S_repr.val_B, + &pk.S_repr.val_C, + mem_oracles[0].as_ref(), + &pk.S_repr.row, + mem_oracles[1].as_ref(), + &pk.S_repr.ts_row, + mem_oracles[2].as_ref(), + &pk.S_repr.col, + mem_oracles[3].as_ref(), + &pk.S_repr.ts_col, + ]; + transcript.absorb(b"e", &eval_vec.as_slice()); // comm_vec is already in the transcript + let c = transcript.squeeze(b"c")?; + let w: PolyEvalWitness = PolyEvalWitness::batch(&poly_vec, &c); + let u: PolyEvalInstance = PolyEvalInstance::batch(&comm_vec, rand_sc.clone(), &eval_vec, &c); + + let eval_arg = EE::prove(ck, &pk.pk_ee, &mut transcript, &u.c, &w.p, &rand_sc, &u.e)?; + + Ok(Self { + comm_Az: comm_Az.compress(), + comm_Bz: comm_Bz.compress(), + comm_Cz: comm_Cz.compress(), + comm_L_row: comm_L_row.compress(), + comm_L_col: comm_L_col.compress(), + + comm_t_plus_r_inv_row: 
comm_mem_oracles[0].compress(), + comm_w_plus_r_inv_row: comm_mem_oracles[1].compress(), + comm_t_plus_r_inv_col: comm_mem_oracles[2].compress(), + comm_w_plus_r_inv_col: comm_mem_oracles[3].compress(), + + eval_Az_at_tau, + eval_Bz_at_tau, + eval_Cz_at_tau, + + sc, + + eval_Az, + eval_Bz, + eval_Cz, + eval_E, + eval_L_row, + eval_L_col, + eval_val_A, + eval_val_B, + eval_val_C, + + eval_W, + + eval_t_plus_r_inv_row, + eval_row, + eval_w_plus_r_inv_row, + eval_ts_row, + + eval_col, + eval_t_plus_r_inv_col, + eval_w_plus_r_inv_col, + eval_ts_col, + + eval_arg, + }) + } + + /// verifies a proof of satisfiability of a `RelaxedR1CS` instance + fn verify(&self, vk: &Self::VerifierKey, U: &RelaxedR1CSInstance) -> Result<(), NovaError> { + let mut transcript = E::TE::new(b"RelaxedR1CSSNARK"); + + // append the verifier key (including commitment to R1CS matrices) and the + // RelaxedR1CSInstance to the transcript + transcript.absorb(b"vk", &vk.digest()); + transcript.absorb(b"U", U); + + let comm_Az = Commitment::::decompress(&self.comm_Az)?; + let comm_Bz = Commitment::::decompress(&self.comm_Bz)?; + let comm_Cz = Commitment::::decompress(&self.comm_Cz)?; + let comm_L_row = Commitment::::decompress(&self.comm_L_row)?; + let comm_L_col = Commitment::::decompress(&self.comm_L_col)?; + let comm_t_plus_r_inv_row = Commitment::::decompress(&self.comm_t_plus_r_inv_row)?; + let comm_w_plus_r_inv_row = Commitment::::decompress(&self.comm_w_plus_r_inv_row)?; + let comm_t_plus_r_inv_col = Commitment::::decompress(&self.comm_t_plus_r_inv_col)?; + let comm_w_plus_r_inv_col = Commitment::::decompress(&self.comm_w_plus_r_inv_col)?; + + transcript.absorb(b"c", &[comm_Az, comm_Bz, comm_Cz].as_slice()); + + let num_rounds_sc = vk.S_comm.N.log_2(); + let tau = transcript.squeeze(b"t")?; + let tau_coords = PowPolynomial::new(&tau, num_rounds_sc).coordinates(); + + // add claims about Az, Bz, and Cz to be checked later + // since all the three polynomials are opened at tau, + // we can 
combine them into a single polynomial opened at tau + let eval_vec = vec![self.eval_Az_at_tau, self.eval_Bz_at_tau, self.eval_Cz_at_tau]; + + transcript.absorb(b"e", &eval_vec.as_slice()); + + transcript.absorb(b"e", &vec![comm_L_row, comm_L_col].as_slice()); + let comm_vec = vec![comm_Az, comm_Bz, comm_Cz]; + let c = transcript.squeeze(b"c")?; + let u: PolyEvalInstance = + PolyEvalInstance::batch(&comm_vec, tau_coords.clone(), &eval_vec, &c); + let claim = u.e; + + let gamma = transcript.squeeze(b"g")?; + + let r = transcript.squeeze(b"r")?; + + transcript.absorb( + b"l", + &vec![ + comm_t_plus_r_inv_row, + comm_w_plus_r_inv_row, + comm_t_plus_r_inv_col, + comm_w_plus_r_inv_col, + ] + .as_slice(), + ); + + let rho = transcript.squeeze(b"r")?; + + let num_claims = 10; + let s = transcript.squeeze(b"r")?; + let coeffs = powers(&s, num_claims); + let claim = (coeffs[7] + coeffs[8]) * claim; // rest are zeros + + // verify sc + let (claim_sc_final, rand_sc) = self.sc.verify(claim, num_rounds_sc, 3, &mut transcript)?; + + // verify claim_sc_final + let claim_sc_final_expected = { + let rand_eq_bound_rand_sc = PowPolynomial::new(&rho, num_rounds_sc).evaluate(&rand_sc); + let eq_tau: EqPolynomial<_> = PowPolynomial::new(&tau, num_rounds_sc).into(); + + let taus_bound_rand_sc = eq_tau.evaluate(&rand_sc); + let taus_masked_bound_rand_sc = + MaskedEqPolynomial::new(&eq_tau, vk.num_vars.log_2()).evaluate(&rand_sc); + + let eval_t_plus_r_row = { + let eval_addr_row = IdentityPolynomial::new(num_rounds_sc).evaluate(&rand_sc); + let eval_val_row = taus_bound_rand_sc; + let eval_t = eval_addr_row + gamma * eval_val_row; + eval_t + r + }; + + let eval_w_plus_r_row = { + let eval_addr_row = self.eval_row; + let eval_val_row = self.eval_L_row; + let eval_w = eval_addr_row + gamma * eval_val_row; + eval_w + r + }; + + let eval_t_plus_r_col = { + let eval_addr_col = IdentityPolynomial::new(num_rounds_sc).evaluate(&rand_sc); + + // memory contents is z, so we compute eval_Z from 
eval_W and eval_X + let eval_val_col = { + // rand_sc was padded, so we now remove the padding + let (factor, rand_sc_unpad) = { + let l = vk.S_comm.N.log_2() - (2 * vk.num_vars).log_2(); + + let mut factor = E::Scalar::ONE; + for r_p in rand_sc.iter().take(l) { + factor *= E::Scalar::ONE - r_p + } + + let rand_sc_unpad = rand_sc[l..].to_vec(); + + (factor, rand_sc_unpad) + }; + + let eval_X = { + // public IO is (u, X) + let X = vec![U.u].into_iter().chain(U.X.iter().cloned()).collect::>(); + + // evaluate the sparse polynomial at rand_sc_unpad[1..] + let poly_X = SparsePolynomial::new(rand_sc_unpad.len() - 1, X); + poly_X.evaluate(&rand_sc_unpad[1..]) + }; + + self.eval_W + factor * rand_sc_unpad[0] * eval_X + }; + let eval_t = eval_addr_col + gamma * eval_val_col; + eval_t + r + }; + + let eval_w_plus_r_col = { + let eval_addr_col = self.eval_col; + let eval_val_col = self.eval_L_col; + let eval_w = eval_addr_col + gamma * eval_val_col; + eval_w + r + }; + + let claim_mem_final_expected: E::Scalar = coeffs[0] + * (self.eval_t_plus_r_inv_row - self.eval_w_plus_r_inv_row) + + coeffs[1] * (self.eval_t_plus_r_inv_col - self.eval_w_plus_r_inv_col) + + coeffs[2] + * (rand_eq_bound_rand_sc + * (self.eval_t_plus_r_inv_row * eval_t_plus_r_row - self.eval_ts_row)) + + coeffs[3] + * (rand_eq_bound_rand_sc + * (self.eval_w_plus_r_inv_row * eval_w_plus_r_row - E::Scalar::ONE)) + + coeffs[4] + * (rand_eq_bound_rand_sc + * (self.eval_t_plus_r_inv_col * eval_t_plus_r_col - self.eval_ts_col)) + + coeffs[5] + * (rand_eq_bound_rand_sc + * (self.eval_w_plus_r_inv_col * eval_w_plus_r_col - E::Scalar::ONE)); + + let claim_outer_final_expected = coeffs[6] + * taus_bound_rand_sc + * (self.eval_Az * self.eval_Bz - U.u * self.eval_Cz - self.eval_E) + + coeffs[7] * taus_bound_rand_sc * (self.eval_Az + c * self.eval_Bz + c * c * self.eval_Cz); + let claim_inner_final_expected = coeffs[8] + * self.eval_L_row + * self.eval_L_col + * (self.eval_val_A + c * self.eval_val_B + c * c * 
self.eval_val_C); + + let claim_witness_final_expected = coeffs[9] * taus_masked_bound_rand_sc * self.eval_W; + + claim_mem_final_expected + + claim_outer_final_expected + + claim_inner_final_expected + + claim_witness_final_expected + }; + + if claim_sc_final_expected != claim_sc_final { + return Err(NovaError::InvalidSumcheckProof); + } + + let eval_vec = vec![ + self.eval_W, + self.eval_Az, + self.eval_Bz, + self.eval_Cz, + self.eval_E, + self.eval_L_row, + self.eval_L_col, + self.eval_val_A, + self.eval_val_B, + self.eval_val_C, + self.eval_t_plus_r_inv_row, + self.eval_row, + self.eval_w_plus_r_inv_row, + self.eval_ts_row, + self.eval_t_plus_r_inv_col, + self.eval_col, + self.eval_w_plus_r_inv_col, + self.eval_ts_col, + ]; + + let comm_vec = [ + U.comm_W, + comm_Az, + comm_Bz, + comm_Cz, + U.comm_E, + comm_L_row, + comm_L_col, + vk.S_comm.comm_val_A, + vk.S_comm.comm_val_B, + vk.S_comm.comm_val_C, + comm_t_plus_r_inv_row, + vk.S_comm.comm_row, + comm_w_plus_r_inv_row, + vk.S_comm.comm_ts_row, + comm_t_plus_r_inv_col, + vk.S_comm.comm_col, + comm_w_plus_r_inv_col, + vk.S_comm.comm_ts_col, + ]; + transcript.absorb(b"e", &eval_vec.as_slice()); // comm_vec is already in the transcript + let c = transcript.squeeze(b"c")?; + let u: PolyEvalInstance = PolyEvalInstance::batch(&comm_vec, rand_sc.clone(), &eval_vec, &c); + + // verify + EE::verify(&vk.vk_ee, &mut transcript, &u.c, &rand_sc, &u.e, &self.eval_arg)?; + + Ok(()) + } +} + +// #[cfg(test)] +// mod tests { +// use ff::Field; +// use pasta_curves::Fq as Scalar; + +// use super::*; +// use crate::provider::PallasEngine; + +// #[test] +// fn test_padded() { +// let mut rng = rand::thread_rng(); +// let e = Scalar::random(&mut rng); +// let v: Vec = (0..10).map(|_| Scalar::random(&mut +// rng)).collect(); let n = 20; + +// let result = padded::(&v, n, &e); + +// assert_eq!(result.len(), n); +// assert_eq!(&result[..10], &v[..]); +// assert!(result[10..].iter().all(|&i| i == e)); +// } +// } diff --git 
a/prover/src/spartan/snark.rs b/prover/src/spartan/snark.rs new file mode 100644 index 0000000..79acbb2 --- /dev/null +++ b/prover/src/spartan/snark.rs @@ -0,0 +1,502 @@ +//! This module implements `RelaxedR1CSSNARKTrait` using Spartan that is generic +//! over the polynomial commitment and evaluation argument (i.e., a PCS) +//! This version of Spartan does not use preprocessing so the verifier keeps the +//! entire description of R1CS matrices. This is essentially optimal for the +//! verifier when using an IPA-based polynomial commitment scheme. + +use std::sync::Arc; + +use ff::Field; +use itertools::Itertools as _; +use once_cell::sync::OnceCell; +use rayon::prelude::*; +use serde::{Deserialize, Serialize}; + +use crate::{ + digest::{DigestComputer, SimpleDigestible}, + errors::NovaError, + r1cs::{R1CSShape, RelaxedR1CSInstance, RelaxedR1CSWitness, SparseMatrix}, + spartan::{ + compute_eval_table_sparse, + polys::{ + eq::EqPolynomial, + multilinear::{MultilinearPolynomial, SparsePolynomial}, + power::PowPolynomial, + }, + powers, + sumcheck::SumcheckProof, + PolyEvalInstance, PolyEvalWitness, + }, + traits::{ + evaluation::EvaluationEngineTrait, + snark::{DigestHelperTrait, RelaxedR1CSSNARKTrait}, + Engine, TranscriptEngineTrait, + }, + CommitmentKey, +}; + +/// A type that represents the prover's key +#[derive(Debug, Clone)] +pub struct ProverKey> { + pub pk_ee: EE::ProverKey, + pub vk_digest: E::Scalar, // digest of the verifier's key +} + +/// A type that represents the verifier's key +#[derive(Debug, Clone, Serialize)] +#[serde(bound = "")] +pub struct VerifierKey> { + vk_ee: EE::VerifierKey, + S: R1CSShape, + #[serde(skip, default = "OnceCell::new")] + digest: OnceCell, +} + +impl> SimpleDigestible for VerifierKey {} + +impl> VerifierKey { + fn new(shape: R1CSShape, vk_ee: EE::VerifierKey) -> Self { + Self { vk_ee, S: shape, digest: OnceCell::new() } + } +} + +impl> DigestHelperTrait for VerifierKey { + /// Returns the digest of the verifier's key. 
+ fn digest(&self) -> E::Scalar { + self + .digest + .get_or_try_init(|| { + let dc = DigestComputer::::new(self); + dc.digest() + }) + .cloned() + .expect("Failure to retrieve digest!") + } +} + +/// A succinct proof of knowledge of a witness to a relaxed R1CS instance +/// The proof is produced using Spartan's combination of the sum-check and +/// the commitment to a vector viewed as a polynomial commitment +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct RelaxedR1CSSNARK> { + sc_proof_outer: SumcheckProof, + claims_outer: (E::Scalar, E::Scalar, E::Scalar), + eval_E: E::Scalar, + sc_proof_inner: SumcheckProof, + eval_W: E::Scalar, + sc_proof_batch: SumcheckProof, + evals_batch: Vec, + eval_arg: EE::EvaluationArgument, +} + +impl> RelaxedR1CSSNARKTrait for RelaxedR1CSSNARK { + type ProverKey = ProverKey; + type VerifierKey = VerifierKey; + + fn initialize_pk( + ck: Arc>, + vk_digest: ::Scalar, + ) -> Result { + todo!("not implemented for nova snarks"); + } + + fn setup( + ck: Arc>, + S: &R1CSShape, + ) -> Result<(Self::ProverKey, Self::VerifierKey), NovaError> { + let (pk_ee, vk_ee) = EE::setup(ck); + + let S = S.pad(); + + let vk: VerifierKey = VerifierKey::new(S, vk_ee); + + let pk = ProverKey { pk_ee, vk_digest: vk.digest() }; + + Ok((pk, vk)) + } + + /// produces a succinct proof of satisfiability of a `RelaxedR1CS` instance + #[tracing::instrument(skip_all, name = "SNARK::prove")] + fn prove( + ck: &CommitmentKey, + pk: &Self::ProverKey, + S: &R1CSShape, + U: &RelaxedR1CSInstance, + W: &RelaxedR1CSWitness, + ) -> Result { + // pad the R1CSShape + let S = S.pad(); + // sanity check that R1CSShape has all required size characteristics + assert!(S.is_regular_shape()); + + let W = W.pad(&S); // pad the witness + let mut transcript = E::TE::new(b"RelaxedR1CSSNARK"); + + // append the digest of vk (which includes R1CS matrices) and the + // RelaxedR1CSInstance to the transcript + transcript.absorb(b"vk", &pk.vk_digest); + 
transcript.absorb(b"U", U); + + // compute the full satisfying assignment by concatenating W.W, U.u, and U.X + let mut z = [W.W.clone(), vec![U.u], U.X.clone()].concat(); + + let (num_rounds_x, num_rounds_y) = ( + usize::try_from(S.num_cons.ilog2()).unwrap(), + (usize::try_from(S.num_vars.ilog2()).unwrap() + 1), + ); + + // outer sum-check + let tau: EqPolynomial<_> = PowPolynomial::new(&transcript.squeeze(b"t")?, num_rounds_x).into(); + + let mut poly_tau = MultilinearPolynomial::new(tau.evals()); + let (mut poly_Az, mut poly_Bz, poly_Cz, mut poly_uCz_E) = { + let (poly_Az, poly_Bz, poly_Cz) = S.multiply_vec(&z)?; + let poly_uCz_E = (0..S.num_cons) + .into_par_iter() + .map(|i| U.u * poly_Cz[i] + W.E[i]) + .collect::>(); + ( + MultilinearPolynomial::new(poly_Az), + MultilinearPolynomial::new(poly_Bz), + MultilinearPolynomial::new(poly_Cz), + MultilinearPolynomial::new(poly_uCz_E), + ) + }; + + let comb_func_outer = + |poly_A_comp: &E::Scalar, + poly_B_comp: &E::Scalar, + poly_C_comp: &E::Scalar, + poly_D_comp: &E::Scalar| + -> E::Scalar { *poly_A_comp * (*poly_B_comp * *poly_C_comp - *poly_D_comp) }; + let (sc_proof_outer, r_x, claims_outer) = SumcheckProof::prove_cubic_with_additive_term( + &E::Scalar::ZERO, // claim is zero + num_rounds_x, + &mut poly_tau, + &mut poly_Az, + &mut poly_Bz, + &mut poly_uCz_E, + comb_func_outer, + &mut transcript, + )?; + + // claims from the end of sum-check + let (claim_Az, claim_Bz): (E::Scalar, E::Scalar) = (claims_outer[1], claims_outer[2]); + let chis_r_x = EqPolynomial::evals_from_points(&r_x); + + let claim_Cz = MultilinearPolynomial::evaluate_with_chis(poly_Cz.evaluations(), &chis_r_x); + let eval_E = MultilinearPolynomial::evaluate_with_chis(&W.E, &chis_r_x); + transcript.absorb(b"claims_outer", &[claim_Az, claim_Bz, claim_Cz, eval_E].as_slice()); + + // inner sum-check + let r = transcript.squeeze(b"r")?; + let claim_inner_joint = claim_Az + r * claim_Bz + r * r * claim_Cz; + + let poly_ABC = { + // compute the initial 
evaluation table for R(\tau, x) + let evals_rx = EqPolynomial::evals_from_points(&r_x.clone()); + + let (evals_A, evals_B, evals_C) = compute_eval_table_sparse(&S, &evals_rx); + + assert_eq!(evals_A.len(), evals_B.len()); + assert_eq!(evals_A.len(), evals_C.len()); + (0..evals_A.len()) + .into_par_iter() + .map(|i| evals_A[i] + r * evals_B[i] + r * r * evals_C[i]) + .collect::>() + }; + + let poly_z = { + z.resize(S.num_vars * 2, E::Scalar::ZERO); + z + }; + + let comb_func = |poly_A_comp: &E::Scalar, poly_B_comp: &E::Scalar| -> E::Scalar { + *poly_A_comp * *poly_B_comp + }; + let (sc_proof_inner, r_y, _claims_inner) = SumcheckProof::prove_quad( + &claim_inner_joint, + num_rounds_y, + &mut MultilinearPolynomial::new(poly_ABC), + &mut MultilinearPolynomial::new(poly_z), + comb_func, + &mut transcript, + )?; + + // Add additional claims about W and E polynomials to the list from CC + // We will reduce a vector of claims of evaluations at different points into + // claims about them at the same point. For example, eval_W =? + // W(r_y[1..]) and eval_E =? E(r_x) into two claims: eval_W_prime =? + // W(rz) and eval_E_prime =? E(rz) We can them combine the two into one: + // eval_W_prime + gamma * eval_E_prime =? (W + gamma*E)(rz), where gamma + // is a public challenge Since commitments to W and E are homomorphic, + // the verifier can compute a commitment to the batched polynomial. 
+ let eval_W = MultilinearPolynomial::evaluate_with(&W.W, &r_y[1..]); + + let w_vec = vec![PolyEvalWitness { p: W.W }, PolyEvalWitness { p: W.E }]; + let u_vec = + vec![PolyEvalInstance { c: U.comm_W, x: r_y[1..].to_vec(), e: eval_W }, PolyEvalInstance { + c: U.comm_E, + x: r_x, + e: eval_E, + }]; + + let (batched_u, batched_w, sc_proof_batch, claims_batch_left) = + batch_eval_reduce(u_vec, &w_vec, &mut transcript)?; + + let eval_arg = EE::prove( + ck, + &pk.pk_ee, + &mut transcript, + &batched_u.c, + &batched_w.p, + &batched_u.x, + &batched_u.e, + )?; + + Ok(Self { + sc_proof_outer, + claims_outer: (claim_Az, claim_Bz, claim_Cz), + eval_E, + sc_proof_inner, + eval_W, + sc_proof_batch, + evals_batch: claims_batch_left, + eval_arg, + }) + } + + /// verifies a proof of satisfiability of a `RelaxedR1CS` instance + fn verify(&self, vk: &Self::VerifierKey, U: &RelaxedR1CSInstance) -> Result<(), NovaError> { + let mut transcript = E::TE::new(b"RelaxedR1CSSNARK"); + + // append the digest of R1CS matrices and the RelaxedR1CSInstance to the + // transcript + transcript.absorb(b"vk", &vk.digest()); + transcript.absorb(b"U", U); + + let (num_rounds_x, num_rounds_y) = ( + usize::try_from(vk.S.num_cons.ilog2()).unwrap(), + (usize::try_from(vk.S.num_vars.ilog2()).unwrap() + 1), + ); + + // outer sum-check + let tau: EqPolynomial<_> = PowPolynomial::new(&transcript.squeeze(b"t")?, num_rounds_x).into(); + + let (claim_outer_final, r_x) = + self.sc_proof_outer.verify(E::Scalar::ZERO, num_rounds_x, 3, &mut transcript)?; + + // verify claim_outer_final + let (claim_Az, claim_Bz, claim_Cz) = self.claims_outer; + let taus_bound_rx = tau.evaluate(&r_x); + let claim_outer_final_expected = + taus_bound_rx * (claim_Az * claim_Bz - U.u * claim_Cz - self.eval_E); + if claim_outer_final != claim_outer_final_expected { + return Err(NovaError::InvalidSumcheckProof); + } + + transcript.absorb( + b"claims_outer", + &[self.claims_outer.0, self.claims_outer.1, self.claims_outer.2, 
self.eval_E].as_slice(), + ); + + // inner sum-check + let r = transcript.squeeze(b"r")?; + let claim_inner_joint = + self.claims_outer.0 + r * self.claims_outer.1 + r * r * self.claims_outer.2; + + let (claim_inner_final, r_y) = + self.sc_proof_inner.verify(claim_inner_joint, num_rounds_y, 2, &mut transcript)?; + + // verify claim_inner_final + let eval_Z = { + let eval_X = { + // public IO is (u, X) + let X = vec![U.u].into_iter().chain(U.X.iter().cloned()).collect::>(); + SparsePolynomial::new(usize::try_from(vk.S.num_vars.ilog2()).unwrap(), X) + .evaluate(&r_y[1..]) + }; + (E::Scalar::ONE - r_y[0]) * self.eval_W + r_y[0] * eval_X + }; + + // compute evaluations of R1CS matrices + let multi_evaluate = |M_vec: &[&SparseMatrix], + r_x: &[E::Scalar], + r_y: &[E::Scalar]| + -> Vec { + let evaluate_with_table = + |M: &SparseMatrix, T_x: &[E::Scalar], T_y: &[E::Scalar]| -> E::Scalar { + M.par_iter_rows() + .enumerate() + .map(|(row_idx, row)| { + M.get_row(row) + .map(|(val, col_idx)| T_x[row_idx] * T_y[*col_idx] * val) + .sum::() + }) + .sum() + }; + + let (T_x, T_y) = rayon::join( + || EqPolynomial::evals_from_points(r_x), + || EqPolynomial::evals_from_points(r_y), + ); + + (0..M_vec.len()).into_par_iter().map(|i| evaluate_with_table(M_vec[i], &T_x, &T_y)).collect() + }; + + let evals = multi_evaluate(&[&vk.S.A, &vk.S.B, &vk.S.C], &r_x, &r_y); + + let claim_inner_final_expected = (evals[0] + r * evals[1] + r * r * evals[2]) * eval_Z; + if claim_inner_final != claim_inner_final_expected { + return Err(NovaError::InvalidSumcheckProof); + } + + // add claims about W and E polynomials + let u_vec: Vec> = vec![ + PolyEvalInstance { c: U.comm_W, x: r_y[1..].to_vec(), e: self.eval_W }, + PolyEvalInstance { c: U.comm_E, x: r_x, e: self.eval_E }, + ]; + + let batched_u = + batch_eval_verify(u_vec, &mut transcript, &self.sc_proof_batch, &self.evals_batch)?; + + // verify + EE::verify( + &vk.vk_ee, + &mut transcript, + &batched_u.c, + &batched_u.x, + &batched_u.e, + 
&self.eval_arg, + )?; + + Ok(()) + } +} + +/// Reduces a batch of polynomial evaluation claims using Sumcheck +/// to a single claim at the same point. +/// +/// # Details +/// +/// We are given as input a list of instance/witness pairs +/// u = [(Cᵢ, xᵢ, eᵢ)], w = [Pᵢ], such that +/// - nᵢ = |xᵢ| +/// - Cᵢ = Commit(Pᵢ) +/// - eᵢ = Pᵢ(xᵢ) +/// - |Pᵢ| = 2^nᵢ +/// +/// We allow the polynomial Pᵢ to have different sizes, by appropriately scaling +/// the claims and resulting evaluations from Sumcheck. +pub(in crate::spartan) fn batch_eval_reduce( + u_vec: Vec>, + w_vec: &[PolyEvalWitness], + transcript: &mut E::TE, +) -> Result<(PolyEvalInstance, PolyEvalWitness, SumcheckProof, Vec), NovaError> +{ + let num_claims = u_vec.len(); + assert_eq!(w_vec.len(), num_claims); + + // Compute nᵢ and n = maxᵢ{nᵢ} + let num_rounds = u_vec.iter().map(|u| u.x.len()).collect::>(); + + // Check polynomials match number of variables, i.e. |Pᵢ| = 2^nᵢ + zip_with_for_each!(iter, (w_vec, num_rounds), |w, num_vars| assert_eq!(w.p.len(), 1 << num_vars)); + + // generate a challenge, and powers of it for random linear combination + let rho = transcript.squeeze(b"r")?; + let powers_of_rho = powers(&rho, num_claims); + + let (claims, u_xs, comms): (Vec<_>, Vec<_>, Vec<_>) = + u_vec.into_iter().map(|u| (u.e, u.x, u.c)).multiunzip(); + + // Create clones of polynomials to be given to Sumcheck + // Pᵢ(X) + let polys_P: Vec> = + w_vec.iter().map(|w| MultilinearPolynomial::new(w.p.clone())).collect(); + // eq(xᵢ, X) + let polys_eq: Vec> = u_xs + .into_iter() + .map(|ux| MultilinearPolynomial::new(EqPolynomial::evals_from_points(&ux))) + .collect(); + + // For each i, check eᵢ = ∑ₓ Pᵢ(x)eq(xᵢ,x), where x ∈ {0,1}^nᵢ + let comb_func = |poly_P: &E::Scalar, poly_eq: &E::Scalar| -> E::Scalar { *poly_P * *poly_eq }; + let (sc_proof_batch, r, claims_batch) = SumcheckProof::prove_quad_batch( + &claims, + &num_rounds, + polys_P, + polys_eq, + &powers_of_rho, + comb_func, + transcript, + )?; + + let 
(claims_batch_left, _): (Vec, Vec) = claims_batch; + + transcript.absorb(b"l", &claims_batch_left.as_slice()); + + // we now combine evaluation claims at the same point r into one + let gamma = transcript.squeeze(b"g")?; + + let u_joint = + PolyEvalInstance::batch_diff_size(&comms, &claims_batch_left, &num_rounds, r, gamma); + + // P = ∑ᵢ γⁱ⋅Pᵢ + let w_joint = PolyEvalWitness::batch_diff_size(&w_vec.iter().by_ref().collect::>(), gamma); + + Ok((u_joint, w_joint, sc_proof_batch, claims_batch_left)) +} + +/// Verifies a batch of polynomial evaluation claims using Sumcheck +/// reducing them to a single claim at the same point. +pub(in crate::spartan) fn batch_eval_verify( + u_vec: Vec>, + transcript: &mut E::TE, + sc_proof_batch: &SumcheckProof, + evals_batch: &[E::Scalar], +) -> Result, NovaError> { + let num_claims = u_vec.len(); + assert_eq!(evals_batch.len(), num_claims); + + // generate a challenge + let rho = transcript.squeeze(b"r")?; + let powers_of_rho = powers(&rho, num_claims); + + // Compute nᵢ and n = maxᵢ{nᵢ} + let num_rounds = u_vec.iter().map(|u| u.x.len()).collect::>(); + let num_rounds_max = *num_rounds.iter().max().unwrap(); + + let claims = u_vec.iter().map(|u| u.e).collect::>(); + + let (claim_batch_final, r) = + sc_proof_batch.verify_batch(&claims, &num_rounds, &powers_of_rho, 2, transcript)?; + + let claim_batch_final_expected = { + let evals_r = u_vec.iter().map(|u| { + let (_, r_hi) = r.split_at(num_rounds_max - u.x.len()); + EqPolynomial::new(r_hi.to_vec()).evaluate(&u.x) + }); + + zip_with!((evals_r, evals_batch.iter(), powers_of_rho.iter()), |e_i, p_i, rho_i| e_i + * *p_i + * rho_i) + .sum() + }; + + if claim_batch_final != claim_batch_final_expected { + return Err(NovaError::InvalidSumcheckProof); + } + + transcript.absorb(b"l", &evals_batch); + + // we now combine evaluation claims at the same point r into one + let gamma = transcript.squeeze(b"g")?; + + let comms = u_vec.into_iter().map(|u| u.c).collect::>(); + + let u_joint = 
PolyEvalInstance::batch_diff_size(&comms, evals_batch, &num_rounds, r, gamma); + + Ok(u_joint) +} diff --git a/prover/src/spartan/sumcheck/engine.rs b/prover/src/spartan/sumcheck/engine.rs new file mode 100644 index 0000000..bb1c4b1 --- /dev/null +++ b/prover/src/spartan/sumcheck/engine.rs @@ -0,0 +1,571 @@ +use ff::Field; +use rayon::prelude::*; + +use crate::{ + provider::util::field::batch_invert, + spartan::{ + math::Math, + polys::{ + eq::EqPolynomial, masked_eq::MaskedEqPolynomial, multilinear::MultilinearPolynomial, + power::PowPolynomial, + }, + sumcheck::SumcheckProof, + }, + traits::commitment::CommitmentEngineTrait, + Commitment, CommitmentKey, Engine, NovaError, +}; + +/// Defines a trait for implementing sum-check in a generic manner +pub trait SumcheckEngine: Send + Sync { + /// returns the initial claims + fn initial_claims(&self) -> Vec; + + /// degree of the sum-check polynomial + fn degree(&self) -> usize; + + /// the size of the polynomials + fn size(&self) -> usize; + + /// returns evaluation points at 0, 2, d-1 (where d is the degree of the + /// sum-check polynomial) + fn evaluation_points(&self) -> Vec>; + + /// bounds a variable in the constituent polynomials + fn bound(&mut self, r: &E::Scalar); + + /// returns the final claims + fn final_claims(&self) -> Vec>; +} + +/// The [`WitnessBoundSumcheck`] ensures that the witness polynomial W defined +/// over n = log(N) variables, is zero outside of the first `num_vars = 2^m` +/// entries. +/// +/// # Details +/// +/// The `W` polynomial is padded with zeros to size N = 2^n. +/// The `masked_eq` polynomials is defined as with regards to a random challenge +/// `tau` as the eq(tau) polynomial, where the first 2^m evaluations to 0. +/// +/// The instance is given by +/// `0 = ∑_{0≤i<2^n} masked_eq[i] * W[i]`. +/// It is equivalent to the expression +/// `0 = ∑_{2^m≤i<2^n} eq[i] * W[i]` +/// Since `eq` is random, the instance is only satisfied if `W[2^{m}..] = 0`. 
+pub(in crate::spartan) struct WitnessBoundSumcheck { + poly_W: MultilinearPolynomial, + poly_masked_eq: MultilinearPolynomial, +} + +impl WitnessBoundSumcheck { + pub fn new(tau: E::Scalar, poly_W_padded: Vec, num_vars: usize) -> Self { + let num_vars_log = num_vars.log_2(); + // When num_vars = num_rounds, we shouldn't have to prove anything + // but we still want this instance to compute the evaluation of W + let num_rounds = poly_W_padded.len().log_2(); + assert!(num_vars_log < num_rounds); + + let tau_coords = PowPolynomial::new(&tau, num_rounds).coordinates(); + let poly_masked_eq_evals = + MaskedEqPolynomial::new(&EqPolynomial::new(tau_coords), num_vars_log).evals(); + + Self { + poly_W: MultilinearPolynomial::new(poly_W_padded), + poly_masked_eq: MultilinearPolynomial::new(poly_masked_eq_evals), + } + } +} +impl SumcheckEngine for WitnessBoundSumcheck { + fn initial_claims(&self) -> Vec { vec![E::Scalar::ZERO] } + + fn degree(&self) -> usize { 3 } + + fn size(&self) -> usize { + assert_eq!(self.poly_W.len(), self.poly_masked_eq.len()); + self.poly_W.len() + } + + fn evaluation_points(&self) -> Vec> { + let comb_func = |poly_A_comp: &E::Scalar, + poly_B_comp: &E::Scalar, + _: &E::Scalar| + -> E::Scalar { *poly_A_comp * *poly_B_comp }; + + let (eval_point_0, eval_point_2, eval_point_3) = SumcheckProof::::compute_eval_points_cubic( + &self.poly_masked_eq, + &self.poly_W, + &self.poly_W, // unused + &comb_func, + ); + + vec![vec![eval_point_0, eval_point_2, eval_point_3]] + } + + fn bound(&mut self, r: &E::Scalar) { + [&mut self.poly_W, &mut self.poly_masked_eq] + .par_iter_mut() + .for_each(|poly| poly.bind_poly_var_top(r)); + } + + fn final_claims(&self) -> Vec> { + vec![vec![self.poly_W[0], self.poly_masked_eq[0]]] + } +} + +pub(in crate::spartan) struct MemorySumcheckInstance { + // row + w_plus_r_row: MultilinearPolynomial, + t_plus_r_row: MultilinearPolynomial, + t_plus_r_inv_row: MultilinearPolynomial, + w_plus_r_inv_row: MultilinearPolynomial, + ts_row: 
MultilinearPolynomial, + + // col + w_plus_r_col: MultilinearPolynomial, + t_plus_r_col: MultilinearPolynomial, + t_plus_r_inv_col: MultilinearPolynomial, + w_plus_r_inv_col: MultilinearPolynomial, + ts_col: MultilinearPolynomial, + + // eq + poly_eq: MultilinearPolynomial, + + // zero polynomial + poly_zero: MultilinearPolynomial, +} + +impl MemorySumcheckInstance { + /// Computes witnesses for `MemoryInstanceSumcheck` + /// + /// # Description + /// We use the logUp protocol to prove that + /// ∑ TS[i]/(T[i] + r) - 1/(W[i] + r) = 0 + /// where + /// T_row[i] = mem_row[i] * gamma + i + /// = eq(tau)[i] * gamma + i + /// W_row[i] = L_row[i] * gamma + addr_row[i] + /// = eq(tau)[row[i]] * gamma + addr_row[i] + /// T_col[i] = mem_col[i] * gamma + i + /// = z[i] * gamma + i + /// W_col[i] = addr_col[i] * gamma + addr_col[i] + /// = z[col[i]] * gamma + addr_col[i] + /// and + /// `TS_row`, `TS_col` are integer-valued vectors representing the number + /// of reads to each memory cell of `L_row`, `L_col` + /// + /// The function returns oracles for the polynomials TS[i]/(T[i] + r), + /// 1/(W[i] + r), as well as auxiliary polynomials T[i] + r, W[i] + r + pub fn compute_oracles( + ck: &CommitmentKey, + r: &E::Scalar, + gamma: &E::Scalar, + mem_row: &[E::Scalar], + addr_row: &[E::Scalar], + L_row: &[E::Scalar], + ts_row: &[E::Scalar], + mem_col: &[E::Scalar], + addr_col: &[E::Scalar], + L_col: &[E::Scalar], + ts_col: &[E::Scalar], + ) -> Result<([Commitment; 4], [Vec; 4], [Vec; 4]), NovaError> { + // hash the tuples of (addr,val) memory contents and read responses into a + // single field element using `hash_func` + let hash_func_vec = |mem: &[E::Scalar], + addr: &[E::Scalar], + lookups: &[E::Scalar]| + -> (Vec, Vec) { + let hash_func = |addr: &E::Scalar, val: &E::Scalar| -> E::Scalar { *val * gamma + *addr }; + assert_eq!(addr.len(), lookups.len()); + rayon::join( + || { + (0..mem.len()) + .map(|i| hash_func(&E::Scalar::from(i as u64), &mem[i])) + .collect::>() + }, + || 
(0..addr.len()).map(|i| hash_func(&addr[i], &lookups[i])).collect::>(), + ) + }; + + let ((T_row, W_row), (T_col, W_col)) = rayon::join( + || hash_func_vec(mem_row, addr_row, L_row), + || hash_func_vec(mem_col, addr_col, L_col), + ); + + // compute vectors TS[i]/(T[i] + r) and 1/(W[i] + r) + let helper = |T: &[E::Scalar], + W: &[E::Scalar], + TS: &[E::Scalar], + r: &E::Scalar| + -> ( + (Result, NovaError>, Result, NovaError>), + (Vec, Vec), + ) { + rayon::join( + || { + rayon::join( + || { + let inv = batch_invert(T.par_iter().map(|e| *e + *r).collect::>())?; + + // compute inv[i] * TS[i] in parallel + Ok( + zip_with!((inv.into_par_iter(), TS.par_iter()), |e1, e2| e1 * *e2) + .collect::>(), + ) + }, + || batch_invert(W.par_iter().map(|e| *e + *r).collect::>()), + ) + }, + || { + rayon::join( + || T.par_iter().map(|e| *e + *r).collect(), + || W.par_iter().map(|e| *e + *r).collect(), + ) + }, + ) + }; + + let ( + ((t_plus_r_inv_row, w_plus_r_inv_row), (t_plus_r_row, w_plus_r_row)), + ((t_plus_r_inv_col, w_plus_r_inv_col), (t_plus_r_col, w_plus_r_col)), + ) = rayon::join(|| helper(&T_row, &W_row, ts_row, r), || helper(&T_col, &W_col, ts_col, r)); + + let t_plus_r_inv_row = t_plus_r_inv_row?; + let w_plus_r_inv_row = w_plus_r_inv_row?; + let t_plus_r_inv_col = t_plus_r_inv_col?; + let w_plus_r_inv_col = w_plus_r_inv_col?; + + let ( + (comm_t_plus_r_inv_row, comm_w_plus_r_inv_row), + (comm_t_plus_r_inv_col, comm_w_plus_r_inv_col), + ) = rayon::join( + || { + rayon::join( + || E::CE::commit(ck, &t_plus_r_inv_row), + || E::CE::commit(ck, &w_plus_r_inv_row), + ) + }, + || { + rayon::join( + || E::CE::commit(ck, &t_plus_r_inv_col), + || E::CE::commit(ck, &w_plus_r_inv_col), + ) + }, + ); + + let comm_vec = + [comm_t_plus_r_inv_row, comm_w_plus_r_inv_row, comm_t_plus_r_inv_col, comm_w_plus_r_inv_col]; + + let poly_vec = [t_plus_r_inv_row, w_plus_r_inv_row, t_plus_r_inv_col, w_plus_r_inv_col]; + + let aux_poly_vec = [t_plus_r_row, w_plus_r_row, t_plus_r_col, w_plus_r_col]; + 
+ Ok((comm_vec, poly_vec, aux_poly_vec)) + } + + pub fn new( + polys_oracle: [Vec; 4], + polys_aux: [Vec; 4], + poly_eq: Vec, + ts_row: Vec, + ts_col: Vec, + ) -> Self { + let [t_plus_r_inv_row, w_plus_r_inv_row, t_plus_r_inv_col, w_plus_r_inv_col] = polys_oracle; + let [t_plus_r_row, w_plus_r_row, t_plus_r_col, w_plus_r_col] = polys_aux; + + let zero = vec![E::Scalar::ZERO; poly_eq.len()]; + + Self { + w_plus_r_row: MultilinearPolynomial::new(w_plus_r_row), + t_plus_r_row: MultilinearPolynomial::new(t_plus_r_row), + t_plus_r_inv_row: MultilinearPolynomial::new(t_plus_r_inv_row), + w_plus_r_inv_row: MultilinearPolynomial::new(w_plus_r_inv_row), + ts_row: MultilinearPolynomial::new(ts_row), + w_plus_r_col: MultilinearPolynomial::new(w_plus_r_col), + t_plus_r_col: MultilinearPolynomial::new(t_plus_r_col), + t_plus_r_inv_col: MultilinearPolynomial::new(t_plus_r_inv_col), + w_plus_r_inv_col: MultilinearPolynomial::new(w_plus_r_inv_col), + ts_col: MultilinearPolynomial::new(ts_col), + poly_eq: MultilinearPolynomial::new(poly_eq), + poly_zero: MultilinearPolynomial::new(zero), + } + } +} + +impl SumcheckEngine for MemorySumcheckInstance { + fn initial_claims(&self) -> Vec { vec![E::Scalar::ZERO; 6] } + + fn degree(&self) -> usize { 3 } + + fn size(&self) -> usize { + // sanity checks + assert_eq!(self.w_plus_r_row.len(), self.t_plus_r_row.len()); + assert_eq!(self.w_plus_r_row.len(), self.ts_row.len()); + assert_eq!(self.w_plus_r_row.len(), self.w_plus_r_col.len()); + assert_eq!(self.w_plus_r_row.len(), self.t_plus_r_col.len()); + assert_eq!(self.w_plus_r_row.len(), self.ts_col.len()); + + self.w_plus_r_row.len() + } + + fn evaluation_points(&self) -> Vec> { + let comb_func = |poly_A_comp: &E::Scalar, + poly_B_comp: &E::Scalar, + _poly_C_comp: &E::Scalar| + -> E::Scalar { *poly_A_comp - *poly_B_comp }; + + let comb_func2 = + |poly_A_comp: &E::Scalar, + poly_B_comp: &E::Scalar, + poly_C_comp: &E::Scalar, + _poly_D_comp: &E::Scalar| + -> E::Scalar { *poly_A_comp * 
(*poly_B_comp * *poly_C_comp - E::Scalar::ONE) }; + + let comb_func3 = + |poly_A_comp: &E::Scalar, + poly_B_comp: &E::Scalar, + poly_C_comp: &E::Scalar, + poly_D_comp: &E::Scalar| + -> E::Scalar { *poly_A_comp * (*poly_B_comp * *poly_C_comp - *poly_D_comp) }; + + // inv related evaluation points + // 0 = ∑ TS[i]/(T[i] + r) - 1/(W[i] + r) + let (eval_inv_0_row, eval_inv_2_row, eval_inv_3_row) = + SumcheckProof::::compute_eval_points_cubic( + &self.t_plus_r_inv_row, + &self.w_plus_r_inv_row, + &self.poly_zero, + &comb_func, + ); + + let (eval_inv_0_col, eval_inv_2_col, eval_inv_3_col) = + SumcheckProof::::compute_eval_points_cubic( + &self.t_plus_r_inv_col, + &self.w_plus_r_inv_col, + &self.poly_zero, + &comb_func, + ); + + // row related evaluation points + // 0 = ∑ eq[i] * (inv_T[i] * (T[i] + r) - TS[i])) + let (eval_T_0_row, eval_T_2_row, eval_T_3_row) = + SumcheckProof::::compute_eval_points_cubic_with_additive_term( + &self.poly_eq, + &self.t_plus_r_inv_row, + &self.t_plus_r_row, + &self.ts_row, + &comb_func3, + ); + // 0 = ∑ eq[i] * (inv_W[i] * (T[i] + r) - 1)) + let (eval_W_0_row, eval_W_2_row, eval_W_3_row) = + SumcheckProof::::compute_eval_points_cubic_with_additive_term( + &self.poly_eq, + &self.w_plus_r_inv_row, + &self.w_plus_r_row, + &self.poly_zero, + &comb_func2, + ); + + // column related evaluation points + let (eval_T_0_col, eval_T_2_col, eval_T_3_col) = + SumcheckProof::::compute_eval_points_cubic_with_additive_term( + &self.poly_eq, + &self.t_plus_r_inv_col, + &self.t_plus_r_col, + &self.ts_col, + &comb_func3, + ); + let (eval_W_0_col, eval_W_2_col, eval_W_3_col) = + SumcheckProof::::compute_eval_points_cubic_with_additive_term( + &self.poly_eq, + &self.w_plus_r_inv_col, + &self.w_plus_r_col, + &self.poly_zero, + &comb_func2, + ); + + vec![ + vec![eval_inv_0_row, eval_inv_2_row, eval_inv_3_row], + vec![eval_inv_0_col, eval_inv_2_col, eval_inv_3_col], + vec![eval_T_0_row, eval_T_2_row, eval_T_3_row], + vec![eval_W_0_row, eval_W_2_row, 
eval_W_3_row], + vec![eval_T_0_col, eval_T_2_col, eval_T_3_col], + vec![eval_W_0_col, eval_W_2_col, eval_W_3_col], + ] + } + + fn bound(&mut self, r: &E::Scalar) { + [ + &mut self.t_plus_r_row, + &mut self.t_plus_r_inv_row, + &mut self.w_plus_r_row, + &mut self.w_plus_r_inv_row, + &mut self.ts_row, + &mut self.t_plus_r_col, + &mut self.t_plus_r_inv_col, + &mut self.w_plus_r_col, + &mut self.w_plus_r_inv_col, + &mut self.ts_col, + &mut self.poly_eq, + ] + .par_iter_mut() + .for_each(|poly| poly.bind_poly_var_top(r)); + } + + fn final_claims(&self) -> Vec> { + let poly_row_final = vec![self.t_plus_r_inv_row[0], self.w_plus_r_inv_row[0], self.ts_row[0]]; + + let poly_col_final = vec![self.t_plus_r_inv_col[0], self.w_plus_r_inv_col[0], self.ts_col[0]]; + + vec![poly_row_final, poly_col_final] + } +} + +pub(in crate::spartan) struct OuterSumcheckInstance { + poly_tau: MultilinearPolynomial, + poly_Az: MultilinearPolynomial, + poly_Bz: MultilinearPolynomial, + poly_uCz_E: MultilinearPolynomial, + + poly_Mz: MultilinearPolynomial, + eval_Mz_at_tau: E::Scalar, + + poly_zero: MultilinearPolynomial, +} + +impl OuterSumcheckInstance { + pub fn new( + tau: Vec, + Az: Vec, + Bz: Vec, + uCz_E: Vec, + Mz: Vec, + eval_Mz_at_tau: &E::Scalar, + ) -> Self { + let zero = vec![E::Scalar::ZERO; tau.len()]; + Self { + poly_tau: MultilinearPolynomial::new(tau), + poly_Az: MultilinearPolynomial::new(Az), + poly_Bz: MultilinearPolynomial::new(Bz), + poly_uCz_E: MultilinearPolynomial::new(uCz_E), + poly_Mz: MultilinearPolynomial::new(Mz), + eval_Mz_at_tau: *eval_Mz_at_tau, + poly_zero: MultilinearPolynomial::new(zero), + } + } +} + +impl SumcheckEngine for OuterSumcheckInstance { + fn initial_claims(&self) -> Vec { vec![E::Scalar::ZERO, self.eval_Mz_at_tau] } + + fn degree(&self) -> usize { 3 } + + fn size(&self) -> usize { + assert_eq!(self.poly_tau.len(), self.poly_Az.len()); + assert_eq!(self.poly_tau.len(), self.poly_Bz.len()); + assert_eq!(self.poly_tau.len(), self.poly_uCz_E.len()); + 
assert_eq!(self.poly_tau.len(), self.poly_Mz.len()); + self.poly_tau.len() + } + + fn evaluation_points(&self) -> Vec> { + let comb_func = + |poly_A_comp: &E::Scalar, + poly_B_comp: &E::Scalar, + poly_C_comp: &E::Scalar, + poly_D_comp: &E::Scalar| + -> E::Scalar { *poly_A_comp * (*poly_B_comp * *poly_C_comp - *poly_D_comp) }; + + let (eval_point_h_0, eval_point_h_2, eval_point_h_3) = + SumcheckProof::::compute_eval_points_cubic_with_additive_term( + &self.poly_tau, + &self.poly_Az, + &self.poly_Bz, + &self.poly_uCz_E, + &comb_func, + ); + + let comb_func2 = |poly_A_comp: &E::Scalar, + poly_B_comp: &E::Scalar, + _poly_C_comp: &E::Scalar| + -> E::Scalar { *poly_A_comp * *poly_B_comp }; + + let (eval_point_e_0, eval_point_e_2, eval_point_e_3) = + SumcheckProof::::compute_eval_points_cubic( + &self.poly_tau, + &self.poly_Mz, + &self.poly_zero, + &comb_func2, + ); + + vec![vec![eval_point_h_0, eval_point_h_2, eval_point_h_3], vec![ + eval_point_e_0, + eval_point_e_2, + eval_point_e_3, + ]] + } + + fn bound(&mut self, r: &E::Scalar) { + [ + &mut self.poly_tau, + &mut self.poly_Az, + &mut self.poly_Bz, + &mut self.poly_uCz_E, + &mut self.poly_Mz, + ] + .par_iter_mut() + .for_each(|poly| poly.bind_poly_var_top(r)); + } + + fn final_claims(&self) -> Vec> { vec![vec![self.poly_Az[0], self.poly_Bz[0]]] } +} + +pub(in crate::spartan) struct InnerSumcheckInstance { + pub(in crate::spartan) claim: E::Scalar, + pub(in crate::spartan) poly_L_row: MultilinearPolynomial, + pub(in crate::spartan) poly_L_col: MultilinearPolynomial, + pub(in crate::spartan) poly_val: MultilinearPolynomial, +} +impl InnerSumcheckInstance { + pub fn new( + claim: E::Scalar, + poly_L_row: MultilinearPolynomial, + poly_L_col: MultilinearPolynomial, + poly_val: MultilinearPolynomial, + ) -> Self { + Self { claim, poly_L_row, poly_L_col, poly_val } + } +} +impl SumcheckEngine for InnerSumcheckInstance { + fn initial_claims(&self) -> Vec { vec![self.claim] } + + fn degree(&self) -> usize { 3 } + + fn 
size(&self) -> usize { + assert_eq!(self.poly_L_row.len(), self.poly_val.len()); + assert_eq!(self.poly_L_row.len(), self.poly_L_col.len()); + self.poly_L_row.len() + } + + fn evaluation_points(&self) -> Vec> { + let (poly_A, poly_B, poly_C) = (&self.poly_L_row, &self.poly_L_col, &self.poly_val); + let comb_func = |poly_A_comp: &E::Scalar, + poly_B_comp: &E::Scalar, + poly_C_comp: &E::Scalar| + -> E::Scalar { *poly_A_comp * *poly_B_comp * *poly_C_comp }; + + let (eval_point_0, eval_point_2, eval_point_3) = + SumcheckProof::::compute_eval_points_cubic(poly_A, poly_B, poly_C, &comb_func); + + vec![vec![eval_point_0, eval_point_2, eval_point_3]] + } + + fn bound(&mut self, r: &E::Scalar) { + [&mut self.poly_L_row, &mut self.poly_L_col, &mut self.poly_val] + .par_iter_mut() + .for_each(|poly| poly.bind_poly_var_top(r)); + } + + fn final_claims(&self) -> Vec> { + vec![vec![self.poly_L_row[0], self.poly_L_col[0]]] + } +} diff --git a/prover/src/spartan/sumcheck/mod.rs b/prover/src/spartan/sumcheck/mod.rs new file mode 100644 index 0000000..0cd7756 --- /dev/null +++ b/prover/src/spartan/sumcheck/mod.rs @@ -0,0 +1,542 @@ +use ff::Field; +use itertools::Itertools as _; +use rayon::prelude::*; +use serde::{Deserialize, Serialize}; + +use crate::{ + errors::NovaError, + spartan::polys::{ + multilinear::MultilinearPolynomial, + univariate::{CompressedUniPoly, UniPoly}, + }, + traits::{Engine, TranscriptEngineTrait}, +}; + +pub(in crate::spartan) mod engine; + +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(bound = "")] +pub(crate) struct SumcheckProof { + compressed_polys: Vec>, +} + +impl SumcheckProof { + pub fn new(compressed_polys: Vec>) -> Self { + Self { compressed_polys } + } + + pub fn verify( + &self, + claim: E::Scalar, + num_rounds: usize, + degree_bound: usize, + transcript: &mut E::TE, + ) -> Result<(E::Scalar, Vec), NovaError> { + let mut e = claim; + let mut r: Vec = Vec::new(); + + // verify that there is a univariate polynomial for each round + if 
self.compressed_polys.len() != num_rounds { + return Err(NovaError::InvalidSumcheckProof); + } + + for i in 0..self.compressed_polys.len() { + let poly = self.compressed_polys[i].decompress(&e); + + // verify degree bound + if poly.degree() != degree_bound { + return Err(NovaError::InvalidSumcheckProof); + } + + // we do not need to check if poly(0) + poly(1) = e, as + // decompress() call above already ensures that holds + debug_assert_eq!(poly.eval_at_zero() + poly.eval_at_one(), e); + + // append the prover's message to the transcript + transcript.absorb(b"p", &poly); + + // derive the verifier's challenge for the next round + let r_i = transcript.squeeze(b"c")?; + + r.push(r_i); + + // evaluate the claimed degree-ell polynomial at r_i + e = poly.evaluate(&r_i); + } + + Ok((e, r)) + } + + pub fn verify_batch( + &self, + claims: &[E::Scalar], + num_rounds: &[usize], + coeffs: &[E::Scalar], + degree_bound: usize, + transcript: &mut E::TE, + ) -> Result<(E::Scalar, Vec), NovaError> { + let num_instances = claims.len(); + assert_eq!(num_rounds.len(), num_instances); + assert_eq!(coeffs.len(), num_instances); + + // n = maxᵢ{nᵢ} + let num_rounds_max = *num_rounds.iter().max().unwrap(); + + // Random linear combination of claims, + // where each claim is scaled by 2^{n-nᵢ} to account for the padding. 
+ // + // claim = ∑ᵢ coeffᵢ⋅2^{n-nᵢ}⋅cᵢ + let claim = zip_with!( + ( + zip_with!(iter, (claims, num_rounds), |claim, num_rounds| { + let scaling_factor = 1 << (num_rounds_max - num_rounds); + E::Scalar::from(scaling_factor as u64) * claim + }), + coeffs.iter() + ), + |scaled_claim, coeff| scaled_claim * coeff + ) + .sum(); + + self.verify(claim, num_rounds_max, degree_bound, transcript) + } + + #[inline] + fn compute_eval_points_quad( + poly_A: &MultilinearPolynomial, + poly_B: &MultilinearPolynomial, + comb_func: &F, + ) -> (E::Scalar, E::Scalar) + where + F: Fn(&E::Scalar, &E::Scalar) -> E::Scalar + Sync, + { + let len = poly_A.len() / 2; + (0..len) + .into_par_iter() + .map(|i| { + // eval 0: bound_func is A(low) + let eval_point_0 = comb_func(&poly_A[i], &poly_B[i]); + + // eval 2: bound_func is -A(low) + 2*A(high) + let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i]; + let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i]; + let eval_point_2 = comb_func(&poly_A_bound_point, &poly_B_bound_point); + (eval_point_0, eval_point_2) + }) + .reduce(|| (E::Scalar::ZERO, E::Scalar::ZERO), |a, b| (a.0 + b.0, a.1 + b.1)) + } + + pub fn prove_quad( + claim: &E::Scalar, + num_rounds: usize, + poly_A: &mut MultilinearPolynomial, + poly_B: &mut MultilinearPolynomial, + comb_func: F, + transcript: &mut E::TE, + ) -> Result<(Self, Vec, Vec), NovaError> + where + F: Fn(&E::Scalar, &E::Scalar) -> E::Scalar + Sync, + { + let mut r: Vec = Vec::new(); + let mut polys: Vec> = Vec::new(); + let mut claim_per_round = *claim; + for _ in 0..num_rounds { + let poly = { + let (eval_point_0, eval_point_2) = + Self::compute_eval_points_quad(poly_A, poly_B, &comb_func); + + let evals = vec![eval_point_0, claim_per_round - eval_point_0, eval_point_2]; + UniPoly::from_evals(&evals) + }; + + // append the prover's message to the transcript + transcript.absorb(b"p", &poly); + + // derive the verifier's challenge for the next round + let r_i = 
transcript.squeeze(b"c")?; + r.push(r_i); + polys.push(poly.compress()); + + // Set up next round + claim_per_round = poly.evaluate(&r_i); + + // bind all tables to the verifier's challenge + rayon::join(|| poly_A.bind_poly_var_top(&r_i), || poly_B.bind_poly_var_top(&r_i)); + } + + Ok((Self { compressed_polys: polys }, r, vec![poly_A[0], poly_B[0]])) + } + + pub fn prove_quad_batch( + claims: &[E::Scalar], + num_rounds: &[usize], + mut poly_A_vec: Vec>, + mut poly_B_vec: Vec>, + coeffs: &[E::Scalar], + comb_func: F, + transcript: &mut E::TE, + ) -> Result<(Self, Vec, (Vec, Vec)), NovaError> + where + F: Fn(&E::Scalar, &E::Scalar) -> E::Scalar + Sync, + { + let num_claims = claims.len(); + + assert_eq!(num_rounds.len(), num_claims); + assert_eq!(poly_A_vec.len(), num_claims); + assert_eq!(poly_B_vec.len(), num_claims); + assert_eq!(coeffs.len(), num_claims); + + for (i, &num_rounds) in num_rounds.iter().enumerate() { + let expected_size = 1 << num_rounds; + + // Direct indexing with the assumption that the index will always be in bounds + let a = &poly_A_vec[i]; + let b = &poly_B_vec[i]; + + for (l, polyname) in [(a.len(), "poly_A_vec"), (b.len(), "poly_B_vec")].iter() { + assert_eq!(*l, expected_size, "Mismatch in size for {} at index {}", polyname, i); + } + } + + let num_rounds_max = *num_rounds.iter().max().unwrap(); + let mut e = zip_with!(iter, (claims, num_rounds, coeffs), |claim, num_rounds, coeff| { + let scaled_claim = E::Scalar::from((1 << (num_rounds_max - num_rounds)) as u64) * claim; + scaled_claim * coeff + }) + .sum(); + let mut r: Vec = Vec::new(); + let mut quad_polys: Vec> = Vec::new(); + + for current_round in 0..num_rounds_max { + let remaining_rounds = num_rounds_max - current_round; + let evals: Vec<(E::Scalar, E::Scalar)> = zip_with!( + par_iter, + (num_rounds, claims, poly_A_vec, poly_B_vec), + |num_rounds, claim, poly_A, poly_B| { + if remaining_rounds <= *num_rounds { + Self::compute_eval_points_quad(poly_A, poly_B, &comb_func) + } else { 
+ let remaining_variables = remaining_rounds - num_rounds - 1; + let scaled_claim = E::Scalar::from((1 << remaining_variables) as u64) * claim; + (scaled_claim, scaled_claim) + } + } + ) + .collect(); + + let evals_combined_0 = (0..evals.len()).map(|i| evals[i].0 * coeffs[i]).sum(); + let evals_combined_2 = (0..evals.len()).map(|i| evals[i].1 * coeffs[i]).sum(); + + let evals = vec![evals_combined_0, e - evals_combined_0, evals_combined_2]; + let poly = UniPoly::from_evals(&evals); + + // append the prover's message to the transcript + transcript.absorb(b"p", &poly); + + // derive the verifier's challenge for the next round + let r_i = transcript.squeeze(b"c")?; + r.push(r_i); + + // bound all tables to the verifier's challenge + zip_with_for_each!( + (num_rounds.par_iter(), poly_A_vec.par_iter_mut(), poly_B_vec.par_iter_mut()), + |num_rounds, poly_A, poly_B| { + if remaining_rounds <= *num_rounds { + let _ = + rayon::join(|| poly_A.bind_poly_var_top(&r_i), || poly_B.bind_poly_var_top(&r_i)); + } + } + ); + + e = poly.evaluate(&r_i); + quad_polys.push(poly.compress()); + } + poly_A_vec.iter().for_each(|p| assert_eq!(p.len(), 1)); + poly_B_vec.iter().for_each(|p| assert_eq!(p.len(), 1)); + + let poly_A_final = poly_A_vec.into_iter().map(|poly| poly[0]).collect::>(); + let poly_B_final = poly_B_vec.into_iter().map(|poly| poly[0]).collect::>(); + + let eval_expected = + zip_with!(iter, (poly_A_final, poly_B_final, coeffs), |eA, eB, coeff| comb_func(eA, eB) + * coeff) + .sum::(); + assert_eq!(e, eval_expected); + + let claims_prod = (poly_A_final, poly_B_final); + + Ok((Self::new(quad_polys), r, claims_prod)) + } + + #[inline] + fn compute_eval_points_cubic( + poly_A: &MultilinearPolynomial, + poly_B: &MultilinearPolynomial, + poly_C: &MultilinearPolynomial, + comb_func: &F, + ) -> (E::Scalar, E::Scalar, E::Scalar) + where + F: Fn(&E::Scalar, &E::Scalar, &E::Scalar) -> E::Scalar + Sync, + { + let len = poly_A.len() / 2; + (0..len) + .into_par_iter() + .map(|i| { + // 
eval 0: bound_func is A(low) + let eval_point_0 = comb_func(&poly_A[i], &poly_B[i], &poly_C[i]); + + let poly_A_right_term = poly_A[len + i] - poly_A[i]; + let poly_B_right_term = poly_B[len + i] - poly_B[i]; + let poly_C_right_term = poly_C[len + i] - poly_C[i]; + + // eval 2: bound_func is -A(low) + 2*A(high) + let poly_A_bound_point = poly_A[len + i] + poly_A_right_term; + let poly_B_bound_point = poly_B[len + i] + poly_B_right_term; + let poly_C_bound_point = poly_C[len + i] + poly_C_right_term; + let eval_point_2 = comb_func(&poly_A_bound_point, &poly_B_bound_point, &poly_C_bound_point); + + // eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with + // bound_func applied to eval(2) + let poly_A_bound_point = poly_A_bound_point + poly_A_right_term; + let poly_B_bound_point = poly_B_bound_point + poly_B_right_term; + let poly_C_bound_point = poly_C_bound_point + poly_C_right_term; + let eval_point_3 = comb_func(&poly_A_bound_point, &poly_B_bound_point, &poly_C_bound_point); + (eval_point_0, eval_point_2, eval_point_3) + }) + .reduce( + || (E::Scalar::ZERO, E::Scalar::ZERO, E::Scalar::ZERO), + |a, b| (a.0 + b.0, a.1 + b.1, a.2 + b.2), + ) + } + + #[inline] + fn compute_eval_points_cubic_with_additive_term( + poly_A: &MultilinearPolynomial, + poly_B: &MultilinearPolynomial, + poly_C: &MultilinearPolynomial, + poly_D: &MultilinearPolynomial, + comb_func: &F, + ) -> (E::Scalar, E::Scalar, E::Scalar) + where + F: Fn(&E::Scalar, &E::Scalar, &E::Scalar, &E::Scalar) -> E::Scalar + Sync, + { + let len = poly_A.len() / 2; + (0..len) + .into_par_iter() + .map(|i| { + // eval 0: bound_func is A(low) + let eval_point_0 = comb_func(&poly_A[i], &poly_B[i], &poly_C[i], &poly_D[i]); + + let poly_A_right_term = poly_A[len + i] - poly_A[i]; + let poly_B_right_term = poly_B[len + i] - poly_B[i]; + let poly_C_right_term = poly_C[len + i] - poly_C[i]; + let poly_D_right_term = poly_D[len + i] - poly_D[i]; + + // eval 2: bound_func is -A(low) + 2*A(high) + let 
poly_A_bound_point = poly_A[len + i] + poly_A_right_term; + let poly_B_bound_point = poly_B[len + i] + poly_B_right_term; + let poly_C_bound_point = poly_C[len + i] + poly_C_right_term; + let poly_D_bound_point = poly_D[len + i] + poly_D_right_term; + let eval_point_2 = comb_func( + &poly_A_bound_point, + &poly_B_bound_point, + &poly_C_bound_point, + &poly_D_bound_point, + ); + + // eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with + // bound_func applied to eval(2) + let poly_A_bound_point = poly_A_bound_point + poly_A_right_term; + let poly_B_bound_point = poly_B_bound_point + poly_B_right_term; + let poly_C_bound_point = poly_C_bound_point + poly_C_right_term; + let poly_D_bound_point = poly_D_bound_point + poly_D_right_term; + let eval_point_3 = comb_func( + &poly_A_bound_point, + &poly_B_bound_point, + &poly_C_bound_point, + &poly_D_bound_point, + ); + (eval_point_0, eval_point_2, eval_point_3) + }) + .reduce( + || (E::Scalar::ZERO, E::Scalar::ZERO, E::Scalar::ZERO), + |a, b| (a.0 + b.0, a.1 + b.1, a.2 + b.2), + ) + } + + pub fn prove_cubic_with_additive_term( + claim: &E::Scalar, + num_rounds: usize, + poly_A: &mut MultilinearPolynomial, + poly_B: &mut MultilinearPolynomial, + poly_C: &mut MultilinearPolynomial, + poly_D: &mut MultilinearPolynomial, + comb_func: F, + transcript: &mut E::TE, + ) -> Result<(Self, Vec, Vec), NovaError> + where + F: Fn(&E::Scalar, &E::Scalar, &E::Scalar, &E::Scalar) -> E::Scalar + Sync, + { + let mut r: Vec = Vec::new(); + let mut polys: Vec> = Vec::new(); + let mut claim_per_round = *claim; + + for _ in 0..num_rounds { + let poly = { + // Make an iterator returning the contributions to the evaluations + let (eval_point_0, eval_point_2, eval_point_3) = + Self::compute_eval_points_cubic_with_additive_term( + poly_A, poly_B, poly_C, poly_D, &comb_func, + ); + + let evals = vec![eval_point_0, claim_per_round - eval_point_0, eval_point_2, eval_point_3]; + UniPoly::from_evals(&evals) + }; + + // append the 
prover's message to the transcript + transcript.absorb(b"p", &poly); + + // derive the verifier's challenge for the next round + let r_i = transcript.squeeze(b"c")?; + r.push(r_i); + polys.push(poly.compress()); + + // Set up next round + claim_per_round = poly.evaluate(&r_i); + + // bound all tables to the verifier's challenge + rayon::join( + || rayon::join(|| poly_A.bind_poly_var_top(&r_i), || poly_B.bind_poly_var_top(&r_i)), + || rayon::join(|| poly_C.bind_poly_var_top(&r_i), || poly_D.bind_poly_var_top(&r_i)), + ); + } + + Ok((Self { compressed_polys: polys }, r, vec![poly_A[0], poly_B[0], poly_C[0], poly_D[0]])) + } + + pub fn prove_cubic_with_additive_term_batch( + claims: &[E::Scalar], + num_rounds: &[usize], + mut poly_A_vec: Vec>, + mut poly_B_vec: Vec>, + mut poly_C_vec: Vec>, + mut poly_D_vec: Vec>, + coeffs: &[E::Scalar], + comb_func: F, + transcript: &mut E::TE, + ) -> Result<(Self, Vec, Vec>), NovaError> + where + F: Fn(&E::Scalar, &E::Scalar, &E::Scalar, &E::Scalar) -> E::Scalar + Sync, + { + let num_instances = claims.len(); + assert_eq!(num_rounds.len(), num_instances); + assert_eq!(coeffs.len(), num_instances); + assert_eq!(poly_A_vec.len(), num_instances); + assert_eq!(poly_B_vec.len(), num_instances); + assert_eq!(poly_C_vec.len(), num_instances); + assert_eq!(poly_D_vec.len(), num_instances); + + for (i, &num_rounds) in num_rounds.iter().enumerate() { + let expected_size = 1 << num_rounds; + + // Direct indexing with the assumption that the index will always be in bounds + let a = &poly_A_vec[i]; + let b = &poly_B_vec[i]; + let c = &poly_C_vec[i]; + let d = &poly_D_vec[i]; + + for (l, polyname) in + [(a.len(), "poly_A"), (b.len(), "poly_B"), (c.len(), "poly_C"), (d.len(), "poly_D")].iter() + { + assert_eq!(*l, expected_size, "Mismatch in size for {} at index {}", polyname, i); + } + } + + let num_rounds_max = *num_rounds.iter().max().unwrap(); + + let mut r: Vec = Vec::new(); + let mut polys: Vec> = Vec::new(); + let mut claim_per_round = + 
zip_with!(iter, (claims, num_rounds, coeffs), |claim, num_rounds, coeff| { + let scaled_claim = E::Scalar::from((1 << (num_rounds_max - num_rounds)) as u64) * claim; + scaled_claim * *coeff + }) + .sum(); + + for current_round in 0..num_rounds_max { + let remaining_rounds = num_rounds_max - current_round; + let evals: Vec<(E::Scalar, E::Scalar, E::Scalar)> = zip_with!( + par_iter, + (num_rounds, claims, poly_A_vec, poly_B_vec, poly_C_vec, poly_D_vec), + |num_rounds, claim, poly_A, poly_B, poly_C, poly_D| { + if remaining_rounds <= *num_rounds { + Self::compute_eval_points_cubic_with_additive_term( + poly_A, poly_B, poly_C, poly_D, &comb_func, + ) + } else { + let remaining_variables = remaining_rounds - num_rounds - 1; + let scaled_claim = E::Scalar::from((1 << remaining_variables) as u64) * claim; + (scaled_claim, scaled_claim, scaled_claim) + } + } + ) + .collect(); + + let evals_combined_0 = (0..num_instances).map(|i| evals[i].0 * coeffs[i]).sum(); + let evals_combined_2 = (0..num_instances).map(|i| evals[i].1 * coeffs[i]).sum(); + let evals_combined_3 = (0..num_instances).map(|i| evals[i].2 * coeffs[i]).sum(); + + let evals = vec![ + evals_combined_0, + claim_per_round - evals_combined_0, + evals_combined_2, + evals_combined_3, + ]; + let poly = UniPoly::from_evals(&evals); + + // append the prover's message to the transcript + transcript.absorb(b"p", &poly); + + // derive the verifier's challenge for the next round + let r_i = transcript.squeeze(b"c")?; + r.push(r_i); + + polys.push(poly.compress()); + + // Set up next round + claim_per_round = poly.evaluate(&r_i); + + // bound all the tables to the verifier's challenge + + zip_with_for_each!( + ( + num_rounds.par_iter(), + poly_A_vec.par_iter_mut(), + poly_B_vec.par_iter_mut(), + poly_C_vec.par_iter_mut(), + poly_D_vec.par_iter_mut() + ), + |num_rounds, poly_A, poly_B, poly_C, poly_D| { + if remaining_rounds <= *num_rounds { + let _ = rayon::join( + || rayon::join(|| poly_A.bind_poly_var_top(&r_i), || 
poly_B.bind_poly_var_top(&r_i)), + || rayon::join(|| poly_C.bind_poly_var_top(&r_i), || poly_D.bind_poly_var_top(&r_i)), + ); + } + } + ); + } + + let poly_A_final = poly_A_vec.into_iter().map(|poly| poly[0]).collect(); + let poly_B_final = poly_B_vec.into_iter().map(|poly| poly[0]).collect(); + let poly_C_final = poly_C_vec.into_iter().map(|poly| poly[0]).collect(); + let poly_D_final = poly_D_vec.into_iter().map(|poly| poly[0]).collect(); + + Ok((Self { compressed_polys: polys }, r, vec![ + poly_A_final, + poly_B_final, + poly_C_final, + poly_D_final, + ])) + } +} diff --git a/src/supernova/Readme.md b/prover/src/supernova/Readme.md similarity index 100% rename from src/supernova/Readme.md rename to prover/src/supernova/Readme.md diff --git a/prover/src/supernova/circuit.rs b/prover/src/supernova/circuit.rs new file mode 100644 index 0000000..ff93acb --- /dev/null +++ b/prover/src/supernova/circuit.rs @@ -0,0 +1,769 @@ +//! Supernova implementation support arbitrary argumented circuits and running +//! instances. There are two Verification Circuits for each argumented circuit: +//! The primary and the secondary. Each of them is over a cycle curve but +//! only the primary executes the next step of the computation. +//! Each circuit takes as input 2 hashes. +//! Each circuit folds the last invocation of the other into the respective +//! running instance, specified by `augmented_circuit_index` +//! +//! The augmented circuit F' for `SuperNova` that includes everything from Nova +//! and additionally checks: +//! 1. Ui[] are contained in X[0] hash pre-image. +//! 2. R1CS Instance u is folded into Ui[augmented_circuit_index] correctly; just like Nova IVC. +//! 3. (optional by F logic) F circuit might check `program_counter_{i}` invoked current F +//! circuit is legal or not. +//! 3. F circuit produce `program_counter_{i+1}` and sent to next round to optionally constraint +//! the next F' argumented circuit. 
+use std::marker::PhantomData; + +use bellpepper::gadgets::{boolean_utils::conditionally_select_slice, Assignment}; +use bellpepper_core::{ + boolean::{AllocatedBit, Boolean}, + num::AllocatedNum, + ConstraintSystem, SynthesisError, +}; +use ff::{Field, PrimeField}; +use itertools::Itertools as _; +use serde::{Deserialize, Serialize}; + +use crate::{ + constants::{NIO_NOVA_FOLD, NUM_HASH_BITS}, + gadgets::{ + alloc_num_equals, alloc_scalar_as_base, alloc_zero, conditionally_select_alloc_relaxed_r1cs, + conditionally_select_vec_allocated_relaxed_r1cs_instance, le_bits_to_num, AllocatedPoint, + AllocatedR1CSInstance, AllocatedRelaxedR1CSInstance, + }, + r1cs::{R1CSInstance, RelaxedR1CSInstance}, + supernova::{ + num_ro_inputs, + utils::{get_from_vec_alloc_relaxed_r1cs, get_selector_vec_from_index}, + }, + traits::{commitment::CommitmentTrait, Engine, ROCircuitTrait, ROConstantsCircuit}, + zip_with, Commitment, +}; + +// NOTE: This trait below is actually useful outside of this if you want to +// implement a step circuit on your own type. We use it in our prover code. +// However, there is a conflicting "StepCircuit" in +// `crate::traits::circuit::StepCircuit` which I deleted. We should likely have +// a supertrait here for NIVC that provides the circuit index because we only +// want that when we are using NIVC. Program counter should be able to be put to +// `None` otherwise, or we could handle that slightly differently too + +/// A helper trait for a step of the incremental computation for `SuperNova` +/// (i.e., circuit for F) -- to be implemented by applications. 
+pub trait StepCircuit: Send + Sync + Clone { + /// Return the the number of inputs or outputs of each step + /// (this method is called only at circuit synthesis time) + /// `synthesize` and `output` methods are expected to take as + /// input a vector of size equal to arity and output a vector of size equal + /// to arity + fn arity(&self) -> usize; + + /// Return this `StepCircuit`'s assigned index, for use when enforcing the + /// program counter. + fn circuit_index(&self) -> usize; + + /// Synthesize the circuit for a computation step and return variable + /// that corresponds to the output of the step `pc_{i+1}` and `z_{i+1}` + fn synthesize>( + &self, + cs: &mut CS, + pc: Option<&AllocatedNum>, + z: &[AllocatedNum], + ) -> Result<(Option>, Vec>), SynthesisError>; +} + +// NOTES: This seems to just enforce that when we call a circuit at a given +// step, it matches the set program counter. + +/// A helper trait for a step of the incremental computation for `SuperNova` +/// (i.e., circuit for F) -- automatically implemented for `StepCircuit` and +/// used internally to enforce that the circuit selected by the program counter +/// is used at each step. +pub trait EnforcingStepCircuit: Send + Sync + Clone + StepCircuit { + /// Delegate synthesis to `StepCircuit::synthesize`, and additionally, + /// enforce the constraint that program counter `pc`, if supplied, is + /// equal to the circuit's assigned index. 
+ fn enforcing_synthesize>( + &self, + cs: &mut CS, + pc: Option<&AllocatedNum>, + z: &[AllocatedNum], + ) -> Result<(Option>, Vec>), SynthesisError> { + if let Some(pc) = pc { + let circuit_index = F::from(self.circuit_index() as u64); + + // pc * 1 = circuit_index + cs.enforce( + || "pc matches circuit index", + |lc| lc + pc.get_variable(), + |lc| lc + CS::one(), + |lc| lc + (circuit_index, CS::one()), + ); + } + self.synthesize(cs, pc, z) + } +} + +impl> EnforcingStepCircuit for S {} + +/// A trivial step circuit that simply returns the input +/// NOTE: Should only be used as secondary circuit!!! +#[derive(Clone, Debug, Default)] +pub struct TrivialCircuit { + _p: PhantomData, +} + +impl StepCircuit for TrivialCircuit +where F: PrimeField +{ + fn arity(&self) -> usize { 1 } + + /// This will not interfere with other circuit indices in the primary + /// circuit. + fn circuit_index(&self) -> usize { 0 } + + fn synthesize>( + &self, + _cs: &mut CS, + program_counter: Option<&AllocatedNum>, + z: &[AllocatedNum], + ) -> Result<(Option>, Vec>), SynthesisError> { + Ok((program_counter.cloned(), z.to_vec())) + } +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct SuperNovaAugmentedCircuitParams { + limb_width: usize, + n_limbs: usize, + is_primary_circuit: bool, // A boolean indicating if this is the primary circuit +} + +impl SuperNovaAugmentedCircuitParams { + pub const fn new(limb_width: usize, n_limbs: usize, is_primary_circuit: bool) -> Self { + Self { limb_width, n_limbs, is_primary_circuit } + } + + pub fn get_n_limbs(&self) -> usize { self.n_limbs } +} + +#[derive(Debug)] +pub struct SuperNovaAugmentedCircuitInputs<'a, E: Engine> { + pp_digest: E::Scalar, + i: E::Base, + /// Input to the circuit for the base case + z0: &'a [E::Base], + /// Input to the circuit for the non-base case + zi: Option<&'a [E::Base]>, + /// List of `RelaxedR1CSInstance`. + /// `None` if this is the base case. 
+ /// Elements are `None` if the circuit at that index was not yet executed. + U: Option<&'a [Option>]>, + /// R1CS proof to be folded into U + u: Option<&'a R1CSInstance>, + /// Nova folding proof for accumulating u into U[j] + T: Option<&'a Commitment>, + /// Index of the current circuit + program_counter: Option, + /// Index j of circuit being folded into U[j] + last_augmented_circuit_index: E::Base, +} + +impl<'a, E: Engine> SuperNovaAugmentedCircuitInputs<'a, E> { + /// Create new inputs/witness for the verification circuit + pub fn new( + pp_digest: E::Scalar, + i: E::Base, + z0: &'a [E::Base], + zi: Option<&'a [E::Base]>, + U: Option<&'a [Option>]>, + u: Option<&'a R1CSInstance>, + T: Option<&'a Commitment>, + program_counter: Option, + last_augmented_circuit_index: E::Base, + ) -> Self { + Self { pp_digest, i, z0, zi, U, u, T, program_counter, last_augmented_circuit_index } + } +} + +/// The augmented circuit F' in `SuperNova` that includes a step circuit F +/// and the circuit for the verifier in `SuperNova`'s non-interactive folding +/// scheme, `SuperNova` NIFS will fold strictly r1cs instance u with respective +/// relaxed r1cs instance `U[last_augmented_circuit_index]` +pub struct SuperNovaAugmentedCircuit<'a, E: Engine, SC: EnforcingStepCircuit> { + params: &'a SuperNovaAugmentedCircuitParams, + ro_consts: ROConstantsCircuit, + inputs: Option>, + step_circuit: &'a SC, // The function that is applied for each step + num_augmented_circuits: usize, // number of overall augmented circuits +} + +impl<'a, E: Engine, SC: EnforcingStepCircuit> SuperNovaAugmentedCircuit<'a, E, SC> { + /// Create a new verification circuit for the input relaxed r1cs instances + pub const fn new( + params: &'a SuperNovaAugmentedCircuitParams, + inputs: Option>, + step_circuit: &'a SC, + ro_consts: ROConstantsCircuit, + num_augmented_circuits: usize, + ) -> Self { + Self { params, inputs, step_circuit, ro_consts, num_augmented_circuits } + } + + /// Allocate all witnesses from 
the augmented function's non-deterministic + /// inputs. Optional entries are allocated as their default values. + fn alloc_witness::Base>>( + &self, + mut cs: CS, + arity: usize, + num_augmented_circuits: usize, + ) -> Result< + ( + AllocatedNum, + AllocatedNum, + Vec>, + Vec>, + Vec>, + AllocatedR1CSInstance, + AllocatedPoint, + Option>, + Vec, + ), + SynthesisError, + > { + let last_augmented_circuit_index = + AllocatedNum::alloc(cs.namespace(|| "last_augmented_circuit_index"), || { + Ok(self.inputs.get()?.last_augmented_circuit_index) + })?; + + // Allocate the params + let params = alloc_scalar_as_base::( + cs.namespace(|| "params"), + self.inputs.as_ref().map(|inputs| inputs.pp_digest), + )?; + + // Allocate i + let i = AllocatedNum::alloc(cs.namespace(|| "i"), || Ok(self.inputs.get()?.i))?; + + // Allocate program_counter only on primary circuit + let program_counter = if self.params.is_primary_circuit { + Some(AllocatedNum::alloc(cs.namespace(|| "program_counter"), || { + Ok(self.inputs.get()?.program_counter.expect("program_counter missing")) + })?) + } else { + None + }; + + // Allocate z0 + let z_0 = (0..arity) + .map(|i| { + AllocatedNum::alloc(cs.namespace(|| format!("z0_{i}")), || Ok(self.inputs.get()?.z0[i])) + }) + .collect::>, _>>()?; + + // Allocate zi. 
If inputs.zi is not provided (base case) allocate default value + // 0 + let zero = vec![E::Base::ZERO; arity]; + let z_i = (0..arity) + .map(|i| { + AllocatedNum::alloc(cs.namespace(|| format!("zi_{i}")), || { + Ok(self.inputs.get()?.zi.unwrap_or(&zero)[i]) + }) + }) + .collect::>, _>>()?; + + // Allocate the running instances + let U = (0..num_augmented_circuits) + .map(|i| { + AllocatedRelaxedR1CSInstance::alloc( + cs.namespace(|| format!("Allocate U {:?}", i)), + self.inputs.as_ref().and_then(|inputs| inputs.U.and_then(|U| U[i].as_ref())), + self.params.limb_width, + self.params.n_limbs, + ) + }) + .collect::>, _>>()?; + + // Allocate the r1cs instance to be folded in + let u = AllocatedR1CSInstance::alloc( + cs.namespace(|| "allocate instance u to fold"), + self.inputs.as_ref().and_then(|inputs| inputs.u), + )?; + + // Allocate T + let T = AllocatedPoint::alloc( + cs.namespace(|| "allocate T"), + self.inputs.as_ref().and_then(|inputs| inputs.T.map(|T| T.to_coordinates())), + )?; + T.check_on_curve(cs.namespace(|| "check T on curve"))?; + + // Compute instance selector + let last_augmented_circuit_selector = get_selector_vec_from_index( + cs.namespace(|| "instance selector"), + &last_augmented_circuit_index, + num_augmented_circuits, + )?; + + Ok((params, i, z_0, z_i, U, u, T, program_counter, last_augmented_circuit_selector)) + } + + /// Synthesizes base case and returns the new relaxed `R1CSInstance` + fn synthesize_base_case::Base>>( + &self, + mut cs: CS, + u: AllocatedR1CSInstance, + last_augmented_circuit_selector: &[Boolean], + ) -> Result>, SynthesisError> { + let mut cs = cs.namespace(|| "alloc U_i default"); + + // Allocate a default relaxed r1cs instance + let default = AllocatedRelaxedR1CSInstance::default( + cs.namespace(|| "Allocate primary U_default".to_string()), + self.params.limb_width, + self.params.n_limbs, + )?; + + // The primary circuit just initialize single AllocatedRelaxedR1CSInstance + let U_default = if self.params.is_primary_circuit 
{ + vec![default] + } else { + // The secondary circuit convert the incoming R1CS instance on index which match + // last_augmented_circuit_index + let incoming_r1cs = AllocatedRelaxedR1CSInstance::from_r1cs_instance( + cs.namespace(|| "Allocate incoming_r1cs"), + u, + self.params.limb_width, + self.params.n_limbs, + )?; + + last_augmented_circuit_selector + .iter() + .enumerate() + .map(|(i, equal_bit)| { + // If index match last_augmented_circuit_index, then return incoming_r1cs, + // otherwise return the default one + conditionally_select_alloc_relaxed_r1cs( + cs.namespace(|| format!("select on index namespace {:?}", i)), + &incoming_r1cs, + &default, + equal_bit, + ) + }) + .collect::>, _>>()? + }; + Ok(U_default) + } + + /// Synthesizes non base case and returns the new relaxed `R1CSInstance` + /// And a boolean indicating if all checks pass + fn synthesize_non_base_case::Base>>( + &self, + mut cs: CS, + params: &AllocatedNum, + i: &AllocatedNum, + z_0: &[AllocatedNum], + z_i: &[AllocatedNum], + U: &[AllocatedRelaxedR1CSInstance], + u: &AllocatedR1CSInstance, + T: &AllocatedPoint, + arity: usize, + last_augmented_circuit_selector: &[Boolean], + program_counter: &Option>, + ) -> Result<(Vec>, AllocatedBit), SynthesisError> + { + // Check that u.x[0] = Hash(params, i, program_counter, z0, zi, U[]) + let mut ro = E::ROCircuit::new( + self.ro_consts.clone(), + num_ro_inputs( + self.num_augmented_circuits, + self.params.get_n_limbs(), + arity, + self.params.is_primary_circuit, + ), + ); + ro.absorb(params); + ro.absorb(i); + + if self.params.is_primary_circuit { + let Some(program_counter) = program_counter.as_ref() else { + return Err(SynthesisError::AssignmentMissing); + }; + ro.absorb(program_counter) + } + + for e in z_0 { + ro.absorb(e); + } + for e in z_i { + ro.absorb(e); + } + + U.iter().enumerate().try_for_each(|(i, U)| { + U.absorb_in_ro(cs.namespace(|| format!("absorb U {:?}", i)), &mut ro) + })?; + + let hash_bits = ro.squeeze(cs.namespace(|| "Input 
hash"), NUM_HASH_BITS)?; + let hash = le_bits_to_num(cs.namespace(|| "bits to hash"), &hash_bits)?; + let check_pass: AllocatedBit = alloc_num_equals( + cs.namespace(|| "check consistency of u.X[0] with H(params, U, i, z0, zi)"), + &u.X[0], + &hash, + )?; + + // Run NIFS Verifier + let U_to_fold = get_from_vec_alloc_relaxed_r1cs( + cs.namespace(|| "U to fold"), + U, + last_augmented_circuit_selector, + )?; + let U_fold = U_to_fold.fold_with_r1cs( + cs.namespace(|| "compute fold of U and u"), + params, + u, + T, + self.ro_consts.clone(), + self.params.limb_width, + self.params.n_limbs, + )?; + + // update AllocatedRelaxedR1CSInstance on index match augmented circuit index + let U_next: Vec> = + zip_with!((U.iter(), last_augmented_circuit_selector.iter()), |U, equal_bit| { + conditionally_select_alloc_relaxed_r1cs( + cs.namespace(|| "select on index namespace"), + &U_fold, + U, + equal_bit, + ) + }) + .collect::>, _>>()?; + + Ok((U_next, check_pass)) + } + + pub fn synthesize::Base>>( + self, + cs: &mut CS, + ) -> Result<(Option>, Vec>), SynthesisError> { + let arity = self.step_circuit.arity(); + let num_augmented_circuits = if self.params.is_primary_circuit { + // primary circuit only fold single running instance with secondary output + // strict r1cs instance + 1 + } else { + // secondary circuit contains the logic to choose one of multiple augments + // running instance to fold + self.num_augmented_circuits + }; + + if self.inputs.is_some() { + // Check arity of z0 + let z0_len = self.inputs.as_ref().map_or(0, |inputs| inputs.z0.len()); + if self.step_circuit.arity() != z0_len { + return Err(SynthesisError::IncompatibleLengthVector(format!( + "z0_len {:?} != arity length {:?}", + z0_len, + self.step_circuit.arity() + ))); + } + + // The primary curve should always fold the circuit with index 0 + let last_augmented_circuit_index = + self.inputs.get().map_or(E::Base::ZERO, |inputs| inputs.last_augmented_circuit_index); + if self.params.is_primary_circuit && 
last_augmented_circuit_index != E::Base::ZERO { + return Err(SynthesisError::IncompatibleLengthVector( + "primary circuit running instance only valid on index 0".to_string(), + )); + } + } + + // Allocate witnesses + let (params, i, z_0, z_i, U, u, T, program_counter, last_augmented_circuit_selector) = self + .alloc_witness( + cs.namespace(|| "allocate the circuit witness"), + arity, + num_augmented_circuits, + )?; + + // Compute variable indicating if this is the base case + let zero = alloc_zero(cs.namespace(|| "zero")); + let is_base_case = alloc_num_equals(cs.namespace(|| "Check if base case"), &i.clone(), &zero)?; + + // Synthesize the circuit for the non-base case and get the new running + // instances along with a boolean indicating if all checks have passed + // must use return `last_augmented_circuit_index_checked` since it got range + // checked + let (U_next_non_base, check_non_base_pass) = self.synthesize_non_base_case( + cs.namespace(|| "synthesize non base case"), + ¶ms, + &i, + &z_0, + &z_i, + &U, + &u, + &T, + arity, + &last_augmented_circuit_selector, + &program_counter, + )?; + + // Synthesize the circuit for the base case and get the new running instances + let U_next_base = self.synthesize_base_case( + cs.namespace(|| "base case"), + u.clone(), + &last_augmented_circuit_selector, + )?; + + // Either check_non_base_pass=true or we are in the base case + let should_be_false = AllocatedBit::nor( + cs.namespace(|| "check_non_base_pass nor base_case"), + &check_non_base_pass, + &is_base_case, + )?; + cs.enforce( + || "check_non_base_pass nor base_case = false", + |lc| lc + should_be_false.get_variable(), + |lc| lc + CS::one(), + |lc| lc, + ); + + // Compute the U_next + let U_next = conditionally_select_vec_allocated_relaxed_r1cs_instance( + cs.namespace(|| "U_next"), + &U_next_base[..], + &U_next_non_base[..], + &Boolean::from(is_base_case.clone()), + )?; + + // Compute i + 1 + let i_next = + AllocatedNum::alloc(cs.namespace(|| "i + 1"), || 
Ok(*i.get_value().get()? + E::Base::ONE))?; + cs.enforce( + || "check i + 1", + |lc| lc + i.get_variable() + CS::one(), + |lc| lc + CS::one(), + |lc| lc + i_next.get_variable(), + ); + + // Compute z_{i+1} + let z_input = conditionally_select_slice( + cs.namespace(|| "select input to F"), + &z_0, + &z_i, + &Boolean::from(is_base_case), + )?; + + let (program_counter_new, z_next) = self.step_circuit.enforcing_synthesize( + &mut cs.namespace(|| "F"), + program_counter.as_ref(), + &z_input, + )?; + + if z_next.len() != arity { + return Err(SynthesisError::IncompatibleLengthVector("z_next".to_string())); + } + + // To check correct folding sequencing we are just going to make a hash. + // The next RunningInstance folding can take the pre-image of this hash as + // witness and check. + + // "Finally, there is a subtle sizing issue in the above description: in each + // step, because Ui+1 is produced as the public IO of F0 + // program_counter+1, it must be contained in the public IO of instance + // ui+1. In the next iteration, because ui+1 is folded + // into Ui+1[program_counter+1], this means that Ui+1[program_counter+1] is at + // least as large as Ui by the properties of the folding scheme. This + // means that the list of running instances grows in each step. To + // alleviate this issue, we have each F0j only produce a hash + // of its outputs as public output. In the subsequent step, the next augmented + // function takes as non-deterministic input a preimage to this hash." 
pg.16 + + // https://eprint.iacr.org/2022/1758.pdf + + // Compute the new hash H(params, i+1, program_counter, z0, z_{i+1}, U_next) + let mut ro = E::ROCircuit::new( + self.ro_consts.clone(), + num_ro_inputs( + self.num_augmented_circuits, + self.params.get_n_limbs(), + self.step_circuit.arity(), + self.params.is_primary_circuit, + ), + ); + ro.absorb(¶ms); + ro.absorb(&i_next); + // optionally absorb program counter if exist + if program_counter.is_some() { + ro.absorb(program_counter_new.as_ref().expect("new program counter missing")) + } + for e in &z_0 { + ro.absorb(e); + } + for e in &z_next { + ro.absorb(e); + } + U_next.iter().enumerate().try_for_each(|(i, U)| { + U.absorb_in_ro(cs.namespace(|| format!("absorb U_new {:?}", i)), &mut ro) + })?; + + let hash_bits = ro.squeeze(cs.namespace(|| "output hash bits"), NUM_HASH_BITS)?; + let hash = le_bits_to_num(cs.namespace(|| "convert hash to num"), &hash_bits)?; + + // We are cycling of curve implementation, so primary/secondary will rotate hash + // in IO for the others to check bypass unmodified hash of other circuit + // as next X[0] and output the computed the computed hash as next X[1] + u.X[1].inputize(cs.namespace(|| "bypass unmodified hash of the other circuit"))?; + hash.inputize(cs.namespace(|| "output new hash of this circuit"))?; + + Ok((program_counter_new, z_next)) + } +} + +#[cfg(test)] +mod tests { + use expect_test::{expect, Expect}; + + use super::*; + use crate::{ + bellpepper::{ + r1cs::{NovaShape, NovaWitness}, + solver::SatisfyingAssignment, + test_shape_cs::TestShapeCS, + }, + constants::{BN_LIMB_WIDTH, BN_N_LIMBS}, + gadgets::scalar_as_base, + provider::{poseidon::PoseidonConstantsCircuit, Bn256EngineIPA, GrumpkinEngine}, + supernova::circuit::TrivialCircuit, + traits::{snark::default_ck_hint, CurveCycleEquipped, Dual}, + }; + + // In the following we use 1 to refer to the primary, and 2 to refer to the + // secondary circuit + fn test_supernova_recursive_circuit_with( + primary_params: 
&SuperNovaAugmentedCircuitParams, + secondary_params: &SuperNovaAugmentedCircuitParams, + ro_consts1: ROConstantsCircuit>, + ro_consts2: ROConstantsCircuit, + num_constraints_primary: &Expect, + num_constraints_secondary: &Expect, + num_augmented_circuits: usize, + ) where + E1: CurveCycleEquipped, + { + let tc1 = TrivialCircuit::default(); + // Initialize the shape and ck for the primary + let circuit1: SuperNovaAugmentedCircuit< + '_, + Dual, + TrivialCircuit< as Engine>::Base>, + > = SuperNovaAugmentedCircuit::new( + primary_params, + None, + &tc1, + ro_consts1.clone(), + num_augmented_circuits, + ); + let mut cs: TestShapeCS = TestShapeCS::new(); + let _ = circuit1.synthesize(&mut cs); + let (shape1, ck1) = cs.r1cs_shape_and_key(&*default_ck_hint()); + + num_constraints_primary.assert_eq(&cs.num_constraints().to_string()); + + let tc2 = TrivialCircuit::default(); + // Initialize the shape and ck for the secondary + let circuit2: SuperNovaAugmentedCircuit<'_, E1, TrivialCircuit<::Base>> = + SuperNovaAugmentedCircuit::new( + secondary_params, + None, + &tc2, + ro_consts2.clone(), + num_augmented_circuits, + ); + let mut cs: TestShapeCS> = TestShapeCS::new(); + let _ = circuit2.synthesize(&mut cs); + let (shape2, ck2) = cs.r1cs_shape_and_key(&*default_ck_hint()); + + num_constraints_secondary.assert_eq(&cs.num_constraints().to_string()); + + // Execute the base case for the primary + let zero1 = < as Engine>::Base as Field>::ZERO; + let mut cs1 = SatisfyingAssignment::::new(); + let vzero1 = vec![zero1]; + let inputs1: SuperNovaAugmentedCircuitInputs<'_, Dual> = + SuperNovaAugmentedCircuitInputs::new( + scalar_as_base::(zero1), // pass zero for testing + zero1, + &vzero1, + None, + None, + None, + None, + Some(zero1), + zero1, + ); + let circuit1: SuperNovaAugmentedCircuit< + '_, + Dual, + TrivialCircuit< as Engine>::Base>, + > = SuperNovaAugmentedCircuit::new( + primary_params, + Some(inputs1), + &tc1, + ro_consts1, + num_augmented_circuits, + ); + let _ = 
circuit1.synthesize(&mut cs1); + let (inst1, witness1) = cs1.r1cs_instance_and_witness(&shape1, &ck1).unwrap(); + // Make sure that this is satisfiable + shape1.is_sat(&ck1, &inst1, &witness1).unwrap(); + + // Execute the base case for the secondary + let zero2 = <::Base as Field>::ZERO; + let mut cs2 = SatisfyingAssignment::>::new(); + let vzero2 = vec![zero2]; + let inputs2: SuperNovaAugmentedCircuitInputs<'_, E1> = SuperNovaAugmentedCircuitInputs::new( + scalar_as_base::>(zero2), // pass zero for testing + zero2, + &vzero2, + None, + None, + Some(&inst1), + None, + Some(zero2), + zero2, + ); + let circuit2: SuperNovaAugmentedCircuit<'_, E1, TrivialCircuit<::Base>> = + SuperNovaAugmentedCircuit::new( + secondary_params, + Some(inputs2), + &tc2, + ro_consts2, + num_augmented_circuits, + ); + let _ = circuit2.synthesize(&mut cs2); + let (inst2, witness2) = cs2.r1cs_instance_and_witness(&shape2, &ck2).unwrap(); + // Make sure that it is satisfiable + shape2.is_sat(&ck2, &inst2, &witness2).unwrap(); + } + + #[test] + fn test_supernova_recursive_circuit_grumpkin() { + let params1 = SuperNovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, true); + let params2 = SuperNovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, false); + let ro_consts1: ROConstantsCircuit = PoseidonConstantsCircuit::default(); + let ro_consts2: ROConstantsCircuit = PoseidonConstantsCircuit::default(); + + test_supernova_recursive_circuit_with::( + ¶ms1, + ¶ms2, + ro_consts1, + ro_consts2, + &expect!["10004"], + &expect!["10573"], + 1, + ); + // TODO: extend to num_augmented_circuits >= 2 + } +} diff --git a/src/supernova/error.rs b/prover/src/supernova/error.rs similarity index 59% rename from src/supernova/error.rs rename to prover/src/supernova/error.rs index 95a25d8..84cebea 100644 --- a/src/supernova/error.rs +++ b/prover/src/supernova/error.rs @@ -11,13 +11,13 @@ use crate::errors::NovaError; /// Errors returned by Nova #[derive(Debug, Eq, PartialEq, Error)] pub enum 
SuperNovaError { - /// Nova error - #[error("NovaError")] - NovaError(#[from] NovaError), - /// missing commitment key - #[error("MissingCK")] - MissingCK, - /// Extended error for supernova - #[error("UnSatIndex")] - UnSatIndex(&'static str, usize), + /// Nova error + #[error("NovaError")] + NovaError(#[from] NovaError), + /// missing commitment key + #[error("MissingCK")] + MissingCK, + /// Extended error for supernova + #[error("UnSatIndex")] + UnSatIndex(&'static str, usize), } diff --git a/prover/src/supernova/mod.rs b/prover/src/supernova/mod.rs new file mode 100644 index 0000000..1812a20 --- /dev/null +++ b/prover/src/supernova/mod.rs @@ -0,0 +1,1189 @@ +#![doc = include_str!("./Readme.md")] + +use std::{ops::Index, sync::Arc}; + +use bellpepper_core::{ConstraintSystem, SynthesisError}; +use ff::Field; +use itertools::Itertools as _; +use once_cell::sync::OnceCell; +use rayon::prelude::*; +use serde::{Deserialize, Serialize}; +use tracing::debug; + +use crate::{ + bellpepper::{ + r1cs::{NovaShape, NovaWitness}, + shape_cs::ShapeCS, + solver::SatisfyingAssignment, + }, + constants::{BN_LIMB_WIDTH, BN_N_LIMBS, NUM_HASH_BITS}, + digest::{DigestComputer, SimpleDigestible}, + errors::NovaError, + nifs::NIFS, + r1cs::{ + self, commitment_key_size, CommitmentKeyHint, R1CSInstance, R1CSResult, R1CSShape, R1CSWitness, + RelaxedR1CSInstance, RelaxedR1CSWitness, + }, + scalar_as_base, + traits::{ + commitment::{CommitmentEngineTrait, CommitmentTrait}, + AbsorbInROTrait, CurveCycleEquipped, Dual, Engine, ROConstants, ROConstantsCircuit, ROTrait, + }, + Commitment, CommitmentKey, R1CSWithArity, +}; + +mod circuit; // declare the module first +pub use circuit::{StepCircuit, SuperNovaAugmentedCircuitParams, TrivialCircuit}; +use circuit::{SuperNovaAugmentedCircuit, SuperNovaAugmentedCircuitInputs}; +use error::SuperNovaError; + +/// A struct that manages all the digests of the primary circuits of a SuperNova +/// instance +#[derive(Debug, PartialEq, Eq, Serialize)] +pub 
struct CircuitDigests { + digests: Vec, +} + +impl SimpleDigestible for CircuitDigests {} + +impl std::ops::Deref for CircuitDigests { + type Target = Vec; + + fn deref(&self) -> &Self::Target { &self.digests } +} + +impl CircuitDigests { + /// Construct a new [`CircuitDigests`] + pub fn new(digests: Vec) -> Self { Self { digests } } + + /// Return the [`CircuitDigests`]' digest. + pub fn digest(&self) -> E::Scalar { + let dc: DigestComputer<'_, ::Scalar, Self> = DigestComputer::new(self); + dc.digest().expect("Failure in computing digest") + } +} + +/// A vector of [`R1CSWithArity`] adjoined to a set of [`PublicParams`] +#[derive(Debug, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct PublicParams +where E1: CurveCycleEquipped { + /// The internal circuit shapes + circuit_shapes: Vec>, + + ro_consts_primary: ROConstants, + ro_consts_circuit_primary: ROConstantsCircuit>, + ck_primary: Arc>, // This is shared between all circuit params + augmented_circuit_params_primary: SuperNovaAugmentedCircuitParams, + + ro_consts_secondary: ROConstants>, + ro_consts_circuit_secondary: ROConstantsCircuit, + ck_secondary: Arc>>, + circuit_shape_secondary: R1CSWithArity>, + augmented_circuit_params_secondary: SuperNovaAugmentedCircuitParams, + + /// Digest constructed from this `PublicParams`' parameters + #[serde(skip, default = "OnceCell::new")] + digest: OnceCell, +} + +/// Auxiliary [`PublicParams`] information about the commitment keys and +/// secondary circuit. This is used as a helper struct when reconstructing +/// [`PublicParams`] downstream in lurk. 
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct AuxParams +where E1: CurveCycleEquipped { + pub ro_consts_primary: ROConstants, + pub ro_consts_circuit_primary: ROConstantsCircuit>, + pub ck_primary: Arc>, // This is shared between all circuit params + pub augmented_circuit_params_primary: SuperNovaAugmentedCircuitParams, + + pub ro_consts_secondary: ROConstants>, + pub ro_consts_circuit_secondary: ROConstantsCircuit, + pub ck_secondary: Arc>>, + pub circuit_shape_secondary: R1CSWithArity>, + pub augmented_circuit_params_secondary: SuperNovaAugmentedCircuitParams, + + pub digest: E1::Scalar, +} + +use std::io::Cursor; + +use crate::{ + fast_serde, + fast_serde::{FastSerde, SerdeByteError, SerdeByteTypes}, +}; + +impl FastSerde for AuxParams +where + E1: CurveCycleEquipped, + >::CommitmentKey: FastSerde, + <::CE as CommitmentEngineTrait>::CommitmentKey: FastSerde, +{ + /// Byte format: + /// [0..4] - Magic number (4 bytes) + /// [4] - Serde type: AuxParams (u8) + /// [5] - Number of sections (u8 = 8) + /// Sections (repeated 8 times): + /// [N] - Section type (u8) + /// [N+1..5] - Section size (u32) + /// [N+5..] 
- Section data (variable length) + /// Section types: + /// 1: ro_consts_primary (bincode) + /// 2: ro_consts_circuit_primary (bincode) + /// 3: ck_primary (FastSerde) + /// 4: ro_consts_secondary (bincode) + /// 5: ro_consts_circuit_secondary (bincode) + /// 6: ck_secondary (FastSerde) + /// 7: circuit_shape_secondary (json) + /// 8: digest (bincode) + fn to_bytes(&self) -> Vec { + let mut out = Vec::new(); + + // Write header + out.extend_from_slice(&fast_serde::MAGIC_NUMBER); + out.push(fast_serde::SerdeByteTypes::AuxParams as u8); + out.push(8); // num_sections + + // Write sections + Self::write_section_bytes(&mut out, 1, &bincode::serialize(&self.ro_consts_primary).unwrap()); + Self::write_section_bytes( + &mut out, + 2, + &bincode::serialize(&self.ro_consts_circuit_primary).unwrap(), + ); + Self::write_section_bytes(&mut out, 3, &self.ck_primary.to_bytes()); + Self::write_section_bytes(&mut out, 4, &bincode::serialize(&self.ro_consts_secondary).unwrap()); + Self::write_section_bytes( + &mut out, + 5, + &bincode::serialize(&self.ro_consts_circuit_secondary).unwrap(), + ); + Self::write_section_bytes(&mut out, 6, &self.ck_secondary.to_bytes()); + Self::write_section_bytes( + &mut out, + 7, + &bincode::serialize(&self.circuit_shape_secondary).unwrap(), + ); + Self::write_section_bytes(&mut out, 8, &bincode::serialize(&self.digest).unwrap()); + + out + } + + fn from_bytes(bytes: &Vec) -> Result { + let mut cursor = Cursor::new(bytes); + + // Validate header + Self::validate_header(&mut cursor, SerdeByteTypes::AuxParams, 8)?; + + // Read all sections + let ro_consts_primary = bincode::deserialize(&Self::read_section_bytes(&mut cursor, 1)?)?; + let ro_consts_circuit_primary = + bincode::deserialize(&Self::read_section_bytes(&mut cursor, 2)?)?; + let ck_primary = Arc::new(>::CommitmentKey::from_bytes( + &Self::read_section_bytes(&mut cursor, 3)?, + )?); + let ro_consts_secondary = bincode::deserialize(&Self::read_section_bytes(&mut cursor, 4)?)?; + let 
ro_consts_circuit_secondary = + bincode::deserialize(&Self::read_section_bytes(&mut cursor, 5)?)?; + let ck_secondary = Arc::new(<::CE as CommitmentEngineTrait< + E1::Secondary, + >>::CommitmentKey::from_bytes(&Self::read_section_bytes( + &mut cursor, + 6, + )?)?); + let circuit_shape_secondary = bincode::deserialize(&Self::read_section_bytes(&mut cursor, 7)?)?; + let digest = bincode::deserialize(&Self::read_section_bytes(&mut cursor, 8)?)?; + + // NOTE: This does not check the digest. Maybe we should. + Ok(Self { + ro_consts_primary, + ro_consts_circuit_primary, + ck_primary, + augmented_circuit_params_primary: SuperNovaAugmentedCircuitParams::new( + BN_LIMB_WIDTH, + BN_N_LIMBS, + true, + ), + ro_consts_secondary, + ro_consts_circuit_secondary, + ck_secondary, + circuit_shape_secondary, + augmented_circuit_params_secondary: SuperNovaAugmentedCircuitParams::new( + BN_LIMB_WIDTH, + BN_N_LIMBS, + false, + ), + digest, + }) + } +} + +impl Index for PublicParams +where E1: CurveCycleEquipped +{ + type Output = R1CSWithArity; + + fn index(&self, index: usize) -> &Self::Output { &self.circuit_shapes[index] } +} + +impl SimpleDigestible for PublicParams where E1: CurveCycleEquipped {} + +impl PublicParams +where E1: CurveCycleEquipped +{ + /// Construct a new [`PublicParams`] + /// + /// # Note + /// + /// Public parameters set up a number of bases for the homomorphic + /// commitment scheme of Nova. + /// + /// Some final compressing SNARKs, like variants of Spartan, use computation + /// commitments that require larger sizes for these parameters. These + /// SNARKs provide a hint for these values by implementing + /// `RelaxedR1CSSNARKTrait::commitment_key_floor()`, which can be passed to + /// this function. + /// + /// If you're not using such a SNARK, pass `&(|_| 0)` instead. + /// + /// # Arguments + /// + /// * `non_uniform_circuit`: The non-uniform circuit of type `NC`. 
+ /// * `ck_hint1`: A `CommitmentKeyHint` for `E1`, which is a function that provides a hint for the + /// number of generators required in the commitment scheme for the primary circuit. + /// * `ck_hint2`: A `CommitmentKeyHint` for `E2`, similar to `ck_hint1`, but for the secondary + /// circuit. + pub fn setup>( + non_uniform_circuit: &NC, + ck_hint1: &CommitmentKeyHint, + ck_hint2: &CommitmentKeyHint>, + ) -> Self { + let num_circuits = non_uniform_circuit.num_circuits(); + + let augmented_circuit_params_primary = + SuperNovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, true); + let ro_consts_primary: ROConstants = ROConstants::::default(); + // ro_consts_circuit_primary are parameterized by E2 because the type alias uses + // E2::Base = E1::Scalar + let ro_consts_circuit_primary: ROConstantsCircuit> = + ROConstantsCircuit::>::default(); + + let circuit_shapes = get_circuit_shapes(non_uniform_circuit); + + let ck_primary = Self::compute_primary_ck(&circuit_shapes, ck_hint1); + let ck_primary = Arc::new(ck_primary); + + let augmented_circuit_params_secondary = + SuperNovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, false); + let ro_consts_secondary = ROConstants::>::default(); + let c_secondary = non_uniform_circuit.secondary_circuit(); + let F_arity_secondary = c_secondary.arity(); + let ro_consts_circuit_secondary: ROConstantsCircuit = ROConstantsCircuit::::default(); + + let circuit_secondary: SuperNovaAugmentedCircuit<'_, E1, NC::C2> = + SuperNovaAugmentedCircuit::new( + &augmented_circuit_params_secondary, + None, + &c_secondary, + ro_consts_circuit_secondary.clone(), + num_circuits, + ); + let mut cs: ShapeCS> = ShapeCS::new(); + circuit_secondary.synthesize(&mut cs).expect("circuit synthesis failed"); + let (r1cs_shape_secondary, ck_secondary) = cs.r1cs_shape_and_key(ck_hint2); + let ck_secondary = Arc::new(ck_secondary); + let circuit_shape_secondary = R1CSWithArity::new(r1cs_shape_secondary, F_arity_secondary); + + let pp = Self { + 
circuit_shapes, + ro_consts_primary, + ro_consts_circuit_primary, + ck_primary, + augmented_circuit_params_primary, + ro_consts_secondary, + ro_consts_circuit_secondary, + ck_secondary, + circuit_shape_secondary, + augmented_circuit_params_secondary, + digest: OnceCell::new(), + }; + + // make sure to initialize the `OnceCell` and compute the digest + // and avoid paying for unexpected performance costs later + pp.digest(); + pp + } + + /// Breaks down an instance of [`PublicParams`] into the circuit params and + /// auxiliary params. + pub fn into_parts(self) -> (Vec>, AuxParams) { + let digest = self.digest(); + + let Self { + circuit_shapes, + ro_consts_primary, + ro_consts_circuit_primary, + ck_primary, + augmented_circuit_params_primary, + ro_consts_secondary, + ro_consts_circuit_secondary, + ck_secondary, + circuit_shape_secondary, + augmented_circuit_params_secondary, + digest: _digest, + } = self; + + let aux_params = AuxParams { + ro_consts_primary, + ro_consts_circuit_primary, + ck_primary, + augmented_circuit_params_primary, + ro_consts_secondary, + ro_consts_circuit_secondary, + ck_secondary, + circuit_shape_secondary, + augmented_circuit_params_secondary, + digest, + }; + + (circuit_shapes, aux_params) + } + + /// Returns just the [`AuxParams`] portion of [`PublicParams`] from a + /// reference to [`PublicParams`]. 
+ pub fn aux_params(&self) -> AuxParams { + AuxParams { + ro_consts_primary: self.ro_consts_primary.clone(), + ro_consts_circuit_primary: self.ro_consts_circuit_primary.clone(), + ck_primary: self.ck_primary.clone(), + augmented_circuit_params_primary: self.augmented_circuit_params_primary.clone(), + ro_consts_secondary: self.ro_consts_secondary.clone(), + ro_consts_circuit_secondary: self.ro_consts_circuit_secondary.clone(), + ck_secondary: self.ck_secondary.clone(), + circuit_shape_secondary: self.circuit_shape_secondary.clone(), + augmented_circuit_params_secondary: self.augmented_circuit_params_secondary.clone(), + digest: self.digest(), + } + } + + /// Create a [`PublicParams`] from a vector of raw [`R1CSWithArity`] and + /// auxiliary params. + pub fn from_parts(circuit_shapes: Vec>, aux_params: AuxParams) -> Self { + let pp = Self { + circuit_shapes, + ro_consts_primary: aux_params.ro_consts_primary, + ro_consts_circuit_primary: aux_params.ro_consts_circuit_primary, + ck_primary: aux_params.ck_primary, + augmented_circuit_params_primary: aux_params.augmented_circuit_params_primary, + ro_consts_secondary: aux_params.ro_consts_secondary, + ro_consts_circuit_secondary: aux_params.ro_consts_circuit_secondary, + ck_secondary: aux_params.ck_secondary, + circuit_shape_secondary: aux_params.circuit_shape_secondary, + augmented_circuit_params_secondary: aux_params.augmented_circuit_params_secondary, + digest: OnceCell::new(), + }; + assert_eq!( + aux_params.digest, + pp.digest(), + "param data is invalid; aux_params contained the incorrect digest" + ); + pp + } + + /// Create a [`PublicParams`] from a vector of raw [`R1CSWithArity`] and + /// auxiliary params. We don't check that the `aux_params.digest` is a + /// valid digest for the created params. 
+ pub fn from_parts_unchecked( + circuit_shapes: Vec>, + aux_params: AuxParams, + ) -> Self { + Self { + circuit_shapes, + ro_consts_primary: aux_params.ro_consts_primary, + ro_consts_circuit_primary: aux_params.ro_consts_circuit_primary, + ck_primary: aux_params.ck_primary, + augmented_circuit_params_primary: aux_params.augmented_circuit_params_primary, + ro_consts_secondary: aux_params.ro_consts_secondary, + ro_consts_circuit_secondary: aux_params.ro_consts_circuit_secondary, + ck_secondary: aux_params.ck_secondary, + circuit_shape_secondary: aux_params.circuit_shape_secondary, + augmented_circuit_params_secondary: aux_params.augmented_circuit_params_secondary, + digest: aux_params.digest.into(), + } + } + + /// Compute primary and secondary commitment keys sized to handle the + /// largest of the circuits in the provided `R1CSWithArity`. + fn compute_primary_ck( + circuit_params: &[R1CSWithArity], + ck_hint1: &CommitmentKeyHint, + ) -> CommitmentKey { + let size_primary = circuit_params + .iter() + .map(|circuit| commitment_key_size(&circuit.r1cs_shape, ck_hint1)) + .max() + .unwrap(); + + E1::CE::setup(b"ck", size_primary) + } + + /// Return the [`PublicParams`]' digest. 
+ pub fn digest(&self) -> E1::Scalar { + self + .digest + .get_or_try_init(|| { + let dc: DigestComputer<'_, ::Scalar, Self> = DigestComputer::new(self); + dc.digest() + }) + .cloned() + .expect("Failure in retrieving digest") + } + + /// Returns the number of constraints and variables of inner circuit based + /// on index + pub fn num_constraints_and_variables(&self, index: usize) -> (usize, usize) { + (self.circuit_shapes[index].r1cs_shape.num_cons, self.circuit_shapes[index].r1cs_shape.num_vars) + } + + /// Returns the number of constraints and variables of the secondary circuit + pub fn num_constraints_and_variables_secondary(&self) -> (usize, usize) { + ( + self.circuit_shape_secondary.r1cs_shape.num_cons, + self.circuit_shape_secondary.r1cs_shape.num_vars, + ) + } + + /// All of the primary circuit digests of this [`PublicParams`] + pub fn circuit_param_digests(&self) -> CircuitDigests { + let digests = self.circuit_shapes.iter().map(|cp| cp.digest()).collect::>(); + CircuitDigests { digests } + } + + /// Returns all the primary R1CS Shapes + fn primary_r1cs_shapes(&self) -> Vec<&R1CSShape> { + self.circuit_shapes.iter().map(|cs| &cs.r1cs_shape).collect::>() + } +} + +pub fn get_circuit_shapes>( + non_uniform_circuit: &NC, +) -> Vec> { + let num_circuits = non_uniform_circuit.num_circuits(); + let augmented_circuit_params_primary = + SuperNovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, true); + + // ro_consts_circuit_primary are parameterized by E2 because the type alias uses + // E2::Base = E1::Scalar + let ro_consts_circuit_primary: ROConstantsCircuit> = + ROConstantsCircuit::>::default(); + + (0..num_circuits) + .map(|i| { + let c_primary = non_uniform_circuit.primary_circuit(i); + let F_arity = c_primary.arity(); + // Initialize ck for the primary + let circuit_primary: SuperNovaAugmentedCircuit<'_, Dual, NC::C1> = + SuperNovaAugmentedCircuit::new( + &augmented_circuit_params_primary, + None, + &c_primary, + ro_consts_circuit_primary.clone(), + 
num_circuits, + ); + let mut cs: ShapeCS = ShapeCS::new(); + circuit_primary.synthesize(&mut cs).expect("circuit synthesis failed"); + + // We use the largest commitment_key for all instances + let r1cs_shape_primary = cs.r1cs_shape(); + R1CSWithArity::new(r1cs_shape_primary, F_arity) + }) + .collect::>() +} + +/// A resource buffer for SuperNova's [`RecursiveSNARK`] for storing scratch +/// values that are computed by `prove_step`, which allows the reuse of memory +/// allocations and avoids unnecessary new allocations in the critical section. +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(bound = "")] +struct ResourceBuffer { + l_w: Option>, + l_u: Option>, + + ABC_Z_1: R1CSResult, + ABC_Z_2: R1CSResult, + + /// buffer for `commit_T` + T: Vec, +} + +/// A SNARK that proves the correct execution of an non-uniform incremental +/// computation +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct RecursiveSNARK +where E1: CurveCycleEquipped { + // Cached digest of the public parameters + pp_digest: E1::Scalar, + num_augmented_circuits: usize, + + // Number of iterations performed up to now + i: usize, + + // Inputs and outputs of the primary circuits + z0_primary: Vec, + zi_primary: Vec, + + // Proven circuit index, and current program counter + proven_circuit_index: usize, + program_counter: E1::Scalar, + + /// Buffer for memory needed by the primary fold-step + buffer_primary: ResourceBuffer, + /// Buffer for memory needed by the secondary fold-step + buffer_secondary: ResourceBuffer>, + + // Relaxed instances for the primary circuits + // Entries are `None` if the circuit has not been executed yet + r_W_primary: Vec>>, + r_U_primary: Vec>>, + + // Inputs and outputs of the secondary circuit + z0_secondary: Vec< as Engine>::Scalar>, + zi_secondary: Vec< as Engine>::Scalar>, + // Relaxed instance for the secondary circuit + r_W_secondary: RelaxedR1CSWitness>, + r_U_secondary: RelaxedR1CSInstance>, + // Proof for the secondary 
circuit to be accumulated into r_secondary in the next iteration + l_w_secondary: R1CSWitness>, + l_u_secondary: R1CSInstance>, +} + +impl RecursiveSNARK +where E1: CurveCycleEquipped +{ + /// iterate base step to get new instance of recursive SNARK + #[allow(clippy::too_many_arguments)] + pub fn new>( + pp: &PublicParams, + non_uniform_circuit: &C0, + c_primary: &C0::C1, + c_secondary: &C0::C2, + z0_primary: &[E1::Scalar], + z0_secondary: &[ as Engine>::Scalar], + ) -> Result { + let num_augmented_circuits = non_uniform_circuit.num_circuits(); + let circuit_index = non_uniform_circuit.initial_circuit_index(); + + let r1cs_secondary = &pp.circuit_shape_secondary.r1cs_shape; + + // check the length of the secondary initial input + if z0_secondary.len() != pp.circuit_shape_secondary.F_arity { + return Err(SuperNovaError::NovaError(NovaError::InvalidStepOutputLength)); + } + + // check the arity of all the primary circuits match the initial input length + // pp.circuit_shapes.iter().try_for_each(|circuit| { + // if circuit.F_arity != z0_primary.len() { + // return Err(SuperNovaError::NovaError( + // NovaError::InvalidStepOutputLength, + // )); + // } + // Ok(()) + // })?; + + // base case for the primary + let mut cs_primary = SatisfyingAssignment::::new(); + let program_counter = E1::Scalar::from(circuit_index as u64); + let inputs_primary: SuperNovaAugmentedCircuitInputs<'_, Dual> = + SuperNovaAugmentedCircuitInputs::new( + scalar_as_base::(pp.digest()), + E1::Scalar::ZERO, + z0_primary, + None, // zi = None for basecase + None, // U = [None], since no previous proofs have been computed + None, // u = None since we are not verifying a secondary circuit + None, // T = None since there is not proof to fold + Some(program_counter), // pc = initial_program_counter for primary circuit + E1::Scalar::ZERO, // u_index is always zero for the primary circuit + ); + + let circuit_primary: SuperNovaAugmentedCircuit<'_, Dual, C0::C1> = + SuperNovaAugmentedCircuit::new( + 
&pp.augmented_circuit_params_primary, + Some(inputs_primary), + c_primary, + pp.ro_consts_circuit_primary.clone(), + num_augmented_circuits, + ); + + let (zi_primary_pc_next, zi_primary) = + circuit_primary.synthesize(&mut cs_primary).map_err(|err| { + debug!("err {:?}", err); + NovaError::from(err) + })?; + if zi_primary.len() != pp[circuit_index].F_arity { + return Err(SuperNovaError::NovaError(NovaError::InvalidStepOutputLength)); + } + let (u_primary, w_primary) = cs_primary + .r1cs_instance_and_witness(&pp[circuit_index].r1cs_shape, &pp.ck_primary) + .map_err(|err| { + debug!("err {:?}", err); + err + })?; + + // base case for the secondary + let mut cs_secondary = SatisfyingAssignment::>::new(); + let u_primary_index = as Engine>::Scalar::from(circuit_index as u64); + let inputs_secondary: SuperNovaAugmentedCircuitInputs<'_, E1> = + SuperNovaAugmentedCircuitInputs::new( + pp.digest(), + as Engine>::Scalar::ZERO, + z0_secondary, + None, // zi = None for basecase + None, // U = Empty list of accumulators for the primary circuits + Some(&u_primary), // Proof for first iteration of current primary circuit + None, // T = None, since we just copy u_primary rather than fold it + None, // program_counter is always None for secondary circuit + u_primary_index, // index of the circuit proof u_primary + ); + + let circuit_secondary: SuperNovaAugmentedCircuit<'_, E1, C0::C2> = + SuperNovaAugmentedCircuit::new( + &pp.augmented_circuit_params_secondary, + Some(inputs_secondary), + c_secondary, + pp.ro_consts_circuit_secondary.clone(), + num_augmented_circuits, + ); + let (_, zi_secondary) = + circuit_secondary.synthesize(&mut cs_secondary).map_err(NovaError::from)?; + if zi_secondary.len() != pp.circuit_shape_secondary.F_arity { + return Err(NovaError::InvalidStepOutputLength.into()); + } + let (u_secondary, w_secondary) = cs_secondary + .r1cs_instance_and_witness(r1cs_secondary, &pp.ck_secondary) + .map_err(|_| SuperNovaError::NovaError(NovaError::UnSat))?; + + // IVC 
proof for the primary circuit + let l_w_primary = w_primary; + let l_u_primary = u_primary; + let r_W_primary = + RelaxedR1CSWitness::from_r1cs_witness(&pp[circuit_index].r1cs_shape, l_w_primary); + + let r_U_primary = RelaxedR1CSInstance::from_r1cs_instance( + &*pp.ck_primary, + &pp[circuit_index].r1cs_shape, + l_u_primary, + ); + + // IVC proof of the secondary circuit + let l_w_secondary = w_secondary; + let l_u_secondary = u_secondary; + + // Initialize relaxed instance/witness pair for the secondary circuit proofs + let r_W_secondary = RelaxedR1CSWitness::>::default(r1cs_secondary); + let r_U_secondary = RelaxedR1CSInstance::default(&*pp.ck_secondary, r1cs_secondary); + + // Outputs of the two circuits and next program counter thus far. + let zi_primary = zi_primary + .iter() + .map(|v| v.get_value().ok_or(NovaError::from(SynthesisError::AssignmentMissing).into())) + .collect::::Scalar>, SuperNovaError>>()?; + let zi_primary_pc_next = + zi_primary_pc_next + .expect("zi_primary_pc_next missing") + .get_value() + .ok_or::(NovaError::from(SynthesisError::AssignmentMissing).into())?; + let zi_secondary = zi_secondary + .iter() + .map(|v| v.get_value().ok_or(NovaError::from(SynthesisError::AssignmentMissing).into())) + .collect:: as Engine>::Scalar>, SuperNovaError>>()?; + + // handle the base case by initialize U_next in next round + let r_W_primary_initial_list = (0..num_augmented_circuits) + .map(|i| (i == circuit_index).then(|| r_W_primary.clone())) + .collect::>>>(); + + let r_U_primary_initial_list = (0..num_augmented_circuits) + .map(|i| (i == circuit_index).then(|| r_U_primary.clone())) + .collect::>>>(); + + // find the largest length r1cs shape for the buffer size + let max_num_cons = + pp.circuit_shapes.iter().map(|circuit| circuit.r1cs_shape.num_cons).max().unwrap(); + + let buffer_primary = ResourceBuffer { + l_w: None, + l_u: None, + ABC_Z_1: R1CSResult::default(max_num_cons), + ABC_Z_2: R1CSResult::default(max_num_cons), + T: 
r1cs::default_T::(max_num_cons), + }; + + let buffer_secondary = ResourceBuffer { + l_w: None, + l_u: None, + ABC_Z_1: R1CSResult::default(r1cs_secondary.num_cons), + ABC_Z_2: R1CSResult::default(r1cs_secondary.num_cons), + T: r1cs::default_T::>(r1cs_secondary.num_cons), + }; + + Ok(Self { + pp_digest: pp.digest(), + num_augmented_circuits, + i: 0_usize, // after base case, next iteration start from 1 + z0_primary: z0_primary.to_vec(), + zi_primary, + + proven_circuit_index: circuit_index, + program_counter: zi_primary_pc_next, + + buffer_primary, + buffer_secondary, + + r_W_primary: r_W_primary_initial_list, + r_U_primary: r_U_primary_initial_list, + z0_secondary: z0_secondary.to_vec(), + zi_secondary, + r_W_secondary, + r_U_secondary, + l_w_secondary, + l_u_secondary, + }) + } + + /// Inputs of the primary circuits + pub fn z0_primary(&self) -> &Vec { &self.z0_primary } + + /// Outputs of the primary circuits + pub fn zi_primary(&self) -> &Vec { &self.zi_primary } + + /// executing a step of the incremental computation + #[allow(clippy::too_many_arguments)] + #[tracing::instrument(skip_all, name = "supernova::RecursiveSNARK::prove_step")] + pub fn prove_step, C2: StepCircuit< as Engine>::Scalar>>( + &mut self, + pp: &PublicParams, + c_primary: &C1, + c_secondary: &C2, + ) -> Result<(), SuperNovaError> { + // First step was already done in the constructor + if self.i == 0 { + self.i = 1; + return Ok(()); + } + + // save the inputs before proceeding to the `i+1`th step + let r_U_primary_i = self.r_U_primary.clone(); + // Create single-entry accumulator list for the secondary circuit to hand to + // SuperNovaAugmentedCircuitInputs + let r_U_secondary_i = vec![Some(self.r_U_secondary.clone())]; + let l_u_secondary_i = self.l_u_secondary.clone(); + + let circuit_index = c_primary.circuit_index(); + assert_eq!(self.program_counter, E1::Scalar::from(circuit_index as u64)); + + // fold the secondary circuit's instance + let (nifs_secondary, _) = NIFS::prove_mut( + 
&*pp.ck_secondary, + &pp.ro_consts_secondary, + &scalar_as_base::(self.pp_digest), + &pp.circuit_shape_secondary.r1cs_shape, + &mut self.r_U_secondary, + &mut self.r_W_secondary, + &self.l_u_secondary, + &self.l_w_secondary, + &mut self.buffer_secondary.T, + &mut self.buffer_secondary.ABC_Z_1, + &mut self.buffer_secondary.ABC_Z_2, + ) + .map_err(SuperNovaError::NovaError)?; + + let mut cs_primary = SatisfyingAssignment::::with_capacity( + pp[circuit_index].r1cs_shape.num_io + 1, + pp[circuit_index].r1cs_shape.num_vars, + ); + let T = Commitment::>::decompress(&nifs_secondary.comm_T) + .map_err(SuperNovaError::NovaError)?; + let inputs_primary: SuperNovaAugmentedCircuitInputs<'_, Dual> = + SuperNovaAugmentedCircuitInputs::new( + scalar_as_base::(self.pp_digest), + E1::Scalar::from(self.i as u64), + &self.z0_primary, + Some(&self.zi_primary), + Some(&r_U_secondary_i), + Some(&l_u_secondary_i), + Some(&T), + Some(self.program_counter), + E1::Scalar::ZERO, + ); + + let circuit_primary: SuperNovaAugmentedCircuit<'_, Dual, C1> = + SuperNovaAugmentedCircuit::new( + &pp.augmented_circuit_params_primary, + Some(inputs_primary), + c_primary, + pp.ro_consts_circuit_primary.clone(), + self.num_augmented_circuits, + ); + + let (zi_primary_pc_next, zi_primary) = + circuit_primary.synthesize(&mut cs_primary).map_err(NovaError::from)?; + if zi_primary.len() != pp[circuit_index].F_arity { + return Err(SuperNovaError::NovaError(NovaError::InvalidInitialInputLength)); + } + + let (l_u_primary, l_w_primary) = cs_primary + .r1cs_instance_and_witness(&pp[circuit_index].r1cs_shape, &pp.ck_primary) + .map_err(SuperNovaError::NovaError)?; + + let (r_U_primary, r_W_primary) = if let (Some(Some(r_U_primary)), Some(Some(r_W_primary))) = + (self.r_U_primary.get_mut(circuit_index), self.r_W_primary.get_mut(circuit_index)) + { + (r_U_primary, r_W_primary) + } else { + self.r_U_primary[circuit_index] = + Some(RelaxedR1CSInstance::default(&*pp.ck_primary, &pp[circuit_index].r1cs_shape)); + 
self.r_W_primary[circuit_index] = + Some(RelaxedR1CSWitness::default(&pp[circuit_index].r1cs_shape)); + ( + self.r_U_primary[circuit_index].as_mut().unwrap(), + self.r_W_primary[circuit_index].as_mut().unwrap(), + ) + }; + + let (nifs_primary, _) = NIFS::prove_mut( + &*pp.ck_primary, + &pp.ro_consts_primary, + &self.pp_digest, + &pp[circuit_index].r1cs_shape, + r_U_primary, + r_W_primary, + &l_u_primary, + &l_w_primary, + &mut self.buffer_primary.T, + &mut self.buffer_primary.ABC_Z_1, + &mut self.buffer_primary.ABC_Z_2, + ) + .map_err(SuperNovaError::NovaError)?; + + let mut cs_secondary = SatisfyingAssignment::>::with_capacity( + pp.circuit_shape_secondary.r1cs_shape.num_io + 1, + pp.circuit_shape_secondary.r1cs_shape.num_vars, + ); + let binding = + Commitment::::decompress(&nifs_primary.comm_T).map_err(SuperNovaError::NovaError)?; + let inputs_secondary: SuperNovaAugmentedCircuitInputs<'_, E1> = + SuperNovaAugmentedCircuitInputs::new( + self.pp_digest, + as Engine>::Scalar::from(self.i as u64), + &self.z0_secondary, + Some(&self.zi_secondary), + Some(&r_U_primary_i), + Some(&l_u_primary), + Some(&binding), + None, // pc is always None for secondary circuit + as Engine>::Scalar::from(circuit_index as u64), + ); + + let circuit_secondary: SuperNovaAugmentedCircuit<'_, E1, C2> = SuperNovaAugmentedCircuit::new( + &pp.augmented_circuit_params_secondary, + Some(inputs_secondary), + c_secondary, + pp.ro_consts_circuit_secondary.clone(), + self.num_augmented_circuits, + ); + let (_, zi_secondary) = + circuit_secondary.synthesize(&mut cs_secondary).map_err(NovaError::from)?; + if zi_secondary.len() != pp.circuit_shape_secondary.F_arity { + return Err(SuperNovaError::NovaError(NovaError::InvalidInitialInputLength)); + } + + let (l_u_secondary_next, l_w_secondary_next) = cs_secondary + .r1cs_instance_and_witness(&pp.circuit_shape_secondary.r1cs_shape, &pp.ck_secondary)?; + + // update the running instances and witnesses + let zi_primary = zi_primary + .iter() + .map(|v| 
v.get_value().ok_or(NovaError::from(SynthesisError::AssignmentMissing).into())) + .collect::::Scalar>, SuperNovaError>>()?; + let zi_primary_pc_next = + zi_primary_pc_next + .expect("zi_primary_pc_next missing") + .get_value() + .ok_or::(NovaError::from(SynthesisError::AssignmentMissing).into())?; + let zi_secondary = zi_secondary + .iter() + .map(|v| v.get_value().ok_or(NovaError::from(SynthesisError::AssignmentMissing).into())) + .collect:: as Engine>::Scalar>, SuperNovaError>>()?; + + if zi_primary.len() != pp[circuit_index].F_arity + || zi_secondary.len() != pp.circuit_shape_secondary.F_arity + { + return Err(SuperNovaError::NovaError(NovaError::InvalidStepOutputLength)); + } + + self.l_w_secondary = l_w_secondary_next; + self.l_u_secondary = l_u_secondary_next; + self.i += 1; + self.zi_primary = zi_primary; + self.zi_secondary = zi_secondary; + self.proven_circuit_index = circuit_index; + self.program_counter = zi_primary_pc_next; + Ok(()) + } + + /// verify recursive snark + pub fn verify( + &self, + pp: &PublicParams, + z0_primary: &[E1::Scalar], + z0_secondary: &[ as Engine>::Scalar], + ) -> Result<(Vec, Vec< as Engine>::Scalar>), SuperNovaError> { + // number of steps cannot be zero + if self.i == 0 { + debug!("must verify on valid RecursiveSNARK where i > 0"); + return Err(SuperNovaError::NovaError(NovaError::ProofVerifyError)); + } + + // Check lengths of r_primary + if self.r_U_primary.len() != self.num_augmented_circuits + || self.r_W_primary.len() != self.num_augmented_circuits + { + debug!("r_primary length mismatch"); + return Err(SuperNovaError::NovaError(NovaError::ProofVerifyError)); + } + + // Check that there are no missing instance/witness pairs + self.r_U_primary.iter().zip_eq(self.r_W_primary.iter()).enumerate().try_for_each( + |(i, (u, w))| match (u, w) { + (Some(_), Some(_)) | (None, None) => Ok(()), + _ => { + debug!("r_primary[{:?}]: mismatched instance/witness pair", i); + Err(SuperNovaError::NovaError(NovaError::ProofVerifyError)) + }, 
+ }, + )?; + + let circuit_index = self.proven_circuit_index; + + // check we have an instance/witness pair for the circuit_index + if self.r_U_primary[circuit_index].is_none() { + debug!("r_primary[{:?}]: instance/witness pair is missing", circuit_index); + return Err(SuperNovaError::NovaError(NovaError::ProofVerifyError)); + } + + // check the (relaxed) R1CS instances public outputs. + { + for (i, r_U_primary_i) in self.r_U_primary.iter().enumerate() { + if let Some(u) = r_U_primary_i { + if u.X.len() != 2 { + debug!("r_U_primary[{:?}] got instance length {:?} != 2", i, u.X.len(),); + return Err(SuperNovaError::NovaError(NovaError::ProofVerifyError)); + } + } + } + + if self.l_u_secondary.X.len() != 2 { + debug!("l_U_secondary got instance length {:?} != 2", self.l_u_secondary.X.len(),); + return Err(SuperNovaError::NovaError(NovaError::ProofVerifyError)); + } + + if self.r_U_secondary.X.len() != 2 { + debug!("r_U_secondary got instance length {:?} != 2", self.r_U_secondary.X.len(),); + return Err(SuperNovaError::NovaError(NovaError::ProofVerifyError)); + } + } + + let hash_primary = { + let num_absorbs = num_ro_inputs( + self.num_augmented_circuits, + pp.augmented_circuit_params_primary.get_n_limbs(), + pp[circuit_index].F_arity, + true, // is_primary + ); + + let mut hasher = as Engine>::RO::new(pp.ro_consts_secondary.clone(), num_absorbs); + hasher.absorb(self.pp_digest); + hasher.absorb(E1::Scalar::from(self.i as u64)); + hasher.absorb(self.program_counter); + + for e in z0_primary { + hasher.absorb(*e); + } + for e in &self.zi_primary { + hasher.absorb(*e); + } + + self.r_U_secondary.absorb_in_ro(&mut hasher); + hasher.squeeze(NUM_HASH_BITS) + }; + + let hash_secondary = { + let num_absorbs = num_ro_inputs( + self.num_augmented_circuits, + pp.augmented_circuit_params_secondary.get_n_limbs(), + pp.circuit_shape_secondary.F_arity, + false, // is_primary + ); + let mut hasher = ::RO::new(pp.ro_consts_primary.clone(), num_absorbs); + 
hasher.absorb(scalar_as_base::(self.pp_digest)); + hasher.absorb( as Engine>::Scalar::from(self.i as u64)); + + for e in z0_secondary { + hasher.absorb(*e); + } + for e in &self.zi_secondary { + hasher.absorb(*e); + } + + self.r_U_primary.iter().enumerate().for_each(|(i, U)| { + U.as_ref() + .unwrap_or(&RelaxedR1CSInstance::default(&*pp.ck_primary, &pp[i].r1cs_shape)) + .absorb_in_ro(&mut hasher); + }); + hasher.squeeze(NUM_HASH_BITS) + }; + + if hash_primary != self.l_u_secondary.X[0] { + debug!( + "hash_primary {:?} not equal l_u_secondary.X[0] {:?}", + hash_primary, self.l_u_secondary.X[0] + ); + return Err(SuperNovaError::NovaError(NovaError::ProofVerifyError)); + } + if hash_secondary != scalar_as_base::>(self.l_u_secondary.X[1]) { + debug!( + "hash_secondary {:?} not equal l_u_secondary.X[1] {:?}", + hash_secondary, self.l_u_secondary.X[1] + ); + return Err(SuperNovaError::NovaError(NovaError::ProofVerifyError)); + } + + // check the satisfiability of all instance/witness pairs + let (res_r_primary, (res_r_secondary, res_l_secondary)) = rayon::join( + || { + self.r_U_primary.par_iter().zip_eq(self.r_W_primary.par_iter()).enumerate().try_for_each( + |(i, (u, w))| { + if let (Some(u), Some(w)) = (u, w) { + pp[i].r1cs_shape.is_sat_relaxed(&pp.ck_primary, u, w)? 
+ } + Ok(()) + }, + ) + }, + || { + rayon::join( + || { + pp.circuit_shape_secondary.r1cs_shape.is_sat_relaxed( + &pp.ck_secondary, + &self.r_U_secondary, + &self.r_W_secondary, + ) + }, + || { + pp.circuit_shape_secondary.r1cs_shape.is_sat( + &pp.ck_secondary, + &self.l_u_secondary, + &self.l_w_secondary, + ) + }, + ) + }, + ); + + res_r_primary.map_err(|err| match err { + NovaError::UnSatIndex(i) => SuperNovaError::UnSatIndex("r_primary", i), + e => SuperNovaError::NovaError(e), + })?; + res_r_secondary.map_err(|err| match err { + NovaError::UnSatIndex(i) => SuperNovaError::UnSatIndex("r_secondary", i), + e => SuperNovaError::NovaError(e), + })?; + res_l_secondary.map_err(|err| match err { + NovaError::UnSatIndex(i) => SuperNovaError::UnSatIndex("l_secondary", i), + e => SuperNovaError::NovaError(e), + })?; + + Ok((self.zi_primary.clone(), self.zi_secondary.clone())) + } +} + +/// SuperNova helper trait, for implementors that provide sets of sub-circuits +/// to be proved via NIVC. `C1` must be a type (likely an `Enum`) for which a +/// potentially-distinct instance can be supplied for each `index` below +/// `self.num_circuits()`. +pub trait NonUniformCircuit +where E1: CurveCycleEquipped { + /// The type of the step-circuits on the primary + type C1: StepCircuit; + /// The type of the step-circuits on the secondary + type C2: StepCircuit< as Engine>::Scalar>; + + /// Initial circuit index, defaults to zero. + fn initial_circuit_index(&self) -> usize { 0 } + + /// How many circuits are provided? + fn num_circuits(&self) -> usize; + + /// Return a new instance of the primary circuit at `index`. + fn primary_circuit(&self, circuit_index: usize) -> Self::C1; + + /// Return a new instance of the secondary circuit. + fn secondary_circuit(&self) -> Self::C2; +} + +/// Compute the circuit digest of a supernova [`StepCircuit`]. +/// +/// Note for callers: This function should be called with its performance +/// characteristics in mind. 
It will synthesize and digest the full `circuit` +/// given. +pub fn circuit_digest>( + circuit: &C, + num_augmented_circuits: usize, +) -> E1::Scalar { + let augmented_circuit_params = + SuperNovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, true); + + // ro_consts_circuit are parameterized by E2 because the type alias uses + // E2::Base = E1::Scalar + let ro_consts_circuit = ROConstantsCircuit::>::default(); + + // Initialize ck for the primary + let augmented_circuit: SuperNovaAugmentedCircuit<'_, Dual, C> = + SuperNovaAugmentedCircuit::new( + &augmented_circuit_params, + None, + circuit, + ro_consts_circuit, + num_augmented_circuits, + ); + let mut cs: ShapeCS = ShapeCS::new(); + let _ = augmented_circuit.synthesize(&mut cs); + + let F_arity = circuit.arity(); + let circuit_params = R1CSWithArity::new(cs.r1cs_shape(), F_arity); + circuit_params.digest() +} + +/// Compute the number of absorbs for the random-oracle computing the circuit +/// output X = H(vk, i, pc, z0, zi, U) +fn num_ro_inputs(num_circuits: usize, num_limbs: usize, arity: usize, is_primary: bool) -> usize { + let num_circuits = if is_primary { 1 } else { num_circuits }; + + // [W(x,y,∞), E(x,y,∞), u] + [X0, X1] * #num_limb + let instance_size = 3 + 3 + 1 + 2 * num_limbs; + + 2 // params, i + + usize::from(is_primary) // optional program counter + + 2 * arity // z0, zi + + num_circuits * instance_size +} + +pub mod error; +pub mod snark; +mod utils; + +#[cfg(test)] mod test; diff --git a/prover/src/supernova/snark.rs b/prover/src/supernova/snark.rs new file mode 100644 index 0000000..cb2645b --- /dev/null +++ b/prover/src/supernova/snark.rs @@ -0,0 +1,569 @@ +//! 
This module defines a final compressing SNARK for supernova proofs + +use ff::PrimeField; +use serde::{Deserialize, Serialize}; + +use super::{error::SuperNovaError, PublicParams, RecursiveSNARK}; +use crate::{ + constants::NUM_HASH_BITS, + errors::NovaError, + r1cs::{R1CSInstance, RelaxedR1CSWitness}, + scalar_as_base, + traits::{ + snark::{BatchedRelaxedR1CSSNARKTrait, RelaxedR1CSSNARKTrait}, + AbsorbInROTrait, CurveCycleEquipped, Dual, Engine, ROTrait, + }, + RelaxedR1CSInstance, NIFS, +}; + +/// A type that holds the prover key for `CompressedSNARK` +#[derive(Debug, Serialize, Deserialize)] +pub struct ProverKey +where + E1: CurveCycleEquipped, + S1: BatchedRelaxedR1CSSNARKTrait, + S2: RelaxedR1CSSNARKTrait>, { + pub pk_primary: S1::ProverKey, + pub pk_secondary: S2::ProverKey, +} + +/// A type that holds the verifier key for `CompressedSNARK` +#[derive(Debug, Serialize, Deserialize)] +pub struct VerifierKey +where + E1: CurveCycleEquipped, + S1: BatchedRelaxedR1CSSNARKTrait, + S2: RelaxedR1CSSNARKTrait>, { + pub vk_primary: S1::VerifierKey, + pub vk_secondary: S2::VerifierKey, +} + +/// A SNARK that proves the knowledge of a valid `RecursiveSNARK` +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct CompressedSNARK +where + E1: CurveCycleEquipped, + S1: BatchedRelaxedR1CSSNARKTrait, + S2: RelaxedR1CSSNARKTrait>, { + r_U_primary: Vec>, + r_W_snark_primary: S1, + + r_U_secondary: RelaxedR1CSInstance>, + l_u_secondary: R1CSInstance>, + nifs_secondary: NIFS>, + f_W_snark_secondary: S2, + + num_steps: usize, + program_counter: E1::Scalar, + + zn_primary: Vec, + zn_secondary: Vec< as Engine>::Scalar>, +} + +impl CompressedSNARK +where + E1: CurveCycleEquipped, + S1: BatchedRelaxedR1CSSNARKTrait, + S2: RelaxedR1CSSNARKTrait>, +{ + pub fn initialize_pk( + pp: &PublicParams, + primary_vk_digest: E1::Scalar, + secondary_vk_digest: as Engine>::Scalar, + ) -> Result, SuperNovaError> { + // TODO: Should we actually clone here? 
+ let pk_primary = S1::initialize_pk(pp.ck_primary.clone(), primary_vk_digest)?; + let pk_secondary = S2::initialize_pk(pp.ck_secondary.clone(), secondary_vk_digest)?; + + return Ok(ProverKey { pk_primary, pk_secondary }); + } + + /// Creates prover and verifier keys for `CompressedSNARK` + pub fn setup( + pp: &PublicParams, + ) -> Result<(ProverKey, VerifierKey), SuperNovaError> { + let (pk_primary, vk_primary) = S1::setup(pp.ck_primary.clone(), pp.primary_r1cs_shapes())?; + + let (pk_secondary, vk_secondary) = + S2::setup(pp.ck_secondary.clone(), &pp.circuit_shape_secondary.r1cs_shape)?; + + let prover_key = ProverKey { pk_primary, pk_secondary }; + let verifier_key = VerifierKey { vk_primary, vk_secondary }; + + Ok((prover_key, verifier_key)) + } + + /// Create a new `CompressedSNARK` + pub fn prove( + pp: &PublicParams, + pk: &ProverKey, + recursive_snark: &RecursiveSNARK, + ) -> Result { + // fold the secondary circuit's instance + let res_secondary = NIFS::prove( + &*pp.ck_secondary, + &pp.ro_consts_secondary, + &scalar_as_base::(pp.digest()), + &pp.circuit_shape_secondary.r1cs_shape, + &recursive_snark.r_U_secondary, + &recursive_snark.r_W_secondary, + &recursive_snark.l_u_secondary, + &recursive_snark.l_w_secondary, + ); + + let (nifs_secondary, (f_U_secondary, f_W_secondary), _) = res_secondary?; + + // Prepare the list of primary Relaxed R1CS instances (a default instance is + // provided for uninitialized circuits) + let r_U_primary = recursive_snark + .r_U_primary + .iter() + .enumerate() + .map(|(idx, r_U)| { + r_U + .clone() + .unwrap_or_else(|| RelaxedR1CSInstance::default(&*pp.ck_primary, &pp[idx].r1cs_shape)) + }) + .collect::>(); + + // Prepare the list of primary relaxed R1CS witnesses (a default witness is + // provided for uninitialized circuits) + let r_W_primary: Vec> = recursive_snark + .r_W_primary + .iter() + .enumerate() + .map(|(idx, r_W)| { + r_W.clone().unwrap_or_else(|| RelaxedR1CSWitness::default(&pp[idx].r1cs_shape)) + }) + 
.collect::>(); + + // Generate a primary SNARK proof for the list of primary circuits + let r_W_snark_primary = S1::prove( + &pp.ck_primary, + &pk.pk_primary, + pp.primary_r1cs_shapes(), + &r_U_primary, + &r_W_primary, + )?; + + // Generate a secondary SNARK proof for the secondary circuit + let f_W_snark_secondary = S2::prove( + &pp.ck_secondary, + &pk.pk_secondary, + &pp.circuit_shape_secondary.r1cs_shape, + &f_U_secondary, + &f_W_secondary, + )?; + + let compressed_snark = Self { + r_U_primary, + r_W_snark_primary, + + r_U_secondary: recursive_snark.r_U_secondary.clone(), + l_u_secondary: recursive_snark.l_u_secondary.clone(), + nifs_secondary, + f_W_snark_secondary, + + num_steps: recursive_snark.i, + program_counter: recursive_snark.program_counter, + + zn_primary: recursive_snark.zi_primary.clone(), + zn_secondary: recursive_snark.zi_secondary.clone(), + }; + + Ok(compressed_snark) + } + + /// Verify the correctness of the `CompressedSNARK` + pub fn verify( + &self, + pp: &PublicParams, + vk: &VerifierKey, + z0_primary: &[E1::Scalar], + z0_secondary: &[ as Engine>::Scalar], + ) -> Result<(Vec, Vec< as Engine>::Scalar>), SuperNovaError> { + let last_circuit_idx = field_as_usize(self.program_counter); + + let num_field_primary_ro = 3 // params_next, i_new, program_counter_new + + 2 * pp[last_circuit_idx].F_arity // zo, z1 + + (7 + 2 * pp.augmented_circuit_params_primary.get_n_limbs()); // # 1 * (7 + [X0, X1]*#num_limb) + + // secondary circuit + // NOTE: This count ensure the number of witnesses sent by the prover must equal + // the number of NIVC circuits + let num_field_secondary_ro = 2 // params_next, i_new + + 2 * pp.circuit_shape_secondary.F_arity // zo, z1 + + pp.circuit_shapes.len() * (7 + 2 * pp.augmented_circuit_params_primary.get_n_limbs()); // #num_augment + + // Compute the primary and secondary hashes given the digest, program counter, + // instances, and witnesses provided by the prover + let (hash_primary, hash_secondary) = { + let mut hasher = 
+ as Engine>::RO::new(pp.ro_consts_secondary.clone(), num_field_primary_ro); + + hasher.absorb(pp.digest()); + hasher.absorb(E1::Scalar::from(self.num_steps as u64)); + hasher.absorb(self.program_counter); + + for e in z0_primary { + hasher.absorb(*e); + } + + for e in &self.zn_primary { + hasher.absorb(*e); + } + + self.r_U_secondary.absorb_in_ro(&mut hasher); + + let mut hasher2 = + ::RO::new(pp.ro_consts_primary.clone(), num_field_secondary_ro); + + hasher2.absorb(scalar_as_base::(pp.digest())); + hasher2.absorb( as Engine>::Scalar::from(self.num_steps as u64)); + + for e in z0_secondary { + hasher2.absorb(*e); + } + + for e in &self.zn_secondary { + hasher2.absorb(*e); + } + + self.r_U_primary.iter().for_each(|U| { + U.absorb_in_ro(&mut hasher2); + }); + + (hasher.squeeze(NUM_HASH_BITS), hasher2.squeeze(NUM_HASH_BITS)) + }; + + // Compare the computed hashes with the public IO of the last invocation of + // `prove_step` + if hash_primary != self.l_u_secondary.X[0] { + return Err(NovaError::ProofVerifyError.into()); + } + + if hash_secondary != scalar_as_base::>(self.l_u_secondary.X[1]) { + return Err(NovaError::ProofVerifyError.into()); + } + + // Verify the primary SNARK + let res_primary = self.r_W_snark_primary.verify(&vk.vk_primary, &self.r_U_primary); + + // Fold the secondary circuit's instance + let f_U_secondary = self.nifs_secondary.verify( + &pp.ro_consts_secondary, + &scalar_as_base::(pp.digest()), + &self.r_U_secondary, + &self.l_u_secondary, + )?; + + // Verify the secondary SNARK + let res_secondary = self.f_W_snark_secondary.verify(&vk.vk_secondary, &f_U_secondary); + + res_primary?; + + res_secondary?; + + Ok((self.zn_primary.clone(), self.zn_secondary.clone())) + } +} + +fn field_as_usize(x: F) -> usize { + u32::from_le_bytes(x.to_repr().as_ref()[0..4].try_into().unwrap()) as usize +} + +#[cfg(test)] +mod test { + use std::marker::PhantomData; + + use bellpepper_core::{num::AllocatedNum, ConstraintSystem, SynthesisError}; + use ff::Field; + + 
use super::*; + use crate::{ + provider::{ipa_pc, Bn256EngineIPA}, + spartan::{batched, batched_ppsnark, snark::RelaxedR1CSSNARK}, + supernova::{circuit::TrivialCircuit, NonUniformCircuit, StepCircuit}, + }; + + type EE = ipa_pc::EvaluationEngine; + type S1 = batched::BatchedRelaxedR1CSSNARK>; + type S1PP = batched_ppsnark::BatchedRelaxedR1CSSNARK>; + type S2 = RelaxedR1CSSNARK>; + + #[derive(Clone)] + struct SquareCircuit { + _p: PhantomData, + } + + impl StepCircuit for SquareCircuit { + fn arity(&self) -> usize { 1 } + + fn circuit_index(&self) -> usize { 0 } + + fn synthesize>( + &self, + cs: &mut CS, + _pc: Option<&AllocatedNum>, + z: &[AllocatedNum], + ) -> Result<(Option>, Vec>), SynthesisError> + { + let z_i = &z[0]; + + let z_next = z_i.square(cs.namespace(|| "z_i^2"))?; + + let next_pc = AllocatedNum::alloc(cs.namespace(|| "next_pc"), || Ok(E::Scalar::from(1u64)))?; + + cs.enforce( + || "next_pc = 1", + |lc| lc + CS::one(), + |lc| lc + next_pc.get_variable(), + |lc| lc + CS::one(), + ); + + Ok((Some(next_pc), vec![z_next])) + } + } + + #[derive(Clone)] + struct CubeCircuit { + _p: PhantomData, + } + + impl StepCircuit for CubeCircuit { + fn arity(&self) -> usize { 1 } + + fn circuit_index(&self) -> usize { 1 } + + fn synthesize>( + &self, + cs: &mut CS, + _pc: Option<&AllocatedNum>, + z: &[AllocatedNum], + ) -> Result<(Option>, Vec>), SynthesisError> + { + let z_i = &z[0]; + + let z_sq = z_i.square(cs.namespace(|| "z_i^2"))?; + let z_cu = z_sq.mul(cs.namespace(|| "z_i^3"), z_i)?; + + let next_pc = AllocatedNum::alloc(cs.namespace(|| "next_pc"), || Ok(E::Scalar::from(0u64)))?; + + cs.enforce(|| "next_pc = 0", |lc| lc + CS::one(), |lc| lc + next_pc.get_variable(), |lc| lc); + + Ok((Some(next_pc), vec![z_cu])) + } + } + + #[derive(Clone)] + enum TestCircuit { + Square(SquareCircuit), + Cube(CubeCircuit), + } + + impl TestCircuit { + fn new(num_steps: usize) -> Vec { + let mut circuits = Vec::new(); + + for idx in 0..num_steps { + if idx % 2 == 0 { + 
circuits.push(Self::Square(SquareCircuit { _p: PhantomData })) + } else { + circuits.push(Self::Cube(CubeCircuit { _p: PhantomData })) + } + } + + circuits + } + } + + impl StepCircuit for TestCircuit { + fn arity(&self) -> usize { 1 } + + fn circuit_index(&self) -> usize { + match self { + Self::Square(c) => c.circuit_index(), + Self::Cube(c) => c.circuit_index(), + } + } + + fn synthesize>( + &self, + cs: &mut CS, + pc: Option<&AllocatedNum>, + z: &[AllocatedNum], + ) -> Result<(Option>, Vec>), SynthesisError> + { + match self { + Self::Square(c) => c.synthesize(cs, pc, z), + Self::Cube(c) => c.synthesize(cs, pc, z), + } + } + } + + impl NonUniformCircuit for TestCircuit { + type C1 = Self; + type C2 = TrivialCircuit< as Engine>::Scalar>; + + fn num_circuits(&self) -> usize { 2 } + + fn primary_circuit(&self, circuit_index: usize) -> Self { + match circuit_index { + 0 => Self::Square(SquareCircuit { _p: PhantomData }), + 1 => Self::Cube(CubeCircuit { _p: PhantomData }), + _ => panic!("Invalid circuit index"), + } + } + + fn secondary_circuit(&self) -> Self::C2 { Default::default() } + } + + #[derive(Clone)] + struct BigPowerCircuit { + _p: PhantomData, + } + + impl StepCircuit for BigPowerCircuit { + fn arity(&self) -> usize { 1 } + + fn circuit_index(&self) -> usize { 1 } + + fn synthesize>( + &self, + cs: &mut CS, + _pc: Option<&AllocatedNum>, + z: &[AllocatedNum], + ) -> Result<(Option>, Vec>), SynthesisError> + { + let mut x = z[0].clone(); + let mut y = x.clone(); + for i in 0..10_000 { + y = x.square(cs.namespace(|| format!("x_sq_{i}")))?; + x = y.clone(); + } + + let next_pc = AllocatedNum::alloc(cs.namespace(|| "next_pc"), || Ok(E::Scalar::from(0u64)))?; + + cs.enforce(|| "next_pc = 0", |lc| lc + CS::one(), |lc| lc + next_pc.get_variable(), |lc| lc); + + Ok((Some(next_pc), vec![y])) + } + } + + #[derive(Clone)] + enum BigTestCircuit { + Square(SquareCircuit), + BigPower(BigPowerCircuit), + } + + impl BigTestCircuit { + fn new(num_steps: usize) -> Vec { + 
let mut circuits = Vec::new(); + + for idx in 0..num_steps { + if idx % 2 == 0 { + circuits.push(Self::Square(SquareCircuit { _p: PhantomData })) + } else { + circuits.push(Self::BigPower(BigPowerCircuit { _p: PhantomData })) + } + } + + circuits + } + } + + impl StepCircuit for BigTestCircuit { + fn arity(&self) -> usize { 1 } + + fn circuit_index(&self) -> usize { + match self { + Self::Square(c) => c.circuit_index(), + Self::BigPower(c) => c.circuit_index(), + } + } + + fn synthesize>( + &self, + cs: &mut CS, + pc: Option<&AllocatedNum>, + z: &[AllocatedNum], + ) -> Result<(Option>, Vec>), SynthesisError> + { + match self { + Self::Square(c) => c.synthesize(cs, pc, z), + Self::BigPower(c) => c.synthesize(cs, pc, z), + } + } + } + + impl NonUniformCircuit for BigTestCircuit { + type C1 = Self; + type C2 = TrivialCircuit< as Engine>::Scalar>; + + fn num_circuits(&self) -> usize { 2 } + + fn primary_circuit(&self, circuit_index: usize) -> Self { + match circuit_index { + 0 => Self::Square(SquareCircuit { _p: PhantomData }), + 1 => Self::BigPower(BigPowerCircuit { _p: PhantomData }), + _ => panic!("Invalid circuit index"), + } + } + + fn secondary_circuit(&self) -> Self::C2 { Default::default() } + } + + fn test_compression_with(num_steps: usize, circuits_factory: F) + where + E1: CurveCycleEquipped, + S1: BatchedRelaxedR1CSSNARKTrait, + S2: RelaxedR1CSSNARKTrait>, + C: NonUniformCircuit as Engine>::Scalar>> + + StepCircuit, + F: Fn(usize) -> Vec, { + let secondary_circuit = TrivialCircuit::default(); + let test_circuits = circuits_factory(num_steps); + + let pp = PublicParams::setup(&test_circuits[0], &*S1::ck_floor(), &*S2::ck_floor()); + + let z0_primary = vec![E1::Scalar::from(17u64)]; + let z0_secondary = vec![ as Engine>::Scalar::ZERO]; + + let mut recursive_snark = RecursiveSNARK::new( + &pp, + &test_circuits[0], + &test_circuits[0], + &secondary_circuit, + &z0_primary, + &z0_secondary, + ) + .unwrap(); + + for circuit in test_circuits.iter().take(num_steps) 
{ + recursive_snark.prove_step(&pp, circuit, &secondary_circuit).unwrap(); + + recursive_snark.verify(&pp, &z0_primary, &z0_secondary).unwrap(); + } + + let (prover_key, verifier_key) = CompressedSNARK::<_, S1, S2>::setup(&pp).unwrap(); + + let compressed_snark = CompressedSNARK::prove(&pp, &prover_key, &recursive_snark).unwrap(); + + compressed_snark.verify(&pp, &verifier_key, &z0_primary, &z0_secondary).unwrap(); + } + + #[test] + fn test_nivc_trivial_with_compression() { + const NUM_STEPS: usize = 6; + test_compression_with::, S2<_>, _, _>(NUM_STEPS, TestCircuit::new); + test_compression_with::, S2<_>, _, _>(NUM_STEPS, TestCircuit::new); + } + + #[test] + fn test_compression_with_circuit_size_difference() { + const NUM_STEPS: usize = 4; + test_compression_with::, S2<_>, _, _>(NUM_STEPS, BigTestCircuit::new); + test_compression_with::, S2<_>, _, _>(NUM_STEPS, BigTestCircuit::new); + } +} diff --git a/prover/src/supernova/test.rs b/prover/src/supernova/test.rs new file mode 100644 index 0000000..74cabc3 --- /dev/null +++ b/prover/src/supernova/test.rs @@ -0,0 +1,770 @@ +use core::marker::PhantomData; +use std::fmt::Write; + +use bellpepper_core::{num::AllocatedNum, ConstraintSystem, SynthesisError}; +use expect_test::{expect, Expect}; +use ff::{Field, PrimeField}; +use tap::TapOptional; + +use super::{utils::get_selector_vec_from_index, *}; +use crate::{ + bellpepper::test_shape_cs::TestShapeCS, + gadgets::{alloc_one, alloc_zero}, + provider::{poseidon::PoseidonConstantsCircuit, Bn256EngineIPA, GrumpkinEngine}, + supernova::circuit::{StepCircuit, TrivialCircuit}, + traits::snark::default_ck_hint, +}; + +#[derive(Clone, Debug, Default)] +struct CubicCircuit { + _p: PhantomData, + circuit_index: usize, + rom_size: usize, +} + +impl CubicCircuit { + fn new(circuit_index: usize, rom_size: usize) -> Self { + Self { circuit_index, rom_size, _p: PhantomData } + } +} + +fn next_rom_index_and_pc>( + cs: &mut CS, + rom_index: &AllocatedNum, + allocated_rom: &[AllocatedNum], 
+ pc: &AllocatedNum, +) -> Result<(AllocatedNum, AllocatedNum), SynthesisError> { + // Compute a selector for the current rom_index in allocated_rom + let current_rom_selector = + get_selector_vec_from_index(cs.namespace(|| "rom selector"), rom_index, allocated_rom.len())?; + + // Enforce that allocated_rom[rom_index] = pc + for (rom, bit) in allocated_rom.iter().zip_eq(current_rom_selector.iter()) { + // if bit = 1, then rom = pc + // bit * (rom - pc) = 0 + cs.enforce( + || "enforce bit = 1 => rom = pc", + |lc| lc + &bit.lc(CS::one(), F::ONE), + |lc| lc + rom.get_variable() - pc.get_variable(), + |lc| lc, + ); + } + + // Get the index of the current rom, or the index of the invalid rom if no match + let current_rom_index = current_rom_selector + .iter() + .position(|bit| bit.get_value().is_some_and(|v| v)) + .unwrap_or_default(); + let next_rom_index = current_rom_index + 1; + + let rom_index_next = AllocatedNum::alloc_infallible(cs.namespace(|| "next rom index"), || { + F::from(next_rom_index as u64) + }); + cs.enforce( + || " rom_index + 1 - next_rom_index_num = 0", + |lc| lc, + |lc| lc, + |lc| lc + rom_index.get_variable() + CS::one() - rom_index_next.get_variable(), + ); + + // Allocate the next pc without checking. + // The next iteration will check whether the next pc is valid. 
+ let pc_next = AllocatedNum::alloc_infallible(cs.namespace(|| "next pc"), || { + allocated_rom.get(next_rom_index).and_then(|v| v.get_value()).unwrap_or(-F::ONE) + }); + + Ok((rom_index_next, pc_next)) +} + +impl StepCircuit for CubicCircuit +where F: PrimeField +{ + fn arity(&self) -> usize { + 2 + self.rom_size // value + rom_pc + rom[].len() + } + + fn circuit_index(&self) -> usize { self.circuit_index } + + fn synthesize>( + &self, + cs: &mut CS, + pc: Option<&AllocatedNum>, + z: &[AllocatedNum], + ) -> Result<(Option>, Vec>), SynthesisError> { + let rom_index = &z[1]; + let allocated_rom = &z[2..]; + + let (rom_index_next, pc_next) = next_rom_index_and_pc( + &mut cs.namespace(|| "next and rom_index and pc"), + rom_index, + allocated_rom, + pc.ok_or(SynthesisError::AssignmentMissing)?, + )?; + + // Consider a cubic equation: `x^3 + x + 5 = y`, where `x` and `y` are + // respectively the input and output. + let x = &z[0]; + let x_sq = x.square(cs.namespace(|| "x_sq"))?; + let x_cu = x_sq.mul(cs.namespace(|| "x_cu"), x)?; + let y = AllocatedNum::alloc(cs.namespace(|| "y"), || { + Ok(x_cu.get_value().unwrap() + x.get_value().unwrap() + F::from(5u64)) + })?; + + cs.enforce( + || "y = x^3 + x + 5", + |lc| { + lc + x_cu.get_variable() + + x.get_variable() + + CS::one() + + CS::one() + + CS::one() + + CS::one() + + CS::one() + }, + |lc| lc + CS::one(), + |lc| lc + y.get_variable(), + ); + + let mut z_next = vec![y]; + z_next.push(rom_index_next); + z_next.extend(z[2..].iter().cloned()); + Ok((Some(pc_next), z_next)) + } +} + +#[derive(Clone, Debug, Default)] +struct SquareCircuit { + _p: PhantomData, + circuit_index: usize, + rom_size: usize, +} + +impl SquareCircuit { + fn new(circuit_index: usize, rom_size: usize) -> Self { + Self { circuit_index, rom_size, _p: PhantomData } + } +} + +impl StepCircuit for SquareCircuit +where F: PrimeField +{ + fn arity(&self) -> usize { + 2 + self.rom_size // value + rom_pc + rom[].len() + } + + fn circuit_index(&self) -> usize { 
self.circuit_index } + + fn synthesize>( + &self, + cs: &mut CS, + pc: Option<&AllocatedNum>, + z: &[AllocatedNum], + ) -> Result<(Option>, Vec>), SynthesisError> { + let rom_index = &z[1]; + let allocated_rom = &z[2..]; + + let (rom_index_next, pc_next) = next_rom_index_and_pc( + &mut cs.namespace(|| "next and rom_index and pc"), + rom_index, + allocated_rom, + pc.ok_or(SynthesisError::AssignmentMissing)?, + )?; + + // Consider an equation: `x^2 + x + 5 = y`, where `x` and `y` are respectively + // the input and output. + let x = &z[0]; + let x_sq = x.square(cs.namespace(|| "x_sq"))?; + let y = AllocatedNum::alloc(cs.namespace(|| "y"), || { + Ok(x_sq.get_value().unwrap() + x.get_value().unwrap() + F::from(5u64)) + })?; + + cs.enforce( + || "y = x^2 + x + 5", + |lc| { + lc + x_sq.get_variable() + + x.get_variable() + + CS::one() + + CS::one() + + CS::one() + + CS::one() + + CS::one() + }, + |lc| lc + CS::one(), + |lc| lc + y.get_variable(), + ); + + let mut z_next = vec![y]; + z_next.push(rom_index_next); + z_next.extend(z[2..].iter().cloned()); + Ok((Some(pc_next), z_next)) + } +} + +fn print_constraints_name_on_error_index< + E1, + C1: StepCircuit, + C2: StepCircuit< as Engine>::Scalar>, +>( + err: &SuperNovaError, + pp: &PublicParams, + c_primary: &C1, + c_secondary: &C2, + num_augmented_circuits: usize, +) where + E1: CurveCycleEquipped, +{ + match err { + SuperNovaError::UnSatIndex(msg, index) if *msg == "r_primary" => { + let circuit_primary: SuperNovaAugmentedCircuit<'_, Dual, C1> = + SuperNovaAugmentedCircuit::new( + &pp.augmented_circuit_params_primary, + None, + c_primary, + pp.ro_consts_circuit_primary.clone(), + num_augmented_circuits, + ); + let mut cs: TestShapeCS = TestShapeCS::new(); + let _ = circuit_primary.synthesize(&mut cs); + cs.constraints + .get(*index) + .tap_some(|constraint| debug!("{msg} failed at constraint {}", constraint.3)); + }, + SuperNovaError::UnSatIndex(msg, index) if *msg == "r_secondary" || *msg == "l_secondary" => { + let 
circuit_secondary: SuperNovaAugmentedCircuit<'_, E1, C2> = SuperNovaAugmentedCircuit::new( + &pp.augmented_circuit_params_secondary, + None, + c_secondary, + pp.ro_consts_circuit_secondary.clone(), + num_augmented_circuits, + ); + let mut cs: TestShapeCS> = TestShapeCS::new(); + let _ = circuit_secondary.synthesize(&mut cs); + cs.constraints + .get(*index) + .tap_some(|constraint| debug!("{msg} failed at constraint {}", constraint.3)); + }, + _ => (), + } +} + +const OPCODE_0: usize = 0; +const OPCODE_1: usize = 1; + +struct TestROM { + rom: Vec, + _p: PhantomData, +} + +#[derive(Debug, Clone)] +enum TestROMCircuit { + Cubic(CubicCircuit), + Square(SquareCircuit), +} + +impl StepCircuit for TestROMCircuit { + fn arity(&self) -> usize { + match self { + Self::Cubic(x) => x.arity(), + Self::Square(x) => x.arity(), + } + } + + fn circuit_index(&self) -> usize { + match self { + Self::Cubic(x) => x.circuit_index(), + Self::Square(x) => x.circuit_index(), + } + } + + fn synthesize>( + &self, + cs: &mut CS, + pc: Option<&AllocatedNum>, + z: &[AllocatedNum], + ) -> Result<(Option>, Vec>), SynthesisError> { + match self { + Self::Cubic(x) => x.synthesize(cs, pc, z), + Self::Square(x) => x.synthesize(cs, pc, z), + } + } +} + +impl NonUniformCircuit for TestROM +where E1: CurveCycleEquipped +{ + type C1 = TestROMCircuit; + type C2 = TrivialCircuit< as Engine>::Scalar>; + + fn num_circuits(&self) -> usize { 2 } + + fn primary_circuit(&self, circuit_index: usize) -> Self::C1 { + match circuit_index { + 0 => TestROMCircuit::Cubic(CubicCircuit::new(circuit_index, self.rom.len())), + 1 => TestROMCircuit::Square(SquareCircuit::new(circuit_index, self.rom.len())), + _ => panic!("unsupported primary circuit index"), + } + } + + fn secondary_circuit(&self) -> Self::C2 { Default::default() } + + fn initial_circuit_index(&self) -> usize { self.rom[0] } +} + +impl TestROM { + fn new(rom: Vec) -> Self { Self { rom, _p: Default::default() } } +} + +fn test_trivial_nivc_with() +where E1: 
CurveCycleEquipped { + // Here demo a simple RAM machine + // - with 2 argumented circuit + // - each argumented circuit contains primary and secondary circuit + // - a memory commitment via a public IO `rom` (like a program) to constraint the sequence + // execution + + // This test also ready to add more argumented circuit and ROM can be arbitrary + // length + + // ROM is for constraints the sequence of execution order for opcode + + // TODO: replace with memory commitment along with suggestion from Supernova 4.4 + // optimisations + + // This is mostly done with the existing Nova code. With additions of U_i[] and + // program_counter checks in the augmented circuit. + + let rom = vec![ + OPCODE_1, OPCODE_1, OPCODE_0, OPCODE_0, OPCODE_1, OPCODE_1, OPCODE_0, OPCODE_0, OPCODE_1, + OPCODE_1, + ]; // Rom can be arbitrary length. + + let test_rom = TestROM::::new(rom); + + let pp = PublicParams::setup(&test_rom, &*default_ck_hint(), &*default_ck_hint()); + + // extend z0_primary/secondary with rom content + let mut z0_primary = vec![::Scalar::ONE]; + z0_primary.push(::Scalar::ZERO); // rom_index = 0 + z0_primary.extend(test_rom.rom.iter().map(|opcode| ::Scalar::from(*opcode as u64))); + let z0_secondary = vec![ as Engine>::Scalar::ONE]; + + let mut recursive_snark_option: Option> = None; + + for &op_code in test_rom.rom.iter() { + let circuit_primary = test_rom.primary_circuit(op_code); + let circuit_secondary = test_rom.secondary_circuit(); + + let mut recursive_snark = recursive_snark_option.unwrap_or_else(|| { + RecursiveSNARK::new( + &pp, + &test_rom, + &circuit_primary, + &circuit_secondary, + &z0_primary, + &z0_secondary, + ) + .unwrap() + }); + + recursive_snark.prove_step(&pp, &circuit_primary, &circuit_secondary).unwrap(); + recursive_snark + .verify(&pp, &z0_primary, &z0_secondary) + .map_err(|err| { + print_constraints_name_on_error_index( + &err, + &pp, + &circuit_primary, + &circuit_secondary, + test_rom.num_circuits(), + ) + }) + .unwrap(); + + 
recursive_snark_option = Some(recursive_snark) + } + + assert!(recursive_snark_option.is_some()); + + // Now you can handle the Result using if let + let RecursiveSNARK { zi_primary, zi_secondary, program_counter, .. } = + &recursive_snark_option.unwrap(); + + println!("zi_primary: {:?}", zi_primary); + println!("zi_secondary: {:?}", zi_secondary); + println!("final program_counter: {:?}", program_counter); + + // The final program counter should be -1 + assert_eq!(*program_counter, -::Scalar::ONE); +} + +#[test] +#[tracing_test::traced_test] +fn test_trivial_nivc() { + // Experimenting with selecting the running claims for nifs + test_trivial_nivc_with::(); +} + +// In the following we use 1 to refer to the primary, and 2 to refer to the +// secondary circuit +fn test_recursive_circuit_with( + primary_params: &SuperNovaAugmentedCircuitParams, + secondary_params: &SuperNovaAugmentedCircuitParams, + ro_consts1: ROConstantsCircuit>, + ro_consts2: ROConstantsCircuit, + num_constraints_primary: &Expect, + num_constraints_secondary: &Expect, +) where + E1: CurveCycleEquipped, +{ + // Initialize the shape and ck for the primary + let step_circuit1 = TrivialCircuit::default(); + let arity1 = step_circuit1.arity(); + let circuit1: SuperNovaAugmentedCircuit< + '_, + Dual, + TrivialCircuit< as Engine>::Base>, + > = SuperNovaAugmentedCircuit::new(primary_params, None, &step_circuit1, ro_consts1.clone(), 2); + let mut cs: ShapeCS = ShapeCS::new(); + if let Err(e) = circuit1.synthesize(&mut cs) { + panic!("{}", e) + } + let (shape1, ck1) = cs.r1cs_shape_and_key(&*default_ck_hint()); + num_constraints_primary.assert_eq(&cs.num_constraints().to_string()); + + // Initialize the shape and ck for the secondary + let step_circuit2 = TrivialCircuit::default(); + let arity2 = step_circuit2.arity(); + let circuit2: SuperNovaAugmentedCircuit<'_, E1, TrivialCircuit<::Base>> = + SuperNovaAugmentedCircuit::new(secondary_params, None, &step_circuit2, ro_consts2.clone(), 2); + let mut cs: 
ShapeCS> = ShapeCS::new(); + if let Err(e) = circuit2.synthesize(&mut cs) { + panic!("{}", e) + } + let (shape2, ck2) = cs.r1cs_shape_and_key(&*default_ck_hint()); + num_constraints_secondary.assert_eq(&cs.num_constraints().to_string()); + + // Execute the base case for the primary + let zero1 = < as Engine>::Base as Field>::ZERO; + let z0 = vec![zero1; arity1]; + let mut cs1 = SatisfyingAssignment::::new(); + let inputs1: SuperNovaAugmentedCircuitInputs<'_, Dual> = SuperNovaAugmentedCircuitInputs::new( + scalar_as_base::(zero1), // pass zero for testing + zero1, + &z0, + None, + None, + None, + None, + Some(zero1), + zero1, + ); + let step_circuit = TrivialCircuit::default(); + let circuit1: SuperNovaAugmentedCircuit< + '_, + Dual, + TrivialCircuit< as Engine>::Base>, + > = SuperNovaAugmentedCircuit::new(primary_params, Some(inputs1), &step_circuit, ro_consts1, 2); + if let Err(e) = circuit1.synthesize(&mut cs1) { + panic!("{}", e) + } + let (inst1, witness1) = cs1.r1cs_instance_and_witness(&shape1, &ck1).unwrap(); + // Make sure that this is satisfiable + shape1.is_sat(&ck1, &inst1, &witness1).unwrap(); + + // Execute the base case for the secondary + let zero2 = <::Base as Field>::ZERO; + let z0 = vec![zero2; arity2]; + let mut cs2 = SatisfyingAssignment::>::new(); + let inputs2: SuperNovaAugmentedCircuitInputs<'_, E1> = SuperNovaAugmentedCircuitInputs::new( + scalar_as_base::>(zero2), // pass zero for testing + zero2, + &z0, + None, + None, + Some(&inst1), + None, + None, + zero2, + ); + let step_circuit = TrivialCircuit::default(); + let circuit2: SuperNovaAugmentedCircuit<'_, E1, TrivialCircuit<::Base>> = + SuperNovaAugmentedCircuit::new(secondary_params, Some(inputs2), &step_circuit, ro_consts2, 2); + if let Err(e) = circuit2.synthesize(&mut cs2) { + panic!("{}", e) + } + let (inst2, witness2) = cs2.r1cs_instance_and_witness(&shape2, &ck2).unwrap(); + // Make sure that it is satisfiable + shape2.is_sat(&ck2, &inst2, &witness2).unwrap(); +} + +#[test] +fn 
test_recursive_circuit() { + let params1 = SuperNovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, true); + let params2 = SuperNovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, false); + let ro_consts1: ROConstantsCircuit = PoseidonConstantsCircuit::default(); + let ro_consts2: ROConstantsCircuit = PoseidonConstantsCircuit::default(); + + test_recursive_circuit_with::( + ¶ms1, + ¶ms2, + ro_consts1, + ro_consts2, + &expect!["10004"], + &expect!["12206"], + ); +} + +fn test_pp_digest_with(non_uniform_circuit: &NC, expected: &Expect) +where + E1: CurveCycleEquipped, + NC: NonUniformCircuit, { + // TODO: add back in https://github.com/argumentcomputer/arecibo/issues/53 + // // this tests public parameters with a size specifically intended for a + // spark-compressed SNARK let pp_hint1 = + // Some(SPrime::::commitment_key_floor()); let pp_hint2 = + // Some(SPrime::::commitment_key_floor()); + let pp = PublicParams::::setup(non_uniform_circuit, &*default_ck_hint(), &*default_ck_hint()); + + let digest_str = pp.digest().to_repr().as_ref().iter().fold(String::new(), |mut output, b| { + let _ = write!(output, "{b:02x}"); + output + }); + expected.assert_eq(&digest_str); +} + +#[test] +fn test_supernova_pp_digest() { + let rom = vec![ + OPCODE_1, OPCODE_1, OPCODE_0, OPCODE_0, OPCODE_1, OPCODE_1, OPCODE_0, OPCODE_0, OPCODE_1, + OPCODE_1, + ]; // Rom can be arbitrary length. + let test_rom_grumpkin = TestROM::::new(rom); + + test_pp_digest_with::(&test_rom_grumpkin, &expect![ + "30418e576c11dd698054a6cc69d1b1e43ddf0f562abfb50b777147afad741a01" + ]); +} + +// y is a non-deterministic hint representing the cube root of the input at a +// step. 
+#[derive(Clone, Debug)] +struct CubeRootCheckingCircuit { + y: Option, +} + +impl StepCircuit for CubeRootCheckingCircuit +where F: PrimeField +{ + fn arity(&self) -> usize { 1 } + + fn circuit_index(&self) -> usize { 0 } + + fn synthesize>( + &self, + cs: &mut CS, + _pc: Option<&AllocatedNum>, + z: &[AllocatedNum], + ) -> Result<(Option>, Vec>), SynthesisError> { + let x = &z[0]; + + // we allocate a variable and set it to the provided non-deterministic hint. + let y = AllocatedNum::alloc(cs.namespace(|| "y"), || { + self.y.ok_or(SynthesisError::AssignmentMissing) + })?; + + // We now check if y = x^{1/3} by checking if y^3 = x + let y_sq = y.square(cs.namespace(|| "y_sq"))?; + let y_cube = y_sq.mul(cs.namespace(|| "y_cube"), &y)?; + + cs.enforce( + || "y^3 = x", + |lc| lc + y_cube.get_variable(), + |lc| lc + CS::one(), + |lc| lc + x.get_variable(), + ); + + let next_pc = alloc_one(&mut cs.namespace(|| "next_pc")); + + Ok((Some(next_pc), vec![y])) + } +} + +// y is a non-deterministic hint representing the fifth root of the input at a +// step. +#[derive(Clone, Debug)] +struct FifthRootCheckingCircuit { + y: Option, +} + +impl StepCircuit for FifthRootCheckingCircuit +where F: PrimeField +{ + fn arity(&self) -> usize { 1 } + + fn circuit_index(&self) -> usize { 1 } + + fn synthesize>( + &self, + cs: &mut CS, + _pc: Option<&AllocatedNum>, + z: &[AllocatedNum], + ) -> Result<(Option>, Vec>), SynthesisError> { + let x = &z[0]; + + // we allocate a variable and set it to the provided non-deterministic hint. 
+ let y = AllocatedNum::alloc(cs.namespace(|| "y"), || { + self.y.ok_or(SynthesisError::AssignmentMissing) + })?; + + // We now check if y = x^{1/5} by checking if y^5 = x + let y_sq = y.square(cs.namespace(|| "y_sq"))?; + let y_quad = y_sq.square(cs.namespace(|| "y_quad"))?; + let y_pow_5 = y_quad.mul(cs.namespace(|| "y_fifth"), &y)?; + + cs.enforce( + || "y^5 = x", + |lc| lc + y_pow_5.get_variable(), + |lc| lc + CS::one(), + |lc| lc + x.get_variable(), + ); + + let next_pc = alloc_zero(&mut cs.namespace(|| "next_pc")); + + Ok((Some(next_pc), vec![y])) + } +} + +#[derive(Clone, Debug)] +enum RootCheckingCircuit { + Cube(CubeRootCheckingCircuit), + Fifth(FifthRootCheckingCircuit), +} + +impl RootCheckingCircuit { + fn new(num_steps: usize) -> (Vec, Vec) { + let mut powers = Vec::new(); + let rng = &mut rand::rngs::OsRng; + let mut seed = F::random(rng); + + for i in 0..num_steps + 1 { + let seed_sq = seed.clone().square(); + // Cube-root and fifth-root circuits alternate. We compute the hints backward, + // so the calculations appear to be associated with the 'wrong' + // circuit. The final circuit is discarded, and only the final seed is used (as + // z_0). 
+ powers.push(if i % 2 == num_steps % 2 { + seed *= seed_sq; + Self::Fifth(FifthRootCheckingCircuit { y: Some(seed) }) + } else { + seed *= seed_sq.clone().square(); + Self::Cube(CubeRootCheckingCircuit { y: Some(seed) }) + }) + } + + // reverse the powers to get roots + let roots = powers.into_iter().rev().collect::>(); + (vec![roots[0].get_y().unwrap()], roots[1..].to_vec()) + } + + fn get_y(&self) -> Option { + match self { + Self::Fifth(x) => x.y, + Self::Cube(x) => x.y, + } + } +} + +impl StepCircuit for RootCheckingCircuit +where F: PrimeField +{ + fn arity(&self) -> usize { 1 } + + fn circuit_index(&self) -> usize { + match self { + Self::Cube(x) => x.circuit_index(), + Self::Fifth(x) => x.circuit_index(), + } + } + + fn synthesize>( + &self, + cs: &mut CS, + pc: Option<&AllocatedNum>, + z: &[AllocatedNum], + ) -> Result<(Option>, Vec>), SynthesisError> { + match self { + Self::Cube(c) => c.synthesize(cs, pc, z), + Self::Fifth(c) => c.synthesize(cs, pc, z), + } + } +} + +impl NonUniformCircuit for RootCheckingCircuit +where E1: CurveCycleEquipped +{ + type C1 = Self; + type C2 = TrivialCircuit< as Engine>::Scalar>; + + fn num_circuits(&self) -> usize { 2 } + + fn primary_circuit(&self, circuit_index: usize) -> Self { + match circuit_index { + 0 => Self::Cube(CubeRootCheckingCircuit { y: None }), + 1 => Self::Fifth(FifthRootCheckingCircuit { y: None }), + _ => unreachable!(), + } + } + + fn secondary_circuit(&self) -> Self::C2 { TrivialCircuit::::default() } +} + +fn test_nivc_nondet_with() +where E1: CurveCycleEquipped { + let circuit_secondary = TrivialCircuit::default(); + + let num_steps = 3; + + // produce non-deterministic hint + let (z0_primary, roots) = RootCheckingCircuit::new(num_steps); + assert_eq!(num_steps, roots.len()); + let z0_secondary = vec![ as Engine>::Scalar::ZERO]; + + // produce public parameters + let pp = PublicParams::::setup(&roots[0], &*default_ck_hint(), &*default_ck_hint()); + // produce a recursive SNARK + + let circuit_primary 
= &roots[0]; + + let mut recursive_snark = RecursiveSNARK::::new( + &pp, + circuit_primary, + circuit_primary, + &circuit_secondary, + &z0_primary, + &z0_secondary, + ) + .map_err(|err| { + print_constraints_name_on_error_index(&err, &pp, circuit_primary, &circuit_secondary, 2) + }) + .unwrap(); + + for circuit_primary in roots.iter().take(num_steps) { + let res = recursive_snark.prove_step(&pp, circuit_primary, &circuit_secondary); + assert!(res + .map_err(|err| { + print_constraints_name_on_error_index(&err, &pp, circuit_primary, &circuit_secondary, 2) + }) + .is_ok()); + + // verify the recursive SNARK + recursive_snark + .verify(&pp, &z0_primary, &z0_secondary) + .map_err(|err| { + print_constraints_name_on_error_index(&err, &pp, circuit_primary, &circuit_secondary, 2) + }) + .unwrap(); + } +} + +#[test] +fn test_nivc_nondet() { test_nivc_nondet_with::(); } diff --git a/src/supernova/utils.rs b/prover/src/supernova/utils.rs similarity index 57% rename from src/supernova/utils.rs rename to prover/src/supernova/utils.rs index 4ea421c..08c09c1 100644 --- a/src/supernova/utils.rs +++ b/prover/src/supernova/utils.rs @@ -1,15 +1,15 @@ use bellpepper_core::{ - boolean::{AllocatedBit, Boolean}, - num::AllocatedNum, - ConstraintSystem, LinearCombination, SynthesisError, + boolean::{AllocatedBit, Boolean}, + num::AllocatedNum, + ConstraintSystem, LinearCombination, SynthesisError, }; use ff::PrimeField; use itertools::Itertools as _; use crate::{ - constants::NIO_NOVA_FOLD, - gadgets::{conditionally_select_alloc_relaxed_r1cs, AllocatedRelaxedR1CSInstance}, - traits::Engine, + constants::NIO_NOVA_FOLD, + gadgets::{conditionally_select_alloc_relaxed_r1cs, AllocatedRelaxedR1CSInstance}, + traits::Engine, }; /// Return the element of `a` given by the indicator bit in `selector_vec`. @@ -25,89 +25,84 @@ use crate::{ // `a`. The larger the elements, the fewer are needed before multicase becomes // cost-effective. 
pub fn get_from_vec_alloc_relaxed_r1cs::Base>>( - mut cs: CS, - a: &[AllocatedRelaxedR1CSInstance], - selector_vec: &[Boolean], + mut cs: CS, + a: &[AllocatedRelaxedR1CSInstance], + selector_vec: &[Boolean], ) -> Result, SynthesisError> { - assert_eq!(a.len(), selector_vec.len()); - - // Compare all instances in `a` to the first one - let first: AllocatedRelaxedR1CSInstance = a - .first() - .cloned() - .ok_or_else(|| SynthesisError::IncompatibleLengthVector("empty vec length".to_string()))?; - - // Since `selector_vec` is correct, only one entry is 1. - // If selector_vec[0] is 1, then all `conditionally_select` will return `first`. - // Otherwise, the correct instance will be selected. - // TODO: reformulate when iterator_try_reduce stabilizes - let selected = a - .iter() - .zip_eq(selector_vec.iter()) - .enumerate() - .skip(1) - .try_fold(first, |matched, (i, (candidate, equal_bit))| { - conditionally_select_alloc_relaxed_r1cs( - cs.namespace(|| format!("next_matched_allocated-{:?}", i)), - candidate, - &matched, - equal_bit, - ) - })?; - - Ok(selected) + assert_eq!(a.len(), selector_vec.len()); + + // Compare all instances in `a` to the first one + let first: AllocatedRelaxedR1CSInstance = a + .first() + .cloned() + .ok_or_else(|| SynthesisError::IncompatibleLengthVector("empty vec length".to_string()))?; + + // Since `selector_vec` is correct, only one entry is 1. + // If selector_vec[0] is 1, then all `conditionally_select` will return `first`. + // Otherwise, the correct instance will be selected. 
+ // TODO: reformulate when iterator_try_reduce stabilizes + let selected = a.iter().zip_eq(selector_vec.iter()).enumerate().skip(1).try_fold( + first, + |matched, (i, (candidate, equal_bit))| { + conditionally_select_alloc_relaxed_r1cs( + cs.namespace(|| format!("next_matched_allocated-{:?}", i)), + candidate, + &matched, + equal_bit, + ) + }, + )?; + + Ok(selected) } /// Compute a selector vector `s` of size `num_indices`, such that /// `s[i] == 1` if i == `target_index` and 0 otherwise. pub fn get_selector_vec_from_index>( - mut cs: CS, - target_index: &AllocatedNum, - num_indices: usize, + mut cs: CS, + target_index: &AllocatedNum, + num_indices: usize, ) -> Result, SynthesisError> { - assert_ne!(num_indices, 0); - - // Compute the selector vector non-deterministically - let selector = (0..num_indices) - .map(|idx| { - // b <- idx == target_index - Ok(Boolean::Is(AllocatedBit::alloc( - cs.namespace(|| format!("allocate s_{:?}", idx)), - target_index.get_value().map(|v| v == F::from(idx as u64)), - )?)) - }) - .collect::, SynthesisError>>()?; - - // Enforce ∑ selector[i] = 1 - { - let selected_sum = selector.iter().fold(LinearCombination::zero(), |lc, bit| { - lc + &bit.lc(CS::one(), F::ONE) - }); - cs.enforce( - || "exactly-one-selection", - |_| selected_sum, - |lc| lc + CS::one(), - |lc| lc + CS::one(), - ); - } - - // Enforce `target_index - ∑ i * selector[i] = 0`` - { - let selected_value = selector - .iter() - .enumerate() - .fold(LinearCombination::zero(), |lc, (i, bit)| { - lc + &bit.lc(CS::one(), F::from(i as u64)) - }); - cs.enforce( - || "target_index - ∑ i * selector[i] = 0", - |lc| lc, - |lc| lc, - |lc| lc + target_index.get_variable() - &selected_value, - ); - } - - Ok(selector) + assert_ne!(num_indices, 0); + + // Compute the selector vector non-deterministically + let selector = (0..num_indices) + .map(|idx| { + // b <- idx == target_index + Ok(Boolean::Is(AllocatedBit::alloc( + cs.namespace(|| format!("allocate s_{:?}", idx)), + 
target_index.get_value().map(|v| v == F::from(idx as u64)), + )?)) + }) + .collect::, SynthesisError>>()?; + + // Enforce ∑ selector[i] = 1 + { + let selected_sum = + selector.iter().fold(LinearCombination::zero(), |lc, bit| lc + &bit.lc(CS::one(), F::ONE)); + cs.enforce( + || "exactly-one-selection", + |_| selected_sum, + |lc| lc + CS::one(), + |lc| lc + CS::one(), + ); + } + + // Enforce `target_index - ∑ i * selector[i] = 0`` + { + let selected_value = selector + .iter() + .enumerate() + .fold(LinearCombination::zero(), |lc, (i, bit)| lc + &bit.lc(CS::one(), F::from(i as u64))); + cs.enforce( + || "target_index - ∑ i * selector[i] = 0", + |lc| lc, + |lc| lc, + |lc| lc + target_index.get_variable() - &selected_value, + ); + } + + Ok(selector) } // #[cfg(test)] diff --git a/prover/src/traits/commitment.rs b/prover/src/traits/commitment.rs new file mode 100644 index 0000000..97ed7dd --- /dev/null +++ b/prover/src/traits/commitment.rs @@ -0,0 +1,87 @@ +//! This module defines a collection of traits that define the behavior of a +//! commitment engine We require the commitment engine to provide a commitment +//! to vectors with a single group element +use core::{ + fmt::Debug, + ops::{Add, Mul, MulAssign}, +}; + +use serde::{Deserialize, Serialize}; + +use crate::{ + errors::NovaError, + traits::{AbsorbInROTrait, Engine, TranscriptReprTrait}, +}; + +/// A helper trait for types implementing scalar multiplication. 
+pub trait ScalarMul: Mul + MulAssign {} + +impl ScalarMul for T where T: Mul + MulAssign {} + +/// This trait defines the behavior of the commitment +pub trait CommitmentTrait: + Clone + + Copy + + Debug + + Default + + PartialEq + + Eq + + Send + + Sync + + TranscriptReprTrait + + Serialize + + for<'de> Deserialize<'de> + + AbsorbInROTrait + + Add + + ScalarMul { + /// Holds the type of the compressed commitment + type CompressedCommitment: Clone + + Debug + + PartialEq + + Eq + + Send + + Sync + + TranscriptReprTrait + + Serialize + + for<'de> Deserialize<'de>; + + /// Compresses self into a compressed commitment + fn compress(&self) -> Self::CompressedCommitment; + + /// Returns the coordinate representation of the commitment + fn to_coordinates(&self) -> (E::Base, E::Base, bool); + + /// Decompresses a compressed commitment into a commitment + fn decompress(c: &Self::CompressedCommitment) -> Result; +} + +/// A trait that helps determine the length of a structure. +/// Note this does not impose any memory representation constraints on the +/// structure. +pub trait Len { + /// Returns the length of the structure. + fn length(&self) -> usize; +} + +/// A trait that ties different pieces of the commitment generation together +pub trait CommitmentEngineTrait: Clone + Send + Sync { + /// Holds the type of the commitment key + /// The key should quantify its length in terms of group generators. 
+ type CommitmentKey: Len + + Clone + + PartialEq + + Debug + + Send + + Sync + + Serialize + + for<'de> Deserialize<'de>; + + /// Holds the type of the commitment + type Commitment: CommitmentTrait; + + /// Samples a new commitment key of a specified size + fn setup(label: &'static [u8], n: usize) -> Self::CommitmentKey; + + /// Commits to the provided vector using the provided generators + fn commit(ck: &Self::CommitmentKey, v: &[E::Scalar]) -> Self::Commitment; +} diff --git a/prover/src/traits/evaluation.rs b/prover/src/traits/evaluation.rs new file mode 100644 index 0000000..faff5df --- /dev/null +++ b/prover/src/traits/evaluation.rs @@ -0,0 +1,60 @@ +//! This module defines a collection of traits that define the behavior of a +//! polynomial evaluation engine A vector of size N is treated as a multilinear +//! polynomial in \log{N} variables, and a commitment provided by the commitment +//! engine is treated as a multilinear polynomial commitment +use std::sync::Arc; + +use serde::{Deserialize, Serialize}; + +use crate::{ + errors::NovaError, + traits::{commitment::CommitmentEngineTrait, Engine}, +}; + +/// A trait that ties different pieces of the commitment evaluation together +pub trait EvaluationEngineTrait: Clone + Send + Sync { + /// A type that holds the prover key + type ProverKey: Send + Sync + Serialize + for<'de> Deserialize<'de>; + + /// A type that holds the verifier key + type VerifierKey: Send + + Sync + // required for easy Digest computation purposes, could be relaxed to + // [`crate::digest::Digestible`] + + Serialize + + for<'de> Deserialize<'de>; + + /// A type that holds the evaluation argument + type EvaluationArgument: Clone + Send + Sync + Serialize + for<'de> Deserialize<'de>; + + /// A method to perform any additional setup needed to produce proofs of + /// evaluations + /// + /// **Note:** This method should be cheap and should not copy most of the + /// commitment key. 
Look at `CommitmentEngineTrait::setup` for generating + /// SRS data. + fn setup( + ck: Arc<<::CE as CommitmentEngineTrait>::CommitmentKey>, + ) -> (Self::ProverKey, Self::VerifierKey); + + /// A method to prove the evaluation of a multilinear polynomial + fn prove( + ck: &<::CE as CommitmentEngineTrait>::CommitmentKey, + pk: &Self::ProverKey, + transcript: &mut E::TE, + comm: &<::CE as CommitmentEngineTrait>::Commitment, + poly: &[E::Scalar], + point: &[E::Scalar], + eval: &E::Scalar, + ) -> Result; + + /// A method to verify the purported evaluation of a multilinear polynomials + fn verify( + vk: &Self::VerifierKey, + transcript: &mut E::TE, + comm: &<::CE as CommitmentEngineTrait>::Commitment, + point: &[E::Scalar], + eval: &E::Scalar, + arg: &Self::EvaluationArgument, + ) -> Result<(), NovaError>; +} diff --git a/prover/src/traits/mod.rs b/prover/src/traits/mod.rs new file mode 100644 index 0000000..e1c54c7 --- /dev/null +++ b/prover/src/traits/mod.rs @@ -0,0 +1,182 @@ +//! This module defines various traits required by the users of the library to +//! implement. 
+use core::fmt::Debug; + +use bellpepper_core::{boolean::AllocatedBit, num::AllocatedNum, ConstraintSystem, SynthesisError}; +use ff::{PrimeField, PrimeFieldBits}; +use num_bigint::BigInt; +use serde::{Deserialize, Serialize}; + +use crate::errors::NovaError; + +pub mod commitment; + +use commitment::CommitmentEngineTrait; + +/// Represents an element of a group +/// This is currently tailored for an elliptic curve group +pub trait Group: Clone + Copy + Debug + Send + Sync + Sized + Eq + PartialEq { + /// A type representing an element of the base field of the group + type Base: PrimeFieldBits + Serialize + for<'de> Deserialize<'de>; + + /// A type representing an element of the scalar field of the group + type Scalar: PrimeFieldBits + PrimeFieldExt + Send + Sync + Serialize + for<'de> Deserialize<'de>; + + /// Returns A, B, the order of the group, the size of the base field as big + /// integers + fn group_params() -> (Self::Base, Self::Base, BigInt, BigInt); +} + +/// A collection of engines that are required by the library +pub trait Engine: Clone + Copy + Debug + Send + Sync + Sized + Eq + PartialEq { + /// A type representing an element of the base field of the group + type Base: PrimeFieldBits + TranscriptReprTrait + Serialize + for<'de> Deserialize<'de>; + + /// A type representing an element of the scalar field of the group + type Scalar: PrimeFieldBits + + PrimeFieldExt + + Send + + Sync + + TranscriptReprTrait + + Serialize + + for<'de> Deserialize<'de>; + + /// A type that represents an element of the group + type GE: Group + Serialize + for<'de> Deserialize<'de>; + + /// A type that represents a circuit-friendly sponge that consumes elements + /// from the base field and squeezes out elements of the scalar field + type RO: ROTrait; + + /// An alternate implementation of `Self::RO` in the circuit model + type ROCircuit: ROCircuitTrait; + + /// A type that provides a generic Fiat-Shamir transcript to be used when + /// externalizing proofs + type TE: 
TranscriptEngineTrait; + + /// A type that defines a commitment engine over scalars in the group + type CE: CommitmentEngineTrait; +} + +/// This is a convenience trait to pair engines which fields are in a curve +/// cycle relationship +pub trait CurveCycleEquipped: Engine { + /// The secondary `Engine` of `Self` + type Secondary: Engine::Scalar, Scalar = ::Base>; +} + +/// Convenience projection to the secondary `Engine` of a `CurveCycleEquipped` +pub type Dual = ::Secondary; + +/// A helper trait to absorb different objects in RO +pub trait AbsorbInROTrait { + /// Absorbs the value in the provided RO + fn absorb_in_ro(&self, ro: &mut E::RO); +} + +/// A helper trait that defines the behavior of a hash function that we use as +/// an RO +pub trait ROTrait { + /// The circuit alter ego of this trait impl - this constrains it to use the + /// same constants + type CircuitRO: ROCircuitTrait; + + /// A type representing constants/parameters associated with the hash + /// function + type Constants: Debug + + Default + + Clone + + PartialEq + + Send + + Sync + + Serialize + + for<'de> Deserialize<'de>; + + /// Initializes the hash function + fn new(constants: Self::Constants, num_absorbs: usize) -> Self; + + /// Adds a scalar to the internal state + fn absorb(&mut self, e: Base); + + /// Returns a challenge of `num_bits` by hashing the internal state + fn squeeze(&mut self, num_bits: usize) -> Scalar; +} + +/// A helper trait that defines the behavior of a hash function that we use as +/// an RO in the circuit model +pub trait ROCircuitTrait { + /// the vanilla alter ego of this trait - this constrains it to use the same + /// constants + type NativeRO: ROTrait; + + /// A type representing constants/parameters associated with the hash + /// function on this Base field + type Constants: Debug + + Default + + Clone + + PartialEq + + Send + + Sync + + Serialize + + for<'de> Deserialize<'de>; + + /// Initializes the hash function + fn new(constants: Self::Constants, 
num_absorbs: usize) -> Self; + + /// Adds a scalar to the internal state + fn absorb(&mut self, e: &AllocatedNum); + + /// Returns a challenge of `num_bits` by hashing the internal state + fn squeeze>( + &mut self, + cs: CS, + num_bits: usize, + ) -> Result, SynthesisError>; +} + +/// An alias for constants associated with `E::RO` +pub type ROConstants = + <::RO as ROTrait<::Base, ::Scalar>>::Constants; + +/// An alias for constants associated with `E::ROCircuit` +pub type ROConstantsCircuit = + <::ROCircuit as ROCircuitTrait<::Base>>::Constants; + +/// This trait allows types to implement how they want to be added to +/// `TranscriptEngine` +pub trait TranscriptReprTrait: Send + Sync { + /// returns a byte representation of self to be added to the transcript + fn to_transcript_bytes(&self) -> Vec; +} + +/// This trait defines the behavior of a transcript engine compatible with +/// Spartan +pub trait TranscriptEngineTrait: Send + Sync { + /// initializes the transcript + fn new(label: &'static [u8]) -> Self; + + /// returns a scalar element of the group as a challenge + fn squeeze(&mut self, label: &'static [u8]) -> Result; + + /// absorbs any type that implements `TranscriptReprTrait` under a label + fn absorb>(&mut self, label: &'static [u8], o: &T); + + /// adds a domain separator + fn dom_sep(&mut self, bytes: &'static [u8]); +} + +/// Defines additional methods on `PrimeField` objects +pub trait PrimeFieldExt: PrimeField { + /// Returns a scalar representing the bytes + fn from_uniform(bytes: &[u8]) -> Self; +} + +impl> TranscriptReprTrait for &[T] { + fn to_transcript_bytes(&self) -> Vec { + self.iter().flat_map(|t| t.to_transcript_bytes()).collect::>() + } +} + +pub mod evaluation; +pub mod snark; diff --git a/prover/src/traits/snark.rs b/prover/src/traits/snark.rs new file mode 100644 index 0000000..c048348 --- /dev/null +++ b/prover/src/traits/snark.rs @@ -0,0 +1,132 @@ +//! This module defines a collection of traits that define the behavior of a +//! 
`zkSNARK` for `RelaxedR1CS` +use std::sync::Arc; + +use serde::{Deserialize, Serialize}; + +use crate::{ + errors::NovaError, + r1cs::{R1CSShape, RelaxedR1CSInstance, RelaxedR1CSWitness}, + traits::Engine, + CommitmentKey, +}; + +// NOTES: This function seems heavily reliant on dynamic allocation all to +// return 0 in the end... + +/// Public parameter creation takes a size hint. This size hint carries the +/// particular requirements of the final compressing SNARK the user expected to +/// use with these public parameters, and the below is a sensible default, which +/// is to not require any more bases then the usual (maximum of the number of +/// variables and constraints of the involved R1CS circuit). +pub fn default_ck_hint() -> Box Fn(&'a R1CSShape) -> usize> { + // The default is to not put an additional floor on the size of the commitment + // key + Box::new(|_shape: &R1CSShape| 0) +} + +// NOTES: I'm not sure having a trait here is really necessary unless you're +// wanting to have a much larger abstraction. I'd consider just gutting this and +// forming one SNARK that we use. + +/// A trait that defines the behavior of a `zkSNARK` +pub trait RelaxedR1CSSNARKTrait: + Send + Sync + Serialize + for<'de> Deserialize<'de> { + /// A type that represents the prover's key + type ProverKey: Send + Sync; + + /// A type that represents the verifier's key + type VerifierKey: Send + Sync + Serialize; + + /// This associated function (not a method) provides a hint that offers + /// a minimum sizing cue for the commitment key used by this SNARK + /// implementation. The commitment key passed in setup should then + /// be at least as large as this hint. + fn ck_floor() -> Box Fn(&'a R1CSShape) -> usize> { + // The default is to not put an additional floor on the size of the commitment + // key + default_ck_hint() + } + + /// Initialize a ProvingKey directly from a CommitmentKey and a + /// already known vk_digest. 
+ fn initialize_pk( + ck: Arc>, + vk_digest: E::Scalar, + ) -> Result; + + /// Produces the keys for the prover and the verifier + fn setup( + ck: Arc>, + S: &R1CSShape, + ) -> Result<(Self::ProverKey, Self::VerifierKey), NovaError>; + + /// Produces a new SNARK for a relaxed R1CS + fn prove( + ck: &CommitmentKey, + pk: &Self::ProverKey, + S: &R1CSShape, + U: &RelaxedR1CSInstance, + W: &RelaxedR1CSWitness, + ) -> Result; + + /// Verifies a SNARK for a relaxed R1CS + fn verify(&self, vk: &Self::VerifierKey, U: &RelaxedR1CSInstance) -> Result<(), NovaError>; +} + +/// A trait that defines the behavior of a `zkSNARK` to prove knowledge of +/// satisfying witness to batches of relaxed R1CS instances. +pub trait BatchedRelaxedR1CSSNARKTrait: + Send + Sync + Serialize + for<'de> Deserialize<'de> { + /// A type that represents the prover's key + type ProverKey: Send + Sync + Serialize + for<'de> Deserialize<'de>; + + /// A type that represents the verifier's key + type VerifierKey: Send + Sync + DigestHelperTrait + Serialize + for<'de> Deserialize<'de>; + + // NOTES: If we don't need something more general here, this is just an odd + // thing to have defined generically since it just calls the weird function + // above. + + /// This associated function (not a method) provides a hint that offers + /// a minimum sizing cue for the commitment key used by this SNARK + /// implementation. The commitment key passed in setup should then + /// be at least as large as this hint. + fn ck_floor() -> Box Fn(&'a R1CSShape) -> usize> { default_ck_hint() } + + /// Initialize a ProvingKey directly from a CommitmentKey and a + /// already known vk_digest. + fn initialize_pk( + ck: Arc>, + vk_digest: E::Scalar, + ) -> Result; + + /// Produces the keys for the prover and the verifier + /// + /// **Note:** This method should be cheap and should not copy most of the + /// commitment key. Look at `CommitmentEngineTrait::setup` for generating + /// SRS data. 
+ fn setup( + ck: Arc>, // NOTES: Why `Arc` this? + S: Vec<&R1CSShape>, /* NOTES: Why not a &[R1CSShape] here?, would get the same + * thing across as an iter i think */ + ) -> Result<(Self::ProverKey, Self::VerifierKey), NovaError>; + + /// Produces a new SNARK for a batch of relaxed R1CS + fn prove( + ck: &CommitmentKey, + pk: &Self::ProverKey, + S: Vec<&R1CSShape>, + U: &[RelaxedR1CSInstance], + W: &[RelaxedR1CSWitness], + ) -> Result; + + /// Verifies a SNARK for a batch of relaxed R1CS + fn verify(&self, vk: &Self::VerifierKey, U: &[RelaxedR1CSInstance]) -> Result<(), NovaError>; +} + +/// A helper trait that defines the behavior of a verifier key of `zkSNARK` +pub trait DigestHelperTrait { + /// Returns the digest of the verifier's key + fn digest(&self) -> E::Scalar; +} diff --git a/src/bellpepper/mod.rs b/src/bellpepper/mod.rs deleted file mode 100644 index 35e8796..0000000 --- a/src/bellpepper/mod.rs +++ /dev/null @@ -1,64 +0,0 @@ -//! Support for generating R1CS from [Bellpepper]. -//! -//! 
[Bellpepper]: https://github.com/argumentcomputer/bellpepper - -pub mod r1cs; -pub mod shape_cs; -pub mod solver; -pub mod test_shape_cs; - -#[cfg(test)] -mod tests { - use bellpepper_core::{num::AllocatedNum, ConstraintSystem}; - use ff::PrimeField; - - use crate::{ - bellpepper::{ - r1cs::{NovaShape, NovaWitness}, - shape_cs::ShapeCS, - solver::SatisfyingAssignment, - }, - provider::Bn256EngineKZG, - traits::{snark::default_ck_hint, Engine}, - }; - - fn synthesize_alloc_bit>(cs: &mut CS) { - // get two bits as input and check that they are indeed bits - let a = AllocatedNum::alloc_infallible(cs.namespace(|| "a"), || Fr::ONE); - let _ = a.inputize(cs.namespace(|| "a is input")); - cs.enforce( - || "check a is 0 or 1", - |lc| lc + CS::one() - a.get_variable(), - |lc| lc + a.get_variable(), - |lc| lc, - ); - let b = AllocatedNum::alloc_infallible(cs.namespace(|| "b"), || Fr::ONE); - let _ = b.inputize(cs.namespace(|| "b is input")); - cs.enforce( - || "check b is 0 or 1", - |lc| lc + CS::one() - b.get_variable(), - |lc| lc + b.get_variable(), - |lc| lc, - ); - } - - fn test_alloc_bit_with() { - // First create the shape - let mut cs: ShapeCS = ShapeCS::new(); - synthesize_alloc_bit(&mut cs); - let (shape, ck) = cs.r1cs_shape_and_key(&*default_ck_hint()); - - // Now get the assignment - let mut cs = SatisfyingAssignment::::new(); - synthesize_alloc_bit(&mut cs); - let (inst, witness) = cs.r1cs_instance_and_witness(&shape, &ck).unwrap(); - - // Make sure that this is satisfiable - shape.is_sat(&ck, &inst, &witness).unwrap(); - } - - #[test] - fn test_alloc_bit() { - test_alloc_bit_with::(); - } -} diff --git a/src/bellpepper/r1cs.rs b/src/bellpepper/r1cs.rs deleted file mode 100644 index 431f5a3..0000000 --- a/src/bellpepper/r1cs.rs +++ /dev/null @@ -1,162 +0,0 @@ -//! Support for generating R1CS using bellpepper. 
- -#![allow(non_snake_case)] - -use bellpepper_core::{Index, LinearCombination}; -use ff::PrimeField; - -use super::{shape_cs::ShapeCS, solver::SatisfyingAssignment, test_shape_cs::TestShapeCS}; -use crate::{ - errors::NovaError, - r1cs::{commitment_key, CommitmentKeyHint, R1CSInstance, R1CSShape, R1CSWitness, SparseMatrix}, - traits::Engine, - CommitmentKey, -}; - -/// `NovaWitness` provide a method for acquiring an `R1CSInstance` and -/// `R1CSWitness` from implementers. -pub trait NovaWitness { - /// Return an instance and witness, given a shape and ck. - fn r1cs_instance_and_witness( - self, - shape: &R1CSShape, - ck: &CommitmentKey, - ) -> Result<(R1CSInstance, R1CSWitness), NovaError>; -} - -/// `NovaShape` provides methods for acquiring `R1CSShape` and `CommitmentKey` -/// from implementers. -pub trait NovaShape { - /// Return an appropriate `R1CSShape` and `CommitmentKey` structs. - /// A `CommitmentKeyHint` should be provided to help guide the construction - /// of the `CommitmentKey`. This parameter is documented in - /// `r1cs::R1CS::commitment_key`. - fn r1cs_shape_and_key( - &self, - ck_hint: &CommitmentKeyHint, - ) -> (R1CSShape, CommitmentKey) { - let S = self.r1cs_shape(); - let ck = commitment_key(&S, ck_hint); - - (S, ck) - } - /// Return an appropriate `R1CSShape`. - fn r1cs_shape(&self) -> R1CSShape; -} - -impl NovaWitness for SatisfyingAssignment { - fn r1cs_instance_and_witness( - self, - shape: &R1CSShape, - ck: &CommitmentKey, - ) -> Result<(R1CSInstance, R1CSWitness), NovaError> { - let (input_assignment, aux_assignment) = self.to_assignments(); - let W = R1CSWitness::::new(shape, aux_assignment)?; - let X = input_assignment[1..].to_owned(); - - let comm_W = W.commit(ck); - - let instance = R1CSInstance::::new(shape, comm_W, X)?; - - Ok((instance, W)) - } -} - -macro_rules! 
impl_nova_shape { - ( $name:ident) => { - impl NovaShape for $name - where - E::Scalar: PrimeField, - { - fn r1cs_shape(&self) -> R1CSShape { - let mut A = SparseMatrix::::empty(); - let mut B = SparseMatrix::::empty(); - let mut C: SparseMatrix<::Scalar> = SparseMatrix::::empty(); - - let mut num_cons_added = 0; - let mut X = (&mut A, &mut B, &mut C, &mut num_cons_added); - let num_inputs = self.num_inputs(); - let num_constraints = self.num_constraints(); - let num_vars = self.num_aux(); - - for constraint in self.constraints.iter() { - add_constraint( - &mut X, - num_vars, - &constraint.0, - &constraint.1, - &constraint.2, - ); - } - assert_eq!(num_cons_added, num_constraints); - - A.cols = num_vars + num_inputs; - B.cols = num_vars + num_inputs; - C.cols = num_vars + num_inputs; - - // Don't count One as an input for shape's purposes. - let res = R1CSShape::new(num_constraints, num_vars, num_inputs - 1, A, B, C); - res.unwrap() - } - } - }; -} - -impl_nova_shape!(ShapeCS); -impl_nova_shape!(TestShapeCS); - -fn add_constraint( - X: &mut ( - &mut SparseMatrix, - &mut SparseMatrix, - &mut SparseMatrix, - &mut usize, - ), - num_vars: usize, - a_lc: &LinearCombination, - b_lc: &LinearCombination, - c_lc: &LinearCombination, -) { - let (A, B, C, nn) = X; - let n = **nn; - assert_eq!(n, A.num_rows(), "A: invalid shape"); - assert_eq!(n, B.num_rows(), "B: invalid shape"); - assert_eq!(n, C.num_rows(), "C: invalid shape"); - - let add_constraint_component = |index: Index, coeff: &S, M: &mut SparseMatrix| { - // we add constraints to the matrix only if the associated coefficient is - // non-zero - if *coeff != S::ZERO { - match index { - Index::Input(idx) => { - // Inputs come last, with input 0, representing 'one', - // at position num_vars within the witness vector. 
- let idx = idx + num_vars; - M.data.push(*coeff); - M.indices.push(idx); - } - Index::Aux(idx) => { - M.data.push(*coeff); - M.indices.push(idx); - } - } - } - }; - - for (index, coeff) in a_lc.iter() { - add_constraint_component(index.0, coeff, A); - } - A.indptr.push(A.indices.len()); - - for (index, coeff) in b_lc.iter() { - add_constraint_component(index.0, coeff, B) - } - B.indptr.push(B.indices.len()); - - for (index, coeff) in c_lc.iter() { - add_constraint_component(index.0, coeff, C) - } - C.indptr.push(C.indices.len()); - - **nn += 1; -} diff --git a/src/bellpepper/shape_cs.rs b/src/bellpepper/shape_cs.rs deleted file mode 100644 index a7aa662..0000000 --- a/src/bellpepper/shape_cs.rs +++ /dev/null @@ -1,107 +0,0 @@ -//! Support for generating R1CS shape using bellpepper. - -use bellpepper_core::{ConstraintSystem, Index, LinearCombination, SynthesisError, Variable}; -use ff::PrimeField; - -use crate::traits::Engine; - -/// `ShapeCS` is a `ConstraintSystem` for creating `R1CSShape`s for a circuit. -pub struct ShapeCS -where - E::Scalar: PrimeField, -{ - /// All constraints added to the `ShapeCS`. - pub constraints: Vec<( - LinearCombination, - LinearCombination, - LinearCombination, - )>, - inputs: usize, - aux: usize, -} - -impl ShapeCS { - /// Create a new, default `ShapeCS`, - pub fn new() -> Self { - Self::default() - } - - /// Returns the number of constraints defined for this `ShapeCS`. - pub fn num_constraints(&self) -> usize { - self.constraints.len() - } - - /// Returns the number of inputs defined for this `ShapeCS`. - pub fn num_inputs(&self) -> usize { - self.inputs - } - - /// Returns the number of aux inputs defined for this `ShapeCS`. 
- pub fn num_aux(&self) -> usize { - self.aux - } -} - -impl Default for ShapeCS { - fn default() -> Self { - Self { - constraints: vec![], - inputs: 1, - aux: 0, - } - } -} - -impl ConstraintSystem for ShapeCS { - type Root = Self; - - fn alloc(&mut self, _annotation: A, _f: F) -> Result - where - F: FnOnce() -> Result, - A: FnOnce() -> AR, - AR: Into, - { - self.aux += 1; - - Ok(Variable::new_unchecked(Index::Aux(self.aux - 1))) - } - - fn alloc_input(&mut self, _annotation: A, _f: F) -> Result - where - F: FnOnce() -> Result, - A: FnOnce() -> AR, - AR: Into, - { - self.inputs += 1; - - Ok(Variable::new_unchecked(Index::Input(self.inputs - 1))) - } - - fn enforce(&mut self, _annotation: A, a: LA, b: LB, c: LC) - where - A: FnOnce() -> AR, - AR: Into, - LA: FnOnce(LinearCombination) -> LinearCombination, - LB: FnOnce(LinearCombination) -> LinearCombination, - LC: FnOnce(LinearCombination) -> LinearCombination, - { - let a = a(LinearCombination::zero()); - let b = b(LinearCombination::zero()); - let c = c(LinearCombination::zero()); - - self.constraints.push((a, b, c)); - } - - fn push_namespace(&mut self, _name_fn: N) - where - NR: Into, - N: FnOnce() -> NR, - { - } - - fn pop_namespace(&mut self) {} - - fn get_root(&mut self) -> &mut Self::Root { - self - } -} diff --git a/src/bellpepper/test_shape_cs.rs b/src/bellpepper/test_shape_cs.rs deleted file mode 100644 index 923e432..0000000 --- a/src/bellpepper/test_shape_cs.rs +++ /dev/null @@ -1,320 +0,0 @@ -//! Support for generating R1CS shape using bellpepper. -//! `TestShapeCS` implements a superset of `ShapeCS`, adding non-trivial -//! namespace support for use in testing. 
- -use core::fmt::Write; -use std::{ - cmp::Ordering, - collections::{BTreeMap, HashMap}, -}; - -use bellpepper_core::{ConstraintSystem, Index, LinearCombination, SynthesisError, Variable}; -use ff::{Field, PrimeField}; - -use crate::traits::Engine; - -#[derive(Clone, Copy)] -struct OrderedVariable(Variable); - -#[allow(dead_code)] -#[derive(Debug)] -enum NamedObject { - Constraint(usize), - Var(Variable), - Namespace, -} - -impl Eq for OrderedVariable {} -impl PartialEq for OrderedVariable { - fn eq(&self, other: &Self) -> bool { - match (self.0.get_unchecked(), other.0.get_unchecked()) { - (Index::Input(ref a), Index::Input(ref b)) | (Index::Aux(ref a), Index::Aux(ref b)) => { - a == b - } - _ => false, - } - } -} -impl PartialOrd for OrderedVariable { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} -impl Ord for OrderedVariable { - fn cmp(&self, other: &Self) -> Ordering { - match (self.0.get_unchecked(), other.0.get_unchecked()) { - (Index::Input(ref a), Index::Input(ref b)) | (Index::Aux(ref a), Index::Aux(ref b)) => { - a.cmp(b) - } - (Index::Input(_), Index::Aux(_)) => Ordering::Less, - (Index::Aux(_), Index::Input(_)) => Ordering::Greater, - } - } -} - -/// `TestShapeCS` is a `ConstraintSystem` for creating `R1CSShape`s for a -/// circuit. -pub struct TestShapeCS { - named_objects: HashMap, - current_namespace: Vec, - /// All constraints added to the `TestShapeCS`. 
- pub constraints: Vec<( - LinearCombination, - LinearCombination, - LinearCombination, - String, - )>, - inputs: Vec, - aux: Vec, -} - -fn proc_lc( - terms: &LinearCombination, -) -> BTreeMap { - let mut map = BTreeMap::new(); - for (var, &coeff) in terms.iter() { - map.entry(OrderedVariable(var)) - .or_insert_with(|| Scalar::ZERO) - .add_assign(&coeff); - } - - // Remove terms that have a zero coefficient to normalize - let mut to_remove = vec![]; - for (var, coeff) in map.iter() { - if coeff.is_zero().into() { - to_remove.push(*var) - } - } - - for var in to_remove { - map.remove(&var); - } - - map -} - -impl TestShapeCS -where - E::Scalar: PrimeField, -{ - #[allow(unused)] - /// Create a new, default `TestShapeCS`, - pub fn new() -> Self { - Self::default() - } - - /// Returns the number of constraints defined for this `TestShapeCS`. - pub fn num_constraints(&self) -> usize { - self.constraints.len() - } - - /// Returns the number of inputs defined for this `TestShapeCS`. - pub fn num_inputs(&self) -> usize { - self.inputs.len() - } - - /// Returns the number of aux inputs defined for this `TestShapeCS`. - pub fn num_aux(&self) -> usize { - self.aux.len() - } - - /// Print all public inputs, aux inputs, and constraint names. - #[allow(dead_code)] - pub fn pretty_print_list(&self) -> Vec { - let mut result = Vec::new(); - - for input in &self.inputs { - result.push(format!("INPUT {input}")); - } - for aux in &self.aux { - result.push(format!("AUX {aux}")); - } - - for (_a, _b, _c, name) in &self.constraints { - result.push(name.to_string()); - } - - result - } - - /// Print all iputs and a detailed representation of each constraint. 
- #[allow(dead_code)] - pub fn pretty_print(&self) -> String { - let mut s = String::new(); - - for input in &self.inputs { - writeln!(s, "INPUT {}", &input).unwrap() - } - - let negone = -::ONE; - - let powers_of_two = (0..E::Scalar::NUM_BITS) - .map(|i| E::Scalar::from(2u64).pow_vartime([u64::from(i)])) - .collect::>(); - - let pp = |s: &mut String, lc: &LinearCombination| { - s.push('('); - let mut is_first = true; - for (var, coeff) in proc_lc::(lc) { - if coeff == negone { - s.push_str(" - ") - } else if !is_first { - s.push_str(" + ") - } - is_first = false; - - if coeff != ::ONE && coeff != negone { - for (i, x) in powers_of_two.iter().enumerate() { - if x == &coeff { - write!(s, "2^{i} . ").unwrap(); - break; - } - } - - write!(s, "{coeff:?} . ").unwrap() - } - - match var.0.get_unchecked() { - Index::Input(i) => { - write!(s, "`I{}`", &self.inputs[i]).unwrap(); - } - Index::Aux(i) => { - write!(s, "`A{}`", &self.aux[i]).unwrap(); - } - } - } - if is_first { - // Nothing was visited, print 0. - s.push('0'); - } - s.push(')'); - }; - - for (a, b, c, name) in &self.constraints { - s.push('\n'); - - write!(s, "{name}: ").unwrap(); - pp(&mut s, a); - write!(s, " * ").unwrap(); - pp(&mut s, b); - s.push_str(" = "); - pp(&mut s, c); - } - - s.push('\n'); - - s - } - - /// Associate `NamedObject` with `path`. - /// `path` must not already have an associated object. 
- fn set_named_obj(&mut self, path: String, to: NamedObject) { - assert!( - !self.named_objects.contains_key(&path), - "tried to create object at existing path: {path}" - ); - - self.named_objects.insert(path, to); - } -} - -impl Default for TestShapeCS { - fn default() -> Self { - let mut map = HashMap::new(); - map.insert("ONE".into(), NamedObject::Var(Self::one())); - Self { - named_objects: map, - current_namespace: vec![], - constraints: vec![], - inputs: vec![String::from("ONE")], - aux: vec![], - } - } -} - -impl ConstraintSystem for TestShapeCS -where - E::Scalar: PrimeField, -{ - type Root = Self; - - fn alloc(&mut self, annotation: A, _f: F) -> Result - where - F: FnOnce() -> Result, - A: FnOnce() -> AR, - AR: Into, - { - let path = compute_path(&self.current_namespace, &annotation().into()); - self.aux.push(path); - - Ok(Variable::new_unchecked(Index::Aux(self.aux.len() - 1))) - } - - fn alloc_input(&mut self, annotation: A, _f: F) -> Result - where - F: FnOnce() -> Result, - A: FnOnce() -> AR, - AR: Into, - { - let path = compute_path(&self.current_namespace, &annotation().into()); - self.inputs.push(path); - - Ok(Variable::new_unchecked(Index::Input(self.inputs.len() - 1))) - } - - fn enforce(&mut self, annotation: A, a: LA, b: LB, c: LC) - where - A: FnOnce() -> AR, - AR: Into, - LA: FnOnce(LinearCombination) -> LinearCombination, - LB: FnOnce(LinearCombination) -> LinearCombination, - LC: FnOnce(LinearCombination) -> LinearCombination, - { - let path = compute_path(&self.current_namespace, &annotation().into()); - let index = self.constraints.len(); - self.set_named_obj(path.clone(), NamedObject::Constraint(index)); - - let a = a(LinearCombination::zero()); - let b = b(LinearCombination::zero()); - let c = c(LinearCombination::zero()); - - self.constraints.push((a, b, c, path)); - } - - fn push_namespace(&mut self, name_fn: N) - where - NR: Into, - N: FnOnce() -> NR, - { - let name = name_fn().into(); - let path = 
compute_path(&self.current_namespace, &name); - self.set_named_obj(path, NamedObject::Namespace); - self.current_namespace.push(name); - } - - fn pop_namespace(&mut self) { - assert!(self.current_namespace.pop().is_some()); - } - - fn get_root(&mut self) -> &mut Self::Root { - self - } -} - -fn compute_path(ns: &[String], this: &str) -> String { - assert!(!this.contains('/'), "'/' is not allowed in names"); - - let mut name = String::new(); - - let mut needs_separation = false; - for ns in ns.iter().chain(Some(this.to_string()).iter()) { - if needs_separation { - name += "/"; - } - - name += ns; - needs_separation = true; - } - - name -} diff --git a/src/circuit.rs b/src/circuit.rs deleted file mode 100644 index 5cc5a5f..0000000 --- a/src/circuit.rs +++ /dev/null @@ -1,528 +0,0 @@ -//! There are two augmented circuits: the primary and the secondary. -//! Each of them is over a curve in a 2-cycle of elliptic curves. -//! We have two running instances. Each circuit takes as input 2 hashes: one for -//! each of the running instances. Each of these hashes is H(params = H(shape, -//! ck), i, z0, zi, U). Each circuit folds the last invocation of the other into -//! 
the running instance - -use bellpepper::gadgets::{boolean_utils::conditionally_select_slice, Assignment}; -use bellpepper_core::{ - boolean::{AllocatedBit, Boolean}, - num::AllocatedNum, - ConstraintSystem, SynthesisError, -}; -use ff::Field; -use serde::{Deserialize, Serialize}; - -use crate::{ - constants::{NIO_NOVA_FOLD, NUM_FE_WITHOUT_IO_FOR_CRHF, NUM_HASH_BITS}, - gadgets::{ - alloc_num_equals, alloc_scalar_as_base, alloc_zero, le_bits_to_num, AllocatedPoint, - AllocatedR1CSInstance, AllocatedRelaxedR1CSInstance, - }, - r1cs::{R1CSInstance, RelaxedR1CSInstance}, - supernova::StepCircuit, - traits::{commitment::CommitmentTrait, Engine, ROCircuitTrait, ROConstantsCircuit}, - Commitment, -}; - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct NovaAugmentedCircuitParams { - limb_width: usize, - n_limbs: usize, - is_primary_circuit: bool, // A boolean indicating if this is the primary circuit -} - -impl NovaAugmentedCircuitParams { - pub const fn new(limb_width: usize, n_limbs: usize, is_primary_circuit: bool) -> Self { - Self { - limb_width, - n_limbs, - is_primary_circuit, - } - } -} - -// NOTES: All these options here seem to point towards using a typestate pattern -// or something. 
- -#[derive(Debug, Serialize)] -#[serde(bound = "")] -pub struct NovaAugmentedCircuitInputs { - params: E::Scalar, - i: E::Base, - z0: Vec, - zi: Option>, - U: Option>, - u: Option>, - T: Option>, -} - -impl NovaAugmentedCircuitInputs { - /// Create new inputs/witness for the verification circuit - pub fn new( - params: E::Scalar, - i: E::Base, - z0: Vec, - zi: Option>, - U: Option>, - u: Option>, - T: Option>, - ) -> Self { - Self { - params, - i, - z0, - zi, - U, - u, - T, - } - } -} - -/// The augmented circuit F' in Nova that includes a step circuit F -/// and the circuit for the verifier in Nova's non-interactive folding scheme -pub struct NovaAugmentedCircuit<'a, E: Engine, SC: StepCircuit> { - params: &'a NovaAugmentedCircuitParams, - ro_consts: ROConstantsCircuit, - inputs: Option>, - step_circuit: &'a SC, // The function that is applied for each step -} - -impl<'a, E: Engine, SC: StepCircuit> NovaAugmentedCircuit<'a, E, SC> { - /// Create a new verification circuit for the input relaxed r1cs instances - pub const fn new( - params: &'a NovaAugmentedCircuitParams, - inputs: Option>, - step_circuit: &'a SC, - ro_consts: ROConstantsCircuit, - ) -> Self { - Self { - params, - inputs, - step_circuit, - ro_consts, - } - } - - /// Allocate all witnesses and return - fn alloc_witness::Base>>( - &self, - mut cs: CS, - arity: usize, - ) -> Result< - ( - AllocatedNum, - AllocatedNum, - Vec>, - Vec>, - AllocatedRelaxedR1CSInstance, - AllocatedR1CSInstance, - AllocatedPoint, - ), - SynthesisError, - > { - // Allocate the params - let params = alloc_scalar_as_base::( - cs.namespace(|| "params"), - self.inputs.as_ref().map(|inputs| inputs.params), - )?; - - // Allocate i - let i = AllocatedNum::alloc(cs.namespace(|| "i"), || Ok(self.inputs.get()?.i))?; - - // Allocate z0 - let z_0 = (0..arity) - .map(|i| { - AllocatedNum::alloc(cs.namespace(|| format!("z0_{i}")), || { - Ok(self.inputs.get()?.z0[i]) - }) - }) - .collect::>, _>>()?; - - // Allocate zi. 
If inputs.zi is not provided (base case) allocate default value - // 0 - let zero = vec![E::Base::ZERO; arity]; - let z_i = (0..arity) - .map(|i| { - AllocatedNum::alloc(cs.namespace(|| format!("zi_{i}")), || { - Ok(self.inputs.get()?.zi.as_ref().unwrap_or(&zero)[i]) - }) - }) - .collect::>, _>>()?; - - // Allocate the running instance - let U: AllocatedRelaxedR1CSInstance = - AllocatedRelaxedR1CSInstance::alloc( - cs.namespace(|| "Allocate U"), - self.inputs.as_ref().and_then(|inputs| inputs.U.as_ref()), - self.params.limb_width, - self.params.n_limbs, - )?; - - // Allocate the instance to be folded in - let u = AllocatedR1CSInstance::alloc( - cs.namespace(|| "allocate instance u to fold"), - self.inputs.as_ref().and_then(|inputs| inputs.u.as_ref()), - )?; - - // Allocate T - let T = AllocatedPoint::alloc( - cs.namespace(|| "allocate T"), - self.inputs - .as_ref() - .and_then(|inputs| inputs.T.map(|T| T.to_coordinates())), - )?; - T.check_on_curve(cs.namespace(|| "check T on curve"))?; - - Ok((params, i, z_0, z_i, U, u, T)) - } - - /// Synthesizes base case and returns the new relaxed `R1CSInstance` - fn synthesize_base_case::Base>>( - &self, - mut cs: CS, - u: AllocatedR1CSInstance, - ) -> Result, SynthesisError> { - let U_default: AllocatedRelaxedR1CSInstance = - if self.params.is_primary_circuit { - // The primary circuit just returns the default R1CS instance - AllocatedRelaxedR1CSInstance::default( - cs.namespace(|| "Allocate U_default"), - self.params.limb_width, - self.params.n_limbs, - )? - } else { - // The secondary circuit returns the incoming R1CS instance - AllocatedRelaxedR1CSInstance::from_r1cs_instance( - cs.namespace(|| "Allocate U_default"), - u, - self.params.limb_width, - self.params.n_limbs, - )? 
- }; - Ok(U_default) - } - - /// Synthesizes non base case and returns the new relaxed `R1CSInstance` - /// And a boolean indicating if all checks pass - fn synthesize_non_base_case::Base>>( - &self, - mut cs: CS, - params: &AllocatedNum, - i: &AllocatedNum, - z_0: &[AllocatedNum], - z_i: &[AllocatedNum], - U: &AllocatedRelaxedR1CSInstance, - u: &AllocatedR1CSInstance, - T: &AllocatedPoint, - arity: usize, - ) -> Result<(AllocatedRelaxedR1CSInstance, AllocatedBit), SynthesisError> - { - // Check that u.x[0] = Hash(params, U, i, z0, zi) - let mut ro = E::ROCircuit::new( - self.ro_consts.clone(), - NUM_FE_WITHOUT_IO_FOR_CRHF + 2 * arity, - ); - ro.absorb(params); - ro.absorb(i); - for e in z_0 { - ro.absorb(e); - } - for e in z_i { - ro.absorb(e); - } - U.absorb_in_ro(cs.namespace(|| "absorb U"), &mut ro)?; - - let hash_bits = ro.squeeze(cs.namespace(|| "Input hash"), NUM_HASH_BITS)?; - let hash = le_bits_to_num(cs.namespace(|| "bits to hash"), &hash_bits)?; - let check_pass = alloc_num_equals( - cs.namespace(|| "check consistency of u.X[0] with H(params, U, i, z0, zi)"), - &u.X[0], - &hash, - )?; - - // Run NIFS Verifier - let U_fold = U.fold_with_r1cs( - cs.namespace(|| "compute fold of U and u"), - params, - u, - T, - self.ro_consts.clone(), - self.params.limb_width, - self.params.n_limbs, - )?; - - Ok((U_fold, check_pass)) - } -} - -impl<'a, E: Engine, SC: StepCircuit> NovaAugmentedCircuit<'a, E, SC> { - /// synthesize circuit giving constraint system - pub fn synthesize::Base>>( - self, - cs: &mut CS, - ) -> Result>, SynthesisError> { - let arity = self.step_circuit.arity(); - - // Allocate all witnesses - let (params, i, z_0, z_i, U, u, T) = - self.alloc_witness(cs.namespace(|| "allocate the circuit witness"), arity)?; - - // Compute variable indicating if this is the base case - let zero = alloc_zero(cs.namespace(|| "zero")); - let is_base_case = - alloc_num_equals(cs.namespace(|| "Check if base case"), &i.clone(), &zero)?; - - // Synthesize the circuit for 
the base case and get the new running instance - let Unew_base = self.synthesize_base_case(cs.namespace(|| "base case"), u.clone())?; - - // Synthesize the circuit for the non-base case and get the new running - // instance along with a boolean indicating if all checks have passed - let (Unew_non_base, check_non_base_pass) = self.synthesize_non_base_case( - cs.namespace(|| "synthesize non base case"), - ¶ms, - &i, - &z_0, - &z_i, - &U, - &u, - &T, - arity, - )?; - - // Either check_non_base_pass=true or we are in the base case - let should_be_false = AllocatedBit::nor( - cs.namespace(|| "check_non_base_pass nor base_case"), - &check_non_base_pass, - &is_base_case, - )?; - cs.enforce( - || "check_non_base_pass nor base_case = false", - |lc| lc + should_be_false.get_variable(), - |lc| lc + CS::one(), - |lc| lc, - ); - - // Compute the U_new - let Unew = Unew_base.conditionally_select( - cs.namespace(|| "compute U_new"), - &Unew_non_base, - &Boolean::from(is_base_case.clone()), - )?; - - // Compute i + 1 - let i_new = AllocatedNum::alloc(cs.namespace(|| "i + 1"), || { - Ok(*i.get_value().get()? + E::Base::ONE) - })?; - cs.enforce( - || "check i + 1", - |lc| lc, - |lc| lc, - |lc| lc + i_new.get_variable() - CS::one() - i.get_variable(), - ); - - // Compute z_{i+1} - let z_input = conditionally_select_slice( - cs.namespace(|| "select input to F"), - &z_0, - &z_i, - &Boolean::from(is_base_case), - )?; - - // TODO: Note, I changed this here because I removed the other `StepCircuit` - // trait. 
- let (_pc, z_next) = - self.step_circuit - .synthesize(&mut cs.namespace(|| "F"), None, &z_input)?; - - if z_next.len() != arity { - return Err(SynthesisError::IncompatibleLengthVector( - "z_next".to_string(), - )); - } - - // Compute the new hash H(params, Unew, i+1, z0, z_{i+1}) - let mut ro = E::ROCircuit::new(self.ro_consts, NUM_FE_WITHOUT_IO_FOR_CRHF + 2 * arity); - ro.absorb(¶ms); - ro.absorb(&i_new); - for e in &z_0 { - ro.absorb(e); - } - for e in &z_next { - ro.absorb(e); - } - Unew.absorb_in_ro(cs.namespace(|| "absorb U_new"), &mut ro)?; - let hash_bits = ro.squeeze(cs.namespace(|| "output hash bits"), NUM_HASH_BITS)?; - let hash = le_bits_to_num(cs.namespace(|| "convert hash to num"), &hash_bits)?; - - // Outputs the computed hash and u.X[1] that corresponds to the hash of the - // other circuit - u.X[1].inputize(cs.namespace(|| "Output unmodified hash of the other circuit"))?; - hash.inputize(cs.namespace(|| "output new hash of this circuit"))?; - - Ok(z_next) - } -} - -// #[cfg(test)] -// mod tests { -// use expect_test::{expect, Expect}; - -// use super::*; -// use crate::{ -// bellpepper::{ -// r1cs::{NovaShape, NovaWitness}, -// solver::SatisfyingAssignment, -// test_shape_cs::TestShapeCS, -// }, -// constants::{BN_LIMB_WIDTH, BN_N_LIMBS}, -// gadgets::scalar_as_base, -// provider::{ -// poseidon::PoseidonConstantsCircuit, Bn256EngineKZG, -// GrumpkinEngine, PallasEngine, Secp256k1Engine, Secq256k1Engine, -// VestaEngine, }, -// traits::{snark::default_ck_hint, CurveCycleEquipped, Dual}, -// }; - -// // In the following we use 1 to refer to the primary, and 2 to refer to -// the // secondary circuit -// fn test_recursive_circuit_with( -// primary_params: &NovaAugmentedCircuitParams, -// secondary_params: &NovaAugmentedCircuitParams, -// ro_consts1: ROConstantsCircuit>, -// ro_consts2: ROConstantsCircuit, -// expected_num_constraints_primary: &Expect, -// expected_num_constraints_secondary: &Expect, -// ) where -// E1: CurveCycleEquipped, -// { -// 
let tc1 = TrivialCircuit::default(); -// // Initialize the shape and ck for the primary -// let circuit1: NovaAugmentedCircuit< -// '_, -// Dual, -// TrivialCircuit< as Engine>::Base>, -// > = NovaAugmentedCircuit::new(primary_params, None, &tc1, -// > ro_consts1.clone()); -// let mut cs: TestShapeCS = TestShapeCS::new(); -// let _ = circuit1.synthesize(&mut cs); -// let (shape1, ck1) = cs.r1cs_shape_and_key(&*default_ck_hint()); - -// expected_num_constraints_primary.assert_eq(&cs.num_constraints(). -// to_string()); - -// let tc2 = TrivialCircuit::default(); -// // Initialize the shape and ck for the secondary -// let circuit2: NovaAugmentedCircuit<'_, E1, TrivialCircuit<::Base>> = NovaAugmentedCircuit::new(secondary_params, -// None, &tc2, ro_consts2.clone()); let mut cs: TestShapeCS> = -// TestShapeCS::new(); let _ = circuit2.synthesize(&mut cs); -// let (shape2, ck2) = cs.r1cs_shape_and_key(&*default_ck_hint()); - -// expected_num_constraints_secondary.assert_eq(&cs.num_constraints(). 
-// to_string()); - -// // Execute the base case for the primary -// let zero1 = < as Engine>::Base as Field>::ZERO; -// let mut cs1 = SatisfyingAssignment::::new(); -// let inputs1: NovaAugmentedCircuitInputs> = -// NovaAugmentedCircuitInputs::new( scalar_as_base::(zero1), // -// pass zero for testing zero1, -// vec![zero1], -// None, -// None, -// None, -// None, -// ); -// let circuit1: NovaAugmentedCircuit< -// '_, -// Dual, -// TrivialCircuit< as Engine>::Base>, -// > = NovaAugmentedCircuit::new(primary_params, Some(inputs1), &tc1, -// > ro_consts1); -// let _ = circuit1.synthesize(&mut cs1); -// let (inst1, witness1) = cs1.r1cs_instance_and_witness(&shape1, -// &ck1).unwrap(); // Make sure that this is satisfiable -// shape1.is_sat(&ck1, &inst1, &witness1).unwrap(); - -// // Execute the base case for the secondary -// let zero2 = <::Base as Field>::ZERO; -// let mut cs2 = SatisfyingAssignment::>::new(); -// let inputs2: NovaAugmentedCircuitInputs = -// NovaAugmentedCircuitInputs::new( -// scalar_as_base::>(zero2), // pass zero for testing -// zero2, vec![zero2], -// None, -// None, -// Some(inst1), -// None, -// ); -// let circuit2: NovaAugmentedCircuit<'_, E1, TrivialCircuit<::Base>> = NovaAugmentedCircuit::new(secondary_params, -// Some(inputs2), &tc2, ro_consts2); let _ = circuit2.synthesize(&mut -// cs2); let (inst2, witness2) = cs2.r1cs_instance_and_witness(&shape2, -// &ck2).unwrap(); // Make sure that it is satisfiable -// shape2.is_sat(&ck2, &inst2, &witness2).unwrap(); -// } - -// #[test] -// fn test_recursive_circuit_pasta() { -// // this test checks against values that must be replicated in -// benchmarks if // changed here -// let params1 = NovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, -// BN_N_LIMBS, true); let params2 = -// NovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, false); -// let ro_consts1: ROConstantsCircuit = -// PoseidonConstantsCircuit::default(); let ro_consts2: -// ROConstantsCircuit = 
PoseidonConstantsCircuit::default(); - -// test_recursive_circuit_with::( -// ¶ms1, -// ¶ms2, -// ro_consts1, -// ro_consts2, -// &expect!["9817"], -// &expect!["10349"], -// ); -// } - -// #[test] -// fn test_recursive_circuit_bn256_grumpkin() { -// let params1 = NovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, -// BN_N_LIMBS, true); let params2 = -// NovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, false); -// let ro_consts1: ROConstantsCircuit = -// PoseidonConstantsCircuit::default(); let ro_consts2: -// ROConstantsCircuit = PoseidonConstantsCircuit::default(); - -// test_recursive_circuit_with::( -// ¶ms1, -// ¶ms2, -// ro_consts1, -// ro_consts2, -// &expect!["9985"], -// &expect!["10538"], -// ); -// } - -// #[test] -// fn test_recursive_circuit_secpq() { -// let params1 = NovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, -// BN_N_LIMBS, true); let params2 = -// NovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, false); -// let ro_consts1: ROConstantsCircuit = -// PoseidonConstantsCircuit::default(); let ro_consts2: -// ROConstantsCircuit = PoseidonConstantsCircuit::default(); - -// test_recursive_circuit_with::( -// ¶ms1, -// ¶ms2, -// ro_consts1, -// ro_consts2, -// &expect!["10264"], -// &expect!["10961"], -// ); -// } -// } diff --git a/src/cyclefold/circuit.rs b/src/cyclefold/circuit.rs deleted file mode 100644 index 63ae346..0000000 --- a/src/cyclefold/circuit.rs +++ /dev/null @@ -1,285 +0,0 @@ -//! 
This module defines Cyclefold circuit - -use bellpepper::gadgets::boolean_utils::conditionally_select; -use bellpepper_core::{ - boolean::{AllocatedBit, Boolean}, - ConstraintSystem, SynthesisError, -}; -use ff::Field; -use neptune::{circuit2::poseidon_hash_allocated, poseidon::PoseidonConstants}; - -use crate::{ - constants::NUM_CHALLENGE_BITS, - gadgets::{alloc_zero, le_bits_to_num, AllocatedPoint}, - traits::{commitment::CommitmentTrait, Engine}, - Commitment, -}; - -/// A structure containing the CycleFold circuit inputs and implementing the -/// synthesize function -pub struct CycleFoldCircuit { - commit_1: Option>, - commit_2: Option>, - scalar: Option<[bool; NUM_CHALLENGE_BITS]>, - poseidon_constants: PoseidonConstants, -} - -impl Default for CycleFoldCircuit { - fn default() -> Self { - let poseidon_constants = PoseidonConstants::new(); - Self { - commit_1: None, - commit_2: None, - scalar: None, - poseidon_constants, - } - } -} -impl CycleFoldCircuit { - /// Create a new CycleFold circuit with the given inputs - pub fn new( - commit_1: Option>, - commit_2: Option>, - scalar: Option<[bool; NUM_CHALLENGE_BITS]>, - ) -> Self { - let poseidon_constants = PoseidonConstants::new(); - Self { - commit_1, - commit_2, - scalar, - poseidon_constants, - } - } - - fn alloc_witness::Base>>( - &self, - mut cs: CS, - ) -> Result< - ( - AllocatedPoint, // commit_1 - AllocatedPoint, // commit_2 - Vec, // scalar - ), - SynthesisError, - > { - let commit_1 = AllocatedPoint::alloc( - cs.namespace(|| "allocate C_1"), - self.commit_1.map(|C_1| C_1.to_coordinates()), - )?; - commit_1.check_on_curve(cs.namespace(|| "commit_1 on curve"))?; - - let commit_2 = AllocatedPoint::alloc( - cs.namespace(|| "allocate C_2"), - self.commit_2.map(|C_2| C_2.to_coordinates()), - )?; - commit_2.check_on_curve(cs.namespace(|| "commit_2 on curve"))?; - - let scalar: Vec = self - .scalar - .unwrap_or([false; NUM_CHALLENGE_BITS]) - .into_iter() - .enumerate() - .map(|(idx, bit)| { - 
AllocatedBit::alloc(cs.namespace(|| format!("scalar bit {idx}")), Some(bit)) - }) - .collect::, _>>()?; - - Ok((commit_1, commit_2, scalar)) - } - - /// Synthesize the CycleFold circuit - pub fn synthesize::Base>>( - &self, - mut cs: CS, - ) -> Result<(), SynthesisError> { - let (C_1, C_2, r) = self.alloc_witness(cs.namespace(|| "allocate circuit witness"))?; - - // Calculate C_final - let r_C_2 = C_2.scalar_mul(cs.namespace(|| "r * C_2"), &r)?; - - let C_final = C_1.add(cs.namespace(|| "C_1 + r * C_2"), &r_C_2)?; - - self.inputize_point(&C_1, cs.namespace(|| "inputize C_1"))?; - self.inputize_point(&C_2, cs.namespace(|| "inputize C_2"))?; - self.inputize_point(&C_final, cs.namespace(|| "inputize C_final"))?; - - let scalar = le_bits_to_num(cs.namespace(|| "get scalar"), &r)?; - - scalar.inputize(cs.namespace(|| "scalar"))?; - - Ok(()) - } - - // Represent the point in the public IO as its 2-ary Poseidon hash - fn inputize_point( - &self, - point: &AllocatedPoint, - mut cs: CS, - ) -> Result<(), SynthesisError> - where - E: Engine, - CS: ConstraintSystem, - { - let (x, y, is_infinity) = point.get_coordinates(); - let preimage = vec![x.clone(), y.clone()]; - let val = poseidon_hash_allocated( - cs.namespace(|| "hash point"), - preimage, - &self.poseidon_constants, - )?; - - let zero = alloc_zero(cs.namespace(|| "zero")); - - let is_infinity_bit = AllocatedBit::alloc( - cs.namespace(|| "is_infinity"), - Some(is_infinity.get_value().unwrap_or(E::Base::ONE) == E::Base::ONE), - )?; - - cs.enforce( - || "infinity_bit matches", - |lc| lc, - |lc| lc, - |lc| lc + is_infinity_bit.get_variable() - is_infinity.get_variable(), - ); - - // Output 0 when it is the point at infinity - let output = conditionally_select( - cs.namespace(|| "select output"), - &zero, - &val, - &Boolean::from(is_infinity_bit), - )?; - - output.inputize(cs.namespace(|| "inputize hash"))?; - - Ok(()) - } -} - -#[cfg(test)] -mod tests { - use expect_test::{expect, Expect}; - use ff::{Field, PrimeField, 
PrimeFieldBits}; - use neptune::Poseidon; - use rand_core::OsRng; - - use super::*; - use crate::{ - bellpepper::{ - r1cs::{NovaShape, NovaWitness}, - shape_cs::ShapeCS, - solver::SatisfyingAssignment, - }, - constants::NIO_CYCLE_FOLD, - gadgets::scalar_as_base, - provider::Bn256EngineKZG, - traits::{ - commitment::CommitmentEngineTrait, snark::default_ck_hint, CurveCycleEquipped, Dual, - }, - }; - - fn test_cyclefold_circuit_size_with(expected_constraints: &Expect, expected_vars: &Expect) - where - E1: CurveCycleEquipped, - { - // Instantiate the circuit with trivial inputs - let circuit: CycleFoldCircuit> = CycleFoldCircuit::default(); - - // Synthesize the R1CS shape - let mut cs: ShapeCS = ShapeCS::new(); - let _ = circuit.synthesize(cs.namespace(|| "synthesizing shape")); - - // Extract the number of constraints and variables - let num_constraints = cs.num_constraints(); - let num_variables = cs.num_aux(); - let num_io = cs.num_inputs(); - - // Check the number of constraints and variables match the expected values - expected_constraints.assert_eq(&num_constraints.to_string()); - expected_vars.assert_eq(&num_variables.to_string()); - assert_eq!(num_io, NIO_CYCLE_FOLD + 1); // includes 1 - } - - #[test] - fn test_cyclefold_circuit_size() { - test_cyclefold_circuit_size_with::(&expect!("2090"), &expect!("2081")); - } - - fn test_cyclefold_circuit_sat_with() { - let rng = OsRng; - - let ck = < as Engine>::CE as CommitmentEngineTrait>>::setup(b"test", 5); - - // Generate random vectors to commit to - let v1 = (0..5) - .map(|_| < as Engine>::Scalar as Field>::random(rng)) - .collect::>(); - let v2 = (0..5) - .map(|_| < as Engine>::Scalar as Field>::random(rng)) - .collect::>(); - - // Calculate the random commitments - let C_1 = < as Engine>::CE as CommitmentEngineTrait>>::commit(&ck, &v1); - let C_2 = < as Engine>::CE as CommitmentEngineTrait>>::commit(&ck, &v2); - - // Generate a random scalar - let val: u128 = rand::random(); - let r = < as Engine>::Scalar as 
PrimeField>::from_u128(val); - let r_bits = r - .to_le_bits() - .into_iter() - .take(128) - .collect::>() - .try_into() - .unwrap(); - - let circuit: CycleFoldCircuit> = - CycleFoldCircuit::new(Some(C_1), Some(C_2), Some(r_bits)); - - // Calculate the result out of circuit - let native_result = C_1 + C_2 * r; - - // Generate the R1CS shape and commitment key - let mut cs: ShapeCS = ShapeCS::new(); - let _ = circuit.synthesize(cs.namespace(|| "synthesizing shape")); - let (shape, ck) = cs.r1cs_shape_and_key(&*default_ck_hint()); - - // Synthesize the R1CS circuit on the random inputs - let mut cs = SatisfyingAssignment::::new(); - circuit - .synthesize(cs.namespace(|| "synthesizing witness")) - .unwrap(); - - let (instance, witness) = cs.r1cs_instance_and_witness(&shape, &ck).unwrap(); - let X = &instance.X; - - // Helper functio to calculate the hash - let compute_hash = |P: Commitment>| -> E::Scalar { - let (x, y, is_infinity) = P.to_coordinates(); - if is_infinity { - return E::Scalar::ZERO; - } - - let mut hasher = Poseidon::new_with_preimage(&[x, y], &circuit.poseidon_constants); - - hasher.hash() - }; - - // Check the circuit calculates the right thing - let hash_1 = compute_hash(C_1); - assert_eq!(hash_1, X[0]); - let hash_2 = compute_hash(C_2); - assert_eq!(hash_2, X[1]); - let hash_res = compute_hash(native_result); - assert_eq!(hash_res, X[2]); - assert_eq!(r, scalar_as_base::(X[3])); - - // Check the R1CS equation is satisfied - shape.is_sat(&ck, &instance, &witness).unwrap(); - } - - #[test] - fn test_cyclefold_circuit_sat() { - test_cyclefold_circuit_sat_with::(); - } -} diff --git a/src/cyclefold/gadgets.rs b/src/cyclefold/gadgets.rs deleted file mode 100644 index 7edd0c6..0000000 --- a/src/cyclefold/gadgets.rs +++ /dev/null @@ -1,706 +0,0 @@ -//! This module defines many of the gadgets needed in the primary folding -//! 
circuit - -use bellpepper::gadgets::Assignment; -use bellpepper_core::{num::AllocatedNum, ConstraintSystem, SynthesisError}; -use ff::Field; -use itertools::Itertools; - -use super::util::FoldingData; -use crate::{ - constants::{BN_N_LIMBS, NIO_CYCLE_FOLD, NUM_CHALLENGE_BITS}, - gadgets::{ - alloc_bignat_constant, f_to_nat, le_bits_to_num, AllocatedPoint, - AllocatedRelaxedR1CSInstance, BigNat, Num, - }, - r1cs::R1CSInstance, - traits::{commitment::CommitmentTrait, Engine, Group, ROCircuitTrait, ROConstantsCircuit}, -}; - -// An allocated version of the R1CS instance obtained from a single cyclefold -// invocation -pub struct AllocatedCycleFoldInstance { - W: AllocatedPoint, - X: [BigNat; NIO_CYCLE_FOLD], -} - -impl AllocatedCycleFoldInstance { - pub fn alloc>( - mut cs: CS, - inst: Option<&R1CSInstance>, - limb_width: usize, - n_limbs: usize, - ) -> Result { - let W = AllocatedPoint::alloc( - cs.namespace(|| "allocate W"), - inst.map(|u| u.comm_W.to_coordinates()), - )?; - W.check_on_curve(cs.namespace(|| "check W on curve"))?; - - if let Some(inst) = inst { - if inst.X.len() != NIO_CYCLE_FOLD { - return Err(SynthesisError::IncompatibleLengthVector(String::from( - "R1CS instance has wrong arity", - ))); - } - } - - let X: [BigNat; NIO_CYCLE_FOLD] = (0..NIO_CYCLE_FOLD) - .map(|idx| { - BigNat::alloc_from_nat( - cs.namespace(|| format!("allocating IO {idx}")), - || Ok(f_to_nat(inst.map_or(&E::Scalar::ZERO, |inst| &inst.X[idx]))), - limb_width, - n_limbs, - ) - }) - .collect::, _>>()? 
- .try_into() - .map_err(|err: Vec<_>| { - SynthesisError::IncompatibleLengthVector(format!( - "{} != {NIO_CYCLE_FOLD}", - err.len() - )) - })?; - - Ok(Self { W, X }) - } - - pub fn absorb_in_ro( - &self, - mut cs: CS, - ro: &mut impl ROCircuitTrait, - ) -> Result<(), SynthesisError> - where - CS: ConstraintSystem, - { - ro.absorb(&self.W.x); - ro.absorb(&self.W.y); - ro.absorb(&self.W.is_infinity); - self.X - .iter() - .enumerate() - .try_for_each(|(io_idx, x)| -> Result<(), SynthesisError> { - x.as_limbs().iter().enumerate().try_for_each( - |(limb_idx, limb)| -> Result<(), SynthesisError> { - ro.absorb(&limb.as_allocated_num(cs.namespace(|| { - format!("convert limb {limb_idx} of X[{io_idx}] to num") - }))?); - Ok(()) - }, - ) - })?; - - Ok(()) - } -} - -/// An circuit allocated version of the `FoldingData` coming from a CycleFold -/// invocation. -pub struct AllocatedCycleFoldData { - pub U: AllocatedRelaxedR1CSInstance, - pub u: AllocatedCycleFoldInstance, - pub T: AllocatedPoint, -} - -impl AllocatedCycleFoldData { - pub fn alloc>( - mut cs: CS, - inst: Option<&FoldingData>, - limb_width: usize, - n_limbs: usize, - ) -> Result { - let U = AllocatedRelaxedR1CSInstance::alloc( - cs.namespace(|| "U"), - inst.map(|x| &x.U), - limb_width, - n_limbs, - )?; - - let u = AllocatedCycleFoldInstance::alloc( - cs.namespace(|| "u"), - inst.map(|x| &x.u), - limb_width, - n_limbs, - )?; - - let T = AllocatedPoint::alloc(cs.namespace(|| "T"), inst.map(|x| x.T.to_coordinates()))?; - T.check_on_curve(cs.namespace(|| "T on curve"))?; - - Ok(Self { U, u, T }) - } - - /// The NIFS verifier which folds the CycleFold instance into a running - /// relaxed R1CS instance. 
- pub fn apply_fold( - &self, - mut cs: CS, - params: &AllocatedNum, - ro_consts: ROConstantsCircuit, - limb_width: usize, - n_limbs: usize, - ) -> Result, SynthesisError> - where - CS: ConstraintSystem, - { - // Compute r: - let mut ro = E::ROCircuit::new( - ro_consts, - 1 + (3 + 3 + 1 + NIO_CYCLE_FOLD * BN_N_LIMBS) + (3 + NIO_CYCLE_FOLD * BN_N_LIMBS) + 3, /* digest + (U) + (u) + T */ - ); - ro.absorb(params); - - self.U.absorb_in_ro( - cs.namespace(|| "absorb cyclefold running instance"), - &mut ro, - )?; - // running instance `U` does not need to absorbed since u.X[0] = Hash(params, U, - // i, z0, zi) - self.u - .absorb_in_ro(cs.namespace(|| "absorb cyclefold instance"), &mut ro)?; - - ro.absorb(&self.T.x); - ro.absorb(&self.T.y); - ro.absorb(&self.T.is_infinity); - let r_bits = ro.squeeze(cs.namespace(|| "r bits"), NUM_CHALLENGE_BITS)?; - let r = le_bits_to_num(cs.namespace(|| "r"), &r_bits)?; - - // W_fold = self.W + r * u.W - let rW = self.u.W.scalar_mul(cs.namespace(|| "r * u.W"), &r_bits)?; - let W_fold = self.U.W.add(cs.namespace(|| "self.W + r * u.W"), &rW)?; - - // E_fold = self.E + r * T - let rT = self.T.scalar_mul(cs.namespace(|| "r * T"), &r_bits)?; - let E_fold = self.U.E.add(cs.namespace(|| "self.E + r * T"), &rT)?; - - // u_fold = u_r + r - let u_fold = AllocatedNum::alloc(cs.namespace(|| "u_fold"), || { - Ok(*self.U.u.get_value().get()? + r.get_value().get()?) 
- })?; - cs.enforce( - || "Check u_fold", - |lc| lc, - |lc| lc, - |lc| lc + u_fold.get_variable() - self.U.u.get_variable() - r.get_variable(), - ); - - // Fold the IO: - // Analyze r into limbs - let r_bn = BigNat::from_num( - cs.namespace(|| "allocate r_bn"), - &Num::from(r), - limb_width, - n_limbs, - )?; - - // Allocate the order of the non-native field as a constant - let m_bn = alloc_bignat_constant( - cs.namespace(|| "alloc m"), - &E::GE::group_params().2, - limb_width, - n_limbs, - )?; - - let mut X_fold = vec![]; - - // Calculate the - for (idx, (X, x)) in self.U.X.iter().zip_eq(self.u.X.iter()).enumerate() { - let (_, r) = x.mult_mod(cs.namespace(|| format!("r*u.X[{idx}]")), &r_bn, &m_bn)?; - let r_new = X.add(&r)?; - let X_i_fold = - r_new.red_mod(cs.namespace(|| format!("reduce folded X[{idx}]")), &m_bn)?; - X_fold.push(X_i_fold); - } - - let X_fold = X_fold.try_into().map_err(|err: Vec<_>| { - SynthesisError::IncompatibleLengthVector(format!("{} != {NIO_CYCLE_FOLD}", err.len())) - })?; - - Ok(AllocatedRelaxedR1CSInstance { - W: W_fold, - E: E_fold, - u: u_fold, - X: X_fold, - }) - } -} - -pub mod emulated { - use bellpepper::gadgets::{boolean_utils::conditionally_select, Assignment}; - use bellpepper_core::{ - boolean::{AllocatedBit, Boolean}, - num::AllocatedNum, - ConstraintSystem, SynthesisError, - }; - use ff::Field; - - use super::FoldingData; - use crate::{ - constants::{NUM_CHALLENGE_BITS, NUM_FE_IN_EMULATED_POINT}, - gadgets::{ - alloc_bignat_constant, alloc_zero, conditionally_select_allocated_bit, - conditionally_select_bignat, f_to_nat, le_bits_to_num, BigNat, - }, - traits::{commitment::CommitmentTrait, Engine, Group, ROCircuitTrait, ROConstantsCircuit}, - RelaxedR1CSInstance, - }; - - /// An allocated version of a curve point from the non-native curve - #[derive(Clone)] - pub struct AllocatedEmulPoint - where - G: Group, - { - pub x: BigNat, - pub y: BigNat, - pub is_infinity: AllocatedBit, - } - - impl AllocatedEmulPoint - where - G: 
Group, - { - pub fn alloc( - mut cs: CS, - coords: Option<(G::Scalar, G::Scalar, bool)>, - limb_width: usize, - n_limbs: usize, - ) -> Result - where - CS: ConstraintSystem<::Base>, - { - let x = BigNat::alloc_from_nat( - cs.namespace(|| "x"), - || { - Ok(f_to_nat( - &coords.map_or(::ZERO, |val| val.0), - )) - }, - limb_width, - n_limbs, - )?; - - let y = BigNat::alloc_from_nat( - cs.namespace(|| "y"), - || { - Ok(f_to_nat( - &coords.map_or(::ZERO, |val| val.1), - )) - }, - limb_width, - n_limbs, - )?; - - let is_infinity = AllocatedBit::alloc( - cs.namespace(|| "alloc is_infinity"), - coords.map_or(Some(true), |(_, _, is_infinity)| Some(is_infinity)), - )?; - - Ok(Self { x, y, is_infinity }) - } - - pub fn absorb_in_ro( - &self, - mut cs: CS, - ro: &mut impl ROCircuitTrait, - ) -> Result<(), SynthesisError> - where - CS: ConstraintSystem, - { - let x_bn = self - .x - .as_limbs() - .iter() - .enumerate() - .map(|(i, limb)| { - limb.as_allocated_num(cs.namespace(|| format!("convert limb {i} of x to num"))) - }) - .collect::>, _>>()?; - - for limb in x_bn { - ro.absorb(&limb) - } - - let y_bn = self - .y - .as_limbs() - .iter() - .enumerate() - .map(|(i, limb)| { - limb.as_allocated_num(cs.namespace(|| format!("convert limb {i} of y to num"))) - }) - .collect::>, _>>()?; - - for limb in y_bn { - ro.absorb(&limb) - } - - let is_infinity_num: AllocatedNum = - AllocatedNum::alloc(cs.namespace(|| "is_infinity"), || { - self.is_infinity.get_value().map_or( - Err(SynthesisError::AssignmentMissing), - |bit| { - if bit { - Ok(G::Base::ONE) - } else { - Ok(G::Base::ZERO) - } - }, - ) - })?; - - cs.enforce( - || "constrain num equals bit", - |lc| lc, - |lc| lc, - |lc| lc + is_infinity_num.get_variable() - self.is_infinity.get_variable(), - ); - - ro.absorb(&is_infinity_num); - - Ok(()) - } - - fn conditionally_select>( - &self, - mut cs: CS, - other: &Self, - condition: &Boolean, - ) -> Result { - let x = conditionally_select_bignat( - cs.namespace(|| "x = cond ? 
self.x : other.x"), - &self.x, - &other.x, - condition, - )?; - - let y = conditionally_select_bignat( - cs.namespace(|| "y = cond ? self.y : other.y"), - &self.y, - &other.y, - condition, - )?; - - let is_infinity = conditionally_select_allocated_bit( - cs.namespace(|| "is_infinity = cond ? self.is_infinity : other.is_infinity"), - &self.is_infinity, - &other.is_infinity, - condition, - )?; - - Ok(Self { x, y, is_infinity }) - } - - pub fn default>( - mut cs: CS, - limb_width: usize, - n_limbs: usize, - ) -> Result { - let x = alloc_bignat_constant( - cs.namespace(|| "allocate x_default = 0"), - &f_to_nat(&G::Base::ZERO), - limb_width, - n_limbs, - )?; - let y = alloc_bignat_constant( - cs.namespace(|| "allocate y_default = 0"), - &f_to_nat(&G::Base::ZERO), - limb_width, - n_limbs, - )?; - - let is_infinity = - AllocatedBit::alloc(cs.namespace(|| "allocate is_infinity"), Some(true))?; - cs.enforce( - || "is_infinity = 1", - |lc| lc, - |lc| lc, - |lc| lc + CS::one() - is_infinity.get_variable(), - ); - - Ok(Self { x, y, is_infinity }) - } - } - - /// A non-native circuit version of a `RelaxedR1CSInstance`. 
This is used - /// for the in-circuit representation of the primary running instance - pub struct AllocatedEmulRelaxedR1CSInstance { - pub comm_W: AllocatedEmulPoint, - pub comm_E: AllocatedEmulPoint, - u: AllocatedNum, - x0: AllocatedNum, - x1: AllocatedNum, - } - - impl AllocatedEmulRelaxedR1CSInstance - where - E: Engine, - { - pub fn alloc>( - mut cs: CS, - inst: Option<&RelaxedR1CSInstance>, - limb_width: usize, - n_limbs: usize, - ) -> Result - where - CS: ConstraintSystem<::Base>, - { - let comm_W = AllocatedEmulPoint::alloc( - cs.namespace(|| "allocate comm_W"), - inst.map(|x| x.comm_W.to_coordinates()), - limb_width, - n_limbs, - )?; - - let comm_E = AllocatedEmulPoint::alloc( - cs.namespace(|| "allocate comm_E"), - inst.map(|x| x.comm_E.to_coordinates()), - limb_width, - n_limbs, - )?; - - let u = AllocatedNum::alloc(cs.namespace(|| "allocate u"), || { - inst.map_or(Ok(E::Base::ZERO), |inst| Ok(inst.u)) - })?; - - let x0 = AllocatedNum::alloc(cs.namespace(|| "allocate x0"), || { - inst.map_or(Ok(E::Base::ZERO), |inst| Ok(inst.X[0])) - })?; - - let x1 = AllocatedNum::alloc(cs.namespace(|| "allocate x1"), || { - inst.map_or(Ok(E::Base::ZERO), |inst| Ok(inst.X[1])) - })?; - - Ok(Self { - comm_W, - comm_E, - u, - x0, - x1, - }) - } - - /// Performs a folding of a primary R1CS instance (`u_W`, `u_x0`, - /// `u_x1`) into a running `AllocatedEmulRelaxedR1CSInstance` - /// As the curve operations are performed in the CycleFold circuit and - /// provided to the primary circuit as non-deterministic advice, - /// this folding simply sets those values as the new witness and - /// error vector commitments. 
- pub fn fold_with_r1cs::Base>>( - &self, - mut cs: CS, - pp_digest: &AllocatedNum, - W_new: AllocatedEmulPoint, - E_new: AllocatedEmulPoint, - u_W: &AllocatedEmulPoint, - u_x0: &AllocatedNum, - u_x1: &AllocatedNum, - comm_T: &AllocatedEmulPoint, - ro_consts: ROConstantsCircuit, - ) -> Result { - let mut ro = E::ROCircuit::new( - ro_consts, - 1 + NUM_FE_IN_EMULATED_POINT + 2 + NUM_FE_IN_EMULATED_POINT, /* pp_digest + u.W + u.x + comm_T */ - ); - ro.absorb(pp_digest); - - // Absorb u - // Absorb the witness - u_W.absorb_in_ro(cs.namespace(|| "absorb u_W"), &mut ro)?; - // Absorb public IO - ro.absorb(u_x0); - ro.absorb(u_x1); - - // Absorb comm_T - comm_T.absorb_in_ro(cs.namespace(|| "absorb comm_T"), &mut ro)?; - - let r_bits = ro.squeeze(cs.namespace(|| "r bits"), NUM_CHALLENGE_BITS)?; - let r = le_bits_to_num(cs.namespace(|| "r"), &r_bits)?; - - let u_fold = self.u.add(cs.namespace(|| "u_fold = u + r"), &r)?; - let x0_fold = - AllocatedNum::alloc(cs.namespace(|| "x0"), || { - Ok(*self.x0.get_value().get()? - + *r.get_value().get()? * *u_x0.get_value().get()?) - })?; - cs.enforce( - || "x0_fold = x0 + r * u_x0", - |lc| lc + r.get_variable(), - |lc| lc + u_x0.get_variable(), - |lc| lc + x0_fold.get_variable() - self.x0.get_variable(), - ); - - let x1_fold = - AllocatedNum::alloc(cs.namespace(|| "x1"), || { - Ok(*self.x1.get_value().get()? - + *r.get_value().get()? * *u_x1.get_value().get()?) 
- })?; - cs.enforce( - || "x1_fold = x1 + r * u_x1", - |lc| lc + r.get_variable(), - |lc| lc + u_x1.get_variable(), - |lc| lc + x1_fold.get_variable() - self.x1.get_variable(), - ); - - Ok(Self { - comm_W: W_new, - comm_E: E_new, - u: u_fold, - x0: x0_fold, - x1: x1_fold, - }) - } - - pub fn absorb_in_ro( - &self, - mut cs: CS, - ro: &mut impl ROCircuitTrait, - ) -> Result<(), SynthesisError> - where - CS: ConstraintSystem<::Base>, - { - self.comm_W - .absorb_in_ro(cs.namespace(|| "absorb comm_W"), ro)?; - self.comm_E - .absorb_in_ro(cs.namespace(|| "absorb comm_E"), ro)?; - - ro.absorb(&self.u); - ro.absorb(&self.x0); - ro.absorb(&self.x1); - - Ok(()) - } - - pub fn conditionally_select::Base>>( - &self, - mut cs: CS, - other: &Self, - condition: &Boolean, - ) -> Result { - let comm_W = self.comm_W.conditionally_select( - cs.namespace(|| "comm_W = cond ? self.comm_W : other.comm_W"), - &other.comm_W, - condition, - )?; - - let comm_E = self.comm_E.conditionally_select( - cs.namespace(|| "comm_E = cond? self.comm_E : other.comm_E"), - &other.comm_E, - condition, - )?; - - let u = conditionally_select( - cs.namespace(|| "u = cond ? self.u : other.u"), - &self.u, - &other.u, - condition, - )?; - - let x0 = conditionally_select( - cs.namespace(|| "x0 = cond ? self.x0 : other.x0"), - &self.x0, - &other.x0, - condition, - )?; - - let x1 = conditionally_select( - cs.namespace(|| "x1 = cond ? 
self.x1 : other.x1"), - &self.x1, - &other.x1, - condition, - )?; - - Ok(Self { - comm_W, - comm_E, - u, - x0, - x1, - }) - } - - pub fn default::Base>>( - mut cs: CS, - limb_width: usize, - n_limbs: usize, - ) -> Result { - let comm_W = AllocatedEmulPoint::default( - cs.namespace(|| "default comm_W"), - limb_width, - n_limbs, - )?; - let comm_E = comm_W.clone(); - - let u = alloc_zero(cs.namespace(|| "u = 0")); - - let x0 = u.clone(); - let x1 = u.clone(); - - Ok(Self { - comm_W, - comm_E, - u, - x0, - x1, - }) - } - } - - /// The in-circuit representation of the primary folding data. - pub struct AllocatedFoldingData { - pub U: AllocatedEmulRelaxedR1CSInstance, - pub u_W: AllocatedEmulPoint, - pub u_x0: AllocatedNum, - pub u_x1: AllocatedNum, - pub T: AllocatedEmulPoint, - } - - impl AllocatedFoldingData { - pub fn alloc>( - mut cs: CS, - inst: Option<&FoldingData>, - limb_width: usize, - n_limbs: usize, - ) -> Result - where - CS: ConstraintSystem<::Base>, - { - let U = AllocatedEmulRelaxedR1CSInstance::alloc( - cs.namespace(|| "allocate U"), - inst.map(|x| &x.U), - limb_width, - n_limbs, - )?; - - let u_W = AllocatedEmulPoint::alloc( - cs.namespace(|| "allocate u_W"), - inst.map(|x| x.u.comm_W.to_coordinates()), - limb_width, - n_limbs, - )?; - - let u_x0 = AllocatedNum::alloc(cs.namespace(|| "allocate u_x0"), || { - inst.map_or(Ok(E::Base::ZERO), |inst| Ok(inst.u.X[0])) - })?; - - let u_x1 = AllocatedNum::alloc(cs.namespace(|| "allocate u_x1"), || { - inst.map_or(Ok(E::Base::ZERO), |inst| Ok(inst.u.X[1])) - })?; - - let T = AllocatedEmulPoint::alloc( - cs.namespace(|| "allocate T"), - inst.map(|x| x.T.to_coordinates()), - limb_width, - n_limbs, - )?; - - Ok(Self { - U, - u_W, - u_x0, - u_x1, - T, - }) - } - } -} diff --git a/src/cyclefold/nifs.rs b/src/cyclefold/nifs.rs deleted file mode 100644 index 2991351..0000000 --- a/src/cyclefold/nifs.rs +++ /dev/null @@ -1,161 +0,0 @@ -//! 
This module defines the needed wrong-field NIFS prover - -use std::marker::PhantomData; - -use super::util::{absorb_cyclefold_r1cs, absorb_primary_commitment, absorb_primary_r1cs}; -use crate::{ - constants::{NIO_CYCLE_FOLD, NUM_CHALLENGE_BITS, NUM_FE_IN_EMULATED_POINT}, - errors::NovaError, - gadgets::scalar_as_base, - r1cs::{R1CSInstance, R1CSShape, R1CSWitness, RelaxedR1CSInstance, RelaxedR1CSWitness}, - traits::{commitment::CommitmentTrait, AbsorbInROTrait, Engine, ROConstants, ROTrait}, - CommitmentKey, CompressedCommitment, -}; - -/// A SNARK that holds the proof of a step of an incremental computation of the -/// primary circuit in the CycleFold folding scheme. -/// The difference of this folding scheme from the Nova NIFS in `src/nifs.rs` is -/// that this -#[derive(Debug)] -pub struct PrimaryNIFS -where - E1: Engine::Scalar>, - E2: Engine::Scalar>, -{ - pub(crate) comm_T: CompressedCommitment, - _p: PhantomData, -} - -impl PrimaryNIFS -where - E1: Engine::Scalar>, - E2: Engine::Scalar>, -{ - /// Takes a relaxed R1CS instance-witness pair (U1, W1) and an R1CS - /// instance-witness pair (U2, W2) and folds them into a new relaxed - /// R1CS instance-witness pair (U, W) and a commitment to the cross term - /// T. It also provides the challenge r used to fold the instances. 
- pub fn prove( - ck: &CommitmentKey, - ro_consts: &ROConstants, - pp_digest: &E1::Scalar, - S: &R1CSShape, - U1: &RelaxedR1CSInstance, - W1: &RelaxedR1CSWitness, - U2: &R1CSInstance, - W2: &R1CSWitness, - ) -> Result< - ( - Self, - (RelaxedR1CSInstance, RelaxedR1CSWitness), - E1::Scalar, - ), - NovaError, - > { - let arity = U1.X.len(); - - if arity != U2.X.len() { - return Err(NovaError::InvalidInputLength); - } - - let mut ro = E2::RO::new( - ro_consts.clone(), - 1 + NUM_FE_IN_EMULATED_POINT + arity + NUM_FE_IN_EMULATED_POINT, /* pp_digest + u.W - * + u.X + T */ - ); - - ro.absorb(*pp_digest); - - absorb_primary_r1cs::(U2, &mut ro); - - let (T, comm_T) = S.commit_T(ck, U1, W1, U2, W2)?; - - absorb_primary_commitment::(&comm_T, &mut ro); - - let r = scalar_as_base::(ro.squeeze(NUM_CHALLENGE_BITS)); - - let U = U1.fold(U2, &comm_T, &r); - - let W = W1.fold(W2, &T, &r)?; - - Ok(( - Self { - comm_T: comm_T.compress(), - _p: PhantomData, - }, - (U, W), - r, - )) - } -} - -/// A SNARK that holds the proof of a step of an incremental computation of the -/// CycleFold circuit The difference of this folding scheme from the Nova NIFS -/// in `src/nifs.rs` is that this folding prover and verifier must fold in the -/// `RelaxedR1CSInstance` accumulator because the optimization in the -#[derive(Debug)] -pub struct CycleFoldNIFS { - pub(crate) comm_T: CompressedCommitment, -} - -impl CycleFoldNIFS { - /// Folds an R1CS instance/witness pair (U2, W2) into a relaxed R1CS - /// instance/witness (U1, W1) returning the new folded accumulator and a - /// commitment to the cross-term. 
- pub fn prove( - ck: &CommitmentKey, - ro_consts: &ROConstants, - pp_digest: &E::Scalar, - S: &R1CSShape, - U1: &RelaxedR1CSInstance, - W1: &RelaxedR1CSWitness, - U2: &R1CSInstance, - W2: &R1CSWitness, - ) -> Result<(Self, (RelaxedR1CSInstance, RelaxedR1CSWitness)), NovaError> { - // Check `U1` and `U2` have the same arity - if U2.X.len() != NIO_CYCLE_FOLD || U1.X.len() != NIO_CYCLE_FOLD { - return Err(NovaError::InvalidInputLength); - } - - // initialize a new RO - let mut ro = E::RO::new( - ro_consts.clone(), - 46, /* 1 + (3 + 3 + 1 + NIO_CYCLE_FOLD * BN_N_LIMBS) + (3 + NIO_CYCLE_FOLD * - * BN_N_LIMBS) + 3, // digest + (U) + (u) + T */ - ); - - // append the digest of pp to the transcript - ro.absorb(scalar_as_base::(*pp_digest)); - - // append U1 to the transcript. - // NOTE: this must be here because the IO for `U2` does not have the data of the - // hash of `U1` - U1.absorb_in_ro(&mut ro); - - // append U2 to transcript - absorb_cyclefold_r1cs(U2, &mut ro); - - // compute a commitment to the cross-term - let (T, comm_T) = S.commit_T(ck, U1, W1, U2, W2)?; - - // append `comm_T` to the transcript and obtain a challenge - comm_T.absorb_in_ro(&mut ro); - - // compute a challenge from the RO - let r = ro.squeeze(NUM_CHALLENGE_BITS); - - // fold the instance using `r` and `comm_T` - let U = U1.fold(U2, &comm_T, &r); - - // fold the witness using `r` and `T` - let W = W1.fold(W2, &T, &r)?; - - // return the folded instance and witness - Ok(( - Self { - comm_T: comm_T.compress(), - }, - (U, W), - )) - } -} diff --git a/src/cyclefold/nova_circuit.rs b/src/cyclefold/nova_circuit.rs deleted file mode 100644 index ed56f08..0000000 --- a/src/cyclefold/nova_circuit.rs +++ /dev/null @@ -1,565 +0,0 @@ -//! 
This module defines the Nova augmented circuit used for Cyclefold - -use bellpepper::gadgets::{ - boolean::Boolean, boolean_utils::conditionally_select_slice, num::AllocatedNum, Assignment, -}; -use bellpepper_core::{boolean::AllocatedBit, ConstraintSystem, SynthesisError}; -use ff::Field; -use serde::{Deserialize, Serialize}; - -use super::{ - gadgets::{emulated, AllocatedCycleFoldData}, - util::FoldingData, -}; -use crate::{ - constants::{BN_N_LIMBS, NIO_CYCLE_FOLD, NUM_FE_IN_EMULATED_POINT, NUM_HASH_BITS}, - gadgets::{ - alloc_num_equals, alloc_scalar_as_base, alloc_zero, le_bits_to_num, - AllocatedRelaxedR1CSInstance, - }, - supernova::StepCircuit, - traits::{commitment::CommitmentTrait, Engine, ROCircuitTrait, ROConstantsCircuit}, - Commitment, -}; - -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct AugmentedCircuitParams { - limb_width: usize, - n_limbs: usize, -} - -impl AugmentedCircuitParams { - pub const fn new(limb_width: usize, n_limbs: usize) -> Self { - Self { - limb_width, - n_limbs, - } - } -} - -#[derive(Debug, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct AugmentedCircuitInputs -where - E1: Engine::Scalar>, - E2: Engine::Scalar>, -{ - pp_digest: E1::Scalar, - i: E1::Base, - z0: Vec, - - zi: Option>, - data_p: Option>, - - data_c_1: Option>, - data_c_2: Option>, - - E_new: Option>, - W_new: Option>, -} - -impl AugmentedCircuitInputs -where - E1: Engine::Scalar>, - E2: Engine::Scalar>, -{ - pub fn new( - pp_digest: E1::Scalar, - i: E1::Base, - z0: Vec, - zi: Option>, - data_p: Option>, - data_c_1: Option>, - data_c_2: Option>, - E_new: Option>, - W_new: Option>, - ) -> Self { - Self { - pp_digest, - i, - z0, - zi, - data_p, - data_c_1, - data_c_2, - E_new, - W_new, - } - } -} -pub struct AugmentedCircuit<'a, E1, E2, SC> -where - E1: Engine::Scalar>, - E2: Engine::Scalar>, - SC: StepCircuit, -{ - params: &'a AugmentedCircuitParams, - ro_consts: ROConstantsCircuit, - inputs: Option>, - step_circuit: &'a SC, -} - 
-impl<'a, E1, E2, SC> AugmentedCircuit<'a, E1, E2, SC> -where - E1: Engine::Scalar>, - E2: Engine::Scalar>, - SC: StepCircuit, -{ - pub const fn new( - params: &'a AugmentedCircuitParams, - ro_consts: ROConstantsCircuit, - inputs: Option>, - step_circuit: &'a SC, - ) -> Self { - Self { - params, - ro_consts, - inputs, - step_circuit, - } - } - - fn alloc_witness::Base>>( - &self, - mut cs: CS, - arity: usize, - ) -> Result< - ( - AllocatedNum, // pp_digest - AllocatedNum, // i - Vec>, // z0 - Vec>, // zi - emulated::AllocatedFoldingData, // data_p - AllocatedCycleFoldData, // data_c_1 - AllocatedCycleFoldData, // data_c_2 - emulated::AllocatedEmulPoint, // E_new - emulated::AllocatedEmulPoint, // W_new - ), - SynthesisError, - > { - let pp_digest = alloc_scalar_as_base::( - cs.namespace(|| "params"), - self.inputs.as_ref().map(|inputs| inputs.pp_digest), - )?; - - let i = AllocatedNum::alloc(cs.namespace(|| "i"), || Ok(self.inputs.get()?.i))?; - - let z_0 = (0..arity) - .map(|i| { - AllocatedNum::alloc(cs.namespace(|| format!("z0_{i}")), || { - Ok(self.inputs.get()?.z0[i]) - }) - }) - .collect::>, _>>()?; - - // Allocate zi. 
If inputs.zi is not provided (base case) allocate default value - // 0 - let zero = vec![E1::Base::ZERO; arity]; - let z_i = (0..arity) - .map(|i| { - AllocatedNum::alloc(cs.namespace(|| format!("zi_{i}")), || { - Ok(self.inputs.get()?.zi.as_ref().unwrap_or(&zero)[i]) - }) - }) - .collect::>, _>>()?; - - let data_p = emulated::AllocatedFoldingData::alloc( - cs.namespace(|| "data_p"), - self.inputs - .as_ref() - .and_then(|inputs| inputs.data_p.as_ref()), - self.params.limb_width, - self.params.n_limbs, - )?; - - let data_c_1 = AllocatedCycleFoldData::alloc( - cs.namespace(|| "data_c_1"), - self.inputs - .as_ref() - .and_then(|inputs| inputs.data_c_1.as_ref()), - self.params.limb_width, - self.params.n_limbs, - )?; - - let data_c_2 = AllocatedCycleFoldData::alloc( - cs.namespace(|| "data_c_2"), - self.inputs - .as_ref() - .and_then(|inputs| inputs.data_c_2.as_ref()), - self.params.limb_width, - self.params.n_limbs, - )?; - - let E_new = emulated::AllocatedEmulPoint::alloc( - cs.namespace(|| "E_new"), - self.inputs - .as_ref() - .and_then(|inputs| inputs.E_new.as_ref()) - .map(|E_new| E_new.to_coordinates()), - self.params.limb_width, - self.params.n_limbs, - )?; - - let W_new = emulated::AllocatedEmulPoint::alloc( - cs.namespace(|| "W_new"), - self.inputs - .as_ref() - .and_then(|inputs| inputs.W_new.as_ref()) - .map(|W_new| W_new.to_coordinates()), - self.params.limb_width, - self.params.n_limbs, - )?; - - Ok(( - pp_digest, i, z_0, z_i, data_p, data_c_1, data_c_2, E_new, W_new, - )) - } - - pub fn synthesize_base_case::Base>>( - &self, - mut cs: CS, - ) -> Result< - ( - AllocatedRelaxedR1CSInstance, - emulated::AllocatedEmulRelaxedR1CSInstance, - ), - SynthesisError, - > { - let U_c_default = AllocatedRelaxedR1CSInstance::default( - cs.namespace(|| "Allocate U_c_default"), - self.params.limb_width, - self.params.n_limbs, - )?; - - let U_p_default = emulated::AllocatedEmulRelaxedR1CSInstance::default( - cs.namespace(|| "Allocated U_p_default"), - 
self.params.limb_width, - self.params.n_limbs, - )?; - - // In the first folding step return the default relaxed instances for both the - // CycleFold and primary running accumulators - Ok((U_c_default, U_p_default)) - } - - pub fn synthesize_non_base_case::Base>>( - &self, - mut cs: CS, - pp_digest: &AllocatedNum, - i: &AllocatedNum, - z_0: &[AllocatedNum], - z_i: &[AllocatedNum], - data_p: &emulated::AllocatedFoldingData, - data_c_1: &AllocatedCycleFoldData, - data_c_2: &AllocatedCycleFoldData, - E_new: emulated::AllocatedEmulPoint, - W_new: emulated::AllocatedEmulPoint, - arity: usize, - ) -> Result< - ( - AllocatedRelaxedR1CSInstance, - emulated::AllocatedEmulRelaxedR1CSInstance, - AllocatedBit, - ), - SynthesisError, - > { - // Follows the outline written down here https://hackmd.io/@argumentcomputer/HybHrnNFT - - // Calculate the hash of the non-deterministic advice for the primary circuit - let mut ro_p = E1::ROCircuit::new( - self.ro_consts.clone(), - 2 + 2 * arity + 2 * NUM_FE_IN_EMULATED_POINT + 3, - ); - - ro_p.absorb(pp_digest); - ro_p.absorb(i); - for e in z_0 { - ro_p.absorb(e) - } - for e in z_i { - ro_p.absorb(e) - } - data_p - .U - .absorb_in_ro(cs.namespace(|| "absorb U_p"), &mut ro_p)?; - - let hash_bits_p = ro_p.squeeze(cs.namespace(|| "primary hash bits"), NUM_HASH_BITS)?; - let hash_p = le_bits_to_num(cs.namespace(|| "primary hash"), &hash_bits_p)?; - - // check the hash matches the public IO from the last primary instance - let check_primary = alloc_num_equals( - cs.namespace(|| "u.X[0] = H(params, i, z0, zi, U_p)"), - &data_p.u_x0, - &hash_p, - )?; - - // Calculate the hash of the non-dterministic advice for the secondary circuit - let mut ro_c = E1::ROCircuit::new( - self.ro_consts.clone(), - 1 + 1 + 3 + 3 + 1 + NIO_CYCLE_FOLD * BN_N_LIMBS, // pp + i + W + E + u + X - ); - - ro_c.absorb(pp_digest); - ro_c.absorb(i); - data_c_1 - .U - .absorb_in_ro(cs.namespace(|| "absorb U_c"), &mut ro_c)?; - let hash_c_bits = ro_c.squeeze(cs.namespace(|| 
"cyclefold hash bits"), NUM_HASH_BITS)?; - let hash_c = le_bits_to_num(cs.namespace(|| "cyclefold hash"), &hash_c_bits)?; - - // check the hash matches the public IO from the last primary instance - let check_cyclefold = alloc_num_equals( - cs.namespace(|| "u.X[1] = H(params, U_c)"), - &data_p.u_x1, - &hash_c, - )?; - - let check_io = AllocatedBit::and( - cs.namespace(|| "both IOs match"), - &check_primary, - &check_cyclefold, - )?; - - // Run NIVC.V on U_c, u_c_1, T_c_1 - let U_int = data_c_1.apply_fold( - cs.namespace(|| "fold u_c_1 into U_c"), - pp_digest, - self.ro_consts.clone(), - self.params.limb_width, - self.params.n_limbs, - )?; - - // Calculate h_int = H(pp, U_c_int) - let mut ro_c_int = E1::ROCircuit::new( - self.ro_consts.clone(), - 1 + 3 + 3 + 1 + NIO_CYCLE_FOLD * BN_N_LIMBS, // pp + W + E + u + X - ); - ro_c_int.absorb(pp_digest); - U_int.absorb_in_ro(cs.namespace(|| "absorb U_c_int"), &mut ro_c_int)?; - let h_c_int_bits = - ro_c_int.squeeze(cs.namespace(|| "intermediate hash bits"), NUM_HASH_BITS)?; - let h_c_int = le_bits_to_num(cs.namespace(|| "intermediate hash"), &h_c_int_bits)?; - - // Calculate h_1 = H(pp, U_c_1) - let mut ro_c_1 = E1::ROCircuit::new( - self.ro_consts.clone(), - 1 + 3 + 3 + 1 + NIO_CYCLE_FOLD * BN_N_LIMBS, // pp + W + E + u + X - ); - - ro_c_1.absorb(pp_digest); - data_c_2 - .U - .absorb_in_ro(cs.namespace(|| "absorb U_c_1"), &mut ro_c_1)?; - let h_c_1_bits = ro_c_1.squeeze(cs.namespace(|| "cyclefold_1 hash bits"), NUM_HASH_BITS)?; - let h_c_1 = le_bits_to_num(cs.namespace(|| "cyclefold_1 hash"), &h_c_1_bits)?; - - // Check the intermediate-calculated running instance matches the - // non-deterministic advice provided to the prover - let check_cyclefold_int = - alloc_num_equals(cs.namespace(|| "h_int = h_c_1"), &h_c_int, &h_c_1)?; - - let checks_pass = AllocatedBit::and( - cs.namespace(|| "all checks passed"), - &check_io, - &check_cyclefold_int, - )?; - - // calculate the folded CycleFold accumulator - let U_c = 
data_c_2.apply_fold( - cs.namespace(|| "fold u_c_2 into U_c_1"), - pp_digest, - self.ro_consts.clone(), - self.params.limb_width, - self.params.n_limbs, - )?; - - // calculate the folded primary circuit accumulator - let U_p = data_p.U.fold_with_r1cs( - cs.namespace(|| "fold u_p into U_p"), - pp_digest, - W_new, - E_new, - &data_p.u_W, - &data_p.u_x0, - &data_p.u_x1, - &data_p.T, - self.ro_consts.clone(), - )?; - - Ok((U_c, U_p, checks_pass)) - } - - pub fn synthesize::Base>>( - self, - cs: &mut CS, - ) -> Result>, SynthesisError> { - // Circuit is documented here: https://hackmd.io/SBvAur_2RQmaduDi7gYbhw - let arity = self.step_circuit.arity(); - - // Allocate the witness - let (pp_digest, i, z_0, z_i, data_p, data_c_1, data_c_2, E_new, W_new) = - self.alloc_witness(cs.namespace(|| "alloc_witness"), arity)?; - - let zero = alloc_zero(cs.namespace(|| "zero")); - let is_base_case = alloc_num_equals(cs.namespace(|| "is base case"), &i, &zero)?; - - let (U_new_c_base, U_new_p_base) = - self.synthesize_base_case(cs.namespace(|| "base case"))?; - - let (U_new_c_non_base, U_new_p_non_base, check_non_base_pass) = self - .synthesize_non_base_case( - cs.namespace(|| "synthesize non base case"), - &pp_digest, - &i, - &z_0, - &z_i, - &data_p, - &data_c_1, - &data_c_2, - E_new, - W_new, - arity, - )?; - - let should_be_false = AllocatedBit::nor( - cs.namespace(|| "check_non_base_pass nor base_case"), - &check_non_base_pass, - &is_base_case, - )?; - cs.enforce( - || "check_non_base_pass nor base_case = false", - |lc| lc + should_be_false.get_variable(), - |lc| lc + CS::one(), - |lc| lc, - ); - - // select the new running primary instance - let Unew_p = U_new_p_base.conditionally_select( - cs.namespace(|| "compute Unew_p"), - &U_new_p_non_base, - &Boolean::from(is_base_case.clone()), - )?; - - // select the new running CycleFold instance - let Unew_c = U_new_c_base.conditionally_select( - cs.namespace(|| "compute Unew_c"), - &U_new_c_non_base, - 
&Boolean::from(is_base_case.clone()), - )?; - - // Compute i + 1 - let i_new = AllocatedNum::alloc(cs.namespace(|| "i + 1"), || { - Ok(*i.get_value().get()? + E1::Base::ONE) - })?; - cs.enforce( - || "check i + 1", - |lc| lc, - |lc| lc, - |lc| lc + i_new.get_variable() - CS::one() - i.get_variable(), - ); - - // Compute z_{i+1} - let z_input = conditionally_select_slice( - cs.namespace(|| "select input to F"), - &z_0, - &z_i, - &Boolean::from(is_base_case), - )?; - - let (_pc, z_next) = - self.step_circuit - .synthesize(&mut cs.namespace(|| "F"), None, &z_input)?; - - if z_next.len() != arity { - return Err(SynthesisError::IncompatibleLengthVector( - "z_next".to_string(), - )); - } - - // Calculate the first component of the public IO as the hash of the calculated - // primary running instance - let mut ro_p = E1::ROCircuit::new( - self.ro_consts.clone(), - 2 + 2 * arity + (2 * NUM_FE_IN_EMULATED_POINT + 3), // pp + i + z_0 + z_next + (U_p) - ); - ro_p.absorb(&pp_digest); - ro_p.absorb(&i_new); - for e in &z_0 { - ro_p.absorb(e); - } - for e in &z_next { - ro_p.absorb(e); - } - Unew_p.absorb_in_ro(cs.namespace(|| "absorb Unew_p"), &mut ro_p)?; - let hash_p_bits = ro_p.squeeze(cs.namespace(|| "hash_p_bits"), NUM_HASH_BITS)?; - let hash_p = le_bits_to_num(cs.namespace(|| "hash_p"), &hash_p_bits)?; - - // Calculate the second component of the public IO as the hash of the calculated - // CycleFold running instance - let mut ro_c = E1::ROCircuit::new( - self.ro_consts, - 1 + 1 + 3 + 3 + 1 + NIO_CYCLE_FOLD * BN_N_LIMBS, // pp + i + W + E + u + X - ); - ro_c.absorb(&pp_digest); - ro_c.absorb(&i_new); - Unew_c.absorb_in_ro(cs.namespace(|| "absorb Unew_c"), &mut ro_c)?; - let hash_c_bits = ro_c.squeeze(cs.namespace(|| "hash_c_bits"), NUM_HASH_BITS)?; - let hash_c = le_bits_to_num(cs.namespace(|| "hash_c"), &hash_c_bits)?; - - hash_p.inputize(cs.namespace(|| "u_p.x[0] = hash_p"))?; - hash_c.inputize(cs.namespace(|| "u_p.x[1] = hash_c"))?; - - Ok(z_next) - } -} - -// 
#[cfg(test)] -// mod test { -// use expect_test::{expect, Expect}; - -// use super::*; -// use crate::{ -// bellpepper::test_shape_cs::TestShapeCS, -// constants::{BN_LIMB_WIDTH, BN_N_LIMBS}, -// provider::{Bn256EngineKZG, PallasEngine, Secp256k1Engine}, -// traits::{circuit::TrivialCircuit, CurveCycleEquipped, Dual}, -// }; - -// fn test_augmented_circuit_size_with(expected_cons: &Expect, -// expected_var: &Expect) where -// E: CurveCycleEquipped, -// { -// let params = AugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS); - -// let ro_consts = ROConstantsCircuit::::default(); - -// let step_circuit = TrivialCircuit::::default(); - -// let circuit = AugmentedCircuit::, -// TrivialCircuit>::new( ¶ms, -// ro_consts, -// None, -// &step_circuit, -// ); -// let mut cs: TestShapeCS> = TestShapeCS::default(); - -// let res = circuit.synthesize(&mut cs); - -// res.unwrap(); - -// let num_constraints = cs.num_constraints(); -// let num_variables = cs.num_aux(); - -// expected_cons.assert_eq(&num_constraints.to_string()); -// expected_var.assert_eq(&num_variables.to_string()); -// } - -// #[test] -// fn test_augmented_circuit_size() { -// test_augmented_circuit_size_with::(&expect!["33289"], -// &expect!["33323"]); -// test_augmented_circuit_size_with::(&expect!["35125" -// ], &expect!["35159"]); -// test_augmented_circuit_size_with::(&expect!["33856"], -// &expect!["33890"]); } -// } diff --git a/src/cyclefold/snark.rs b/src/cyclefold/snark.rs deleted file mode 100644 index 6d8ef3b..0000000 --- a/src/cyclefold/snark.rs +++ /dev/null @@ -1,563 +0,0 @@ -//! This module defines the Cyclefold `RecursiveSNARK` type with its `new`, -//! `prove_step`, and `verify` methods. 
- -use bellpepper_core::{ConstraintSystem, SynthesisError}; -use ff::PrimeFieldBits; -use once_cell::sync::OnceCell; -use serde::{Deserialize, Serialize}; - -use super::{ - nifs::{CycleFoldNIFS, PrimaryNIFS}, - nova_circuit::{AugmentedCircuit, AugmentedCircuitInputs, AugmentedCircuitParams}, - util::{absorb_primary_relaxed_r1cs, FoldingData}, -}; -use crate::{ - bellpepper::{ - r1cs::{NovaShape, NovaWitness}, - shape_cs::ShapeCS, - solver::SatisfyingAssignment, - }, - constants::{ - BN_LIMB_WIDTH, BN_N_LIMBS, NIO_CYCLE_FOLD, NUM_CHALLENGE_BITS, NUM_FE_IN_EMULATED_POINT, - NUM_HASH_BITS, - }, - cyclefold::circuit::CycleFoldCircuit, - errors::NovaError, - gadgets::scalar_as_base, - r1cs::{ - self, CommitmentKeyHint, R1CSInstance, R1CSResult, R1CSWitness, RelaxedR1CSInstance, - RelaxedR1CSWitness, - }, - supernova::StepCircuit, - traits::{ - commitment::CommitmentTrait, AbsorbInROTrait, CurveCycleEquipped, Dual, Engine, - ROConstantsCircuit, ROTrait, - }, - Commitment, CommitmentKey, DigestComputer, R1CSWithArity, ROConstants, ResourceBuffer, - SimpleDigestible, -}; - -/// The public parameters used in the CycleFold recursive SNARK proof and -/// verification -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct PublicParams -where - E1: CurveCycleEquipped, -{ - F_arity_primary: usize, - ro_consts_primary: ROConstants>, - ro_consts_circuit_primary: ROConstantsCircuit>, - ck_primary: CommitmentKey, - circuit_shape_primary: R1CSWithArity, - augmented_circuit_params: AugmentedCircuitParams, - - ro_consts_cyclefold: ROConstants>, - ck_cyclefold: CommitmentKey>, - circuit_shape_cyclefold: R1CSWithArity>, - #[serde(skip, default = "OnceCell::new")] - digest: OnceCell, -} - -impl PublicParams -where - E1: CurveCycleEquipped, -{ - /// Builds the public parameters for the circuit `C1`. 
- /// The same note for public parameter hints apply as in the case for Nova's - /// public parameters: For some final compressing SNARKs the size of the - /// commitment key must be larger, so we include `ck_hint_primary` and - /// `ck_hint_cyclefold` parameters to accommodate this. - pub fn setup>( - c_primary: &C1, - ck_hint_primary: &CommitmentKeyHint, - ck_hint_cyclefold: &CommitmentKeyHint>, - ) -> Self { - let F_arity_primary = c_primary.arity(); - let ro_consts_primary = ROConstants::>::default(); - let ro_consts_circuit_primary = ROConstantsCircuit::>::default(); - - let augmented_circuit_params = AugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS); - let circuit_primary: AugmentedCircuit<'_, Dual, E1, C1> = AugmentedCircuit::new( - &augmented_circuit_params, - ro_consts_circuit_primary.clone(), - None, - c_primary, - ); - let mut cs: ShapeCS = ShapeCS::new(); - let _ = circuit_primary.synthesize(&mut cs); - let (r1cs_shape_primary, ck_primary) = cs.r1cs_shape_and_key(ck_hint_primary); - let circuit_shape_primary = R1CSWithArity::new(r1cs_shape_primary, F_arity_primary); - - let ro_consts_cyclefold = ROConstants::>::default(); - let mut cs: ShapeCS> = ShapeCS::new(); - let circuit_cyclefold: CycleFoldCircuit = CycleFoldCircuit::default(); - let _ = circuit_cyclefold.synthesize(&mut cs); - let (r1cs_shape_cyclefold, ck_cyclefold) = cs.r1cs_shape_and_key(ck_hint_cyclefold); - let circuit_shape_cyclefold = R1CSWithArity::new(r1cs_shape_cyclefold, 0); - - Self { - F_arity_primary, - ro_consts_primary, - ro_consts_circuit_primary, - ck_primary, - circuit_shape_primary, - augmented_circuit_params, - ro_consts_cyclefold, - ck_cyclefold, - circuit_shape_cyclefold, - digest: OnceCell::new(), - } - } - - /// Calculate the digest of the public parameters. 
- pub fn digest(&self) -> E1::Scalar { - self.digest - .get_or_try_init(|| DigestComputer::new(self).digest()) - .cloned() - .expect("Failure in retrieving digest") - } - - /// Returns the number of constraints in the primary and cyclefold circuits - pub const fn num_constraints(&self) -> (usize, usize) { - ( - self.circuit_shape_primary.r1cs_shape.num_cons, - self.circuit_shape_cyclefold.r1cs_shape.num_cons, - ) - } - - /// Returns the number of variables in the primary and cyclefold circuits - pub const fn num_variables(&self) -> (usize, usize) { - ( - self.circuit_shape_primary.r1cs_shape.num_vars, - self.circuit_shape_cyclefold.r1cs_shape.num_vars, - ) - } -} - -impl SimpleDigestible for PublicParams where E1: CurveCycleEquipped {} - -/// A SNARK that proves the correct execution of an incremental computation in -/// the CycleFold folding scheme. -#[derive(Clone, Debug, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct RecursiveSNARK -where - E1: CurveCycleEquipped, -{ - // Input - z0_primary: Vec, - - // primary circuit data - r_W_primary: RelaxedR1CSWitness, - r_U_primary: RelaxedR1CSInstance, - l_w_primary: R1CSWitness, - l_u_primary: R1CSInstance, - - // cyclefold circuit data - r_W_cyclefold: RelaxedR1CSWitness>, - r_U_cyclefold: RelaxedR1CSInstance>, - - // memory buffers for folding steps - buffer_primary: ResourceBuffer, - buffer_cyclefold: ResourceBuffer>, - - i: usize, - zi_primary: Vec, -} - -impl RecursiveSNARK -where - E1: CurveCycleEquipped, -{ - /// Create a new instance of a recursive SNARK - pub fn new>( - pp: &PublicParams, - c_primary: &C1, - z0_primary: &[E1::Scalar], - ) -> Result { - if z0_primary.len() != pp.F_arity_primary { - return Err(NovaError::InvalidInitialInputLength); - } - - let r1cs_primary = &pp.circuit_shape_primary.r1cs_shape; - let r1cs_cyclefold = &pp.circuit_shape_cyclefold.r1cs_shape; - - let r_U_cyclefold = RelaxedR1CSInstance::default(&pp.ck_cyclefold, r1cs_cyclefold); - let r_W_cyclefold = 
RelaxedR1CSWitness::default(r1cs_cyclefold); - - let mut cs_primary = SatisfyingAssignment::::new(); - let inputs_primary: AugmentedCircuitInputs, E1> = AugmentedCircuitInputs::new( - scalar_as_base::(pp.digest()), - as Engine>::Base::from(0u64), - z0_primary.to_vec(), - None, - None, - None, - None, - None, - None, - ); - - let circuit_primary = AugmentedCircuit::new( - &pp.augmented_circuit_params, - pp.ro_consts_circuit_primary.clone(), - Some(inputs_primary), - c_primary, - ); - - let zi_primary = circuit_primary.synthesize(&mut cs_primary)?; - let (l_u_primary, l_w_primary) = - cs_primary.r1cs_instance_and_witness(r1cs_primary, &pp.ck_primary)?; - - let r_U_primary = RelaxedR1CSInstance::default(&pp.ck_primary, r1cs_primary); - let r_W_primary = RelaxedR1CSWitness::default(r1cs_primary); - - let zi_primary = zi_primary - .iter() - .map(|v| v.get_value().ok_or(SynthesisError::AssignmentMissing)) - .collect::, _>>()?; - - let buffer_primary = ResourceBuffer { - l_w: None, - l_u: None, - ABC_Z_1: R1CSResult::default(r1cs_primary.num_cons), - ABC_Z_2: R1CSResult::default(r1cs_primary.num_cons), - T: r1cs::default_T::(r1cs_primary.num_cons), - }; - - let buffer_cyclefold = ResourceBuffer { - l_w: None, - l_u: None, - ABC_Z_1: R1CSResult::default(r1cs_cyclefold.num_cons), - ABC_Z_2: R1CSResult::default(r1cs_cyclefold.num_cons), - T: r1cs::default_T::>(r1cs_cyclefold.num_cons), - }; - - Ok(Self { - z0_primary: z0_primary.to_vec(), - r_W_primary, - r_U_primary, - l_w_primary, - l_u_primary, - r_W_cyclefold, - r_U_cyclefold, - buffer_primary, - buffer_cyclefold, - i: 0, - zi_primary, - }) - } - - /// Update the `RecursiveSNARK` by proving a step of the incremental - /// computation. 
- pub fn prove_step>( - &mut self, - pp: &PublicParams, - c_primary: &C1, - ) -> Result<(), NovaError> { - if self.i == 0 { - self.i = 1; - return Ok(()); - } - - let (nifs_primary, (r_U_primary, r_W_primary), r) = PrimaryNIFS::>::prove( - &pp.ck_primary, - &pp.ro_consts_primary, - &pp.digest(), - &pp.circuit_shape_primary.r1cs_shape, - &self.r_U_primary, - &self.r_W_primary, - &self.l_u_primary, - &self.l_w_primary, - )?; - - let r_bools = r - .to_le_bits() - .iter() - .map(|b| Some(*b)) - .take(NUM_CHALLENGE_BITS) - .collect::>>() - .map(|v| v.try_into().unwrap()); - - let comm_T = Commitment::::decompress(&nifs_primary.comm_T)?; - let E_new = self.r_U_primary.comm_E + comm_T * r; - - let W_new = self.r_U_primary.comm_W + self.l_u_primary.comm_W * r; - - let mut cs_cyclefold_E = SatisfyingAssignment::>::with_capacity( - pp.circuit_shape_cyclefold.r1cs_shape.num_io + 1, - pp.circuit_shape_cyclefold.r1cs_shape.num_vars, - ); - - let circuit_cyclefold_E: CycleFoldCircuit = - CycleFoldCircuit::new(Some(self.r_U_primary.comm_E), Some(comm_T), r_bools); - - let _ = circuit_cyclefold_E.synthesize(&mut cs_cyclefold_E); - - let (l_u_cyclefold_E, l_w_cyclefold_E) = cs_cyclefold_E - .r1cs_instance_and_witness(&pp.circuit_shape_cyclefold.r1cs_shape, &pp.ck_cyclefold) - .map_err(|_| NovaError::UnSat)?; - - // TODO: check if this is better or worse than `prove_mut` with a clone of - // `self.r_U_cyclefold` - let (nifs_cyclefold_E, (r_U_cyclefold_E, r_W_cyclefold_E)) = CycleFoldNIFS::prove( - &pp.ck_cyclefold, - &pp.ro_consts_cyclefold, - &scalar_as_base::(pp.digest()), - &pp.circuit_shape_cyclefold.r1cs_shape, - &self.r_U_cyclefold, - &self.r_W_cyclefold, - &l_u_cyclefold_E, - &l_w_cyclefold_E, - )?; - - let comm_T_E = Commitment::>::decompress(&nifs_cyclefold_E.comm_T)?; - - let mut cs_cyclefold_W = SatisfyingAssignment::>::with_capacity( - pp.circuit_shape_cyclefold.r1cs_shape.num_io + 1, - pp.circuit_shape_cyclefold.r1cs_shape.num_vars, - ); - - let circuit_cyclefold_W: 
CycleFoldCircuit = CycleFoldCircuit::new( - Some(self.r_U_primary.comm_W), - Some(self.l_u_primary.comm_W), - r_bools, - ); - - let _ = circuit_cyclefold_W.synthesize(&mut cs_cyclefold_W); - - let (l_u_cyclefold_W, l_w_cyclefold_W) = cs_cyclefold_W - .r1cs_instance_and_witness(&pp.circuit_shape_cyclefold.r1cs_shape, &pp.ck_cyclefold) - .map_err(|_| NovaError::UnSat)?; - - // TODO: check if this is better or worse than `prove_mut` with a clone of - // r_U_cyclefold_E - let (nifs_cyclefold_W, (r_U_cyclefold_W, r_W_cyclefold_W)) = CycleFoldNIFS::prove( - &pp.ck_cyclefold, - &pp.ro_consts_cyclefold, - &scalar_as_base::(pp.digest()), - &pp.circuit_shape_cyclefold.r1cs_shape, - &r_U_cyclefold_E, - &r_W_cyclefold_E, - &l_u_cyclefold_W, - &l_w_cyclefold_W, - )?; - - let comm_T_W = Commitment::>::decompress(&nifs_cyclefold_W.comm_T)?; - - let mut cs_primary = SatisfyingAssignment::::with_capacity( - pp.circuit_shape_primary.r1cs_shape.num_io + 1, - pp.circuit_shape_primary.r1cs_shape.num_vars, - ); - - let data_p = FoldingData::new(self.r_U_primary.clone(), self.l_u_primary.clone(), comm_T); - let data_c_E = FoldingData::new(self.r_U_cyclefold.clone(), l_u_cyclefold_E, comm_T_E); - let data_c_W = FoldingData::new(r_U_cyclefold_E, l_u_cyclefold_W, comm_T_W); - - let inputs_primary: AugmentedCircuitInputs, E1> = AugmentedCircuitInputs::new( - scalar_as_base::(pp.digest()), - as Engine>::Base::from(self.i as u64), - self.z0_primary.clone(), - Some(self.zi_primary.clone()), - Some(data_p), - Some(data_c_E), - Some(data_c_W), - Some(E_new), - Some(W_new), - ); - - let circuit_primary: AugmentedCircuit<'_, Dual, E1, C1> = AugmentedCircuit::new( - &pp.augmented_circuit_params, - pp.ro_consts_circuit_primary.clone(), - Some(inputs_primary), - c_primary, - ); - - let zi_primary = circuit_primary.synthesize(&mut cs_primary)?; - - let (l_u_primary, l_w_primary) = cs_primary - .r1cs_instance_and_witness(&pp.circuit_shape_primary.r1cs_shape, &pp.ck_primary) - .map_err(|_| 
NovaError::UnSat)?; - - self.zi_primary = zi_primary - .iter() - .map(|v| v.get_value().ok_or(SynthesisError::AssignmentMissing)) - .collect::, _>>()?; - - self.r_U_primary = r_U_primary; - self.r_W_primary = r_W_primary; - self.l_u_primary = l_u_primary; - self.l_w_primary = l_w_primary; - self.r_U_cyclefold = r_U_cyclefold_W; - self.r_W_cyclefold = r_W_cyclefold_W; - - self.i += 1; - - Ok(()) - } - - /// Verify the correctness of the `RecursiveSNARK` - pub fn verify( - &self, - pp: &PublicParams, - num_steps: usize, - z0_primary: &[E1::Scalar], - ) -> Result, NovaError> { - // number of steps cannot be zero - let is_num_steps_zero = num_steps == 0; - - // check if the provided proof has executed num_steps - let is_num_steps_not_match = self.i != num_steps; - - // check if the initial inputs match - let is_inputs_not_match = self.z0_primary != z0_primary; - - // check if the (relaxed) R1CS instances have two public outputs - let is_instance_has_two_outputs = self.r_U_primary.X.len() != 2; - - if is_num_steps_zero - || is_num_steps_not_match - || is_inputs_not_match - || is_instance_has_two_outputs - { - return Err(NovaError::ProofVerifyError); - } - - // Calculate the hashes of the primary running instance and cyclefold running - // instance - let (hash_primary, hash_cyclefold) = { - let mut hasher = as Engine>::RO::new( - pp.ro_consts_primary.clone(), - 2 + 2 * pp.F_arity_primary + 2 * NUM_FE_IN_EMULATED_POINT + 3, - ); - hasher.absorb(pp.digest()); - hasher.absorb(E1::Scalar::from(num_steps as u64)); - for e in z0_primary { - hasher.absorb(*e); - } - for e in &self.zi_primary { - hasher.absorb(*e); - } - absorb_primary_relaxed_r1cs::>(&self.r_U_primary, &mut hasher); - let hash_primary = hasher.squeeze(NUM_HASH_BITS); - - let mut hasher = as Engine>::RO::new( - pp.ro_consts_cyclefold.clone(), - 1 + 1 + 3 + 3 + 1 + NIO_CYCLE_FOLD * BN_N_LIMBS, - ); - hasher.absorb(pp.digest()); - hasher.absorb(E1::Scalar::from(num_steps as u64)); - 
self.r_U_cyclefold.absorb_in_ro(&mut hasher); - let hash_cyclefold = hasher.squeeze(NUM_HASH_BITS); - - (hash_primary, hash_cyclefold) - }; - - // Verify the hashes equal the public IO for the final primary instance - if scalar_as_base::>(hash_primary) != self.l_u_primary.X[0] - || scalar_as_base::>(hash_cyclefold) != self.l_u_primary.X[1] - { - return Err(NovaError::ProofVerifyError); - } - - // Verify the satisfiability of running relaxed instances, and the final primary - // instance. - let (res_r_primary, (res_l_primary, res_r_cyclefold)) = rayon::join( - || { - pp.circuit_shape_primary.r1cs_shape.is_sat_relaxed( - &pp.ck_primary, - &self.r_U_primary, - &self.r_W_primary, - ) - }, - || { - rayon::join( - || { - pp.circuit_shape_primary.r1cs_shape.is_sat( - &pp.ck_primary, - &self.l_u_primary, - &self.l_w_primary, - ) - }, - || { - pp.circuit_shape_cyclefold.r1cs_shape.is_sat_relaxed( - &pp.ck_cyclefold, - &self.r_U_cyclefold, - &self.r_W_cyclefold, - ) - }, - ) - }, - ); - - res_r_primary?; - res_l_primary?; - res_r_cyclefold?; - - Ok(self.zi_primary.clone()) - } -} - -// #[cfg(test)] -// mod test { -// use std::marker::PhantomData; - -// use bellpepper_core::num::AllocatedNum; - -// use super::*; -// use crate::{ -// provider::{Bn256EngineKZG, PallasEngine, Secp256k1Engine}, -// traits::snark::default_ck_hint, -// }; - -// #[derive(Clone)] -// struct SquareCircuit { -// _p: PhantomData, -// } - -// impl StepCircuit for SquareCircuit { -// fn arity(&self) -> usize { -// 1 -// } - -// fn synthesize>( -// &self, -// cs: &mut CS, -// z: &[AllocatedNum], -// ) -> Result>, SynthesisError> { -// let x = &z[0]; -// let x_sq = x.square(cs.namespace(|| "x_sq"))?; - -// Ok(vec![x_sq]) -// } -// } - -// fn test_trivial_cyclefold_prove_verify_with() { -// let primary_circuit = SquareCircuit:: { _p: PhantomData }; - -// let pp = -// PublicParams::::setup(&primary_circuit, &*default_ck_hint(), -// &*default_ck_hint()); - -// let z0 = vec![E::Scalar::from(2u64)]; - -// let 
mut recursive_snark = RecursiveSNARK::new(&pp, &primary_circuit, -// &z0).unwrap(); - -// (1..5).for_each(|iter| { -// let res_proof = recursive_snark.prove_step(&pp, -// &primary_circuit); res_proof.unwrap(); - -// let res_verify = recursive_snark.verify(&pp, iter, &z0); -// res_verify.unwrap(); -// }); -// } - -// #[test] -// fn test_cyclefold_prove_verify() { -// test_trivial_cyclefold_prove_verify_with::(); -// test_trivial_cyclefold_prove_verify_with::(); -// test_trivial_cyclefold_prove_verify_with::(); -// } -// } diff --git a/src/cyclefold/util.rs b/src/cyclefold/util.rs deleted file mode 100644 index d5f9411..0000000 --- a/src/cyclefold/util.rs +++ /dev/null @@ -1,90 +0,0 @@ -//! This module defines some useful utilities for RO absorbing, and the Folding -//! data used in the CycleFold folding scheme. - -use ff::Field; -use serde::{Deserialize, Serialize}; - -use crate::{ - constants::{BN_LIMB_WIDTH, BN_N_LIMBS}, - gadgets::{f_to_nat, nat_to_limbs, scalar_as_base}, - r1cs::{R1CSInstance, RelaxedR1CSInstance}, - traits::{commitment::CommitmentTrait, AbsorbInROTrait, Engine, ROTrait}, - Commitment, -}; - -/// Absorb a commitment over engine `E1` into an RO over engine `E2` by -/// absorbing the limbs -pub(super) fn absorb_primary_commitment( - comm: &impl CommitmentTrait, - ro: &mut impl ROTrait, -) where - E1: Engine::Scalar>, - E2: Engine::Scalar>, -{ - let (x, y, is_infinity) = comm.to_coordinates(); - - let x_limbs = nat_to_limbs(&f_to_nat(&x), BN_LIMB_WIDTH, BN_N_LIMBS).unwrap(); - let y_limbs = nat_to_limbs(&f_to_nat(&y), BN_LIMB_WIDTH, BN_N_LIMBS).unwrap(); - - for limb in x_limbs { - ro.absorb(scalar_as_base::(limb)); - } - for limb in y_limbs { - ro.absorb(scalar_as_base::(limb)); - } - if is_infinity { - ro.absorb(::Scalar::ONE); - } else { - ro.absorb(::Scalar::ZERO); - } -} - -pub(super) fn absorb_primary_r1cs( - u: &R1CSInstance, - ro: &mut impl ROTrait, -) where - E1: Engine::Scalar>, - E2: Engine::Scalar>, -{ - 
absorb_primary_commitment::(&u.comm_W, ro); - for x in &u.X { - ro.absorb(*x); - } -} - -pub(super) fn absorb_cyclefold_r1cs(u: &R1CSInstance, ro: &mut E::RO) { - u.comm_W.absorb_in_ro(ro); - u.X.iter().for_each(|x| { - let limbs: Vec = nat_to_limbs(&f_to_nat(x), BN_LIMB_WIDTH, BN_N_LIMBS).unwrap(); - limbs - .into_iter() - .for_each(|limb| ro.absorb(scalar_as_base::(limb))); - }); -} - -pub(super) fn absorb_primary_relaxed_r1cs(U: &RelaxedR1CSInstance, ro: &mut E2::RO) -where - E1: Engine::Scalar>, - E2: Engine::Scalar>, -{ - absorb_primary_commitment::(&U.comm_W, ro); - absorb_primary_commitment::(&U.comm_E, ro); - ro.absorb(U.u); - for e in &U.X { - ro.absorb(*e); - } -} - -#[derive(Debug, Serialize, Deserialize)] -#[serde(bound = "")] -pub(super) struct FoldingData { - pub U: RelaxedR1CSInstance, - pub u: R1CSInstance, - pub T: Commitment, -} - -impl FoldingData { - pub fn new(U: RelaxedR1CSInstance, u: R1CSInstance, T: Commitment) -> Self { - Self { U, u, T } - } -} diff --git a/src/errors.rs b/src/errors.rs deleted file mode 100644 index ddd517d..0000000 --- a/src/errors.rs +++ /dev/null @@ -1,101 +0,0 @@ -//! This module defines errors returned by the library. 
-use core::fmt::Debug; - -use thiserror::Error; - -/// Errors returned by Nova -#[derive(Debug, Eq, PartialEq, Error)] -#[non_exhaustive] -pub enum NovaError { - /// returned if the supplied row or col in (row,col,val) tuple is out of - /// range - #[error("InvalidIndex")] - InvalidIndex, - /// returned if the step circuit calls inputize or alloc_io in its - /// synthesize method instead of passing output with the return value - #[error("InvalidStepCircuitIO")] - InvalidStepCircuitIO, - /// returned if the supplied input is not of the right length - #[error("InvalidInputLength")] - InvalidInputLength, - /// returned if the supplied witness is not of the right length - #[error("InvalidWitnessLength")] - InvalidWitnessLength, - /// returned if the supplied witness is not a satisfying witness to a given - /// shape and instance - #[error("UnSat")] - UnSat, - /// returned if the supplied witness is not a satisfying witness to a given - /// shape and instance, with error constraint index - #[error("UnSatIndex")] - UnSatIndex(usize), - /// returned when the supplied compressed commitment cannot be decompressed - #[error("DecompressionError")] - DecompressionError, - /// returned if proof verification fails - #[error("ProofVerifyError")] - ProofVerifyError, - /// returned if the provided commitment key is not of sufficient length - #[error("InvalidCommitmentKeyLength")] - InvalidCommitmentKeyLength, - /// returned if the provided number of steps is zero - #[error("InvalidNumSteps")] - InvalidNumSteps, - /// returned if there is an error in the proof/verification of a PCS - #[error("PCSError")] - PCSError(#[from] PCSError), - /// returned when an invalid sum-check proof is provided - #[error("InvalidSumcheckProof")] - InvalidSumcheckProof, - /// returned when the initial input to an incremental computation differs - /// from a previously declared arity - #[error("InvalidInitialInputLength")] - InvalidInitialInputLength, - /// returned when the step execution produces an 
output whose length differs - /// from a previously declared arity - #[error("InvalidStepOutputLength")] - InvalidStepOutputLength, - /// returned when the transcript engine encounters an overflow of the round - /// number - #[error("InternalTranscriptError")] - InternalTranscriptError, - /// returned when the multiset check fails - #[error("InvalidMultisetProof")] - InvalidMultisetProof, - /// returned when the product proof check fails - #[error("InvalidProductProof")] - InvalidProductProof, - /// returned when the consistency with public IO and assignment used fails - #[error("IncorrectWitness")] - IncorrectWitness, - /// return when error during synthesis - #[error("SynthesisError: {0}")] - SynthesisError(String), - /// returned when there is an error creating a digest - #[error("DigestError")] - DigestError, - /// returned when the prover cannot prove the provided statement due to - /// completeness error - #[error("InternalError")] - InternalError, -} - -/// Errors specific to the Polynomial commitment scheme -#[derive(Debug, Eq, PartialEq, Error)] -pub enum PCSError { - /// returned when an invalid PCS evaluation argument is provided - #[error("InvalidPCS")] - InvalidPCS, - /// returned when there is a Zeromorph error - #[error("ZMError")] - ZMError, - /// returned when a length check fails in a PCS - #[error("LengthError")] - LengthError, -} - -impl From for NovaError { - fn from(err: bellpepper_core::SynthesisError) -> Self { - Self::SynthesisError(err.to_string()) - } -} diff --git a/src/fast_serde.rs b/src/fast_serde.rs deleted file mode 100644 index 16bebd7..0000000 --- a/src/fast_serde.rs +++ /dev/null @@ -1,100 +0,0 @@ -//! This module implements fast serde for reading and writing -//! key objects requires for proof generation and verification. -//! With WASM in particular, serializing via standard binary serializers -//! like bincode causes a dramatic decrease in performance. This simple -//! serializers parses in bytes very efficiently. -//! -//! 
In the future, it can be extended to do direct memory access to the -//! javascript runtime. For now it does a single copy of the data into -//! the rust runtime. - -use std::io::{Cursor, Read}; - -use thiserror::Error; - -pub static MAGIC_NUMBER: [u8; 4] = [0x50, 0x4C, 0x55, 0x54]; -pub enum SerdeByteTypes { - AuxParams = 0x01, - UniversalKZGParam = 0x02, - CommitmentKey = 0x03, - ProverParams = 0x04 -} - -#[derive(Debug, Error)] -pub enum SerdeByteError { - #[error("{}", "invalid magic number")] - InvalidMagicNumber, - #[error("{}", "invalid serde type")] - InvalidSerdeType, - #[error("{}", "invalid section count")] - InvalidSectionCount, - #[error("{}", "invalid section type")] - InvalidSectionType, - #[error("{}", "invalid section size")] - InvalidSectionSize, - #[error(transparent)] - IoError(#[from] std::io::Error), - #[error(transparent)] - BincodeError(#[from] Box), - #[error("{}", "g1 decode error")] - G1DecodeError, - #[error("{}", "g2 decode error")] - G2DecodeError, -} - -/// A trait for fast conversions to bytes -pub trait FastSerde: Sized { - fn to_bytes(&self) -> Vec; - fn from_bytes(bytes: &Vec) -> Result; - - fn validate_header( - cursor: &mut Cursor<&Vec>, - expected_type: SerdeByteTypes, - expected_sections: u8, - ) -> Result<(), SerdeByteError> { - let mut magic = [0u8; 4]; - cursor.read_exact(&mut magic)?; - if magic != MAGIC_NUMBER { - return Err(SerdeByteError::InvalidMagicNumber); - } - - let mut serde_type = [0u8; 1]; - cursor.read_exact(&mut serde_type)?; - if serde_type[0] != expected_type as u8 { - return Err(SerdeByteError::InvalidSerdeType); - } - - let mut num_sections = [0u8; 1]; - cursor.read_exact(&mut num_sections)?; - if num_sections[0] != expected_sections { - return Err(SerdeByteError::InvalidSectionCount); - } - - Ok(()) - } - - fn read_section_bytes( - cursor: &mut Cursor<&Vec>, - expected_type: u8, - ) -> Result, SerdeByteError> { - let mut section_type = [0u8; 1]; - cursor.read_exact(&mut section_type)?; - if 
section_type[0] != expected_type { - return Err(SerdeByteError::InvalidSectionType); - } - - let mut section_size = [0u8; 4]; - cursor.read_exact(&mut section_size)?; - let size = u32::from_le_bytes(section_size) as usize; - let mut section_data = vec![0u8; size]; - cursor.read_exact(&mut section_data)?; - - Ok(section_data) - } - - fn write_section_bytes(out: &mut Vec, section_type: u8, data: &Vec) { - out.push(section_type); - out.extend_from_slice(&(data.len() as u32).to_le_bytes()); - out.extend_from_slice(data); - } -} diff --git a/src/gadgets/ecc.rs b/src/gadgets/ecc.rs deleted file mode 100644 index d189f3f..0000000 --- a/src/gadgets/ecc.rs +++ /dev/null @@ -1,1161 +0,0 @@ -//! This module implements various elliptic curve gadgets -#![allow(non_snake_case)] -use bellpepper::gadgets::{boolean_utils::conditionally_select, Assignment}; -use bellpepper_core::{ - boolean::{AllocatedBit, Boolean}, - num::AllocatedNum, - ConstraintSystem, SynthesisError, -}; -use ff::{Field, PrimeField}; - -use crate::{ - gadgets::utils::{ - alloc_num_equals, alloc_one, alloc_zero, conditionally_select2, select_num_or_one, - select_num_or_zero, select_num_or_zero2, select_one_or_diff2, select_one_or_num2, - select_zero_or_num2, - }, - traits::Group, -}; - -/// `AllocatedPoint` provides an elliptic curve abstraction inside a circuit. -#[derive(Debug, Clone)] -pub struct AllocatedPoint { - pub(crate) x: AllocatedNum, - pub(crate) y: AllocatedNum, - pub(crate) is_infinity: AllocatedNum, -} - -impl AllocatedPoint { - /// Allocates a new point on the curve using coordinates provided by - /// `coords`. 
If coords = None, it allocates the default infinity point - pub fn alloc>( - mut cs: CS, - coords: Option<(G::Base, G::Base, bool)>, - ) -> Result { - let x = AllocatedNum::alloc(cs.namespace(|| "x"), || { - Ok(coords.map_or(G::Base::ZERO, |c| c.0)) - })?; - let y = AllocatedNum::alloc(cs.namespace(|| "y"), || { - Ok(coords.map_or(G::Base::ZERO, |c| c.1)) - })?; - let is_infinity = AllocatedNum::alloc(cs.namespace(|| "is_infinity"), || { - Ok(if coords.map_or(true, |c| c.2) { - G::Base::ONE - } else { - G::Base::ZERO - }) - })?; - cs.enforce( - || "is_infinity is bit", - |lc| lc + is_infinity.get_variable(), - |lc| lc + CS::one() - is_infinity.get_variable(), - |lc| lc, - ); - - Ok(Self { x, y, is_infinity }) - } - - /// checks if `self` is on the curve or if it is infinity - pub fn check_on_curve(&self, mut cs: CS) -> Result<(), SynthesisError> - where - CS: ConstraintSystem, - { - // check that (x,y) is on the curve if it is not infinity - // we will check that (1- is_infinity) * y^2 = (1-is_infinity) * (x^3 + Ax + B) - // note that is_infinity is already restricted to be in the set {0, 1} - let y_square = self.y.square(cs.namespace(|| "y_square"))?; - let x_square = self.x.square(cs.namespace(|| "x_square"))?; - let x_cube = self.x.mul(cs.namespace(|| "x_cube"), &x_square)?; - - let rhs = AllocatedNum::alloc(cs.namespace(|| "rhs"), || { - if *self.is_infinity.get_value().get()? == G::Base::ONE { - Ok(G::Base::ZERO) - } else { - Ok(*x_cube.get_value().get()? - + *self.x.get_value().get()? 
* G::group_params().0 - + G::group_params().1) - } - })?; - - cs.enforce( - || "rhs = (1-is_infinity) * (x^3 + Ax + B)", - |lc| { - lc + x_cube.get_variable() - + (G::group_params().0, self.x.get_variable()) - + (G::group_params().1, CS::one()) - }, - |lc| lc + CS::one() - self.is_infinity.get_variable(), - |lc| lc + rhs.get_variable(), - ); - - // check that (1-infinity) * y_square = rhs - cs.enforce( - || "check that y_square * (1 - is_infinity) = rhs", - |lc| lc + y_square.get_variable(), - |lc| lc + CS::one() - self.is_infinity.get_variable(), - |lc| lc + rhs.get_variable(), - ); - - Ok(()) - } - - /// Allocates a default point on the curve, set to the identity point. - pub fn default>(mut cs: CS) -> Self { - let zero = alloc_zero(cs.namespace(|| "zero")); - let one = alloc_one(cs.namespace(|| "one")); - - Self { - x: zero.clone(), - y: zero, - is_infinity: one, - } - } - - /// Returns coordinates associated with the point. - #[allow(unused)] - pub const fn get_coordinates( - &self, - ) -> ( - &AllocatedNum, - &AllocatedNum, - &AllocatedNum, - ) { - (&self.x, &self.y, &self.is_infinity) - } - - /// Negates the provided point - pub fn negate>( - &self, - mut cs: CS, - ) -> Result { - let y = AllocatedNum::alloc(cs.namespace(|| "y"), || Ok(-*self.y.get_value().get()?))?; - - cs.enforce( - || "check y = - self.y", - |lc| lc + self.y.get_variable(), - |lc| lc + CS::one(), - |lc| lc - y.get_variable(), - ); - - Ok(Self { - x: self.x.clone(), - y, - is_infinity: self.is_infinity.clone(), - }) - } - - /// Add two points (may be equal) - pub fn add>( - &self, - mut cs: CS, - other: &Self, - ) -> Result { - // Compute boolean equal indicating if self = other - - let equal_x = alloc_num_equals( - cs.namespace(|| "check self.x == other.x"), - &self.x, - &other.x, - )?; - - let equal_y = alloc_num_equals( - cs.namespace(|| "check self.y == other.y"), - &self.y, - &other.y, - )?; - - // Compute the result of the addition and the result of double self - let result_from_add = 
- self.add_internal(cs.namespace(|| "add internal"), other, &equal_x)?; - let result_from_double = self.double(cs.namespace(|| "double"))?; - - // Output: - // If (self == other) { - // return double(self) - // }else { - // if (self.x == other.x){ - // return infinity [negation] - // } else { - // return add(self, other) - // } - // } - let result_for_equal_x = Self::select_point_or_infinity( - cs.namespace(|| "equal_y ? result_from_double : infinity"), - &result_from_double, - &Boolean::from(equal_y), - )?; - - Self::conditionally_select( - cs.namespace(|| "equal ? result_from_double : result_from_add"), - &result_for_equal_x, - &result_from_add, - &Boolean::from(equal_x), - ) - } - - /// Adds other point to this point and returns the result. Assumes that the - /// two points are different and that both `other.is_infinity` and - /// `this.is_infinity` are bits - pub fn add_internal>( - &self, - mut cs: CS, - other: &Self, - equal_x: &AllocatedBit, - ) -> Result { - //************************************************************************/ - // lambda = (other.y - self.y) * (other.x - self.x).invert().unwrap(); - //************************************************************************/ - // First compute (other.x - self.x).inverse() - // If either self or other are the infinity point or self.x = other.x then - // compute bogus values Specifically, - // x_diff = self != inf && other != inf && self.x == other.x ? (other.x - - // self.x) : 1 - - // Compute self.is_infinity OR other.is_infinity = - // NOT(NOT(self.is_ifninity) AND NOT(other.is_infinity)) - let at_least_one_inf = AllocatedNum::alloc(cs.namespace(|| "at least one inf"), || { - Ok(G::Base::ONE - - (G::Base::ONE - *self.is_infinity.get_value().get()?) 
- * (G::Base::ONE - *other.is_infinity.get_value().get()?)) - })?; - cs.enforce( - || "1 - at least one inf = (1-self.is_infinity) * (1-other.is_infinity)", - |lc| lc + CS::one() - self.is_infinity.get_variable(), - |lc| lc + CS::one() - other.is_infinity.get_variable(), - |lc| lc + CS::one() - at_least_one_inf.get_variable(), - ); - - // Now compute x_diff_is_actual = at_least_one_inf OR equal_x - let x_diff_is_actual = - AllocatedNum::alloc(cs.namespace(|| "allocate x_diff_is_actual"), || { - Ok(if *equal_x.get_value().get()? { - G::Base::ONE - } else { - *at_least_one_inf.get_value().get()? - }) - })?; - cs.enforce( - || "1 - x_diff_is_actual = (1-equal_x) * (1-at_least_one_inf)", - |lc| lc + CS::one() - at_least_one_inf.get_variable(), - |lc| lc + CS::one() - equal_x.get_variable(), - |lc| lc + CS::one() - x_diff_is_actual.get_variable(), - ); - - // x_diff = 1 if either self.is_infinity or other.is_infinity or self.x = - // other.x else self.x - other.x - let x_diff = select_one_or_diff2( - cs.namespace(|| "Compute x_diff"), - &other.x, - &self.x, - &x_diff_is_actual, - )?; - - let lambda = AllocatedNum::alloc(cs.namespace(|| "lambda"), || { - let x_diff_inv = if *x_diff_is_actual.get_value().get()? == G::Base::ONE { - // Set to default - G::Base::ONE - } else { - // Set to the actual inverse - (*other.x.get_value().get()? - *self.x.get_value().get()?) - .invert() - .unwrap() - }; - - Ok((*other.y.get_value().get()? - *self.y.get_value().get()?) * x_diff_inv) - })?; - cs.enforce( - || "Check that lambda is correct", - |lc| lc + lambda.get_variable(), - |lc| lc + x_diff.get_variable(), - |lc| lc + other.y.get_variable() - self.y.get_variable(), - ); - - //************************************************************************/ - // x = lambda * lambda - self.x - other.x; - //************************************************************************/ - let x = AllocatedNum::alloc(cs.namespace(|| "x"), || { - Ok(*lambda.get_value().get()? 
* lambda.get_value().get()? - - *self.x.get_value().get()? - - *other.x.get_value().get()?) - })?; - cs.enforce( - || "check that x is correct", - |lc| lc + lambda.get_variable(), - |lc| lc + lambda.get_variable(), - |lc| lc + x.get_variable() + self.x.get_variable() + other.x.get_variable(), - ); - - //************************************************************************/ - // y = lambda * (self.x - x) - self.y; - //************************************************************************/ - let y = AllocatedNum::alloc(cs.namespace(|| "y"), || { - Ok( - *lambda.get_value().get()? * (*self.x.get_value().get()? - *x.get_value().get()?) - - *self.y.get_value().get()?, - ) - })?; - - cs.enforce( - || "Check that y is correct", - |lc| lc + lambda.get_variable(), - |lc| lc + self.x.get_variable() - x.get_variable(), - |lc| lc + y.get_variable() + self.y.get_variable(), - ); - - //************************************************************************/ - // We only return the computed x, y if neither of the points is infinity and - // self.x != other.y if self.is_infinity return other.clone() - // elif other.is_infinity return self.clone() - // elif self.x == other.x return infinity - // Otherwise return the computed points. - //************************************************************************/ - // Now compute the output x - - let x1 = conditionally_select2( - cs.namespace(|| "x1 = other.is_infinity ? self.x : x"), - &self.x, - &x, - &other.is_infinity, - )?; - - let x = conditionally_select2( - cs.namespace(|| "x = self.is_infinity ? other.x : x1"), - &other.x, - &x1, - &self.is_infinity, - )?; - - let y1 = conditionally_select2( - cs.namespace(|| "y1 = other.is_infinity ? self.y : y"), - &self.y, - &y, - &other.is_infinity, - )?; - - let y = conditionally_select2( - cs.namespace(|| "y = self.is_infinity ? 
other.y : y1"), - &other.y, - &y1, - &self.is_infinity, - )?; - - let is_infinity1 = select_num_or_zero2( - cs.namespace(|| "is_infinity1 = other.is_infinity ? self.is_infinity : 0"), - &self.is_infinity, - &other.is_infinity, - )?; - - let is_infinity = conditionally_select2( - cs.namespace(|| "is_infinity = self.is_infinity ? other.is_infinity : is_infinity1"), - &other.is_infinity, - &is_infinity1, - &self.is_infinity, - )?; - - Ok(Self { x, y, is_infinity }) - } - - /// Doubles the supplied point. - pub fn double>( - &self, - mut cs: CS, - ) -> Result { - //*************************************************************/ - // lambda = (G::Base::from(3) * self.x * self.x + G::GG::A()) - // * (G::Base::from(2)) * self.y).invert().unwrap(); - // ********************************************************** - - // Compute tmp = (G::Base::ONE + G::Base::ONE)* self.y ? self != inf : 1 - let tmp_actual = AllocatedNum::alloc(cs.namespace(|| "tmp_actual"), || { - Ok(*self.y.get_value().get()? + *self.y.get_value().get()?) - })?; - cs.enforce( - || "check tmp_actual", - |lc| lc + CS::one() + CS::one(), - |lc| lc + self.y.get_variable(), - |lc| lc + tmp_actual.get_variable(), - ); - - let tmp = select_one_or_num2(cs.namespace(|| "tmp"), &tmp_actual, &self.is_infinity)?; - - // Now compute lambda as (G::Base::from(3) * self.x * self.x + G::GG::A()) * - // tmp_inv - - let prod_1 = AllocatedNum::alloc(cs.namespace(|| "alloc prod 1"), || { - Ok(G::Base::from(3) * self.x.get_value().get()? * self.x.get_value().get()?) - })?; - cs.enforce( - || "Check prod 1", - |lc| lc + (G::Base::from(3), self.x.get_variable()), - |lc| lc + self.x.get_variable(), - |lc| lc + prod_1.get_variable(), - ); - - let lambda = AllocatedNum::alloc(cs.namespace(|| "alloc lambda"), || { - let tmp_inv = if *self.is_infinity.get_value().get()? 
== G::Base::ONE { - // Return default value 1 - G::Base::ONE - } else { - // Return the actual inverse - (*tmp.get_value().get()?).invert().unwrap() - }; - - Ok(tmp_inv * (*prod_1.get_value().get()? + G::group_params().0)) - })?; - - cs.enforce( - || "Check lambda", - |lc| lc + tmp.get_variable(), - |lc| lc + lambda.get_variable(), - |lc| lc + prod_1.get_variable() + (G::group_params().0, CS::one()), - ); - - // ********************************************************** - // x = lambda * lambda - self.x - self.x; - // ********************************************************** - let x = AllocatedNum::alloc(cs.namespace(|| "x"), || { - Ok( - ((*lambda.get_value().get()?) * (*lambda.get_value().get()?)) - - *self.x.get_value().get()? - - self.x.get_value().get()?, - ) - })?; - cs.enforce( - || "Check x", - |lc| lc + lambda.get_variable(), - |lc| lc + lambda.get_variable(), - |lc| lc + x.get_variable() + self.x.get_variable() + self.x.get_variable(), - ); - - // ********************************************************** - // y = lambda * (self.x - x) - self.y; - // ********************************************************** - let y = - AllocatedNum::alloc(cs.namespace(|| "y"), || { - Ok((*lambda.get_value().get()?) - * (*self.x.get_value().get()? - x.get_value().get()?) - - self.y.get_value().get()?) 
- })?; - cs.enforce( - || "Check y", - |lc| lc + lambda.get_variable(), - |lc| lc + self.x.get_variable() - x.get_variable(), - |lc| lc + y.get_variable() + self.y.get_variable(), - ); - - // ********************************************************** - // Only return the computed x and y if the point is not infinity - // ********************************************************** - // x - let x = select_zero_or_num2(cs.namespace(|| "final x"), &x, &self.is_infinity)?; - - // y - let y = select_zero_or_num2(cs.namespace(|| "final y"), &y, &self.is_infinity)?; - - // is_infinity - let is_infinity = self.is_infinity.clone(); - - Ok(Self { x, y, is_infinity }) - } - - /// A gadget for scalar multiplication, optimized to use incomplete addition - /// law. The optimization here is analogous to , - /// except we use complete addition law over affine coordinates instead of - /// projective coordinates for the tail bits - pub fn scalar_mul>( - &self, - mut cs: CS, - scalar_bits: &[AllocatedBit], - ) -> Result { - let split_len = core::cmp::min(scalar_bits.len(), (G::Base::NUM_BITS - 2) as usize); - let (incomplete_bits, complete_bits) = scalar_bits.split_at(split_len); - - // we convert AllocatedPoint into AllocatedPointNonInfinity; we deal with the - // case where self.is_infinity = 1 below - let mut p = AllocatedPointNonInfinity::from_allocated_point(self); - - // we assume the first bit to be 1, so we must initialize acc to self and double - // it we remove this assumption below - let mut acc = p; - p = acc.double_incomplete(cs.namespace(|| "double"))?; - - // perform the double-and-add loop to compute the scalar mul using incomplete - // addition law - for (i, bit) in incomplete_bits.iter().enumerate().skip(1) { - let temp = acc.add_incomplete(cs.namespace(|| format!("add {i}")), &p)?; - acc = AllocatedPointNonInfinity::conditionally_select( - cs.namespace(|| format!("acc_iteration_{i}")), - &temp, - &acc, - &Boolean::from(bit.clone()), - )?; - - p = 
p.double_incomplete(cs.namespace(|| format!("double {i}")))?; - } - - // convert back to AllocatedPoint - let res = { - // we set acc.is_infinity = self.is_infinity - let acc = acc.to_allocated_point(&self.is_infinity); - - // we remove the initial slack if bits[0] is as not as assumed (i.e., it is not - // 1) - let acc_minus_initial = { - let neg = self.negate(cs.namespace(|| "negate"))?; - acc.add(cs.namespace(|| "res minus self"), &neg) - }?; - - Self::conditionally_select( - cs.namespace(|| "remove slack if necessary"), - &acc, - &acc_minus_initial, - &Boolean::from(scalar_bits[0].clone()), - )? - }; - - // when self.is_infinity = 1, return the default point, else return res - // we already set res.is_infinity to be self.is_infinity, so we do not need to - // set it here - let default = Self::default(cs.namespace(|| "default")); - let x = conditionally_select2( - cs.namespace(|| "check if self.is_infinity is zero (x)"), - &default.x, - &res.x, - &self.is_infinity, - )?; - - let y = conditionally_select2( - cs.namespace(|| "check if self.is_infinity is zero (y)"), - &default.y, - &res.y, - &self.is_infinity, - )?; - - // we now perform the remaining scalar mul using complete addition law - let mut acc = Self { - x, - y, - is_infinity: res.is_infinity, - }; - let mut p_complete = p.to_allocated_point(&self.is_infinity); - - for (i, bit) in complete_bits.iter().enumerate() { - let temp = acc.add(cs.namespace(|| format!("add_complete {i}")), &p_complete)?; - acc = Self::conditionally_select( - cs.namespace(|| format!("acc_complete_iteration_{i}")), - &temp, - &acc, - &Boolean::from(bit.clone()), - )?; - - p_complete = p_complete.double(cs.namespace(|| format!("double_complete {i}")))?; - } - - Ok(acc) - } - - /// If condition outputs a otherwise outputs b - pub fn conditionally_select>( - mut cs: CS, - a: &Self, - b: &Self, - condition: &Boolean, - ) -> Result { - let x = conditionally_select(cs.namespace(|| "select x"), &a.x, &b.x, condition)?; - - let y = 
conditionally_select(cs.namespace(|| "select y"), &a.y, &b.y, condition)?; - - let is_infinity = conditionally_select( - cs.namespace(|| "select is_infinity"), - &a.is_infinity, - &b.is_infinity, - condition, - )?; - - Ok(Self { x, y, is_infinity }) - } - - /// If condition outputs a otherwise infinity - pub fn select_point_or_infinity>( - mut cs: CS, - a: &Self, - condition: &Boolean, - ) -> Result { - let x = select_num_or_zero(cs.namespace(|| "select x"), &a.x, condition)?; - - let y = select_num_or_zero(cs.namespace(|| "select y"), &a.y, condition)?; - - let is_infinity = select_num_or_one( - cs.namespace(|| "select is_infinity"), - &a.is_infinity, - condition, - )?; - - Ok(Self { x, y, is_infinity }) - } -} - -#[derive(Clone, Debug)] -/// `AllocatedPoint` but one that is guaranteed to be not infinity -pub struct AllocatedPointNonInfinity { - x: AllocatedNum, - y: AllocatedNum, -} - -impl AllocatedPointNonInfinity { - /// Creates a new `AllocatedPointNonInfinity` from the specified coordinates - #[allow(unused)] - pub const fn new(x: AllocatedNum, y: AllocatedNum) -> Self { - Self { x, y } - } - - /// Allocates a new point on the curve using coordinates provided by - /// `coords`. 
- #[allow(unused)] - pub fn alloc>( - mut cs: CS, - coords: Option<(G::Base, G::Base)>, - ) -> Result { - let x = AllocatedNum::alloc(cs.namespace(|| "x"), || { - coords.map_or(Err(SynthesisError::AssignmentMissing), |c| Ok(c.0)) - })?; - let y = AllocatedNum::alloc(cs.namespace(|| "y"), || { - coords.map_or(Err(SynthesisError::AssignmentMissing), |c| Ok(c.1)) - })?; - - Ok(Self { x, y }) - } - - /// Turns an `AllocatedPoint` into an `AllocatedPointNonInfinity` (assumes - /// it is not infinity) - pub fn from_allocated_point(p: &AllocatedPoint) -> Self { - Self { - x: p.x.clone(), - y: p.y.clone(), - } - } - - /// Returns an `AllocatedPoint` from an `AllocatedPointNonInfinity` - pub fn to_allocated_point(&self, is_infinity: &AllocatedNum) -> AllocatedPoint { - AllocatedPoint { - x: self.x.clone(), - y: self.y.clone(), - is_infinity: is_infinity.clone(), - } - } - - /// Returns coordinates associated with the point. - #[allow(unused)] - pub const fn get_coordinates(&self) -> (&AllocatedNum, &AllocatedNum) { - (&self.x, &self.y) - } - - /// Add two points assuming self != +/- other - pub fn add_incomplete(&self, mut cs: CS, other: &Self) -> Result - where - CS: ConstraintSystem, - { - // allocate a free variable that an honest prover sets to lambda = - // (y2-y1)/(x2-x1) - let lambda = AllocatedNum::alloc(cs.namespace(|| "lambda"), || { - if *other.x.get_value().get()? == *self.x.get_value().get()? { - Ok(G::Base::ONE) - } else { - Ok((*other.y.get_value().get()? - *self.y.get_value().get()?) - * (*other.x.get_value().get()? - *self.x.get_value().get()?) 
- .invert() - .unwrap()) - } - })?; - cs.enforce( - || "Check that lambda is computed correctly", - |lc| lc + lambda.get_variable(), - |lc| lc + other.x.get_variable() - self.x.get_variable(), - |lc| lc + other.y.get_variable() - self.y.get_variable(), - ); - - //************************************************************************/ - // x = lambda * lambda - self.x - other.x; - //************************************************************************/ - let x = AllocatedNum::alloc(cs.namespace(|| "x"), || { - Ok(*lambda.get_value().get()? * lambda.get_value().get()? - - *self.x.get_value().get()? - - *other.x.get_value().get()?) - })?; - cs.enforce( - || "check that x is correct", - |lc| lc + lambda.get_variable(), - |lc| lc + lambda.get_variable(), - |lc| lc + x.get_variable() + self.x.get_variable() + other.x.get_variable(), - ); - - //************************************************************************/ - // y = lambda * (self.x - x) - self.y; - //************************************************************************/ - let y = AllocatedNum::alloc(cs.namespace(|| "y"), || { - Ok( - *lambda.get_value().get()? * (*self.x.get_value().get()? - *x.get_value().get()?) - - *self.y.get_value().get()?, - ) - })?; - - cs.enforce( - || "Check that y is correct", - |lc| lc + lambda.get_variable(), - |lc| lc + self.x.get_variable() - x.get_variable(), - |lc| lc + y.get_variable() + self.y.get_variable(), - ); - - Ok(Self { x, y }) - } - - /// doubles the point; since this is called with a point not at infinity, it - /// is guaranteed to be not infinity - pub fn double_incomplete>( - &self, - mut cs: CS, - ) -> Result { - // lambda = (3 x^2 + a) / 2 * y - - let x_sq = self.x.square(cs.namespace(|| "x_sq"))?; - - let lambda = AllocatedNum::alloc(cs.namespace(|| "lambda"), || { - let n = G::Base::from(3) * x_sq.get_value().get()? 
+ G::group_params().0; - let d = G::Base::from(2) * *self.y.get_value().get()?; - if d == G::Base::ZERO { - Ok(G::Base::ONE) - } else { - Ok(n * d.invert().unwrap()) - } - })?; - cs.enforce( - || "Check that lambda is computed correctly", - |lc| lc + lambda.get_variable(), - |lc| lc + (G::Base::from(2), self.y.get_variable()), - |lc| lc + (G::Base::from(3), x_sq.get_variable()) + (G::group_params().0, CS::one()), - ); - - let x = AllocatedNum::alloc(cs.namespace(|| "x"), || { - Ok(*lambda.get_value().get()? * *lambda.get_value().get()? - - *self.x.get_value().get()? - - *self.x.get_value().get()?) - })?; - - cs.enforce( - || "check that x is correct", - |lc| lc + lambda.get_variable(), - |lc| lc + lambda.get_variable(), - |lc| lc + x.get_variable() + (G::Base::from(2), self.x.get_variable()), - ); - - let y = AllocatedNum::alloc(cs.namespace(|| "y"), || { - Ok( - *lambda.get_value().get()? * (*self.x.get_value().get()? - *x.get_value().get()?) - - *self.y.get_value().get()?, - ) - })?; - - cs.enforce( - || "Check that y is correct", - |lc| lc + lambda.get_variable(), - |lc| lc + self.x.get_variable() - x.get_variable(), - |lc| lc + y.get_variable() + self.y.get_variable(), - ); - - Ok(Self { x, y }) - } - - /// If condition outputs a otherwise outputs b - pub fn conditionally_select>( - mut cs: CS, - a: &Self, - b: &Self, - condition: &Boolean, - ) -> Result { - let x = conditionally_select(cs.namespace(|| "select x"), &a.x, &b.x, condition)?; - let y = conditionally_select(cs.namespace(|| "select y"), &a.y, &b.y, condition)?; - - Ok(Self { x, y }) - } -} - -#[cfg(test)] -mod tests { - use expect_test::{expect, Expect}; - use ff::{Field, PrimeFieldBits}; - use group::Curve; - use halo2curves::CurveAffine; - use rand::rngs::OsRng; - - use super::*; - use crate::{ - bellpepper::{ - r1cs::{NovaShape, NovaWitness}, - solver::SatisfyingAssignment, - test_shape_cs::TestShapeCS, - }, - provider::{ - bn256_grumpkin::{bn256, grumpkin}, - Bn256EngineIPA, Bn256EngineKZG, 
GrumpkinEngine, - }, - traits::{snark::default_ck_hint, Engine}, - }; - - #[derive(Debug, Clone)] - pub struct Point { - x: G::Base, - y: G::Base, - is_infinity: bool, - } - - impl Point { - pub fn new(x: G::Base, y: G::Base, is_infinity: bool) -> Self { - Self { x, y, is_infinity } - } - - pub fn random_vartime() -> Self { - loop { - let x = G::Base::random(&mut OsRng); - let y = (x.square() * x + G::group_params().1).sqrt(); - if y.is_some().unwrap_u8() == 1 { - return Self { - x, - y: y.unwrap(), - is_infinity: false, - }; - } - } - } - - /// Add any two points - pub fn add(&self, other: &Self) -> Self { - if self.x == other.x { - // If self == other then call double - if self.y == other.y { - self.double() - } else { - // if self.x == other.x and self.y != other.y then return infinity - Self { - x: G::Base::ZERO, - y: G::Base::ZERO, - is_infinity: true, - } - } - } else { - self.add_internal(other) - } - } - - /// Add two different points - pub fn add_internal(&self, other: &Self) -> Self { - if self.is_infinity { - return other.clone(); - } - - if other.is_infinity { - return self.clone(); - } - - let lambda = (other.y - self.y) * (other.x - self.x).invert().unwrap(); - let x = lambda * lambda - self.x - other.x; - let y = lambda * (self.x - x) - self.y; - Self { - x, - y, - is_infinity: false, - } - } - - pub fn double(&self) -> Self { - if self.is_infinity { - return Self { - x: G::Base::ZERO, - y: G::Base::ZERO, - is_infinity: true, - }; - } - - let lambda = G::Base::from(3) - * self.x - * self.x - * ((G::Base::ONE + G::Base::ONE) * self.y).invert().unwrap(); - let x = lambda * lambda - self.x - self.x; - let y = lambda * (self.x - x) - self.y; - Self { - x, - y, - is_infinity: false, - } - } - - pub fn scalar_mul(&self, scalar: &G::Scalar) -> Self { - let mut res = Self { - x: G::Base::ZERO, - y: G::Base::ZERO, - is_infinity: true, - }; - - let bits = scalar.to_le_bits(); - for i in (0..bits.len()).rev() { - res = res.double(); - if bits[i] { - res = 
self.add(&res); - } - } - res - } - } - - // Allocate a random point. Only used for testing - pub fn alloc_random_point>( - mut cs: CS, - ) -> Result, SynthesisError> { - // get a random point - let p = Point::::random_vartime(); - AllocatedPoint::alloc(cs.namespace(|| "alloc p"), Some((p.x, p.y, p.is_infinity))) - } - - /// Make the point io - pub fn inputize_allocated_point>( - p: &AllocatedPoint, - mut cs: CS, - ) { - let _ = p.x.inputize(cs.namespace(|| "Input point.x")); - let _ = p.y.inputize(cs.namespace(|| "Input point.y")); - let _ = p - .is_infinity - .inputize(cs.namespace(|| "Input point.is_infinity")); - } - - #[test] - fn test_ecc_ops() { - test_ecc_ops_with::::GE>(); - test_ecc_ops_with::::GE>(); - } - - fn test_ecc_ops_with() - where - G: Group, - C: CurveAffine, - { - // perform some curve arithmetic - let a = Point::::random_vartime(); - let b = Point::::random_vartime(); - let c = a.add(&b); - let d = a.double(); - let s = G::Scalar::random(&mut OsRng); - let e = a.scalar_mul(&s); - - // perform the same computation by translating to curve types - let a_curve = C::from_xy( - C::Base::from_repr(a.x.to_repr()).unwrap(), - C::Base::from_repr(a.y.to_repr()).unwrap(), - ) - .unwrap(); - let b_curve = C::from_xy( - C::Base::from_repr(b.x.to_repr()).unwrap(), - C::Base::from_repr(b.y.to_repr()).unwrap(), - ) - .unwrap(); - let c_curve = (a_curve + b_curve).to_affine(); - let d_curve = (a_curve + a_curve).to_affine(); - let e_curve = a_curve - .mul(C::Scalar::from_repr(s.to_repr()).unwrap()) - .to_affine(); - - // transform c, d, and e into curve types - let c_curve_2 = C::from_xy( - C::Base::from_repr(c.x.to_repr()).unwrap(), - C::Base::from_repr(c.y.to_repr()).unwrap(), - ) - .unwrap(); - let d_curve_2 = C::from_xy( - C::Base::from_repr(d.x.to_repr()).unwrap(), - C::Base::from_repr(d.y.to_repr()).unwrap(), - ) - .unwrap(); - let e_curve_2 = C::from_xy( - C::Base::from_repr(e.x.to_repr()).unwrap(), - C::Base::from_repr(e.y.to_repr()).unwrap(), - ) - 
.unwrap(); - - // check that we have the same outputs - assert_eq!(c_curve, c_curve_2); - assert_eq!(d_curve, d_curve_2); - assert_eq!(e_curve, e_curve_2); - } - - fn synthesize_smul(mut cs: CS) -> (AllocatedPoint, AllocatedPoint, G::Scalar) - where - G: Group, - CS: ConstraintSystem, - { - let a = alloc_random_point(cs.namespace(|| "a")).unwrap(); - inputize_allocated_point(&a, cs.namespace(|| "inputize a")); - - let s = G::Scalar::random(&mut OsRng); - // Allocate bits for s - let bits: Vec = s - .to_le_bits() - .into_iter() - .enumerate() - .map(|(i, bit)| AllocatedBit::alloc(cs.namespace(|| format!("bit {i}")), Some(bit))) - .collect::, SynthesisError>>() - .unwrap(); - let e = a.scalar_mul(cs.namespace(|| "Scalar Mul"), &bits).unwrap(); - inputize_allocated_point(&e, cs.namespace(|| "inputize e")); - (a, e, s) - } - - #[test] - fn test_ecc_circuit_ops() { - test_ecc_circuit_ops_with::( - &expect!["2738"], - &expect!["2724"], - ); - test_ecc_circuit_ops_with::( - &expect!["2738"], - &expect!["2724"], - ); - } - - fn test_ecc_circuit_ops_with(expected_constraints: &Expect, expected_variables: &Expect) - where - E1: Engine::Scalar>, - E2: Engine::Scalar>, - { - // First create the shape - let mut cs: TestShapeCS = TestShapeCS::new(); - let _ = synthesize_smul::(cs.namespace(|| "synthesize")); - expected_constraints.assert_eq(&cs.num_constraints().to_string()); - expected_variables.assert_eq(&cs.num_aux().to_string()); - let (shape, ck) = cs.r1cs_shape_and_key(&*default_ck_hint()); - - // Then the satisfying assignment - let mut cs = SatisfyingAssignment::::new(); - let (a, e, s) = synthesize_smul::(cs.namespace(|| "synthesize")); - let (inst, witness) = cs.r1cs_instance_and_witness(&shape, &ck).unwrap(); - - let a_p: Point = Point::new( - a.x.get_value().unwrap(), - a.y.get_value().unwrap(), - a.is_infinity.get_value().unwrap() == ::Base::ONE, - ); - let e_p: Point = Point::new( - e.x.get_value().unwrap(), - e.y.get_value().unwrap(), - 
e.is_infinity.get_value().unwrap() == ::Base::ONE, - ); - let e_new = a_p.scalar_mul(&s); - assert!(e_p.x == e_new.x && e_p.y == e_new.y); - // Make sure that this is satisfiable - shape.is_sat(&ck, &inst, &witness).unwrap(); - } - - fn synthesize_add_equal(mut cs: CS) -> (AllocatedPoint, AllocatedPoint) - where - G: Group, - CS: ConstraintSystem, - { - let a = alloc_random_point(cs.namespace(|| "a")).unwrap(); - inputize_allocated_point(&a, cs.namespace(|| "inputize a")); - let e = a.add(cs.namespace(|| "add a to a"), &a).unwrap(); - inputize_allocated_point(&e, cs.namespace(|| "inputize e")); - (a, e) - } - - #[test] - fn test_ecc_circuit_add_equal() { - test_ecc_circuit_add_equal_with::(); - test_ecc_circuit_add_equal_with::(); - } - - fn test_ecc_circuit_add_equal_with() - where - E1: Engine::Scalar>, - E2: Engine::Scalar>, - { - // First create the shape - let mut cs: TestShapeCS = TestShapeCS::new(); - let _ = synthesize_add_equal::(cs.namespace(|| "synthesize add equal")); - println!("Number of constraints: {}", cs.num_constraints()); - let (shape, ck) = cs.r1cs_shape_and_key(&*default_ck_hint()); - - // Then the satisfying assignment - let mut cs = SatisfyingAssignment::::new(); - let (a, e) = synthesize_add_equal::(cs.namespace(|| "synthesize add equal")); - let (inst, witness) = cs.r1cs_instance_and_witness(&shape, &ck).unwrap(); - let a_p: Point = Point::new( - a.x.get_value().unwrap(), - a.y.get_value().unwrap(), - a.is_infinity.get_value().unwrap() == ::Base::ONE, - ); - let e_p: Point = Point::new( - e.x.get_value().unwrap(), - e.y.get_value().unwrap(), - e.is_infinity.get_value().unwrap() == ::Base::ONE, - ); - let e_new = a_p.add(&a_p); - assert!(e_p.x == e_new.x && e_p.y == e_new.y); - // Make sure that it is satisfiable - shape.is_sat(&ck, &inst, &witness).unwrap(); - } - - fn synthesize_add_negation(mut cs: CS) -> AllocatedPoint - where - G: Group, - CS: ConstraintSystem, - { - let a = alloc_random_point(cs.namespace(|| "a")).unwrap(); - 
inputize_allocated_point(&a, cs.namespace(|| "inputize a")); - let b = &mut a.clone(); - b.y = AllocatedNum::alloc(cs.namespace(|| "allocate negation of a"), || { - Ok(G::Base::ZERO) - }) - .unwrap(); - inputize_allocated_point(b, cs.namespace(|| "inputize b")); - let e = a.add(cs.namespace(|| "add a to b"), b).unwrap(); - e - } - - #[test] - fn test_ecc_circuit_add_negation() { - test_ecc_circuit_add_negation_with::( - &expect!["39"], - &expect!["34"], - ); - test_ecc_circuit_add_negation_with::( - &expect!["39"], - &expect!["34"], - ); - } - - fn test_ecc_circuit_add_negation_with( - expected_constraints: &Expect, - expected_variables: &Expect, - ) where - E1: Engine::Scalar>, - E2: Engine::Scalar>, - { - // First create the shape - let mut cs: TestShapeCS = TestShapeCS::new(); - let _ = synthesize_add_negation::(cs.namespace(|| "synthesize add equal")); - expected_constraints.assert_eq(&cs.num_constraints().to_string()); - expected_variables.assert_eq(&cs.num_aux().to_string()); - let (shape, ck) = cs.r1cs_shape_and_key(&*default_ck_hint()); - - // Then the satisfying assignment - let mut cs = SatisfyingAssignment::::new(); - let e = synthesize_add_negation::(cs.namespace(|| "synthesize add negation")); - let (inst, witness) = cs.r1cs_instance_and_witness(&shape, &ck).unwrap(); - let e_p: Point = Point::new( - e.x.get_value().unwrap(), - e.y.get_value().unwrap(), - e.is_infinity.get_value().unwrap() == ::Base::ONE, - ); - assert!(e_p.is_infinity); - // Make sure that it is satisfiable - shape.is_sat(&ck, &inst, &witness).unwrap(); - } -} diff --git a/src/gadgets/mod.rs b/src/gadgets/mod.rs deleted file mode 100644 index d7af674..0000000 --- a/src/gadgets/mod.rs +++ /dev/null @@ -1,26 +0,0 @@ -//! This module implements various gadgets necessary for Nova and applications -//! built with Nova. 
-mod ecc; -pub(crate) use ecc::AllocatedPoint; - -mod nonnative; -pub(crate) use nonnative::{ - bignat::{nat_to_limbs, BigNat}, - util::{f_to_nat, Num}, -}; - -mod r1cs; -pub(crate) use r1cs::{ - conditionally_select_alloc_relaxed_r1cs, - conditionally_select_vec_allocated_relaxed_r1cs_instance, AllocatedR1CSInstance, - AllocatedRelaxedR1CSInstance, -}; - -mod utils; -#[cfg(test)] -pub(crate) use utils::alloc_one; -pub(crate) use utils::{ - alloc_bignat_constant, alloc_num_equals, alloc_scalar_as_base, alloc_zero, - conditionally_select_allocated_bit, conditionally_select_bignat, le_bits_to_num, - scalar_as_base, -}; diff --git a/src/gadgets/nonnative/bignat.rs b/src/gadgets/nonnative/bignat.rs deleted file mode 100644 index 9af34dc..0000000 --- a/src/gadgets/nonnative/bignat.rs +++ /dev/null @@ -1,908 +0,0 @@ -use std::{ - borrow::Borrow, - cmp::{max, min}, - convert::From, -}; - -use bellpepper_core::{ConstraintSystem, LinearCombination, SynthesisError}; -use ff::PrimeField; -use itertools::Itertools as _; -use num_bigint::BigInt; -use num_traits::cast::ToPrimitive; - -use super::{ - util::{f_to_nat, nat_to_f, Bitvector, Num}, - OptionExt, -}; - -/// Compute the natural number represented by an array of limbs. -/// The limbs are assumed to be based the `limb_width` power of 2. -pub fn limbs_to_nat, I: DoubleEndedIterator>( - limbs: I, - limb_width: usize, -) -> BigInt { - limbs.rev().fold(BigInt::from(0), |mut acc, limb| { - acc <<= limb_width as u32; - acc += f_to_nat(limb.borrow()); - acc - }) -} - -fn int_with_n_ones(n: usize) -> BigInt { - let mut m = BigInt::from(1); - m <<= n as u32; - m -= 1; - m -} - -/// Compute the limbs encoding a natural number. -/// The limbs are assumed to be based the `limb_width` power of 2. 
-pub fn nat_to_limbs( - nat: &BigInt, - limb_width: usize, - n_limbs: usize, -) -> Result, SynthesisError> { - let mask = int_with_n_ones(limb_width); - let mut nat = nat.clone(); - if nat.bits() as usize <= n_limbs * limb_width { - Ok((0..n_limbs) - .map(|_| { - let r = &nat & &mask; - nat >>= limb_width as u32; - nat_to_f(&r).unwrap() - }) - .collect()) - } else { - eprintln!("nat {nat} does not fit in {n_limbs} limbs of width {limb_width}"); - Err(SynthesisError::Unsatisfiable) - } -} - -#[derive(Clone, PartialEq, Eq)] -pub struct BigNatParams { - pub min_bits: usize, - pub max_word: BigInt, - pub limb_width: usize, - pub n_limbs: usize, -} - -impl BigNatParams { - pub fn new(limb_width: usize, n_limbs: usize) -> Self { - let mut max_word = BigInt::from(1) << limb_width as u32; - max_word -= 1; - Self { - max_word, - n_limbs, - limb_width, - min_bits: 0, - } - } -} - -/// A representation of a large natural number (a member of {0, 1, 2, ... }) -#[derive(Clone)] -pub struct BigNat { - /// The linear combinations which constrain the value of each limb of the - /// number - pub limbs: Vec>, - /// The witness values for each limb (filled at witness-time) - pub limb_values: Option>, - /// The value of the whole number (filled at witness-time) - pub value: Option, - /// Parameters - pub params: BigNatParams, -} - -impl PartialEq for BigNat { - fn eq(&self, other: &Self) -> bool { - self.value == other.value && self.params == other.params - } -} -impl Eq for BigNat {} - -impl From> for Polynomial { - fn from(other: BigNat) -> Self { - Self { - coefficients: other.limbs, - values: other.limb_values, - } - } -} - -impl BigNat { - /// Allocates a `BigNat` in the circuit with `n_limbs` limbs of width - /// `limb_width` each. If `max_word` is missing, then it is assumed to - /// be `(2 << limb_width) - 1`. The value is provided by a closure - /// returning limb values. 
- pub fn alloc_from_limbs( - mut cs: CS, - f: F, - max_word: Option, - limb_width: usize, - n_limbs: usize, - ) -> Result - where - CS: ConstraintSystem, - F: FnOnce() -> Result, SynthesisError>, - { - let values_cell = f(); - let mut value = None; - let mut limb_values = None; - let limbs = (0..n_limbs) - .map(|limb_i| { - cs.alloc( - || format!("limb {limb_i}"), - || match values_cell { - Ok(ref vs) => { - if vs.len() != n_limbs { - eprintln!("Values do not match stated limb count"); - return Err(SynthesisError::Unsatisfiable); - } - if value.is_none() { - value = Some(limbs_to_nat::(vs.iter(), limb_width)); - } - if limb_values.is_none() { - limb_values = Some(vs.clone()); - } - Ok(vs[limb_i]) - } - // Hack b/c SynthesisError and io::Error don't implement Clone - Err(ref e) => Err(SynthesisError::from(std::io::Error::new( - std::io::ErrorKind::Other, - format!("{e}"), - ))), - }, - ) - .map(|v| LinearCombination::zero() + v) - }) - .collect::, _>>()?; - Ok(Self { - value, - limb_values, - limbs, - params: BigNatParams { - min_bits: 0, - n_limbs, - max_word: max_word.unwrap_or_else(|| int_with_n_ones(limb_width)), - limb_width, - }, - }) - } - - /// Allocates a `BigNat` in the circuit with `n_limbs` limbs of width - /// `limb_width` each. The `max_word` is guaranteed to be `(2 << - /// limb_width) - 1`. The value is provided by a closure returning a - /// natural number. 
- pub fn alloc_from_nat( - mut cs: CS, - f: F, - limb_width: usize, - n_limbs: usize, - ) -> Result - where - CS: ConstraintSystem, - F: FnOnce() -> Result, - { - let all_values_cell = - f().and_then(|v| Ok((nat_to_limbs::(&v, limb_width, n_limbs)?, v))); - let mut value = None; - let mut limb_values = Vec::new(); - let limbs = (0..n_limbs) - .map(|limb_i| { - cs.alloc( - || format!("limb {limb_i}"), - || match all_values_cell { - Ok((ref vs, ref v)) => { - if value.is_none() { - value = Some(v.clone()); - } - limb_values.push(vs[limb_i]); - Ok(vs[limb_i]) - } - // Hack b/c SynthesisError and io::Error don't implement Clone - Err(ref e) => Err(SynthesisError::from(std::io::Error::new( - std::io::ErrorKind::Other, - format!("{e}"), - ))), - }, - ) - .map(|v| LinearCombination::zero() + v) - }) - .collect::, _>>()?; - Ok(Self { - value, - limb_values: (!limb_values.is_empty()).then_some(limb_values), - limbs, - params: BigNatParams::new(limb_width, n_limbs), - }) - } - - /// Allocates a `BigNat` in the circuit with `n_limbs` limbs of width - /// `limb_width` each. The `max_word` is guaranteed to be `(2 << - /// limb_width) - 1`. The value is provided by an allocated number - pub fn from_num>( - mut cs: CS, - n: &Num, - limb_width: usize, - n_limbs: usize, - ) -> Result { - let bignat = Self::alloc_from_nat( - cs.namespace(|| "bignat"), - || { - Ok({ - n.value - .as_ref() - .map(|n| f_to_nat(n)) - .ok_or(SynthesisError::AssignmentMissing)? 
- }) - }, - limb_width, - n_limbs, - )?; - - // check if bignat equals n - // (1) decompose `bignat` into a bitvector `bv` - let bv = bignat.decompose(cs.namespace(|| "bv"))?; - // (2) recompose bits and check if it equals n - n.is_equal(cs.namespace(|| "n"), &bv); - - Ok(bignat) - } - - pub fn as_limbs(&self) -> Vec> { - let mut limbs = Vec::new(); - for (i, lc) in self.limbs.iter().enumerate() { - limbs.push(Num::new( - self.limb_values.as_ref().map(|vs| vs[i]), - lc.clone(), - )); - } - limbs - } - - pub fn assert_well_formed>( - &self, - mut cs: CS, - ) -> Result<(), SynthesisError> { - // swap the option and iterator - let limb_values_split = - (0..self.limbs.len()).map(|i| self.limb_values.as_ref().map(|vs| vs[i])); - for (i, (limb, limb_value)) in self.limbs.iter().zip_eq(limb_values_split).enumerate() { - Num::new(limb_value, limb.clone()) - .fits_in_bits(cs.namespace(|| format!("{i}")), self.params.limb_width)?; - } - Ok(()) - } - - /// Break `self` up into a bit-vector. - pub fn decompose>( - &self, - mut cs: CS, - ) -> Result, SynthesisError> { - let limb_values_split = - (0..self.limbs.len()).map(|i| self.limb_values.as_ref().map(|vs| vs[i])); - let bitvectors: Vec> = self - .limbs - .iter() - .zip_eq(limb_values_split) - .enumerate() - .map(|(i, (limb, limb_value))| { - Num::new(limb_value, limb.clone()).decompose( - cs.namespace(|| format!("subdecmop {i}")), - self.params.limb_width, - ) - }) - .collect::, _>>()?; - let mut bits = Vec::new(); - let mut values = Vec::new(); - let mut allocations = Vec::new(); - for bv in bitvectors { - bits.extend(bv.bits); - if let Some(vs) = bv.values { - values.extend(vs) - }; - allocations.extend(bv.allocations); - } - let values = (!values.is_empty()).then_some(values); - Ok(Bitvector { - bits, - values, - allocations, - }) - } - - pub fn enforce_limb_width_agreement( - &self, - other: &Self, - location: &str, - ) -> Result { - if self.params.limb_width == other.params.limb_width { - Ok(self.params.limb_width) - } 
else { - eprintln!( - "Limb widths {}, {}, do not agree at {}", - self.params.limb_width, other.params.limb_width, location - ); - Err(SynthesisError::Unsatisfiable) - } - } - - pub fn from_poly(poly: Polynomial, limb_width: usize, max_word: BigInt) -> Self { - Self { - params: BigNatParams { - min_bits: 0, - max_word, - n_limbs: poly.coefficients.len(), - limb_width, - }, - limbs: poly.coefficients, - value: poly - .values - .as_ref() - .map(|limb_values| limbs_to_nat::(limb_values.iter(), limb_width)), - limb_values: poly.values, - } - } - - /// Constrain `self` to be equal to `other`, after carrying both. - pub fn equal_when_carried>( - &self, - mut cs: CS, - other: &Self, - ) -> Result<(), SynthesisError> { - self.enforce_limb_width_agreement(other, "equal_when_carried")?; - - // We'll propagate carries over the first `n` limbs. - let n = min(self.limbs.len(), other.limbs.len()); - let target_base = BigInt::from(1u8) << self.params.limb_width as u32; - let mut accumulated_extra = BigInt::from(0usize); - let max_word = max(&self.params.max_word, &other.params.max_word); - let carry_bits = - (((max_word.to_f64().unwrap() * 2.0).log2() - self.params.limb_width as f64).ceil() - + 0.1) as usize; - let mut carry_in = Num::new(Some(Scalar::ZERO), LinearCombination::zero()); - - for i in 0..n { - let carry = Num::alloc(cs.namespace(|| format!("carry value {i}")), || { - Ok(nat_to_f( - &((f_to_nat(&self.limb_values.grab()?[i]) - + f_to_nat(&carry_in.value.unwrap()) - + max_word - - f_to_nat(&other.limb_values.grab()?[i])) - / &target_base), - ) - .unwrap()) - })?; - accumulated_extra += max_word; - - cs.enforce( - || format!("carry {i}"), - |lc| lc, - |lc| lc, - |lc| { - lc + &carry_in.num + &self.limbs[i] - &other.limbs[i] - + (nat_to_f(max_word).unwrap(), CS::one()) - - (nat_to_f(&target_base).unwrap(), &carry.num) - - ( - nat_to_f(&(&accumulated_extra % &target_base)).unwrap(), - CS::one(), - ) - }, - ); - - accumulated_extra /= &target_base; - - if i < n - 1 { - 
carry.fits_in_bits(cs.namespace(|| format!("carry {i} decomp")), carry_bits)?; - } else { - cs.enforce( - || format!("carry {i} is out"), - |lc| lc, - |lc| lc, - |lc| lc + &carry.num - (nat_to_f(&accumulated_extra).unwrap(), CS::one()), - ); - } - carry_in = carry; - } - - for (i, zero_limb) in self.limbs.iter().enumerate().skip(n) { - cs.enforce( - || format!("zero self {i}"), - |lc| lc, - |lc| lc, - |lc| lc + zero_limb, - ); - } - for (i, zero_limb) in other.limbs.iter().enumerate().skip(n) { - cs.enforce( - || format!("zero other {i}"), - |lc| lc, - |lc| lc, - |lc| lc + zero_limb, - ); - } - Ok(()) - } - - /// Constrain `self` to be equal to `other`, after carrying both. - /// Uses regrouping internally to take full advantage of the field size and - /// reduce the amount of carrying. - pub fn equal_when_carried_regroup>( - &self, - mut cs: CS, - other: &Self, - ) -> Result<(), SynthesisError> { - self.enforce_limb_width_agreement(other, "equal_when_carried_regroup")?; - let max_word = max(&self.params.max_word, &other.params.max_word); - let carry_bits = - (((max_word.to_f64().unwrap() * 2.0).log2() - self.params.limb_width as f64).ceil() - + 0.1) as usize; - let limbs_per_group = max( - (Scalar::CAPACITY as usize - carry_bits) / self.params.limb_width, - 1, - ); - - let self_grouped = self.group_limbs(limbs_per_group); - let other_grouped = other.group_limbs(limbs_per_group); - self_grouped.equal_when_carried(cs.namespace(|| "grouped"), &other_grouped) - } - - pub fn add(&self, other: &Self) -> Result { - self.enforce_limb_width_agreement(other, "add")?; - let n_limbs = max(self.params.n_limbs, other.params.n_limbs); - let max_word = &self.params.max_word + &other.params.max_word; - let limbs: Vec> = (0..n_limbs) - .map(|i| match (self.limbs.get(i), other.limbs.get(i)) { - (Some(a), Some(b)) => a.clone() + b, - (Some(a), None) => a.clone(), - (None, Some(b)) => b.clone(), - (None, None) => unreachable!(), - }) - .collect(); - let limb_values: Option> = 
self.limb_values.as_ref().and_then(|x| { - other.limb_values.as_ref().map(|y| { - (0..n_limbs) - .map(|i| match (x.get(i), y.get(i)) { - (Some(a), Some(b)) => { - let mut t = *a; - t.add_assign(b); - t - } - (Some(a), None) | (None, Some(a)) => *a, - (None, None) => unreachable!(), - }) - .collect() - }) - }); - let value = self - .value - .as_ref() - .and_then(|x| other.value.as_ref().map(|y| x + y)); - Ok(Self { - limb_values, - value, - limbs, - params: BigNatParams { - min_bits: max(self.params.min_bits, other.params.min_bits), - n_limbs, - max_word, - limb_width: self.params.limb_width, - }, - }) - } - - /// Compute a `BigNat` constrained to be equal to `self * other % modulus`. - pub fn mult_mod>( - &self, - mut cs: CS, - other: &Self, - modulus: &Self, - ) -> Result<(Self, Self), SynthesisError> { - self.enforce_limb_width_agreement(other, "mult_mod")?; - let limb_width = self.params.limb_width; - let quotient_bits = - (self.n_bits() + other.n_bits()).saturating_sub(modulus.params.min_bits); - let quotient_limbs = quotient_bits.saturating_sub(1) / limb_width + 1; - let quotient = Self::alloc_from_nat( - cs.namespace(|| "quotient"), - || { - Ok({ - let mut x = self.value.grab()?.clone(); - x *= other.value.grab()?; - x /= modulus.value.grab()?; - x - }) - }, - self.params.limb_width, - quotient_limbs, - )?; - quotient.assert_well_formed(cs.namespace(|| "quotient rangecheck"))?; - let remainder = Self::alloc_from_nat( - cs.namespace(|| "remainder"), - || { - Ok({ - let mut x = self.value.grab()?.clone(); - x *= other.value.grab()?; - x %= modulus.value.grab()?; - x - }) - }, - self.params.limb_width, - modulus.limbs.len(), - )?; - remainder.assert_well_formed(cs.namespace(|| "remainder rangecheck"))?; - let a_poly = Polynomial::from(self.clone()); - let b_poly = Polynomial::from(other.clone()); - let mod_poly = Polynomial::from(modulus.clone()); - let q_poly = Polynomial::from(quotient.clone()); - let r_poly = Polynomial::from(remainder.clone()); - - // a * b 
- let left = a_poly.alloc_product(cs.namespace(|| "left"), &b_poly)?; - let right_product = q_poly.alloc_product(cs.namespace(|| "right_product"), &mod_poly)?; - // q * m + r - let right = right_product.sum(&r_poly); - - let left_max_word = { - let mut x = BigInt::from(min(self.limbs.len(), other.limbs.len())); - x *= &self.params.max_word; - x *= &other.params.max_word; - x - }; - let right_max_word = { - let mut x = BigInt::from(min(quotient.limbs.len(), modulus.limbs.len())); - x *= "ient.params.max_word; - x *= &modulus.params.max_word; - x += &remainder.params.max_word; - x - }; - - let left_int = Self::from_poly(left, limb_width, left_max_word); - let right_int = Self::from_poly(right, limb_width, right_max_word); - left_int.equal_when_carried_regroup(cs.namespace(|| "carry"), &right_int)?; - Ok((quotient, remainder)) - } - - /// Compute a `BigNat` constrained to be equal to `self * other % modulus`. - pub fn red_mod>( - &self, - mut cs: CS, - modulus: &Self, - ) -> Result { - self.enforce_limb_width_agreement(modulus, "red_mod")?; - let limb_width = self.params.limb_width; - let quotient_bits = self.n_bits().saturating_sub(modulus.params.min_bits); - let quotient_limbs = quotient_bits.saturating_sub(1) / limb_width + 1; - let quotient = Self::alloc_from_nat( - cs.namespace(|| "quotient"), - || Ok(self.value.grab()? / modulus.value.grab()?), - self.params.limb_width, - quotient_limbs, - )?; - quotient.assert_well_formed(cs.namespace(|| "quotient rangecheck"))?; - let remainder = Self::alloc_from_nat( - cs.namespace(|| "remainder"), - || Ok(self.value.grab()? 
% modulus.value.grab()?), - self.params.limb_width, - modulus.limbs.len(), - )?; - remainder.assert_well_formed(cs.namespace(|| "remainder rangecheck"))?; - let mod_poly = Polynomial::from(modulus.clone()); - let q_poly = Polynomial::from(quotient.clone()); - let r_poly = Polynomial::from(remainder.clone()); - - // q * m + r - let right_product = q_poly.alloc_product(cs.namespace(|| "right_product"), &mod_poly)?; - let right = right_product.sum(&r_poly); - - let right_max_word = { - let mut x = BigInt::from(min(quotient.limbs.len(), modulus.limbs.len())); - x *= "ient.params.max_word; - x *= &modulus.params.max_word; - x += &remainder.params.max_word; - x - }; - - let right_int = Self::from_poly(right, limb_width, right_max_word); - self.equal_when_carried_regroup(cs.namespace(|| "carry"), &right_int)?; - Ok(remainder) - } - - /// Combines limbs into groups. - pub fn group_limbs(&self, limbs_per_group: usize) -> Self { - let n_groups = (self.limbs.len() - 1) / limbs_per_group + 1; - let limb_values = self.limb_values.as_ref().map(|vs| { - let mut values: Vec = vec![Scalar::ZERO; n_groups]; - let mut shift = Scalar::ONE; - let limb_block = (0..self.params.limb_width).fold(Scalar::ONE, |mut l, _| { - l = l.double(); - l - }); - for (i, v) in vs.iter().enumerate() { - if i % limbs_per_group == 0 { - shift = Scalar::ONE; - } - let mut a = shift; - a *= v; - values[i / limbs_per_group].add_assign(&a); - shift.mul_assign(&limb_block); - } - values - }); - let limbs = { - let mut limbs: Vec> = - vec![LinearCombination::zero(); n_groups]; - let mut shift = Scalar::ONE; - let limb_block = (0..self.params.limb_width).fold(Scalar::ONE, |mut l, _| { - l = l.double(); - l - }); - for (i, limb) in self.limbs.iter().enumerate() { - if i % limbs_per_group == 0 { - shift = Scalar::ONE; - } - limbs[i / limbs_per_group] = - std::mem::replace(&mut limbs[i / limbs_per_group], LinearCombination::zero()) - + (shift, limb); - shift.mul_assign(&limb_block); - } - limbs - }; - let max_word 
= (0..limbs_per_group).fold(BigInt::from(0u8), |mut acc, i| { - acc.set_bit((i * self.params.limb_width) as u64, true); - acc - }) * &self.params.max_word; - Self { - params: BigNatParams { - min_bits: self.params.min_bits, - limb_width: self.params.limb_width * limbs_per_group, - n_limbs: limbs.len(), - max_word, - }, - limbs, - limb_values, - value: self.value.clone(), - } - } - - pub fn n_bits(&self) -> usize { - assert!(self.params.n_limbs > 0); - self.params.limb_width * (self.params.n_limbs - 1) + self.params.max_word.bits() as usize - } -} - -pub struct Polynomial { - pub coefficients: Vec>, - pub values: Option>, -} - -impl Polynomial { - pub fn alloc_product>( - &self, - mut cs: CS, - other: &Self, - ) -> Result { - let n_product_coeffs = self.coefficients.len() + other.coefficients.len() - 1; - let values = self.values.as_ref().and_then(|self_vs| { - other.values.as_ref().map(|other_vs| { - let mut values: Vec = std::iter::repeat_with(|| Scalar::ZERO) - .take(n_product_coeffs) - .collect(); - for (self_i, self_v) in self_vs.iter().enumerate() { - for (other_i, other_v) in other_vs.iter().enumerate() { - let mut v = *self_v; - v.mul_assign(other_v); - values[self_i + other_i].add_assign(&v); - } - } - values - }) - }); - let coefficients = (0..n_product_coeffs) - .map(|i| { - Ok(LinearCombination::zero() - + cs.alloc(|| format!("prod {i}"), || Ok(values.grab()?[i]))?) 
- }) - .collect::>, SynthesisError>>()?; - let product = Self { - coefficients, - values, - }; - let one = Scalar::ONE; - let mut x = Scalar::ZERO; - for _ in 1..(n_product_coeffs + 1) { - x.add_assign(&one); - cs.enforce( - || format!("pointwise product @ {x:?}"), - |lc| { - let mut i = Scalar::ONE; - self.coefficients.iter().fold(lc, |lc, c| { - let r = lc + (i, c); - i.mul_assign(&x); - r - }) - }, - |lc| { - let mut i = Scalar::ONE; - other.coefficients.iter().fold(lc, |lc, c| { - let r = lc + (i, c); - i.mul_assign(&x); - r - }) - }, - |lc| { - let mut i = Scalar::ONE; - product.coefficients.iter().fold(lc, |lc, c| { - let r = lc + (i, c); - i.mul_assign(&x); - r - }) - }, - ) - } - Ok(product) - } - - pub fn sum(&self, other: &Self) -> Self { - let n_coeffs = max(self.coefficients.len(), other.coefficients.len()); - let values = self.values.as_ref().and_then(|self_vs| { - other.values.as_ref().map(|other_vs| { - (0..n_coeffs) - .map(|i| { - let mut s = Scalar::ZERO; - if i < self_vs.len() { - s.add_assign(&self_vs[i]); - } - if i < other_vs.len() { - s.add_assign(&other_vs[i]); - } - s - }) - .collect() - }) - }); - let coefficients = (0..n_coeffs) - .map(|i| { - let mut lc = LinearCombination::zero(); - if i < self.coefficients.len() { - lc = lc + &self.coefficients[i]; - } - if i < other.coefficients.len() { - lc = lc + &other.coefficients[i]; - } - lc - }) - .collect(); - Self { - coefficients, - values, - } - } -} - -#[cfg(test)] -mod tests { - use bellpepper_core::{test_cs::TestConstraintSystem, Circuit}; - #[cfg(not(target_arch = "wasm32"))] - use proptest::prelude::*; - - use super::*; - use crate::provider::bn256_grumpkin::bn256::Scalar; - - pub struct PolynomialMultiplier { - pub a: Vec, - pub b: Vec, - } - - impl Circuit for PolynomialMultiplier { - fn synthesize>( - self, - cs: &mut CS, - ) -> Result<(), SynthesisError> { - let a = Polynomial { - coefficients: self - .a - .iter() - .enumerate() - .map(|(i, x)| { - Ok(LinearCombination::zero() - + 
cs.alloc(|| format!("coeff_a {i}"), || Ok(*x))?) - }) - .collect::>, SynthesisError>>()?, - values: Some(self.a), - }; - let b = Polynomial { - coefficients: self - .b - .iter() - .enumerate() - .map(|(i, x)| { - Ok(LinearCombination::zero() - + cs.alloc(|| format!("coeff_b {i}"), || Ok(*x))?) - }) - .collect::>, SynthesisError>>()?, - values: Some(self.b), - }; - let _prod = a.alloc_product(cs.namespace(|| "product"), &b)?; - Ok(()) - } - } - - #[test] - fn test_polynomial_multiplier_circuit() { - let mut cs = TestConstraintSystem::::new(); - - let circuit = PolynomialMultiplier { - a: [1, 1, 1].iter().map(|i| Scalar::from_u128(*i)).collect(), - b: [1, 1].iter().map(|i| Scalar::from_u128(*i)).collect(), - }; - - circuit.synthesize(&mut cs).expect("synthesis failed"); - - if let Some(token) = cs.which_is_unsatisfied() { - eprintln!("Error: {} is unsatisfied", token); - } - } - - #[derive(Debug)] - pub struct BigNatBitDecompInputs { - pub n: BigInt, - } - - pub struct BigNatBitDecompParams { - pub limb_width: usize, - pub n_limbs: usize, - } - - pub struct BigNatBitDecomp { - inputs: Option, - params: BigNatBitDecompParams, - } - - impl Circuit for BigNatBitDecomp { - fn synthesize>( - self, - cs: &mut CS, - ) -> Result<(), SynthesisError> { - let n = BigNat::alloc_from_nat( - cs.namespace(|| "n"), - || Ok(self.inputs.grab()?.n.clone()), - self.params.limb_width, - self.params.n_limbs, - )?; - n.decompose(cs.namespace(|| "decomp"))?; - Ok(()) - } - } - - #[cfg(not(target_arch = "wasm32"))] - proptest! { - #![proptest_config(ProptestConfig { - cases: 10, // this test is costlier as max n gets larger - .. 
ProptestConfig::default() - })] - #[test] - fn test_big_nat_can_decompose(n in any::(), limb_width in 40u8..200) { - let n = n as usize; - - let n_limbs = if n == 0 { - 1 - } else { - (n - 1) / limb_width as usize + 1 - }; - - let circuit = BigNatBitDecomp { - inputs: Some(BigNatBitDecompInputs { - n: BigInt::from(n), - }), - params: BigNatBitDecompParams { - limb_width: limb_width as usize, - n_limbs, - }, - }; - let mut cs = TestConstraintSystem::::new(); - circuit.synthesize(&mut cs).expect("synthesis failed"); - prop_assert!(cs.is_satisfied()); - } - } -} diff --git a/src/gadgets/nonnative/mod.rs b/src/gadgets/nonnative/mod.rs deleted file mode 100644 index 93cfc74..0000000 --- a/src/gadgets/nonnative/mod.rs +++ /dev/null @@ -1,35 +0,0 @@ -//! This module implements various gadgets necessary for doing non-native -//! arithmetic Code in this module is adapted from [bellman-bignat](https://github.com/alex-ozdemir/bellman-bignat), which is licenced under MIT - -use bellpepper_core::SynthesisError; -use ff::PrimeField; - -trait OptionExt { - fn grab(&self) -> Result<&T, SynthesisError>; -} - -impl OptionExt for Option { - fn grab(&self) -> Result<&T, SynthesisError> { - self.as_ref().ok_or(SynthesisError::AssignmentMissing) - } -} - -trait BitAccess { - fn get_bit(&self, i: usize) -> Option; -} - -impl BitAccess for Scalar { - fn get_bit(&self, i: usize) -> Option { - if i as u32 >= Scalar::NUM_BITS { - return None; - } - - let (byte_pos, bit_pos) = (i / 8, i % 8); - let byte = self.to_repr().as_ref()[byte_pos]; - let bit = byte >> bit_pos & 1; - Some(bit == 1) - } -} - -pub mod bignat; -pub mod util; diff --git a/src/gadgets/nonnative/util.rs b/src/gadgets/nonnative/util.rs deleted file mode 100644 index 1fcf366..0000000 --- a/src/gadgets/nonnative/util.rs +++ /dev/null @@ -1,293 +0,0 @@ -use std::{ - convert::From, - io::{self, Write}, -}; - -use bellpepper_core::{ - num::AllocatedNum, ConstraintSystem, LinearCombination, SynthesisError, Variable, -}; -use 
byteorder::WriteBytesExt; -use ff::PrimeField; -use num_bigint::{BigInt, Sign}; - -use super::{BitAccess, OptionExt}; - -#[derive(Clone)] -/// A representation of a bit -pub struct Bit { - /// The linear combination which constrain the value of the bit - pub bit: LinearCombination, - #[allow(unused)] - /// The value of the bit (filled at witness-time) - pub value: Option, -} - -#[derive(Clone)] -/// A representation of a bit-vector -pub struct Bitvector { - /// The linear combination which constrain the values of the bits - pub bits: Vec>, - /// The value of the bits (filled at witness-time) - pub values: Option>, - /// Allocated bit variables - pub allocations: Vec>, -} - -impl Bit { - /// Allocate a variable in the constraint system which can only be a - /// boolean value. - pub fn alloc>( - mut cs: CS, - value: Option, - ) -> Result { - let var = cs.alloc( - || "boolean", - || { - if *value.grab()? { - Ok(Scalar::ONE) - } else { - Ok(Scalar::ZERO) - } - }, - )?; - - // Constrain: (1 - a) * a = 0 - // This constrains a to be either 0 or 1. - cs.enforce( - || "boolean constraint", - |lc| lc + CS::one() - var, - |lc| lc + var, - |lc| lc, - ); - - Ok(Self { - bit: LinearCombination::zero() + var, - value, - }) - } -} - -pub struct Num { - pub num: LinearCombination, - pub value: Option, -} - -impl Num { - pub const fn new(value: Option, num: LinearCombination) -> Self { - Self { value, num } - } - pub fn alloc(mut cs: CS, value: F) -> Result - where - CS: ConstraintSystem, - F: FnOnce() -> Result, - { - let mut new_value = None; - let var = cs.alloc( - || "num", - || { - let tmp = value()?; - - new_value = Some(tmp); - - Ok(tmp) - }, - )?; - - Ok(Self { - value: new_value, - num: LinearCombination::zero() + var, - }) - } - - pub fn fits_in_bits>( - &self, - mut cs: CS, - n_bits: usize, - ) -> Result<(), SynthesisError> { - let v = self.value; - - // Allocate all but the first bit. 
- let bits: Vec = (1..n_bits) - .map(|i| { - cs.alloc( - || format!("bit {i}"), - || { - let r = if *v.grab()?.get_bit(i).grab()? { - Scalar::ONE - } else { - Scalar::ZERO - }; - Ok(r) - }, - ) - }) - .collect::>()?; - - for (i, v) in bits.iter().enumerate() { - cs.enforce( - || format!("{i} is bit"), - |lc| lc + *v, - |lc| lc + CS::one() - *v, - |lc| lc, - ) - } - - // Last bit - cs.enforce( - || "last bit", - |mut lc| { - let mut f = Scalar::ONE; - lc = lc + &self.num; - for v in bits.iter() { - f = f.double(); - lc = lc - (f, *v); - } - lc - }, - |mut lc| { - lc = lc + CS::one(); - let mut f = Scalar::ONE; - lc = lc - &self.num; - for v in bits.iter() { - f = f.double(); - lc = lc + (f, *v); - } - lc - }, - |lc| lc, - ); - Ok(()) - } - - /// Checks if the natural number equals an array of bits. - pub fn is_equal>(&self, mut cs: CS, other: &Bitvector) { - let mut f = Scalar::ONE; - let sum = other - .allocations - .iter() - .fold(LinearCombination::zero(), |lc, bit| { - let l = lc + (f, &bit.bit); - f = f.double(); - l - }); - let sum_lc = LinearCombination::zero() + &self.num - ∑ - cs.enforce(|| "sum", |lc| lc + &sum_lc, |lc| lc + CS::one(), |lc| lc); - } - - /// Compute the natural number represented by an array of limbs. - /// The limbs are assumed to be based the `limb_width` power of 2. 
- /// Low-index bits are low-order - pub fn decompose>( - &self, - mut cs: CS, - n_bits: usize, - ) -> Result, SynthesisError> { - let values: Option> = self.value.as_ref().map(|v| { - let num = *v; - (0..n_bits).map(|i| num.get_bit(i).unwrap()).collect() - }); - let allocations: Vec> = (0..n_bits) - .map(|bit_i| { - Bit::alloc( - cs.namespace(|| format!("bit{bit_i}")), - values.as_ref().map(|vs| vs[bit_i]), - ) - }) - .collect::, _>>()?; - let mut f = Scalar::ONE; - let sum = allocations - .iter() - .fold(LinearCombination::zero(), |lc, bit| { - let l = lc + (f, &bit.bit); - f = f.double(); - l - }); - let sum_lc = LinearCombination::zero() + &self.num - ∑ - cs.enforce(|| "sum", |lc| lc + &sum_lc, |lc| lc + CS::one(), |lc| lc); - let bits: Vec> = allocations - .iter() - .map(|a| LinearCombination::zero() + &a.bit) - .collect(); - Ok(Bitvector { - allocations, - values, - bits, - }) - } - - pub fn as_allocated_num>( - &self, - mut cs: CS, - ) -> Result, SynthesisError> { - let new = AllocatedNum::alloc(cs.namespace(|| "alloc"), || Ok(*self.value.grab()?))?; - cs.enforce( - || "eq", - |lc| lc, - |lc| lc, - |lc| lc + new.get_variable() - &self.num, - ); - Ok(new) - } -} - -impl From> for Num { - fn from(a: AllocatedNum) -> Self { - Self::new(a.get_value(), LinearCombination::zero() + a.get_variable()) - } -} - -fn write_be(f: &F, mut writer: W) -> io::Result<()> { - for digit in f.to_repr().as_ref().iter().rev() { - writer.write_u8(*digit)?; - } - - Ok(()) -} - -/// Convert a field element to a natural number -pub fn f_to_nat(f: &Scalar) -> BigInt { - let mut s = Vec::new(); - write_be(f, &mut s).unwrap(); - BigInt::from_bytes_le(Sign::Plus, f.to_repr().as_ref()) -} - -/// Convert a natural number to a field element. -/// Returns `None` if the number is too big for the field. 
-pub fn nat_to_f(n: &BigInt) -> Option { - Scalar::from_str_vartime(&format!("{n}")) -} - -#[cfg(test)] -mod tests { - use bitvec::field::BitField as _; - use ff::PrimeFieldBits; - use rand::SeedableRng; - use rand_chacha::ChaCha20Rng; - - // the write_be function above assumes Field::to_repr() outputs a representation - // that's an instance of `AsRef<[u8]>` in lower endian. We test that here, - // as this is not what the I2OSP standard recommends and may change in some - // implementations. - fn test_repr_is_le_with() { - let mut rng = ChaCha20Rng::from_seed([0u8; 32]); - for _i in 0..50 { - let f = F::random(&mut rng); - // This is guaranteed to be in LE - let le_bits = f.to_le_bits(); - let leftmost_u64 = le_bits[..64].load_le::(); - - // This is not - let f_repr = f.to_repr(); - let bytes: [u8; 8] = f_repr.as_ref()[..8].try_into().unwrap(); - let u64_from_repr = u64::from_le_bytes(bytes); - - assert_eq!(leftmost_u64, u64_from_repr); - } - } - - #[test] - fn test_repr_is_le() { - test_repr_is_le_with::(); - test_repr_is_le_with::(); - } -} diff --git a/src/gadgets/r1cs.rs b/src/gadgets/r1cs.rs deleted file mode 100644 index 1f5eb9d..0000000 --- a/src/gadgets/r1cs.rs +++ /dev/null @@ -1,427 +0,0 @@ -//! This module implements various gadgets necessary for folding R1CS types. 
-use bellpepper::gadgets::{ - boolean::Boolean, boolean_utils::conditionally_select, num::AllocatedNum, Assignment, -}; -use bellpepper_core::{ConstraintSystem, SynthesisError}; -use ff::Field; -use itertools::Itertools as _; - -use super::nonnative::{ - bignat::BigNat, - util::{f_to_nat, Num}, -}; -use crate::{ - constants::{NUM_CHALLENGE_BITS, NUM_FE_WITHOUT_IO_FOR_NOVA_FOLD}, - gadgets::{ - ecc::AllocatedPoint, - utils::{ - alloc_bignat_constant, alloc_one, alloc_scalar_as_base, conditionally_select_bignat, - le_bits_to_num, - }, - }, - r1cs::{R1CSInstance, RelaxedR1CSInstance}, - traits::{commitment::CommitmentTrait, Engine, Group, ROCircuitTrait, ROConstantsCircuit}, -}; - -/// An Allocated R1CS Instance -#[derive(Clone)] -pub struct AllocatedR1CSInstance { - pub(crate) W: AllocatedPoint, - pub(crate) X: [AllocatedNum; N], -} - -impl AllocatedR1CSInstance { - /// Takes the r1cs instance and creates a new allocated r1cs instance - pub fn alloc::Base>>( - mut cs: CS, - u: Option<&R1CSInstance>, - ) -> Result { - let W = AllocatedPoint::alloc( - cs.namespace(|| "allocate W"), - u.map(|u| u.comm_W.to_coordinates()), - )?; - W.check_on_curve(cs.namespace(|| "check W on curve"))?; - - let X: [AllocatedNum; N] = (0..N) - .map(|idx| { - alloc_scalar_as_base::( - cs.namespace(|| format!("allocating X[{idx}]")), - u.map(|u| u.X[idx]), - ) - }) - .collect::, _>>()? 
- .try_into() - .map_err(|err: Vec<_>| { - SynthesisError::IncompatibleLengthVector(format!("{} != {N}", err.len())) - })?; - - Ok(Self { W, X }) - } - - /// Absorb the provided instance in the RO - pub fn absorb_in_ro(&self, ro: &mut E::ROCircuit) { - ro.absorb(&self.W.x); - ro.absorb(&self.W.y); - ro.absorb(&self.W.is_infinity); - self.X.iter().for_each(|x| ro.absorb(x)); - } -} - -/// An Allocated Relaxed R1CS Instance -#[derive(Clone)] -pub struct AllocatedRelaxedR1CSInstance { - pub(crate) W: AllocatedPoint, - pub(crate) E: AllocatedPoint, - pub(crate) u: AllocatedNum, - pub(crate) X: [BigNat; N], -} - -impl AllocatedRelaxedR1CSInstance { - /// Allocates the given `RelaxedR1CSInstance` as a witness of the circuit - pub fn alloc::Base>>( - mut cs: CS, - inst: Option<&RelaxedR1CSInstance>, - limb_width: usize, - n_limbs: usize, - ) -> Result { - // We do not need to check that W or E are well-formed (e.g., on the curve) as - // we do a hash check in the Nova augmented circuit, which ensures that - // the relaxed instance came from a prior iteration of Nova. - let W = AllocatedPoint::alloc( - cs.namespace(|| "allocate W"), - inst.map(|inst| inst.comm_W.to_coordinates()), - )?; - - let E = AllocatedPoint::alloc( - cs.namespace(|| "allocate E"), - inst.map(|inst| inst.comm_E.to_coordinates()), - )?; - - // u << |E::Base| despite the fact that u is a scalar. - // So we parse all of its bytes as a E::Base element - let u = - alloc_scalar_as_base::(cs.namespace(|| "allocate u"), inst.map(|inst| inst.u))?; - - // Allocate X. If the input instance is None then allocate components as zero. - let X = (0..N) - .map(|idx| { - BigNat::alloc_from_nat( - cs.namespace(|| format!("allocate X[{idx}]")), - || Ok(f_to_nat(&inst.map_or(E::Scalar::ZERO, |inst| inst.X[idx]))), - limb_width, - n_limbs, - ) - }) - .collect::, _>>()? 
- .try_into() - .map_err(|err: Vec<_>| { - SynthesisError::IncompatibleLengthVector(format!("{} != {N}", err.len())) - })?; - - Ok(Self { W, E, u, X }) - } - - /// Allocates the hardcoded default `RelaxedR1CSInstance` in the circuit. - /// W = E = 0, u = 0, X0 = X1 = 0 - pub fn default::Base>>( - mut cs: CS, - limb_width: usize, - n_limbs: usize, - ) -> Result { - let W = AllocatedPoint::default(cs.namespace(|| "allocate W")); - let E = W.clone(); - - let u = W.x.clone(); // In the default case, W.x = u = 0 - - // X is allocated and in the honest prover case set to zero - // If the prover is malicious, it can set to arbitrary values, but the resulting - // relaxed R1CS instance with the the checked default values of W, E, and u must - // still be satisfying - - let X = (0..N) - .map(|idx| { - BigNat::alloc_from_nat( - cs.namespace(|| format!("allocate X_default[{idx}]")), - || Ok(f_to_nat(&E::Scalar::ZERO)), - limb_width, - n_limbs, - ) - }) - .collect::, _>>()? - .try_into() - .map_err(|err: Vec<_>| { - SynthesisError::IncompatibleLengthVector(format!("{} != {N}", err.len())) - })?; - - Ok(Self { W, E, u, X }) - } - - /// Allocates the R1CS Instance as a `RelaxedR1CSInstance` in the circuit. - /// E = 0, u = 1 - pub fn from_r1cs_instance::Base>>( - mut cs: CS, - inst: AllocatedR1CSInstance, - limb_width: usize, - n_limbs: usize, - ) -> Result { - let E = AllocatedPoint::default(cs.namespace(|| "allocate default E")); - - let u = alloc_one(cs.namespace(|| "one")); - - let X = inst - .X - .into_iter() - .enumerate() - .map(|(idx, x)| { - BigNat::from_num( - cs.namespace(|| format!("allocate X[{idx}] from relaxed r1cs")), - &Num::from(x), - limb_width, - n_limbs, - ) - }) - .collect::, _>>()? 
- .try_into() - .map_err(|err: Vec<_>| { - SynthesisError::IncompatibleLengthVector(format!("{} != {N}", err.len())) - })?; - - Ok(Self { W: inst.W, E, u, X }) - } - - /// Absorb the provided instance in the RO - pub fn absorb_in_ro::Base>>( - &self, - mut cs: CS, - ro: &mut E::ROCircuit, - ) -> Result<(), SynthesisError> { - ro.absorb(&self.W.x); - ro.absorb(&self.W.y); - ro.absorb(&self.W.is_infinity); - ro.absorb(&self.E.x); - ro.absorb(&self.E.y); - ro.absorb(&self.E.is_infinity); - ro.absorb(&self.u); - - self.X.iter().enumerate().try_for_each(|(idx, X)| { - X.as_limbs().iter().enumerate().try_for_each( - |(i, limb)| -> Result<(), SynthesisError> { - ro.absorb(&limb.as_allocated_num( - cs.namespace(|| format!("convert limb {i} of X_r[{idx}] to num")), - )?); - Ok(()) - }, - ) - })?; - - Ok(()) - } - - /// Folds self with a relaxed r1cs instance and returns the result - pub fn fold_with_r1cs::Base>>( - &self, - mut cs: CS, - params: &AllocatedNum, // hash of R1CSShape of F' - u: &AllocatedR1CSInstance, - T: &AllocatedPoint, - ro_consts: ROConstantsCircuit, - limb_width: usize, - n_limbs: usize, - ) -> Result { - // Compute r: - let mut ro = E::ROCircuit::new(ro_consts, NUM_FE_WITHOUT_IO_FOR_NOVA_FOLD + N); - ro.absorb(params); - - // running instance `U` does not need to absorbed since u.X[0] = Hash(params, U, - // i, z0, zi) - u.absorb_in_ro(&mut ro); - - ro.absorb(&T.x); - ro.absorb(&T.y); - ro.absorb(&T.is_infinity); - let r_bits = ro.squeeze(cs.namespace(|| "r bits"), NUM_CHALLENGE_BITS)?; - let r = le_bits_to_num(cs.namespace(|| "r"), &r_bits)?; - - // W_fold = self.W + r * u.W - let rW = u.W.scalar_mul(cs.namespace(|| "r * u.W"), &r_bits)?; - let W_fold = self.W.add(cs.namespace(|| "self.W + r * u.W"), &rW)?; - - // E_fold = self.E + r * T - let rT = T.scalar_mul(cs.namespace(|| "r * T"), &r_bits)?; - let E_fold = self.E.add(cs.namespace(|| "self.E + r * T"), &rT)?; - - // u_fold = u_r + r - let u_fold = AllocatedNum::alloc(cs.namespace(|| "u_fold"), || { 
- Ok(*self.u.get_value().get()? + r.get_value().get()?) - })?; - cs.enforce( - || "Check u_fold", - |lc| lc, - |lc| lc, - |lc| lc + u_fold.get_variable() - self.u.get_variable() - r.get_variable(), - ); - - // Fold the IO: - // Analyze r into limbs - let r_bn = BigNat::from_num( - cs.namespace(|| "allocate r_bn"), - &Num::from(r), - limb_width, - n_limbs, - )?; - - // Allocate the order of the non-native field as a constant - let m_bn = alloc_bignat_constant( - cs.namespace(|| "alloc m"), - &E::GE::group_params().2, - limb_width, - n_limbs, - )?; - - let mut X_fold = vec![]; - - for (idx, (X, x)) in self.X.iter().zip_eq(u.X.iter()).enumerate() { - let x_bn = BigNat::from_num( - cs.namespace(|| format!("allocate u.X[{idx}]_bn")), - &Num::from(x.clone()), - limb_width, - n_limbs, - )?; - - let (_, r) = x_bn.mult_mod(cs.namespace(|| format!("r*u.X[{idx}]")), &r_bn, &m_bn)?; - let r_new = X.add(&r)?; - let X_i_fold = - r_new.red_mod(cs.namespace(|| format!("reduce folded X[{idx}]")), &m_bn)?; - X_fold.push(X_i_fold); - } - - let X_fold = X_fold.try_into().map_err(|err: Vec<_>| { - SynthesisError::IncompatibleLengthVector(format!("{} != {N}", err.len())) - })?; - - Ok(Self { - W: W_fold, - E: E_fold, - u: u_fold, - X: X_fold, - }) - } - - /// If the condition is true then returns this otherwise it returns the - /// other - pub fn conditionally_select::Base>>( - &self, - cs: CS, - other: &Self, - condition: &Boolean, - ) -> Result { - conditionally_select_alloc_relaxed_r1cs(cs, self, other, condition) - } -} - -/// c = cond ? 
a: b, where a, b: `AllocatedRelaxedR1CSInstance` -pub fn conditionally_select_alloc_relaxed_r1cs< - E: Engine, - CS: ConstraintSystem<::Base>, - const N: usize, ->( - mut cs: CS, - a: &AllocatedRelaxedR1CSInstance, - b: &AllocatedRelaxedR1CSInstance, - condition: &Boolean, -) -> Result, SynthesisError> { - let c_X = - a.X.iter() - .zip_eq(b.X.iter()) - .enumerate() - .map(|(idx, (a, b))| { - conditionally_select_bignat( - cs.namespace(|| format!("X[{idx}] = cond ? a.X[{idx}] : b.X[{idx}]")), - a, - b, - condition, - ) - }) - .collect::, _>>()?; - - let c_X = c_X.try_into().map_err(|err: Vec<_>| { - SynthesisError::IncompatibleLengthVector(format!("{} != {N}", err.len())) - })?; - - let c = AllocatedRelaxedR1CSInstance { - W: conditionally_select_point( - cs.namespace(|| "W = cond ? a.W : b.W"), - &a.W, - &b.W, - condition, - )?, - E: conditionally_select_point( - cs.namespace(|| "E = cond ? a.E : b.E"), - &a.E, - &b.E, - condition, - )?, - u: conditionally_select( - cs.namespace(|| "u = cond ? a.u : b.u"), - &a.u, - &b.u, - condition, - )?, - X: c_X, - }; - Ok(c) -} - -/// c = cond ? a: b, where a, b: `Vec` -pub fn conditionally_select_vec_allocated_relaxed_r1cs_instance< - E: Engine, - CS: ConstraintSystem<::Base>, - const N: usize, ->( - mut cs: CS, - a: &[AllocatedRelaxedR1CSInstance], - b: &[AllocatedRelaxedR1CSInstance], - condition: &Boolean, -) -> Result>, SynthesisError> { - a.iter() - .enumerate() - .zip_eq(b.iter()) - .map(|((i, a), b)| { - a.conditionally_select( - cs.namespace(|| format!("cond ? a[{}]: b[{}]", i, i)), - b, - condition, - ) - }) - .collect::>, _>>() -} - -/// c = cond ? a: b, where a, b: `AllocatedPoint` -pub fn conditionally_select_point>( - mut cs: CS, - a: &AllocatedPoint, - b: &AllocatedPoint, - condition: &Boolean, -) -> Result, SynthesisError> { - let c = AllocatedPoint { - x: conditionally_select( - cs.namespace(|| "x = cond ? a.x : b.x"), - &a.x, - &b.x, - condition, - )?, - y: conditionally_select( - cs.namespace(|| "y = cond ? 
a.y : b.y"), - &a.y, - &b.y, - condition, - )?, - is_infinity: conditionally_select( - cs.namespace(|| "is_infinity = cond ? a.is_infinity : b.is_infinity"), - &a.is_infinity, - &b.is_infinity, - condition, - )?, - }; - Ok(c) -} diff --git a/src/gadgets/utils.rs b/src/gadgets/utils.rs deleted file mode 100644 index 9eb770e..0000000 --- a/src/gadgets/utils.rs +++ /dev/null @@ -1,404 +0,0 @@ -//! This module implements various low-level gadgets -use bellpepper::gadgets::Assignment; -use bellpepper_core::{ - boolean::{AllocatedBit, Boolean}, - num::AllocatedNum, - ConstraintSystem, LinearCombination, SynthesisError, -}; -use ff::{Field, PrimeField, PrimeFieldBits}; -use num_bigint::BigInt; - -use super::nonnative::bignat::{nat_to_limbs, BigNat}; -use crate::traits::Engine; - -/// Gets as input the little indian representation of a number and spits out the -/// number -pub fn le_bits_to_num( - mut cs: CS, - bits: &[AllocatedBit], -) -> Result, SynthesisError> -where - Scalar: PrimeField + PrimeFieldBits, - CS: ConstraintSystem, -{ - // We loop over the input bits and construct the constraint - // and the field element that corresponds to the result - let mut lc = LinearCombination::zero(); - let mut coeff = Scalar::ONE; - let mut fe = Some(Scalar::ZERO); - for bit in bits.iter() { - lc = lc + (coeff, bit.get_variable()); - fe = bit.get_value().map(|val| { - if val { - fe.unwrap() + coeff - } else { - fe.unwrap() - } - }); - coeff = coeff.double(); - } - let num = AllocatedNum::alloc(cs.namespace(|| "Field element"), || { - fe.ok_or(SynthesisError::AssignmentMissing) - })?; - lc = lc - num.get_variable(); - cs.enforce(|| "compute number from bits", |lc| lc, |lc| lc, |_| lc); - Ok(num) -} - -/// Allocate a variable that is set to zero -pub fn alloc_zero>(mut cs: CS) -> AllocatedNum { - let zero = AllocatedNum::alloc_infallible(cs.namespace(|| "alloc"), || F::ZERO); - cs.enforce( - || "check zero is valid", - |lc| lc, - |lc| lc, - |lc| lc + zero.get_variable(), - ); - 
zero -} - -/// Allocate a variable that is set to one -pub fn alloc_one>(mut cs: CS) -> AllocatedNum { - let one = AllocatedNum::alloc_infallible(cs.namespace(|| "alloc"), || F::ONE); - cs.enforce( - || "check one is valid", - |lc| lc + CS::one(), - |lc| lc + CS::one(), - |lc| lc + one.get_variable(), - ); - - one -} - -/// Allocate a scalar as a base. Only to be used is the scalar fits in base! -pub fn alloc_scalar_as_base( - mut cs: CS, - input: Option, -) -> Result, SynthesisError> -where - E: Engine, - CS: ConstraintSystem<::Base>, -{ - AllocatedNum::alloc(cs.namespace(|| "allocate scalar as base"), || { - let val = scalar_as_base::(input.unwrap_or(E::Scalar::ZERO)); - Ok(val) - }) -} - -/// interpret scalar as base -pub fn scalar_as_base(input: E::Scalar) -> E::Base { - let input_bits = input.to_le_bits(); - let mut mult = E::Base::ONE; - let mut val = E::Base::ZERO; - for bit in input_bits { - if bit { - val += mult; - } - mult = mult + mult; - } - val -} - -/// Allocate bignat a constant -pub fn alloc_bignat_constant>( - mut cs: CS, - val: &BigInt, - limb_width: usize, - n_limbs: usize, -) -> Result, SynthesisError> { - let limbs = nat_to_limbs(val, limb_width, n_limbs).unwrap(); - let bignat = BigNat::alloc_from_limbs( - cs.namespace(|| "alloc bignat"), - || Ok(limbs.clone()), - None, - limb_width, - n_limbs, - )?; - // Now enforce that the limbs are all equal to the constants - (0..n_limbs).for_each(|i| { - cs.enforce( - || format!("check limb {i}"), - |lc| lc + &bignat.limbs[i], - |lc| lc + CS::one(), - |lc| lc + (limbs[i], CS::one()), - ); - }); - Ok(bignat) -} - -/// Check that two numbers are equal and return a bit -pub fn alloc_num_equals>( - mut cs: CS, - a: &AllocatedNum, - b: &AllocatedNum, -) -> Result { - // Allocate and constrain `r`: result boolean bit. 
- // It equals `true` if `a` equals `b`, `false` otherwise - let r_value = match (a.get_value(), b.get_value()) { - (Some(a), Some(b)) => Some(a == b), - _ => None, - }; - - let r = AllocatedBit::alloc(cs.namespace(|| "r"), r_value)?; - - // Allocate t s.t. t=1 if a == b else 1/(a - b) - - let t = AllocatedNum::alloc(cs.namespace(|| "t"), || { - let a_val = *a.get_value().get()?; - let b_val = *b.get_value().get()?; - Ok(if a_val == b_val { - F::ONE - } else { - (a_val - b_val).invert().unwrap() - }) - })?; - - cs.enforce( - || "t*(a - b) = 1 - r", - |lc| lc + t.get_variable(), - |lc| lc + a.get_variable() - b.get_variable(), - |lc| lc + CS::one() - r.get_variable(), - ); - - cs.enforce( - || "r*(a - b) = 0", - |lc| lc + r.get_variable(), - |lc| lc + a.get_variable() - b.get_variable(), - |lc| lc, - ); - - Ok(r) -} - -// TODO: Figure out if this can be done better -pub fn conditionally_select_allocated_bit>( - mut cs: CS, - a: &AllocatedBit, - b: &AllocatedBit, - condition: &Boolean, -) -> Result { - let c = AllocatedBit::alloc( - cs.namespace(|| "conditionally select result"), - if condition.get_value().unwrap_or(false) { - a.get_value() - } else { - b.get_value() - }, - )?; - - // a * condition + b*(1-condition) = c -> - // a * condition - b*condition = c - b - cs.enforce( - || "conditional select constraint", - |lc| lc + a.get_variable() - b.get_variable(), - |_| condition.lc(CS::one(), F::ONE), - |lc| lc + c.get_variable() - b.get_variable(), - ); - - Ok(c) -} -/// If condition return a otherwise b where a and b are `BigNats` -pub fn conditionally_select_bignat>( - mut cs: CS, - a: &BigNat, - b: &BigNat, - condition: &Boolean, -) -> Result, SynthesisError> { - assert!(a.limbs.len() == b.limbs.len()); - let c = BigNat::alloc_from_nat( - cs.namespace(|| "conditional select result"), - || { - if *condition.get_value().get()? 
{ - Ok(a.value.get()?.clone()) - } else { - Ok(b.value.get()?.clone()) - } - }, - a.params.limb_width, - a.params.n_limbs, - )?; - - // a * condition + b*(1-condition) = c -> - // a * condition - b*condition = c - b - for i in 0..c.limbs.len() { - cs.enforce( - || format!("conditional select constraint {i}"), - |lc| lc + &a.limbs[i] - &b.limbs[i], - |_| condition.lc(CS::one(), F::ONE), - |lc| lc + &c.limbs[i] - &b.limbs[i], - ); - } - Ok(c) -} - -/// Same as the above but Condition is an `AllocatedNum` that needs to be -/// 0 or 1. 1 => True, 0 => False -pub fn conditionally_select2>( - mut cs: CS, - a: &AllocatedNum, - b: &AllocatedNum, - condition: &AllocatedNum, -) -> Result, SynthesisError> { - let c = AllocatedNum::alloc(cs.namespace(|| "conditional select result"), || { - if *condition.get_value().get()? == F::ONE { - Ok(*a.get_value().get()?) - } else { - Ok(*b.get_value().get()?) - } - })?; - - // a * condition + b*(1-condition) = c -> - // a * condition - b*condition = c - b - cs.enforce( - || "conditional select constraint", - |lc| lc + a.get_variable() - b.get_variable(), - |lc| lc + condition.get_variable(), - |lc| lc + c.get_variable() - b.get_variable(), - ); - - Ok(c) -} - -/// If condition set to 0 otherwise a. Condition is an allocated num -pub fn select_zero_or_num2>( - mut cs: CS, - a: &AllocatedNum, - condition: &AllocatedNum, -) -> Result, SynthesisError> { - let c = AllocatedNum::alloc(cs.namespace(|| "conditional select result"), || { - if *condition.get_value().get()? == F::ONE { - Ok(F::ZERO) - } else { - Ok(*a.get_value().get()?) - } - })?; - - // a * (1 - condition) = c - cs.enforce( - || "conditional select constraint", - |lc| lc + a.get_variable(), - |lc| lc + CS::one() - condition.get_variable(), - |lc| lc + c.get_variable(), - ); - - Ok(c) -} - -/// If condition set to a otherwise 0. 
Condition is an allocated num -pub fn select_num_or_zero2>( - mut cs: CS, - a: &AllocatedNum, - condition: &AllocatedNum, -) -> Result, SynthesisError> { - let c = AllocatedNum::alloc(cs.namespace(|| "conditional select result"), || { - if *condition.get_value().get()? == F::ONE { - Ok(*a.get_value().get()?) - } else { - Ok(F::ZERO) - } - })?; - - cs.enforce( - || "conditional select constraint", - |lc| lc + a.get_variable(), - |lc| lc + condition.get_variable(), - |lc| lc + c.get_variable(), - ); - - Ok(c) -} - -/// If condition set to a otherwise 0 -pub fn select_num_or_zero>( - mut cs: CS, - a: &AllocatedNum, - condition: &Boolean, -) -> Result, SynthesisError> { - let c = AllocatedNum::alloc(cs.namespace(|| "conditional select result"), || { - if *condition.get_value().get()? { - Ok(*a.get_value().get()?) - } else { - Ok(F::ZERO) - } - })?; - - cs.enforce( - || "conditional select constraint", - |lc| lc + a.get_variable(), - |_| condition.lc(CS::one(), F::ONE), - |lc| lc + c.get_variable(), - ); - - Ok(c) -} - -/// If condition set to 1 otherwise a -pub fn select_one_or_num2>( - mut cs: CS, - a: &AllocatedNum, - condition: &AllocatedNum, -) -> Result, SynthesisError> { - let c = AllocatedNum::alloc(cs.namespace(|| "conditional select result"), || { - if *condition.get_value().get()? == F::ONE { - Ok(F::ONE) - } else { - Ok(*a.get_value().get()?) - } - })?; - - cs.enforce( - || "conditional select constraint", - |lc| lc + CS::one() - a.get_variable(), - |lc| lc + condition.get_variable(), - |lc| lc + c.get_variable() - a.get_variable(), - ); - Ok(c) -} - -/// If condition set to 1 otherwise a - b -pub fn select_one_or_diff2>( - mut cs: CS, - a: &AllocatedNum, - b: &AllocatedNum, - condition: &AllocatedNum, -) -> Result, SynthesisError> { - let c = AllocatedNum::alloc(cs.namespace(|| "conditional select result"), || { - if *condition.get_value().get()? == F::ONE { - Ok(F::ONE) - } else { - Ok(*a.get_value().get()? - *b.get_value().get()?) 
- } - })?; - - cs.enforce( - || "conditional select constraint", - |lc| lc + CS::one() - a.get_variable() + b.get_variable(), - |lc| lc + condition.get_variable(), - |lc| lc + c.get_variable() - a.get_variable() + b.get_variable(), - ); - Ok(c) -} - -/// If condition set to a otherwise 1 for boolean conditions -pub fn select_num_or_one>( - mut cs: CS, - a: &AllocatedNum, - condition: &Boolean, -) -> Result, SynthesisError> { - let c = AllocatedNum::alloc(cs.namespace(|| "conditional select result"), || { - if *condition.get_value().get()? { - Ok(*a.get_value().get()?) - } else { - Ok(F::ONE) - } - })?; - - cs.enforce( - || "conditional select constraint", - |lc| lc + a.get_variable() - CS::one(), - |_| condition.lc(CS::one(), F::ONE), - |lc| lc + c.get_variable() - CS::one(), - ); - - Ok(c) -} diff --git a/src/nifs.rs b/src/nifs.rs deleted file mode 100644 index 7b8c387..0000000 --- a/src/nifs.rs +++ /dev/null @@ -1,414 +0,0 @@ -//! This module implements a non-interactive folding scheme -#![allow(non_snake_case)] - -use serde::{Deserialize, Serialize}; - -use crate::{ - constants::{NUM_CHALLENGE_BITS, NUM_FE_FOR_RO, NUM_FE_WITHOUT_IO_FOR_NOVA_FOLD}, - errors::NovaError, - r1cs::{ - R1CSInstance, R1CSResult, R1CSShape, R1CSWitness, RelaxedR1CSInstance, RelaxedR1CSWitness, - }, - scalar_as_base, - traits::{commitment::CommitmentTrait, AbsorbInROTrait, Engine, ROConstants, ROTrait}, - Commitment, CommitmentKey, CompressedCommitment, -}; - -/// A SNARK that holds the proof of a step of an incremental computation -#[allow(clippy::upper_case_acronyms)] -#[derive(Clone, Debug, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct NIFS { - pub(crate) comm_T: CompressedCommitment, -} - -impl NIFS { - /// Takes as input a Relaxed R1CS instance-witness tuple `(U1, W1)` and - /// an R1CS instance-witness tuple `(U2, W2)` with the same structure - /// `shape` and defined with respect to the same `ck`, and outputs - /// a folded Relaxed R1CS instance-witness tuple `(U, W)` 
of the same shape - /// `shape`, with the guarantee that the folded witness `W` satisfies - /// the folded instance `U` if and only if `W1` satisfies `U1` and `W2` - /// satisfies `U2`. - /// - /// Note that this code is tailored for use with Nova's IVC scheme, which - /// enforces certain requirements between the two instances that are - /// folded. In particular, it requires that `U1` and `U2` are such that - /// the hash of `U1` is stored in the public IO of `U2`. - /// In this particular setting, this means that if `U2` is absorbed in the - /// RO, it implicitly absorbs `U1` as well. So the code below avoids - /// absorbing `U1` in the RO. - #[allow(clippy::too_many_arguments)] - #[tracing::instrument(skip_all, level = "trace", name = "NIFS::prove")] - pub fn prove( - ck: &CommitmentKey, - ro_consts: &ROConstants, - pp_digest: &E::Scalar, - S: &R1CSShape, - U1: &RelaxedR1CSInstance, - W1: &RelaxedR1CSWitness, - U2: &R1CSInstance, - W2: &R1CSWitness, - ) -> Result< - ( - Self, - (RelaxedR1CSInstance, RelaxedR1CSWitness), - E::Scalar, - ), - NovaError, - > { - // Check `U1` and `U2` have the same arity - let io_arity = U1.X.len(); - if io_arity != U2.X.len() { - return Err(NovaError::InvalidInputLength); - } - - // initialize a new RO - let mut ro = E::RO::new( - ro_consts.clone(), - NUM_FE_WITHOUT_IO_FOR_NOVA_FOLD + io_arity, - ); - - // append the digest of pp to the transcript - ro.absorb(scalar_as_base::(*pp_digest)); - - // append U2 to transcript, U1 does not need to absorbed since U2.X[0] = - // Hash(params, U1, i, z0, zi) - U2.absorb_in_ro(&mut ro); - - // compute a commitment to the cross-term - let (T, comm_T) = S.commit_T(ck, U1, W1, U2, W2)?; - - // append `comm_T` to the transcript and obtain a challenge - comm_T.absorb_in_ro(&mut ro); - - // compute a challenge from the RO - let r = ro.squeeze(NUM_CHALLENGE_BITS); - - // fold the instance using `r` and `comm_T` - let U = U1.fold(U2, &comm_T, &r); - - // fold the witness using `r` and `T` - let W = 
W1.fold(W2, &T, &r)?; - - // return the folded instance and witness - Ok(( - Self { - comm_T: comm_T.compress(), - }, - (U, W), - r, - )) - } - - /// Takes as input a Relaxed R1CS instance-witness tuple `(U1, W1)` and - /// an R1CS instance-witness tuple `(U2, W2)` with the same structure - /// `shape` and defined with respect to the same `ck`, and updates `(U1, - /// W1)` by folding `(U2, W2)` into it with the guarantee that the - /// updated witness `W` satisfies the updated instance `U` if and only - /// if `W1` satisfies `U1` and `W2` satisfies `U2`. - #[allow(clippy::too_many_arguments)] - #[tracing::instrument(skip_all, level = "trace", name = "NIFS::prove_mut")] - pub fn prove_mut( - ck: &CommitmentKey, - ro_consts: &ROConstants, - pp_digest: &E::Scalar, - S: &R1CSShape, - U1: &mut RelaxedR1CSInstance, - W1: &mut RelaxedR1CSWitness, - U2: &R1CSInstance, - W2: &R1CSWitness, - T: &mut Vec, - ABC_Z_1: &mut R1CSResult, - ABC_Z_2: &mut R1CSResult, - ) -> Result<(Self, E::Scalar), NovaError> { - // initialize a new RO - let mut ro = E::RO::new(ro_consts.clone(), NUM_FE_FOR_RO); - - // append the digest of pp to the transcript - ro.absorb(scalar_as_base::(*pp_digest)); - - // append U2 to transcript, U1 does not need to absorbed since U2.X[0] = - // Hash(params, U1, i, z0, zi) - U2.absorb_in_ro(&mut ro); - - // compute a commitment to the cross-term - let comm_T = S.commit_T_into(ck, U1, W1, U2, W2, T, ABC_Z_1, ABC_Z_2)?; - - // append `comm_T` to the transcript and obtain a challenge - comm_T.absorb_in_ro(&mut ro); - - // compute a challenge from the RO - let r = ro.squeeze(NUM_CHALLENGE_BITS); - - // fold the instance using `r` and `comm_T` - U1.fold_mut(U2, &comm_T, &r); - - // fold the witness using `r` and `T` - W1.fold_mut(W2, T, &r)?; - - // return the commitment - Ok(( - Self { - comm_T: comm_T.compress(), - }, - r, - )) - } - - /// Takes as input a relaxed R1CS instance `U1` and R1CS instance `U2` - /// with the same shape and defined with respect to the 
same parameters, - /// and outputs a folded instance `U` with the same shape, - /// with the guarantee that the folded instance `U` - /// if and only if `U1` and `U2` are satisfiable. - pub fn verify( - &self, - ro_consts: &ROConstants, - pp_digest: &E::Scalar, - U1: &RelaxedR1CSInstance, - U2: &R1CSInstance, - ) -> Result, NovaError> { - // initialize a new RO - let mut ro = E::RO::new(ro_consts.clone(), NUM_FE_FOR_RO); - - // append the digest of pp to the transcript - ro.absorb(scalar_as_base::(*pp_digest)); - - // append U2 to transcript, U1 does not need to absorbed since U2.X[0] = - // Hash(params, U1, i, z0, zi) - U2.absorb_in_ro(&mut ro); - - // append `comm_T` to the transcript and obtain a challenge - let comm_T = Commitment::::decompress(&self.comm_T)?; - comm_T.absorb_in_ro(&mut ro); - - // compute a challenge from the RO - let r = ro.squeeze(NUM_CHALLENGE_BITS); - - // fold the instance using `r` and `comm_T` - let U = U1.fold(U2, &comm_T, &r); - - // return the folded instance - Ok(U) - } -} - -#[cfg(test)] -mod tests { - use ::bellpepper_core::{num::AllocatedNum, ConstraintSystem, SynthesisError}; - use ff::{Field, PrimeField}; - use rand::rngs::OsRng; - - use super::*; - use crate::{ - bellpepper::{ - r1cs::{NovaShape, NovaWitness}, - solver::SatisfyingAssignment, - test_shape_cs::TestShapeCS, - }, - provider::Bn256EngineKZG, - r1cs::commitment_key, - traits::{snark::default_ck_hint, Engine}, - }; - - fn synthesize_tiny_r1cs_bellpepper>( - cs: &mut CS, - x_val: Option, - ) -> Result<(), SynthesisError> { - // Consider a cubic equation: `x^3 + x + 5 = y`, where `x` and `y` are - // respectively the input and output. 
- let x = AllocatedNum::alloc_infallible(cs.namespace(|| "x"), || x_val.unwrap()); - let _ = x.inputize(cs.namespace(|| "x is input")); - - let x_sq = x.square(cs.namespace(|| "x_sq"))?; - let x_cu = x_sq.mul(cs.namespace(|| "x_cu"), &x)?; - let y = AllocatedNum::alloc(cs.namespace(|| "y"), || { - Ok(x_cu.get_value().unwrap() + x.get_value().unwrap() + Scalar::from(5u64)) - })?; - let _ = y.inputize(cs.namespace(|| "y is output")); - - cs.enforce( - || "y = x^3 + x + 5", - |lc| { - lc + x_cu.get_variable() - + x.get_variable() - + CS::one() - + CS::one() - + CS::one() - + CS::one() - + CS::one() - }, - |lc| lc + CS::one(), - |lc| lc + y.get_variable(), - ); - - Ok(()) - } - - fn test_tiny_r1cs_bellpepper_with() { - // First create the shape - let mut cs: TestShapeCS = TestShapeCS::new(); - let _ = synthesize_tiny_r1cs_bellpepper(&mut cs, None); - let (shape, ck) = cs.r1cs_shape_and_key(&*default_ck_hint()); - let ro_consts = <::RO as ROTrait< - ::Base, - ::Scalar, - >>::Constants::default(); - - // Now get the instance and assignment for one instance - let mut cs = SatisfyingAssignment::::new(); - let _ = synthesize_tiny_r1cs_bellpepper(&mut cs, Some(E::Scalar::from(5))); - let (U1, W1) = cs.r1cs_instance_and_witness(&shape, &ck).unwrap(); - - // Make sure that the first instance is satisfiable - shape.is_sat(&ck, &U1, &W1).unwrap(); - - // Now get the instance and assignment for second instance - let mut cs = SatisfyingAssignment::::new(); - let _ = synthesize_tiny_r1cs_bellpepper(&mut cs, Some(E::Scalar::from(135))); - let (U2, W2) = cs.r1cs_instance_and_witness(&shape, &ck).unwrap(); - - // Make sure that the second instance is satisfiable - shape.is_sat(&ck, &U2, &W2).unwrap(); - - // execute a sequence of folds - execute_sequence( - &ck, - &ro_consts, - &::Scalar::ZERO, - &shape, - &U1, - &W1, - &U2, - &W2, - ); - } - - #[test] - fn test_tiny_r1cs_bellpepper() { - test_tiny_r1cs_bellpepper_with::(); - } - - fn execute_sequence( - ck: &CommitmentKey, - 
ro_consts: &<::RO as ROTrait<::Base, ::Scalar>>::Constants, - pp_digest: &::Scalar, - shape: &R1CSShape, - U1: &R1CSInstance, - W1: &R1CSWitness, - U2: &R1CSInstance, - W2: &R1CSWitness, - ) { - // produce a default running instance - let mut r_W = RelaxedR1CSWitness::default(shape); - let mut r_U = RelaxedR1CSInstance::default(ck, shape); - - // produce a step SNARK with (W1, U1) as the first incoming witness-instance - // pair - let res = NIFS::prove(ck, ro_consts, pp_digest, shape, &r_U, &r_W, U1, W1); - assert!(res.is_ok()); - let (nifs, (_U, W), _) = res.unwrap(); - - // verify the step SNARK with U1 as the first incoming instance - let res = nifs.verify(ro_consts, pp_digest, &r_U, U1); - assert!(res.is_ok()); - let U = res.unwrap(); - - assert_eq!(U, _U); - - // update the running witness and instance - r_W = W; - r_U = U; - - // produce a step SNARK with (W2, U2) as the second incoming witness-instance - // pair - let res = NIFS::prove(ck, ro_consts, pp_digest, shape, &r_U, &r_W, U2, W2); - assert!(res.is_ok()); - let (nifs, (_U, W), _) = res.unwrap(); - - // verify the step SNARK with U1 as the first incoming instance - let res = nifs.verify(ro_consts, pp_digest, &r_U, U2); - assert!(res.is_ok()); - let U = res.unwrap(); - - assert_eq!(U, _U); - - // update the running witness and instance - r_W = W; - r_U = U; - - // check if the running instance is satisfiable - shape.is_sat_relaxed(ck, &r_U, &r_W).unwrap(); - } - - fn test_tiny_r1cs_with() { - let num_vars = 3; - let S = crate::r1cs::tests::tiny_r1cs::(num_vars); - let one = ::ONE; - - // generate generators and ro constants - let ck = commitment_key(&S, &*default_ck_hint()); - let ro_consts = <::RO as ROTrait< - ::Base, - ::Scalar, - >>::Constants::default(); - - let rand_inst_witness_generator = |ck: &CommitmentKey, - I: &E::Scalar| - -> (E::Scalar, R1CSInstance, R1CSWitness) { - let i0 = *I; - - // compute a satisfying (vars, X) tuple - let (O, vars, X) = { - let z0 = i0 * i0; // constraint 0 - let z1 
= i0 * z0; // constraint 1 - let z2 = z1 + i0; // constraint 2 - let i1 = z2 + one + one + one + one + one; // constraint 3 - - // store the witness and IO for the instance - let W = vec![z0, z1, z2]; - let X = vec![i0, i1]; - (i1, W, X) - }; - - let W = { - let res = R1CSWitness::new(&S, vars); - assert!(res.is_ok()); - res.unwrap() - }; - let U = { - let comm_W = W.commit(ck); - let res = R1CSInstance::new(&S, comm_W, X); - assert!(res.is_ok()); - res.unwrap() - }; - - // check that generated instance is satisfiable - S.is_sat(ck, &U, &W).unwrap(); - - (O, U, W) - }; - - let mut csprng: OsRng = OsRng; - let I = E::Scalar::random(&mut csprng); // the first input is picked randomly for the first instance - let (O, U1, W1) = rand_inst_witness_generator(&ck, &I); - let (_O, U2, W2) = rand_inst_witness_generator(&ck, &O); - - // execute a sequence of folds - execute_sequence( - &ck, - &ro_consts, - &::Scalar::ZERO, - &S, - &U1, - &W1, - &U2, - &W2, - ); - } - - #[test] - fn test_tiny_r1cs() { - test_tiny_r1cs_with::(); - } -} diff --git a/src/provider/bn256_grumpkin.rs b/src/provider/bn256_grumpkin.rs deleted file mode 100644 index e7b5a5f..0000000 --- a/src/provider/bn256_grumpkin.rs +++ /dev/null @@ -1,113 +0,0 @@ -//! This module implements the Nova traits for `bn256::Point`, `bn256::Scalar`, -//! `grumpkin::Point`, `grumpkin::Scalar`. 
-use std::io::Read; - -use digest::{ExtendableOutput, Update}; -use ff::{FromUniformBytes, PrimeField}; -use group::{cofactor::CofactorCurveAffine, Curve, Group as AnotherGroup}; -#[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))] -use grumpkin_msm::{bn256 as bn256_msm, grumpkin as grumpkin_msm}; -// Remove this when https://github.com/zcash/pasta_curves/issues/41 resolves -use halo2curves::{bn256::G2Affine, CurveAffine, CurveExt}; -use num_bigint::BigInt; -use num_traits::Num; -use rayon::prelude::*; -use sha3::Shake256; - -use crate::{ - impl_traits, - provider::{traits::DlogGroup, util::msm::cpu_best_msm}, - traits::{Group, PrimeFieldExt, TranscriptReprTrait}, -}; - -// Thus compile-time assertions checks important assumptions in the memory -// representation of group data that supports the use of Abomonation. -static_assertions::assert_eq_size!(G2Affine, [u64; 16]); - -/// Re-exports that give access to the standard aliases used in the code base, -/// for bn256 -pub mod bn256 { - pub use halo2curves::bn256::{ - Fq as Base, Fr as Scalar, G1Affine as Affine, G1Compressed as Compressed, G1 as Point, - }; -} - -/// Re-exports that give access to the standard aliases used in the code base, -/// for grumpkin -pub mod grumpkin { - pub use halo2curves::grumpkin::{ - Fq as Base, Fr as Scalar, G1Affine as Affine, G1Compressed as Compressed, G1 as Point, - }; -} - -#[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))] -impl_traits!( - bn256, - "30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001", - "30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd47", - bn256_msm -); -#[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))] -impl_traits!( - bn256, - "30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001", - "30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd47" -); - -#[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))] -impl_traits!( - grumpkin, - 
"30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd47", - "30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001", - grumpkin_msm -); -#[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))] -impl_traits!( - grumpkin, - "30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd47", - "30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001" -); - -#[cfg(test)] -mod tests { - use ff::Field; - use rand::thread_rng; - - use crate::provider::{ - bn256_grumpkin::{bn256, grumpkin}, - traits::DlogGroup, - util::msm::cpu_best_msm, - }; - - #[test] - fn test_bn256_msm_correctness() { - let npoints = 1usize << 16; - let points = bn256::Point::from_label(b"test", npoints); - - let mut rng = thread_rng(); - let scalars = (0..npoints) - .map(|_| bn256::Scalar::random(&mut rng)) - .collect::>(); - - let cpu_msm = cpu_best_msm(&points, &scalars); - let gpu_msm = bn256::Point::vartime_multiscalar_mul(&scalars, &points); - - assert_eq!(cpu_msm, gpu_msm); - } - - #[test] - fn test_grumpkin_msm_correctness() { - let npoints = 1usize << 16; - let points = grumpkin::Point::from_label(b"test", npoints); - - let mut rng = thread_rng(); - let scalars = (0..npoints) - .map(|_| grumpkin::Scalar::random(&mut rng)) - .collect::>(); - - let cpu_msm = cpu_best_msm(&points, &scalars); - let gpu_msm = grumpkin::Point::vartime_multiscalar_mul(&scalars, &points); - - assert_eq!(cpu_msm, gpu_msm); - } -} diff --git a/src/provider/hyperkzg.rs b/src/provider/hyperkzg.rs deleted file mode 100644 index a845938..0000000 --- a/src/provider/hyperkzg.rs +++ /dev/null @@ -1,923 +0,0 @@ -//! This module implements Nova's evaluation engine using `HyperKZG`, a -//! KZG-based polynomial commitment for multilinear polynomials HyperKZG is based on the transformation from univariate PCS to multilinear PCS in the Gemini paper (section 2.4.2 in ``). -//! However, there are some key differences: -//! 
(1) HyperKZG works with multilinear polynomials represented in evaluation -//! form (rather than in coefficient form in Gemini's transformation). -//! This means that Spartan's polynomial IOP can use commit to its polynomials -//! as-is without incurring any interpolations or FFTs. (2) HyperKZG is -//! specialized to use KZG as the univariate commitment scheme, so it includes -//! several optimizations (both during the transformation of -//! multilinear-to-univariate claims and within the KZG commitment scheme -//! implementation itself). (3) HyperKZG also includes optimisation based on so called Shplonk/HaloInfinite technique (``). -//! Compared to pure HyperKZG, this optimisation in theory improves prover (at -//! cost of using 1 fixed KZG opening) and verifier (at cost of eliminating MSM) -#![allow(non_snake_case)] -use core::marker::PhantomData; -use std::sync::Arc; - -use ff::{Field, PrimeFieldBits}; -use group::{prime::PrimeCurveAffine as _, Curve, Group as _}; -use itertools::Itertools as _; -use pairing::{Engine, MillerLoopResult, MultiMillerLoop}; -use rayon::{ - iter::{ - IndexedParallelIterator, IntoParallelIterator, IntoParallelRefMutIterator, ParallelIterator, - }, - prelude::*, -}; -use ref_cast::RefCast as _; -use serde::{de::DeserializeOwned, Deserialize, Serialize}; - -use crate::{ - errors::NovaError, - provider::{ - kzg_commitment::{KZGCommitmentEngine, KZGProverKey, KZGVerifierKey, UniversalKZGParam}, - pedersen::Commitment, - traits::DlogGroup, - util::iterators::IndexedParallelIteratorExt as _, - }, - spartan::{math::Math, polys::univariate::UniPoly}, - traits::{ - commitment::{CommitmentEngineTrait, Len}, - evaluation::EvaluationEngineTrait, - Engine as NovaEngine, Group, TranscriptEngineTrait, TranscriptReprTrait, - }, -}; - -/// Provides an implementation of a polynomial evaluation argument -#[derive(Clone, Debug, Serialize, Deserialize)] -#[serde(bound( - serialize = "E::G1Affine: Serialize, E::Fr: Serialize", - deserialize = "E::G1Affine: 
Deserialize<'de>, E::Fr: Deserialize<'de>" -))] -pub struct EvaluationArgument { - comms: Vec, - evals: Vec>, - R_x: Vec, - C_Q: E::G1Affine, - C_H: E::G1Affine, -} - -/// Provides an implementation of a polynomial evaluation engine using KZG -#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct EvaluationEngine { - _p: PhantomData<(E, NE)>, -} - -// This impl block defines helper functions that are not a part of -// EvaluationEngineTrait, but that we will use to implement the trait methods. -impl EvaluationEngine -where - E: Engine, - NE: NovaEngine>, - E::G1: DlogGroup, - // the following bounds repeat existing, satisfied bounds on associated types of the above - // but are required since the equality constraints we use in the above do not transitively - // carry bounds we should be able to remove most of those constraints when rust supports - // associated_type_bounds - E::Fr: Serialize + DeserializeOwned, - E::G1Affine: Serialize + DeserializeOwned, - E::G1Affine: TranscriptReprTrait, // TODO: this bound on DlogGroup is really unusable! 
- E::G2Affine: Serialize + DeserializeOwned, - E::Fr: PrimeFieldBits + TranscriptReprTrait, - ::Base: TranscriptReprTrait, -{ - fn compute_challenge( - com: &[E::G1Affine], - transcript: &mut impl TranscriptEngineTrait, - ) -> E::Fr { - transcript.absorb(b"c", &com); - transcript.squeeze(b"c").unwrap() - } - - // Compute challenge q = Hash(vk, C0, ..., C_{k-1}, u0, ...., u_{t-1}, - // (f_i(u_j))_{i=0..k-1,j=0..t-1}) - // It is assumed that both 'C' and 'u' are already absorbed by the transcript - fn get_batch_challenge( - v: &[Vec], - transcript: &mut impl TranscriptEngineTrait, - ) -> E::Fr { - transcript.absorb( - b"v", - &v.iter() - .flatten() - .cloned() - .collect::>() - .as_slice(), - ); - - transcript.squeeze(b"r").unwrap() - } - - fn compute_a(c_q: &E::G1Affine, transcript: &mut impl TranscriptEngineTrait) -> E::Fr { - transcript.absorb(b"C_Q", c_q); - transcript.squeeze(b"a").unwrap() - } - - fn compute_pi_polynomials(hat_P: &[E::Fr], point: &[E::Fr]) -> Vec> { - let mut polys: Vec> = Vec::new(); - polys.push(hat_P.to_vec()); - - for i in 0..point.len() - 1 { - let Pi_len = polys[i].len() / 2; - let mut Pi = vec![E::Fr::ZERO; Pi_len]; - - (0..Pi_len) - .into_par_iter() - .map(|j| { - point[point.len() - i - 1] * (polys[i][2 * j + 1] - polys[i][2 * j]) - + polys[i][2 * j] - }) - .collect_into_vec(&mut Pi); - - polys.push(Pi); - } - - assert_eq!(polys.len(), hat_P.len().log_2()); - - polys - } - - fn compute_commitments( - ck: &UniversalKZGParam, - _C: &Commitment, - polys: &[Vec], - ) -> Vec { - let comms: Vec = (1..polys.len()) - .into_par_iter() - .map(|i| >::commit(ck, &polys[i]).comm) - .collect(); - - let mut comms_affine: Vec = vec![E::G1Affine::identity(); comms.len()]; - NE::GE::batch_normalize(&comms, &mut comms_affine); - comms_affine - } - - fn compute_evals(polys: &[Vec], u: &[E::Fr]) -> Vec> { - let mut v = vec![vec!(E::Fr::ZERO; polys.len()); u.len()]; - v.par_iter_mut().enumerate().for_each(|(i, v_i)| { - // for each point u - 
v_i.par_iter_mut().zip_eq(polys).for_each(|(v_ij, f)| { - // for each poly f (except the last one - since it is constant) - *v_ij = UniPoly::ref_cast(f).evaluate(&u[i]); - }); - }); - v - } - - fn compute_k_polynomial( - batched_Pi: &UniPoly, - Q_x: &UniPoly, - D: &UniPoly, - R_x: &UniPoly, - a: E::Fr, - ) -> UniPoly { - let mut tmp = Q_x.clone(); - tmp *= &D.evaluate(&a); - tmp[0] += &R_x.evaluate(&a); - let mut K_x = batched_Pi.clone(); - K_x -= &tmp; - K_x - } -} - -impl EvaluationEngineTrait for EvaluationEngine -where - E: MultiMillerLoop, - NE: NovaEngine>, - E::Fr: Serialize + DeserializeOwned, - E::G1Affine: Serialize + DeserializeOwned, - E::G2Affine: Serialize + DeserializeOwned, - E::G1: DlogGroup, - ::Base: TranscriptReprTrait, /* Note: due to the move of the bound - * TranscriptReprTrait on G::Base - * from Group to Engine */ - E::Fr: PrimeFieldBits, // TODO due to use of gen_srs_for_testing, make optional - E::Fr: TranscriptReprTrait, - E::G1Affine: TranscriptReprTrait, -{ - type EvaluationArgument = EvaluationArgument; - type ProverKey = KZGProverKey; - type VerifierKey = KZGVerifierKey; - - fn setup(ck: Arc>) -> (Self::ProverKey, Self::VerifierKey) { - let len = ck.length() - 1; - UniversalKZGParam::trim(ck, len) - } - - fn prove( - ck: &UniversalKZGParam, - _pk: &Self::ProverKey, - transcript: &mut ::TE, - _C: &Commitment, - hat_P: &[E::Fr], - point: &[E::Fr], - _eval: &E::Fr, - ) -> Result { - let x: Vec = point.to_vec(); - let ell = x.len(); - let n = hat_P.len(); - assert_eq!(n, 1 << ell); // Below we assume that n is a power of two - - // Phase 1 -- create commitments com_1, ..., com_\ell - // We do not compute final Pi (and its commitment as well since it is already - // committed according to EvaluationEngineTrait API) as it is constant and - // equals to 'eval' also known to verifier, so can be derived on its - // side as well - let polys = Self::compute_pi_polynomials(hat_P, point); - let comms = Self::compute_commitments(ck, _C, &polys); - 
- // Phase 2 - let r = Self::compute_challenge(&comms, transcript); - let u = vec![r, -r, r * r]; - let evals = Self::compute_evals(&polys, &u); - - // Phase 3 - // Compute B(x) = f_0(x) + q * f_1(x) + ... + q^(k-1) * f_{k-1}(x) - let q = Self::get_batch_challenge(&evals, transcript); - let batched_Pi: UniPoly = polys.into_par_iter().map(UniPoly::new).rlc(&q); - - // Q(x), R(x) = P(x) / D(x), where D(x) = (x - r) * (x + r) * (x - r^2) = 1 * - // x^3 - r^2 * x^2 - r^2 * x + r^4 - let D = UniPoly::new(vec![u[2] * u[2], -u[2], -u[2], E::Fr::from(1)]); - let (Q_x, R_x) = batched_Pi.divide_with_q_and_r(&D).unwrap(); - - let C_Q = >::commit(ck, &Q_x.coeffs) - .comm - .to_affine(); - - let a = Self::compute_a(&C_Q, transcript); - - // K(x) = P(x) - Q(x) * D(a) - R(a), note that R(a) should be subtracted from a - // free term of polynomial - let K_x = Self::compute_k_polynomial(&batched_Pi, &Q_x, &D, &R_x, a); - - // TODO: since this is a usual KZG10 we should use it as utility instead - let h = K_x.divide_minus_u(a); - let C_H = >::commit(ck, &h.coeffs) - .comm - .to_affine(); - - Ok(EvaluationArgument:: { - comms, - evals, - R_x: R_x.coeffs, - C_Q, - C_H, - }) - } - - /// A method to verify purported evaluations of a batch of polynomials - fn verify( - vk: &Self::VerifierKey, - transcript: &mut ::TE, - C: &Commitment, - point: &[E::Fr], - P_of_x: &E::Fr, - pi: &Self::EvaluationArgument, - ) -> Result<(), NovaError> { - let r = Self::compute_challenge(&pi.comms, transcript); - let u = [r, -r, r * r]; - - if pi.evals.len() != u.len() { - return Err(NovaError::ProofVerifyError); - } - if pi.R_x.len() != u.len() { - return Err(NovaError::ProofVerifyError); - } - - let mut comms = pi.comms.to_vec(); - comms.insert(0, C.comm.to_affine()); - - let q = Self::get_batch_challenge(&pi.evals, transcript); - let R_x = UniPoly::new(pi.R_x.clone()); - - let verification_failed = pi.evals.iter().zip_eq(u.iter()).any(|(evals_i, u_i)| { - // here we check correlation between R polynomial 
and batched evals, e.g.: - // 1) R(r) == eval at r - // 2) R(-r) == eval at -r - // 3) R(r^2) == eval at r^2 - let batched_eval = UniPoly::ref_cast(evals_i).evaluate(&q); - batched_eval != R_x.evaluate(u_i) - }); - if verification_failed { - return Err(NovaError::ProofVerifyError); - } - - // here we check that Pi polynomials were correctly constructed by the prover, - // using 'r' as a random point, e.g: P_i_even = P_i(r) + P_i(-r) * 1/2 - // P_i_odd = P_i(r) - P_i(-r) * 1/2*r - // P_i+1(r^2) == (1 - point_i) * P_i_even + point_i * P_i_odd -> should hold, - // according to Gemini transformation - let mut point = point.to_vec(); - point.reverse(); - - let r_mul_2 = E::Fr::from(2) * r; - #[allow(clippy::disallowed_methods)] - let verification_failed = pi.evals[0] - .par_iter() - .chain(&[*P_of_x]) - .zip_eq(pi.evals[1].par_iter().chain(&[*P_of_x])) - .zip(pi.evals[2][1..].par_iter().chain(&[*P_of_x])) - .enumerate() - .any(|(index, ((eval_r, eval_minus_r), eval_r_squared))| { - // some optimisation to avoid using expensive inversions: - // P_i+1(r^2) == (1 - point_i) * (P_i(r) + P_i(-r)) * 1/2 + point_i * (P_i(r) - - // P_i(-r)) * 1/2 * r is equivalent to: - // 2 * r * P_i+1(r^2) == r * (1 - point_i) * (P_i(r) + P_i(-r)) + point_i * - // (P_i(r) - P_i(-r)) - - let even = *eval_r + eval_minus_r; - let odd = *eval_r - eval_minus_r; - let right = r * ((E::Fr::ONE - point[index]) * even) + (point[index] * odd); - let left = *eval_r_squared * r_mul_2; - left != right - }); - - if verification_failed { - return Err(NovaError::ProofVerifyError); - } - - let C_P: E::G1 = comms.par_iter().map(|comm| comm.to_curve()).rlc(&q); - let C_Q = pi.C_Q; - let C_H = pi.C_H; - let r_squared = u[2]; - - // D = (x - r) * (x + r) * (x - r^2) = 1 * x^3 - r^2 * x^2 - r^2 * x + r^4 - let D = UniPoly::new(vec![ - r_squared * r_squared, - -r_squared, - -r_squared, - E::Fr::from(1), - ]); - - let a = Self::compute_a(&C_Q, transcript); - - let C_K = C_P - (C_Q * D.evaluate(&a) + vk.g * 
R_x.evaluate(&a)); - - let pairing_inputs: Vec<(E::G1Affine, E::G2Prepared)> = vec![ - (C_H, vk.beta_h.into()), - ((C_H * (-a) - C_K).to_affine(), vk.h.into()), - ]; - - #[allow(clippy::map_identity)] - let pairing_input_refs = pairing_inputs - .iter() - .map(|(a, b)| (a, b)) - .collect::>(); - - let pairing_result = - E::multi_miller_loop(pairing_input_refs.as_slice()).final_exponentiation(); - let successful: bool = pairing_result.is_identity().into(); - if !successful { - return Err(NovaError::ProofVerifyError); - } - Ok(()) - } -} - -#[cfg(test)] -mod tests { - use bincode::Options; - use expect_test::expect; - use halo2curves::bn256::G1; - use itertools::Itertools; - - use super::*; - use crate::{ - provider::{ - keccak::Keccak256Transcript, - util::{ - iterators::DoubleEndedIteratorExt as _, test_utils::prove_verify_from_num_vars, - }, - }, - spartan::powers, - traits::TranscriptEngineTrait, - zip_with, CommitmentEngineTrait, CommitmentKey, - }; - - type E = halo2curves::bn256::Bn256; - type NE = crate::provider::Bn256EngineKZG; - type Fr = ::Scalar; - - fn test_commitment_to_k_polynomial_correctness( - ck: &CommitmentKey, - C: &Commitment, - poly: &[Fr], - point: &[Fr], - _eval: &Fr, - ) { - let polys = EvaluationEngine::::compute_pi_polynomials(poly, point); - let mut comms = EvaluationEngine::::compute_commitments(ck, C, &polys); - comms.insert(0, C.comm.to_affine()); - - let q = Fr::from(8165763); - let q_powers = batch_challenge_powers(q, polys.len()); - let batched_Pi: UniPoly = polys.clone().into_iter().map(UniPoly::new).rlc(&q); - - let r = Fr::from(1354678); - let r_squared = r * r; - - let divident = batched_Pi.clone(); - let D = UniPoly::new(vec![ - r_squared * r_squared, - -r_squared, - -r_squared, - Fr::from(1), - ]); - let (Q_x, R_x) = divident.divide_with_q_and_r(&D).unwrap(); - - let a = Fr::from(938576); - - let K_x = EvaluationEngine::::compute_k_polynomial(&batched_Pi, &Q_x, &D, &R_x, a); - - let mut C_P = G1::identity(); - 
q_powers.iter().zip_eq(comms.iter()).for_each(|(q_i, C_i)| { - C_P += *C_i * q_i; - }); - - let C_Q = <::CE as CommitmentEngineTrait< - NE, - >>::commit(ck, &Q_x.coeffs) - .comm - .to_affine(); - - // Check that Cp - Cq * D(a) - g1 * R(a) == MSM(ck, K(x)) - let C_K = C_P - C_Q * D.evaluate(&a) - ck.powers_of_g[0] * R_x.evaluate(&a); - - let C_K_expected = - <::CE as CommitmentEngineTrait>::commit( - ck, - &K_x.coeffs, - ) - .comm - .to_affine(); - - assert_eq!(C_K_expected, C_K.to_affine()); - } - - fn test_k_polynomial_correctness(poly: &[Fr], point: &[Fr], _eval: &Fr) { - let polys = EvaluationEngine::::compute_pi_polynomials(poly, point); - let q = Fr::from(8165763); - let batched_Pi: UniPoly = polys.clone().into_iter().map(UniPoly::new).rlc(&q); - - let r = Fr::from(56263); - let r_squared = r * r; - - let divident = batched_Pi.clone(); - let D = UniPoly::new(vec![ - r_squared * r_squared, - -r_squared, - -r_squared, - Fr::from(1), - ]); - let (Q_x, R_x) = divident.divide_with_q_and_r(&D).unwrap(); - - let a = Fr::from(190837645); - - let K_x = EvaluationEngine::::compute_k_polynomial(&batched_Pi, &Q_x, &D, &R_x, a); - - assert_eq!(Fr::from(0), K_x.evaluate(&a)); - } - - fn test_d_polynomial_correctness(poly: &[Fr], point: &[Fr], _eval: &Fr) { - let polys = EvaluationEngine::::compute_pi_polynomials(poly, point); - let q = Fr::from(8165763); - let batched_Pi: UniPoly = polys.clone().into_iter().map(UniPoly::new).rlc(&q); - - let r = Fr::from(2895776832); - let r_squared = r * r; - - let divident = batched_Pi.clone(); - // D(x) = (x - r) * (x + r) * (x - r^2) - let D = UniPoly::new(vec![ - r_squared * r_squared, - -r_squared, - -r_squared, - Fr::from(1), - ]); - let (Q_x, R_x) = divident.divide_with_q_and_r(&D).unwrap(); - - let evaluation_scalar = Fr::from(182746); - assert_eq!( - batched_Pi.evaluate(&evaluation_scalar), - D.evaluate(&evaluation_scalar) * Q_x.evaluate(&evaluation_scalar) - + R_x.evaluate(&evaluation_scalar) - ); - - // Check that Q(x) = (P(x) - 
R(x)) / D(x) - let mut P_x = batched_Pi.clone(); - let minus_R_x = UniPoly::new( - R_x.clone() - .coeffs - .into_iter() - .map(|coeff| -coeff) - .collect::>(), - ); - P_x += &minus_R_x; - - let divident = P_x.clone(); - let (Q_x_recomputed, _) = divident.divide_with_q_and_r(&D).unwrap(); - - assert_eq!(Q_x, Q_x_recomputed); - } - - fn test_batching_property_on_evaluation(poly: &[Fr], point: &[Fr], _eval: &Fr) { - let polys = EvaluationEngine::::compute_pi_polynomials(poly, point); - - let q = Fr::from(97652); - let u = [Fr::from(10), Fr::from(20), Fr::from(50)]; - - let batched_Pi: UniPoly = polys.clone().into_iter().map(UniPoly::new).rlc(&q); - - let q_powers = batch_challenge_powers(q, polys.len()); - for evaluation_scalar in u.iter() { - let evals = polys - .clone() - .into_iter() - .map(|poly| UniPoly::new(poly).evaluate(evaluation_scalar)) - .collect::>(); - - let expected = zip_with!((evals.iter(), q_powers.iter()), |eval, q| eval * q) - .collect::>() - .into_iter() - .sum::(); - - let actual = batched_Pi.evaluate(evaluation_scalar); - assert_eq!(expected, actual); - } - } - - #[test] - fn test_hyperkzg_shplonk_unit_tests() { - // poly = [1, 2, 1, 4, 1, 2, 1, 4] - let poly = vec![ - Fr::ONE, - Fr::from(2), - Fr::from(1), - Fr::from(4), - Fr::ONE, - Fr::from(2), - Fr::from(1), - Fr::from(4), - ]; - - // point = [4,3,8] - let point = vec![Fr::from(4), Fr::from(3), Fr::from(8)]; - - // eval = 57 - let eval = Fr::from(57); - - let ck: CommitmentKey = - as CommitmentEngineTrait>::setup(b"test", poly.len()); - - let ck = Arc::new(ck); - let C: Commitment = KZGCommitmentEngine::commit(&ck, &poly); - - test_batching_property_on_evaluation(&poly, &point, &eval); - test_d_polynomial_correctness(&poly, &point, &eval); - test_k_polynomial_correctness(&poly, &point, &eval); - test_commitment_to_k_polynomial_correctness(&ck, &C, &poly, &point, &eval); - } - - #[test] - fn test_hyperkzg_shplonk_pcs() { - let n = 8; - - // poly = [1, 2, 1, 4, 1, 2, 1, 4] - let poly = vec![ - 
Fr::ONE, - Fr::from(2), - Fr::from(1), - Fr::from(4), - Fr::ONE, - Fr::from(2), - Fr::from(1), - Fr::from(4), - ]; - - // point = [4,3,8] - let point = vec![Fr::from(4), Fr::from(3), Fr::from(8)]; - - // eval = 57 - let eval = Fr::from(57); - - let ck: CommitmentKey = - as CommitmentEngineTrait>::setup(b"test", n); - let ck = Arc::new(ck); - let (pk, vk): (KZGProverKey, KZGVerifierKey) = - EvaluationEngine::::setup(ck.clone()); - - // make a commitment - let C: Commitment = KZGCommitmentEngine::commit(&ck, &poly); - - let mut prover_transcript = Keccak256Transcript::new(b"TestEval"); - let proof = EvaluationEngine::::prove( - &ck, - &pk, - &mut prover_transcript, - &C, - &poly, - &point, - &eval, - ) - .unwrap(); - - let mut verifier_transcript = Keccak256Transcript::::new(b"TestEval"); - EvaluationEngine::::verify(&vk, &mut verifier_transcript, &C, &point, &eval, &proof) - .unwrap(); - } - - #[test] - fn test_hyperkzg_shplonk_pcs_negative() { - let n = 8; - // poly = [1, 2, 1, 4, 1, 2, 1, 4] - let poly = vec![ - Fr::ONE, - Fr::from(2), - Fr::from(1), - Fr::from(4), - Fr::ONE, - Fr::from(2), - Fr::from(1), - Fr::from(4), - ]; - // point = [4,3,8] - let point = vec![Fr::from(4), Fr::from(3), Fr::from(8)]; - // eval = 57 - let eval = Fr::from(57); - - // eval = 57 - let eval1 = Fr::from(56); // wrong eval - test_negative_inner(n, &poly, &point, &eval1); - - // point = [4,3,8] - let point1 = vec![Fr::from(4), Fr::from(3), Fr::from(7)]; // wrong point - test_negative_inner(n, &poly, &point1, &eval); - - // poly = [1, 2, 1, 4, 1, 2, 1, 4] - let poly1 = vec![ - Fr::ONE, - Fr::from(2), - Fr::from(1), - Fr::from(4), - Fr::ONE, - Fr::from(2), - Fr::from(200), - Fr::from(100), - ]; // wrong poly - test_negative_inner(n, &poly1, &point, &eval); - } - - fn test_negative_inner(n: usize, poly: &[Fr], point: &[Fr], eval: &Fr) { - let ck: CommitmentKey = - as CommitmentEngineTrait>::setup(b"test", n); - let ck = Arc::new(ck); - let (pk, vk): (KZGProverKey, KZGVerifierKey) = - 
EvaluationEngine::::setup(ck.clone()); - - // make a commitment - let C: Commitment = KZGCommitmentEngine::commit(&ck, poly); - - let mut prover_transcript = Keccak256Transcript::new(b"TestEval"); - let mut verifier_transcript = Keccak256Transcript::::new(b"TestEval"); - - let proof = EvaluationEngine::::prove( - &ck, - &pk, - &mut prover_transcript, - &C, - poly, - point, - eval, - ) - .unwrap(); - - assert!(EvaluationEngine::::verify( - &vk, - &mut verifier_transcript, - &C, - point, - eval, - &proof - ) - .is_err()); - } - - #[test] - fn test_hyperkzg_shplonk_pcs_negative_wrong_commitment() { - let n = 8; - // poly = [1, 2, 1, 4, 1, 2, 1, 4] - let poly = vec![ - Fr::ONE, - Fr::from(2), - Fr::from(1), - Fr::from(4), - Fr::ONE, - Fr::from(2), - Fr::from(1), - Fr::from(4), - ]; - // point = [4,3,8] - let point = vec![Fr::from(4), Fr::from(3), Fr::from(8)]; - // eval = 57 - let eval = Fr::from(57); - - // altered_poly = [85, 84, 83, 82, 81, 80, 79, 78] - let altered_poly = vec![ - Fr::from(85), - Fr::from(84), - Fr::from(83), - Fr::from(82), - Fr::from(81), - Fr::from(80), - Fr::from(79), - Fr::from(78), - ]; - - let ck: CommitmentKey = - as CommitmentEngineTrait>::setup(b"test", n); - - let C1: Commitment = KZGCommitmentEngine::commit(&ck, &poly); // correct commitment - let C2: Commitment = KZGCommitmentEngine::commit(&ck, &altered_poly); // wrong commitment - - test_negative_inner_commitment(&poly, &point, &eval, &ck, &C1, &C2); // here we check detection when proof and commitment do not correspond - test_negative_inner_commitment(&poly, &point, &eval, &ck, &C2, &C2); // here we check detection when proof was built with wrong commitment - } - - fn test_negative_inner_commitment( - poly: &[Fr], - point: &[Fr], - eval: &Fr, - ck: &CommitmentKey, - C_prover: &Commitment, - C_verifier: &Commitment, - ) { - let ck = Arc::new(ck.clone()); - let (pk, vk): (KZGProverKey, KZGVerifierKey) = - EvaluationEngine::::setup(ck.clone()); - - let mut prover_transcript = 
Keccak256Transcript::new(b"TestEval"); - let mut verifier_transcript = Keccak256Transcript::::new(b"TestEval"); - - let proof = EvaluationEngine::::prove( - &ck, - &pk, - &mut prover_transcript, - C_prover, - poly, - point, - eval, - ) - .unwrap(); - - assert!(EvaluationEngine::::verify( - &vk, - &mut verifier_transcript, - C_verifier, - point, - eval, - &proof - ) - .is_err()); - } - - #[test] - fn test_hyperkzg_shplonk_eval() { - // Test with poly(X1, X2) = 1 + X1 + X2 + X1*X2 - let n = 4; - let ck: CommitmentKey = - as CommitmentEngineTrait>::setup(b"test", n); - let ck = Arc::new(ck); - let (pk, vk): (KZGProverKey, KZGVerifierKey) = - EvaluationEngine::::setup(ck.clone()); - - // poly is in eval. representation; evaluated at [(0,0), (0,1), (1,0), (1,1)] - let poly = vec![Fr::from(1), Fr::from(2), Fr::from(2), Fr::from(4)]; - - let C = as CommitmentEngineTrait>::commit(&ck, &poly); - - let test_inner = |point: Vec, eval: Fr| -> Result<(), NovaError> { - let mut tr = Keccak256Transcript::::new(b"TestEval"); - let proof = - EvaluationEngine::::prove(&ck, &pk, &mut tr, &C, &poly, &point, &eval) - .unwrap(); - let mut tr = Keccak256Transcript::new(b"TestEval"); - EvaluationEngine::::verify(&vk, &mut tr, &C, &point, &eval, &proof) - }; - - // Call the prover with a (point, eval) pair. 
- // The prover does not recompute so it may produce a proof, but it should not - // verify - let point = vec![Fr::from(0), Fr::from(0)]; - let eval = Fr::ONE; - test_inner(point, eval).unwrap(); - - let point = vec![Fr::from(0), Fr::from(1)]; - let eval = Fr::from(2); - test_inner(point, eval).unwrap(); - - let point = vec![Fr::from(1), Fr::from(1)]; - let eval = Fr::from(4); - test_inner(point, eval).unwrap(); - - let point = vec![Fr::from(0), Fr::from(2)]; - let eval = Fr::from(3); - test_inner(point, eval).unwrap(); - - let point = vec![Fr::from(2), Fr::from(2)]; - let eval = Fr::from(9); - test_inner(point, eval).unwrap(); - - // Try a couple incorrect evaluations and expect failure - let point = vec![Fr::from(2), Fr::from(2)]; - let eval = Fr::from(50); - assert!(test_inner(point, eval).is_err()); - - let point = vec![Fr::from(0), Fr::from(2)]; - let eval = Fr::from(4); - assert!(test_inner(point, eval).is_err()); - } - - #[test] - fn test_hyperkzg_shplonk_transcript_correctness() { - let n = 4; - - // poly = [1, 2, 1, 4] - let poly = vec![Fr::ONE, Fr::from(2), Fr::from(1), Fr::from(4)]; - - // point = [4,3] - let point = vec![Fr::from(4), Fr::from(3)]; - - // eval = 28 - let eval = Fr::from(28); - - let ck: CommitmentKey = - as CommitmentEngineTrait>::setup(b"test", n); - let ck = Arc::new(ck); - let (pk, vk): (KZGProverKey, KZGVerifierKey) = - EvaluationEngine::::setup(ck.clone()); - - // make a commitment - let C = KZGCommitmentEngine::commit(&ck, &poly); - - // prove an evaluation - let mut prover_transcript = Keccak256Transcript::new(b"TestEval"); - let proof = EvaluationEngine::::prove( - &ck, - &pk, - &mut prover_transcript, - &C, - &poly, - &point, - &eval, - ) - .unwrap(); - let post_c_p = prover_transcript.squeeze(b"c").unwrap(); - - // verify the evaluation - let mut verifier_transcript = Keccak256Transcript::::new(b"TestEval"); - EvaluationEngine::::verify(&vk, &mut verifier_transcript, &C, &point, &eval, &proof) - .unwrap(); - let post_c_v = 
verifier_transcript.squeeze(b"c").unwrap(); - - // check if the prover transcript and verifier transcript are kept in the - // same state - assert_eq!(post_c_p, post_c_v); - - let proof_bytes = bincode::DefaultOptions::new() - .with_big_endian() - .with_fixint_encoding() - .serialize(&proof) - .unwrap(); - expect!["432"].assert_eq(&proof_bytes.len().to_string()); - - // Change the proof and expect verification to fail - let mut bad_proof = proof.clone(); - bad_proof.comms[0] = (bad_proof.comms[0] + bad_proof.comms[0] * Fr::from(123)).to_affine(); - let mut verifier_transcript2 = Keccak256Transcript::::new(b"TestEval"); - assert!(EvaluationEngine::::verify( - &vk, - &mut verifier_transcript2, - &C, - &point, - &eval, - &bad_proof - ) - .is_err()); - } - - #[test] - fn test_hyperkzg_shplonk_more() { - // test the hyperkzg prover and verifier with random instances (derived from a - // seed) - for num_vars in [4, 5, 6] { - prove_verify_from_num_vars::<_, EvaluationEngine>(num_vars); - } - } - - /// Compute powers of q : (1, q, q^2, ..., q^(k-1)) - fn batch_challenge_powers(q: Fr, k: usize) -> Vec { - powers(&q, k) - } -} diff --git a/src/provider/ipa_pc.rs b/src/provider/ipa_pc.rs deleted file mode 100644 index 9ae85be..0000000 --- a/src/provider/ipa_pc.rs +++ /dev/null @@ -1,394 +0,0 @@ -//! This module implements `EvaluationEngine` using an IPA-based polynomial -//! 
commitment scheme -use core::iter; -use std::{marker::PhantomData, sync::Arc}; - -use ff::Field; -use rayon::prelude::*; -use serde::{Deserialize, Serialize}; - -use crate::{ - digest::SimpleDigestible, - errors::{NovaError, PCSError}, - provider::{pedersen::CommitmentKeyExtTrait, traits::DlogGroup, util::field::batch_invert}, - spartan::polys::eq::EqPolynomial, - traits::{ - commitment::{CommitmentEngineTrait, CommitmentTrait}, - evaluation::EvaluationEngineTrait, - Engine, TranscriptEngineTrait, TranscriptReprTrait, - }, - zip_with, Commitment, CommitmentKey, CompressedCommitment, CE, -}; - -/// Provides an implementation of the prover key -#[derive(Debug, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct ProverKey { - pub ck_s: CommitmentKey, -} - -/// Provides an implementation of the verifier key -#[derive(Debug, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct VerifierKey { - pub ck_v: Arc>, - pub ck_s: CommitmentKey, -} - -impl SimpleDigestible for VerifierKey {} - -/// Provides an implementation of a polynomial evaluation engine using IPA -#[derive(Clone, Debug)] -pub struct EvaluationEngine { - _p: PhantomData, -} - -impl EvaluationEngineTrait for EvaluationEngine -where - E: Engine, - E::GE: DlogGroup, - CommitmentKey: CommitmentKeyExtTrait, -{ - type ProverKey = ProverKey; - type VerifierKey = VerifierKey; - type EvaluationArgument = InnerProductArgument; - - fn setup( - ck: Arc<<::CE as CommitmentEngineTrait>::CommitmentKey>, - ) -> (Self::ProverKey, Self::VerifierKey) { - let ck_c = E::CE::setup(b"ipa", 1); - - let pk = ProverKey { ck_s: ck_c.clone() }; - let vk = VerifierKey { - ck_v: ck.clone(), - ck_s: ck_c, - }; - - (pk, vk) - } - - fn prove( - ck: &CommitmentKey, - pk: &Self::ProverKey, - transcript: &mut E::TE, - comm: &Commitment, - poly: &[E::Scalar], - point: &[E::Scalar], - eval: &E::Scalar, - ) -> Result { - let u = InnerProductInstance::new(comm, &EqPolynomial::evals_from_points(point), eval); - let w = 
InnerProductWitness::new(poly); - - InnerProductArgument::prove(ck.clone(), pk.ck_s.clone(), &u, &w, transcript) - } - - /// A method to verify purported evaluations of a batch of polynomials - fn verify( - vk: &Self::VerifierKey, - transcript: &mut E::TE, - comm: &Commitment, - point: &[E::Scalar], - eval: &E::Scalar, - arg: &Self::EvaluationArgument, - ) -> Result<(), NovaError> { - let u = InnerProductInstance::new(comm, &EqPolynomial::evals_from_points(point), eval); - - arg.verify(&vk.ck_v, vk.ck_s.clone(), 1 << point.len(), &u, transcript)?; - - Ok(()) - } -} - -fn inner_product(a: &[T], b: &[T]) -> T { - zip_with!(par_iter, (a, b), |x, y| *x * y).sum() -} - -/// An inner product instance consists of a commitment to a vector `a` and -/// another vector `b` and the claim that c = . -struct InnerProductInstance { - comm_a_vec: Commitment, - b_vec: Vec, - c: E::Scalar, -} - -impl InnerProductInstance -where - E: Engine, - E::GE: DlogGroup, -{ - fn new(comm_a_vec: &Commitment, b_vec: &[E::Scalar], c: &E::Scalar) -> Self { - Self { - comm_a_vec: *comm_a_vec, - b_vec: b_vec.to_vec(), - c: *c, - } - } -} - -impl TranscriptReprTrait for InnerProductInstance { - fn to_transcript_bytes(&self) -> Vec { - // we do not need to include self.b_vec as in our context it is produced from - // the transcript - [ - self.comm_a_vec.to_transcript_bytes(), - self.c.to_transcript_bytes(), - ] - .concat() - } -} - -struct InnerProductWitness { - a_vec: Vec, -} - -impl InnerProductWitness { - fn new(a_vec: &[E::Scalar]) -> Self { - Self { - a_vec: a_vec.to_vec(), - } - } -} - -/// An inner product argument -#[derive(Clone, Debug, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct InnerProductArgument { - pub(in crate::provider) L_vec: Vec>, - pub(in crate::provider) R_vec: Vec>, - pub(in crate::provider) a_hat: E::Scalar, -} - -impl InnerProductArgument -where - E: Engine, - E::GE: DlogGroup, - CommitmentKey: CommitmentKeyExtTrait, -{ - const fn protocol_name() -> &'static 
[u8] { - b"IPA" - } - - fn prove( - ck: CommitmentKey, - mut ck_c: CommitmentKey, - U: &InnerProductInstance, - W: &InnerProductWitness, - transcript: &mut E::TE, - ) -> Result { - transcript.dom_sep(Self::protocol_name()); - - let (ck, _) = ck.split_at(U.b_vec.len()); - - if U.b_vec.len() != W.a_vec.len() { - return Err(NovaError::InvalidInputLength); - } - - // absorb the instance in the transcript - transcript.absorb(b"U", U); - - // sample a random base for committing to the inner product - let r = transcript.squeeze(b"r")?; - ck_c.scale(&r); - - // a closure that executes a step of the recursive inner product argument - let prove_inner = |a_vec: &[E::Scalar], - b_vec: &[E::Scalar], - ck: CommitmentKey, - transcript: &mut E::TE| - -> Result< - ( - CompressedCommitment, - CompressedCommitment, - Vec, - Vec, - CommitmentKey, - ), - NovaError, - > { - let n = a_vec.len(); - let (ck_L, ck_R) = ck.split_at(n / 2); - - let c_L = inner_product(&a_vec[0..n / 2], &b_vec[n / 2..n]); - let c_R = inner_product(&a_vec[n / 2..n], &b_vec[0..n / 2]); - - let L = CE::::commit( - &ck_R.combine(&ck_c), - &a_vec[0..n / 2] - .iter() - .chain(iter::once(&c_L)) - .copied() - .collect::>(), - ) - .compress(); - let R = CE::::commit( - &ck_L.combine(&ck_c), - &a_vec[n / 2..n] - .iter() - .chain(iter::once(&c_R)) - .copied() - .collect::>(), - ) - .compress(); - - transcript.absorb(b"L", &L); - transcript.absorb(b"R", &R); - - let r = transcript.squeeze(b"r")?; - let r_inverse = r.invert().unwrap(); - - // fold the left half and the right half - let a_vec_folded = zip_with!( - (a_vec[0..n / 2].par_iter(), a_vec[n / 2..n].par_iter()), - |a_L, a_R| *a_L * r + r_inverse * *a_R - ) - .collect::>(); - - let b_vec_folded = zip_with!( - (b_vec[0..n / 2].par_iter(), b_vec[n / 2..n].par_iter()), - |b_L, b_R| *b_L * r_inverse + r * *b_R - ) - .collect::>(); - - let ck_folded = CommitmentKeyExtTrait::fold(&ck_L, &ck_R, &r_inverse, &r); - - Ok((L, R, a_vec_folded, b_vec_folded, ck_folded)) - }; - - 
// two vectors to hold the logarithmic number of group elements - let mut L_vec: Vec> = Vec::new(); - let mut R_vec: Vec> = Vec::new(); - - // we create mutable copies of vectors and generators - let mut a_vec = W.a_vec.to_vec(); - let mut b_vec = U.b_vec.to_vec(); - let mut ck = ck; - for _i in 0..usize::try_from(U.b_vec.len().ilog2()).unwrap() { - let (L, R, a_vec_folded, b_vec_folded, ck_folded) = - prove_inner(&a_vec, &b_vec, ck, transcript)?; - L_vec.push(L); - R_vec.push(R); - - a_vec = a_vec_folded; - b_vec = b_vec_folded; - ck = ck_folded; - } - - Ok(Self { - L_vec, - R_vec, - a_hat: a_vec[0], - }) - } - - fn verify( - &self, - ck: &CommitmentKey, - mut ck_c: CommitmentKey, - n: usize, - U: &InnerProductInstance, - transcript: &mut E::TE, - ) -> Result<(), NovaError> { - let (ck, _) = ck.clone().split_at(U.b_vec.len()); - - transcript.dom_sep(Self::protocol_name()); - if U.b_vec.len() != n - || n != (1 << self.L_vec.len()) - || self.L_vec.len() != self.R_vec.len() - || self.L_vec.len() >= 32 - { - return Err(NovaError::InvalidInputLength); - } - - // absorb the instance in the transcript - transcript.absorb(b"U", U); - - // sample a random base for committing to the inner product - let r = transcript.squeeze(b"r")?; - ck_c.scale(&r); - - let P = U.comm_a_vec + CE::::commit(&ck_c, &[U.c]); - - // compute a vector of public coins using self.L_vec and self.R_vec - let r = (0..self.L_vec.len()) - .map(|i| { - transcript.absorb(b"L", &self.L_vec[i]); - transcript.absorb(b"R", &self.R_vec[i]); - transcript.squeeze(b"r") - }) - .collect::, NovaError>>()?; - - // precompute scalars necessary for verification - let r_square: Vec = (0..self.L_vec.len()) - .into_par_iter() - .map(|i| r[i] * r[i]) - .collect(); - let r_inverse = batch_invert(r.clone())?; - let r_inverse_square: Vec = (0..self.L_vec.len()) - .into_par_iter() - .map(|i| r_inverse[i] * r_inverse[i]) - .collect(); - - // compute the vector with the tensor structure - let s = { - let mut s = 
vec![E::Scalar::ZERO; n]; - s[0] = { - let mut v = E::Scalar::ONE; - for r_inverse_i in r_inverse { - v *= r_inverse_i; - } - v - }; - for i in 1..n { - let pos_in_r = (31 - (i as u32).leading_zeros()) as usize; - s[i] = s[i - (1 << pos_in_r)] * r_square[(self.L_vec.len() - 1) - pos_in_r]; - } - s - }; - - let ck_hat = { - let c = CE::::commit(&ck, &s).compress(); - CommitmentKey::::reinterpret_commitments_as_ck(&[c])? - }; - - let b_hat = inner_product(&U.b_vec, &s); - - let P_hat = { - let ck_folded = { - let ck_L = CommitmentKey::::reinterpret_commitments_as_ck(&self.L_vec)?; - let ck_R = CommitmentKey::::reinterpret_commitments_as_ck(&self.R_vec)?; - let ck_P = CommitmentKey::::reinterpret_commitments_as_ck(&[P.compress()])?; - ck_L.combine(&ck_R).combine(&ck_P) - }; - - CE::::commit( - &ck_folded, - &r_square - .iter() - .chain(r_inverse_square.iter()) - .chain(iter::once(&E::Scalar::ONE)) - .copied() - .collect::>(), - ) - }; - - if P_hat == CE::::commit(&ck_hat.combine(&ck_c), &[self.a_hat, self.a_hat * b_hat]) { - Ok(()) - } else { - Err(NovaError::PCSError(PCSError::InvalidPCS)) - } - } -} - -#[cfg(test)] -mod test { - use crate::provider::{ - ipa_pc::EvaluationEngine, util::test_utils::prove_verify_from_num_vars, GrumpkinEngine, - }; - - #[test] - fn test_multiple_polynomial_size() { - for num_vars in [4, 5, 6] { - prove_verify_from_num_vars::<_, EvaluationEngine>(num_vars); - } - } -} diff --git a/src/provider/keccak.rs b/src/provider/keccak.rs deleted file mode 100644 index cd63658..0000000 --- a/src/provider/keccak.rs +++ /dev/null @@ -1,247 +0,0 @@ -//! This module provides an implementation of `TranscriptEngineTrait` using -//! 
keccak256 -use core::marker::PhantomData; - -use sha3::{Digest, Keccak256}; - -use crate::{ - errors::NovaError, - traits::{Engine, PrimeFieldExt, TranscriptEngineTrait, TranscriptReprTrait}, -}; - -const PERSONA_TAG: &[u8] = b"NoTR"; -const DOM_SEP_TAG: &[u8] = b"NoDS"; -const KECCAK256_STATE_SIZE: usize = 64; -const KECCAK256_PREFIX_CHALLENGE_LO: u8 = 0; -const KECCAK256_PREFIX_CHALLENGE_HI: u8 = 1; - -/// Provides an implementation of `TranscriptEngine` -#[derive(Debug)] -pub struct Keccak256Transcript { - round: u16, - state: [u8; KECCAK256_STATE_SIZE], - transcript: Keccak256, - _p: PhantomData, -} - -fn compute_updated_state(keccak_instance: Keccak256, input: &[u8]) -> [u8; KECCAK256_STATE_SIZE] { - let mut updated_instance = keccak_instance; - updated_instance.update(input); - - let input_lo = &[KECCAK256_PREFIX_CHALLENGE_LO]; - let input_hi = &[KECCAK256_PREFIX_CHALLENGE_HI]; - - let mut hasher_lo = updated_instance.clone(); - let mut hasher_hi = updated_instance; - - hasher_lo.update(input_lo); - hasher_hi.update(input_hi); - - let output_lo = hasher_lo.finalize(); - let output_hi = hasher_hi.finalize(); - - [output_lo, output_hi] - .concat() - .as_slice() - .try_into() - .unwrap() -} - -impl TranscriptEngineTrait for Keccak256Transcript { - fn new(label: &'static [u8]) -> Self { - let keccak_instance = Keccak256::new(); - let input = [PERSONA_TAG, label].concat(); - let output = compute_updated_state(keccak_instance.clone(), &input); - - Self { - round: 0u16, - state: output, - transcript: keccak_instance, - _p: PhantomData, - } - } - - fn squeeze(&mut self, label: &'static [u8]) -> Result { - // we gather the full input from the round, preceded by the current state of the - // transcript - let input = [ - DOM_SEP_TAG, - self.round.to_le_bytes().as_ref(), - self.state.as_ref(), - label, - ] - .concat(); - let output = compute_updated_state(self.transcript.clone(), &input); - - // update state - self.round = { - self.round - .checked_add(1) - 
.ok_or(NovaError::InternalTranscriptError)? - }; - self.state.copy_from_slice(&output); - self.transcript = Keccak256::new(); - - // squeeze out a challenge - Ok(E::Scalar::from_uniform(&output)) - } - - fn absorb>(&mut self, label: &'static [u8], o: &T) { - self.transcript.update(label); - self.transcript.update(&o.to_transcript_bytes()); - } - - fn dom_sep(&mut self, bytes: &'static [u8]) { - self.transcript.update(DOM_SEP_TAG); - self.transcript.update(bytes); - } -} - -#[cfg(test)] -mod tests { - use ff::PrimeField; - use rand::Rng; - use sha3::{Digest, Keccak256}; - - use crate::{ - provider::{keccak::Keccak256Transcript, Bn256EngineKZG, GrumpkinEngine}, - traits::{Engine, PrimeFieldExt, TranscriptEngineTrait, TranscriptReprTrait}, - }; - - fn test_keccak_transcript_with( - expected_h1: &'static str, - expected_h2: &'static str, - ) { - let mut transcript: Keccak256Transcript = Keccak256Transcript::new(b"test"); - - // two scalars - let s1 = ::Scalar::from(2u64); - let s2 = ::Scalar::from(5u64); - - // add the scalars to the transcript - transcript.absorb(b"s1", &s1); - transcript.absorb(b"s2", &s2); - - // make a challenge - let c1: ::Scalar = transcript.squeeze(b"c1").unwrap(); - assert_eq!(hex::encode(c1.to_repr().as_ref()), expected_h1); - - // a scalar - let s3 = ::Scalar::from(128u64); - - // add the scalar to the transcript - transcript.absorb(b"s3", &s3); - - // make a challenge - let c2: ::Scalar = transcript.squeeze(b"c2").unwrap(); - assert_eq!(hex::encode(c2.to_repr().as_ref()), expected_h2); - } - - #[test] - fn test_keccak_transcript() { - test_keccak_transcript_with::( - "9fb71e3b74bfd0b60d97349849b895595779a240b92a6fae86bd2812692b6b0e", - "bfd4c50b7d6317e9267d5d65c985eb455a3561129c0b3beef79bfc8461a84f18", - ); - } - - #[test] - fn test_keccak_example() { - let mut hasher = Keccak256::new(); - hasher.update(0xffffffff_u32.to_le_bytes()); - let output: [u8; 32] = hasher.finalize().into(); - assert_eq!( - hex::encode(output), - 
"29045a592007d0c246ef02c2223570da9522d0cf0f73282c79a1bc8f0bb2c238" - ); - } - - use super::{ - DOM_SEP_TAG, KECCAK256_PREFIX_CHALLENGE_HI, KECCAK256_PREFIX_CHALLENGE_LO, - KECCAK256_STATE_SIZE, PERSONA_TAG, - }; - - fn compute_updated_state_for_testing(input: &[u8]) -> [u8; KECCAK256_STATE_SIZE] { - let input_lo = [input, &[KECCAK256_PREFIX_CHALLENGE_LO]].concat(); - let input_hi = [input, &[KECCAK256_PREFIX_CHALLENGE_HI]].concat(); - - let mut hasher_lo = Keccak256::new(); - let mut hasher_hi = Keccak256::new(); - - hasher_lo.update(&input_lo); - hasher_hi.update(&input_hi); - - let output_lo = hasher_lo.finalize(); - let output_hi = hasher_hi.finalize(); - - [output_lo, output_hi] - .concat() - .as_slice() - .try_into() - .unwrap() - } - - fn squeeze_for_testing( - transcript: &[u8], - round: u16, - state: [u8; KECCAK256_STATE_SIZE], - label: &'static [u8], - ) -> [u8; 64] { - let input = [ - transcript, - DOM_SEP_TAG, - round.to_le_bytes().as_ref(), - state.as_ref(), - label, - ] - .concat(); - compute_updated_state_for_testing(&input) - } - - // This test is meant to ensure compatibility between the incremental way of - // computing the transcript above, and the former, which materialized the - // entirety of the input vector before calling Keccak256 on it. 
- fn test_keccak_transcript_incremental_vs_explicit_with() { - let test_label = b"test"; - let mut transcript: Keccak256Transcript = Keccak256Transcript::new(test_label); - let mut rng = rand::thread_rng(); - - // ten scalars - let scalars = std::iter::from_fn(|| Some(::Scalar::from(rng.gen::()))) - .take(10) - .collect::>(); - - // add the scalars to the transcripts, - let mut manual_transcript: Vec = vec![]; - let labels = [ - b"s1", b"s2", b"s3", b"s4", b"s5", b"s6", b"s7", b"s8", b"s9", b"s0", - ]; - - for i in 0..10 { - transcript.absorb(&labels[i][..], &scalars[i]); - manual_transcript.extend(labels[i]); - manual_transcript.extend(scalars[i].to_transcript_bytes()); - } - - // compute the initial state - let input = [PERSONA_TAG, test_label].concat(); - let initial_state = compute_updated_state_for_testing(&input); - - // make a challenge - let c1: ::Scalar = transcript.squeeze(b"c1").unwrap(); - - let c1_bytes = squeeze_for_testing(&manual_transcript[..], 0u16, initial_state, b"c1"); - let to_hex = |g: E::Scalar| hex::encode(g.to_repr().as_ref()); - assert_eq!(to_hex(c1), to_hex(E::Scalar::from_uniform(&c1_bytes))); - } - - #[test] - fn test_keccak_transcript_incremental_vs_explicit() { - // test_keccak_transcript_incremental_vs_explicit_with::(); - // test_keccak_transcript_incremental_vs_explicit_with::(); - test_keccak_transcript_incremental_vs_explicit_with::(); - test_keccak_transcript_incremental_vs_explicit_with::(); - // test_keccak_transcript_incremental_vs_explicit_with::(); - // test_keccak_transcript_incremental_vs_explicit_with::(); - } -} diff --git a/src/provider/kzg_commitment.rs b/src/provider/kzg_commitment.rs deleted file mode 100644 index e94a166..0000000 --- a/src/provider/kzg_commitment.rs +++ /dev/null @@ -1,349 +0,0 @@ -//! 
Commitment engine for KZG commitments - -use std::{io::Cursor, marker::PhantomData, sync::Arc}; - -use ff::{Field, PrimeField, PrimeFieldBits}; -use group::{prime::PrimeCurveAffine, Curve, Group as _}; -use halo2curves::serde::SerdeObject; -use pairing::Engine; -use rand::rngs::StdRng; -use rand_core::{CryptoRng, RngCore, SeedableRng}; -use serde::{Deserialize, Serialize}; - -use crate::{ - digest::SimpleDigestible, - fast_serde, - fast_serde::{FastSerde, SerdeByteError, SerdeByteTypes}, - provider::{pedersen::Commitment, traits::DlogGroup, util::fb_msm}, - traits::{ - commitment::{CommitmentEngineTrait, Len}, - Engine as NovaEngine, Group, TranscriptReprTrait, - }, -}; - -/// `UniversalParams` are the universal parameters for the KZG10 scheme. -#[derive(Debug, Clone, Eq, Serialize, Deserialize)] -#[serde(bound( - serialize = "E::G1Affine: Serialize, E::G2Affine: Serialize", - deserialize = "E::G1Affine: Deserialize<'de>, E::G2Affine: Deserialize<'de>" -))] -pub struct UniversalKZGParam { - /// Group elements of the form `{ β^i G }`, where `i` ranges from 0 to - /// `degree`. - pub powers_of_g: Vec, - /// Group elements of the form `{ β^i H }`, where `i` ranges from 0 to - /// `degree`. - pub powers_of_h: Vec, -} - -impl PartialEq for UniversalKZGParam { - fn eq(&self, other: &Self) -> bool { - self.powers_of_g == other.powers_of_g && self.powers_of_h == other.powers_of_h - } -} -// for the purpose of the Len trait, we count commitment bases, i.e. 
G1 elements -impl Len for UniversalKZGParam { - fn length(&self) -> usize { - self.powers_of_g.len() - } -} - -/// `UnivariateProverKey` is used to generate a proof -#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] -#[serde(bound( - serialize = "E::G1Affine: Serialize, E::G2Affine: Serialize", - deserialize = "E::G1Affine: Deserialize<'de>, E::G2Affine: Deserialize<'de>" -))] -pub struct KZGProverKey { - /// generators from the universal parameters - uv_params: Arc>, - /// offset at which we start reading into the SRS - offset: usize, - /// maximum supported size - supported_size: usize, -} - -impl KZGProverKey { - pub(in crate::provider) fn new( - uv_params: Arc>, - offset: usize, - supported_size: usize, - ) -> Self { - assert!( - uv_params.max_degree() >= offset + supported_size, - "not enough bases (req: {} from offset {}) in the UVKZGParams (length: {})", - supported_size, - offset, - uv_params.max_degree() - ); - Self { - uv_params, - offset, - supported_size, - } - } - - pub fn powers_of_g(&self) -> &[E::G1Affine] { - &self.uv_params.powers_of_g[self.offset..self.offset + self.supported_size] - } -} - -/// `UVKZGVerifierKey` is used to check evaluation proofs for a given -/// commitment. -#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] -#[serde(bound(serialize = "E::G1Affine: Serialize, E::G2Affine: Serialize",))] -pub struct KZGVerifierKey { - /// The generator of G1. - pub g: E::G1Affine, - /// The generator of G2. - pub h: E::G2Affine, - /// β times the above generator of G2. 
- pub beta_h: E::G2Affine, -} - -impl SimpleDigestible for KZGVerifierKey -where - E::G1Affine: Serialize, - E::G2Affine: Serialize, -{ -} - -impl UniversalKZGParam { - /// Returns the maximum supported degree - pub fn max_degree(&self) -> usize { - self.powers_of_g.len() - } - - /// Trim the universal parameters to specialize the public parameters - /// for univariate polynomials to the given `supported_size`, and - /// returns prover key and verifier key. `supported_size` should - /// be in range `1..params.len()` - /// - /// # Panics - /// If `supported_size` is greater than `self.max_degree()`, or - /// `self.max_degree()` is zero. - pub fn trim(ukzg: Arc, supported_size: usize) -> (KZGProverKey, KZGVerifierKey) { - assert!(ukzg.max_degree() > 0, "max_degree is zero"); - let g = ukzg.powers_of_g[0]; - let h = ukzg.powers_of_h[0]; - let beta_h = ukzg.powers_of_h[1]; - let pk = KZGProverKey::new(ukzg, 0, supported_size + 1); - let vk = KZGVerifierKey { g, h, beta_h }; - (pk, vk) - } -} - -impl FastSerde for UniversalKZGParam -where - E::G1Affine: SerdeObject, - E::G2Affine: SerdeObject, -{ - /// Byte format: - /// - /// [0..4] - Magic number (4 bytes) - /// [4] - Serde type: UniversalKZGParam (u8) - /// [5] - Number of sections (u8 = 2) - /// [6] - Section 1 type: powers_of_g (u8) - /// [7..11] - Section 1 size (u32) - /// [11..] 
- Section 1 data - /// [...+1] - Section 2 type: powers_of_h (u8) - /// [...+5] - Section 2 size (u32) - /// [...end] - Section 2 data - fn to_bytes(&self) -> Vec { - let mut out = Vec::new(); - - out.extend_from_slice(&fast_serde::MAGIC_NUMBER); - out.push(fast_serde::SerdeByteTypes::UniversalKZGParam as u8); - out.push(2); // num_sections - - Self::write_section_bytes( - &mut out, - 1, - &self - .powers_of_g - .iter() - .flat_map(|p| p.to_raw_bytes()) - .collect::>(), - ); - - Self::write_section_bytes( - &mut out, - 2, - &self - .powers_of_h - .iter() - .flat_map(|p| p.to_raw_bytes()) - .collect::>(), - ); - - out - } - - fn from_bytes(bytes: &Vec) -> Result { - let mut cursor = Cursor::new(bytes); - - Self::validate_header(&mut cursor, SerdeByteTypes::UniversalKZGParam, 2)?; - - // Read sections of points - let powers_of_g = Self::read_section_bytes(&mut cursor, 1)? - .chunks(E::G1Affine::identity().to_raw_bytes().len()) - .map(|bytes| E::G1Affine::from_raw_bytes(bytes).ok_or(SerdeByteError::G1DecodeError)) - .collect::, _>>()?; - - let powers_of_h = Self::read_section_bytes(&mut cursor, 2)? - .chunks(E::G2Affine::identity().to_raw_bytes().len()) - .map(|bytes| E::G2Affine::from_raw_bytes(bytes).ok_or(SerdeByteError::G2DecodeError)) - .collect::, _>>()?; - - Ok(Self { - powers_of_g, - powers_of_h, - }) - } -} - -impl UniversalKZGParam -where - E::Fr: PrimeFieldBits, -{ - /// Build SRS for testing. - /// WARNING: THIS FUNCTION IS FOR TESTING PURPOSE ONLY. - /// THE OUTPUT SRS SHOULD NOT BE USED IN PRODUCTION. 
- pub fn gen_srs_for_testing(mut rng: &mut R, max_degree: usize) -> Self { - let beta = E::Fr::random(&mut rng); - let g = E::G1::random(&mut rng); - let h = E::G2::random(rng); - - let nz_powers_of_beta = (0..=max_degree) - .scan(beta, |acc, _| { - let val = *acc; - *acc *= beta; - Some(val) - }) - .collect::>(); - - let window_size = fb_msm::get_mul_window_size(max_degree); - let scalar_bits = E::Fr::NUM_BITS as usize; - - let (powers_of_g_projective, powers_of_h_projective) = rayon::join( - || { - let g_table = fb_msm::get_window_table(scalar_bits, window_size, g); - fb_msm::multi_scalar_mul::( - scalar_bits, - window_size, - &g_table, - &nz_powers_of_beta, - ) - }, - || { - let h_table = fb_msm::get_window_table(scalar_bits, window_size, h); - fb_msm::multi_scalar_mul::( - scalar_bits, - window_size, - &h_table, - &nz_powers_of_beta, - ) - }, - ); - - let mut powers_of_g = vec![E::G1Affine::identity(); powers_of_g_projective.len()]; - let mut powers_of_h = vec![E::G2Affine::identity(); powers_of_h_projective.len()]; - - rayon::join( - || E::G1::batch_normalize(&powers_of_g_projective, &mut powers_of_g), - || E::G2::batch_normalize(&powers_of_h_projective, &mut powers_of_h), - ); - - Self { - powers_of_g, - powers_of_h, - } - } -} - -/// Commitments -#[derive(Debug, Clone, Copy, Eq, PartialEq, Default, Serialize, Deserialize)] -#[serde(bound( - serialize = "E::G1Affine: Serialize", - deserialize = "E::G1Affine: Deserialize<'de>" -))] -pub struct UVKZGCommitment( - /// the actual commitment is an affine point. - pub E::G1Affine, -); - -impl TranscriptReprTrait for UVKZGCommitment -where - E::G1: DlogGroup, - // Note: due to the move of the bound TranscriptReprTrait on G::Base from Group to Engine - ::Base: TranscriptReprTrait, -{ - fn to_transcript_bytes(&self) -> Vec { - // TODO: avoid the round-trip through the group (to_curve .. 
to_coordinates) - let (x, y, is_infinity) = self.0.to_curve().to_coordinates(); - let is_infinity_byte = (!is_infinity).into(); - [ - x.to_transcript_bytes(), - y.to_transcript_bytes(), - [is_infinity_byte].to_vec(), - ] - .concat() - } -} - -/// Provides a commitment engine -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct KZGCommitmentEngine { - _p: PhantomData, -} - -impl> CommitmentEngineTrait - for KZGCommitmentEngine -where - E::G1: DlogGroup, - E::G1Affine: Serialize + for<'de> Deserialize<'de>, - E::G2Affine: Serialize + for<'de> Deserialize<'de>, - E::Fr: PrimeFieldBits, // TODO due to use of gen_srs_for_testing, make optional -{ - type CommitmentKey = UniversalKZGParam; - type Commitment = Commitment; - - fn setup(label: &'static [u8], n: usize) -> Self::CommitmentKey { - // TODO: this is just for testing, replace by grabbing from a real setup for - // production - let mut bytes = [0u8; 32]; - let len = label.len().min(32); - bytes[..len].copy_from_slice(&label[..len]); - let rng = &mut StdRng::from_seed(bytes); - UniversalKZGParam::gen_srs_for_testing(rng, n.next_power_of_two()) - } - - fn commit(ck: &Self::CommitmentKey, v: &[::Scalar]) -> Self::Commitment { - assert!(ck.length() >= v.len()); - Commitment { - comm: E::G1::vartime_multiscalar_mul(v, &ck.powers_of_g[..v.len()]), - } - } -} - -impl> From> - for UVKZGCommitment -where - E::G1: Group, -{ - fn from(c: Commitment) -> Self { - Self(c.comm.to_affine()) - } -} - -impl> From> - for Commitment -where - E::G1: Group, -{ - fn from(c: UVKZGCommitment) -> Self { - Self { - comm: c.0.to_curve(), - } - } -} diff --git a/src/provider/mod.rs b/src/provider/mod.rs deleted file mode 100644 index 23ae8d9..0000000 --- a/src/provider/mod.rs +++ /dev/null @@ -1,176 +0,0 @@ -//! This module implements Nova's traits using the following several different -//! 
combinations - -// public modules to be used as an evaluation engine with Spartan -pub mod hyperkzg; -pub mod ipa_pc; - -// crate-public modules, made crate-public mostly for tests -pub(crate) mod bn256_grumpkin; -mod pedersen; -pub(crate) mod poseidon; -pub(crate) mod traits; -// a non-hiding variant of kzg -mod kzg_commitment; -pub(crate) mod util; - -// crate-private modules -mod keccak; -mod tests; - -use halo2curves::bn256::Bn256; - -use self::kzg_commitment::KZGCommitmentEngine; -use crate::{ - provider::{ - bn256_grumpkin::{bn256, grumpkin}, - keccak::Keccak256Transcript, - pedersen::CommitmentEngine as PedersenCommitmentEngine, - poseidon::{PoseidonRO, PoseidonROCircuit}, - }, - traits::{CurveCycleEquipped, Engine}, -}; - -/// An implementation of the Nova `Engine` trait with Grumpkin curve and -/// Pedersen commitment scheme -#[derive(Clone, Copy, Debug, Eq, PartialEq)] -pub struct GrumpkinEngine; - -/// An implementation of the Nova `Engine` trait with BN254 curve and Pedersen -/// commitment scheme -#[derive(Clone, Copy, Debug, Eq, PartialEq)] -pub struct Bn256EngineIPA; - -impl Engine for Bn256EngineIPA { - type Base = bn256::Base; - type Scalar = bn256::Scalar; - type GE = bn256::Point; - type RO = PoseidonRO; - type ROCircuit = PoseidonROCircuit; - type TE = Keccak256Transcript; - type CE = PedersenCommitmentEngine; -} - -impl Engine for GrumpkinEngine { - type Base = grumpkin::Base; - type Scalar = grumpkin::Scalar; - type GE = grumpkin::Point; - type RO = PoseidonRO; - type ROCircuit = PoseidonROCircuit; - type TE = Keccak256Transcript; - type CE = PedersenCommitmentEngine; -} - -/// An implementation of the Nova `Engine` trait with BN254 curve and Zeromorph -/// commitment scheme -#[derive(Clone, Copy, Debug, Eq, PartialEq)] -pub struct Bn256EngineZM; - -impl Engine for Bn256EngineZM { - type Base = bn256::Base; - type Scalar = bn256::Scalar; - type GE = bn256::Point; - type RO = PoseidonRO; - type ROCircuit = PoseidonROCircuit; - type TE = 
Keccak256Transcript; - type CE = KZGCommitmentEngine; -} -/// An implementation of Nova traits with HyperKZG over the BN256 curve -#[derive(Clone, Copy, Debug, Eq, PartialEq)] -pub struct Bn256EngineKZG; - -impl Engine for Bn256EngineKZG { - type Base = bn256::Base; - type Scalar = bn256::Scalar; - type GE = bn256::Point; - type RO = PoseidonRO; - type ROCircuit = PoseidonROCircuit; - type TE = Keccak256Transcript; - type CE = KZGCommitmentEngine; -} - -impl CurveCycleEquipped for Bn256EngineIPA { - type Secondary = GrumpkinEngine; -} - -impl CurveCycleEquipped for Bn256EngineKZG { - type Secondary = GrumpkinEngine; -} - -impl CurveCycleEquipped for Bn256EngineZM { - type Secondary = GrumpkinEngine; -} - -#[cfg(test)] -mod test { - use std::io::Read; - - use digest::{ExtendableOutput, Update}; - use group::{ff::Field, Curve, Group}; - use halo2curves::{CurveAffine, CurveExt}; - use itertools::Itertools as _; - use rand_core::OsRng; - use sha3::Shake256; - - use crate::provider::{ - bn256_grumpkin::{bn256, grumpkin}, - traits::DlogGroup, - util::msm::cpu_best_msm, - }; - - macro_rules! 
impl_cycle_pair_test { - ($curve:ident) => { - fn from_label_serial(label: &'static [u8], n: usize) -> Vec<$curve::Affine> { - let mut shake = Shake256::default(); - shake.update(label); - let mut reader = shake.finalize_xof(); - (0..n) - .map(|_| { - let mut uniform_bytes = [0u8; 32]; - reader.read_exact(&mut uniform_bytes).unwrap(); - let hash = $curve::Point::hash_to_curve("from_uniform_bytes"); - hash(&uniform_bytes).to_affine() - }) - .collect() - } - - let label = b"test_from_label"; - for n in [ - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 1021, - ] { - let ck_par = <$curve::Point as DlogGroup>::from_label(label, n); - let ck_ser = from_label_serial(label, n); - assert_eq!(ck_par.len(), n); - assert_eq!(ck_ser.len(), n); - assert_eq!(ck_par, ck_ser); - } - }; - } - - fn test_msm_with>() { - let n = 8; - let coeffs = (0..n).map(|_| F::random(OsRng)).collect::>(); - let bases = (0..n) - .map(|_| A::from(A::generator() * F::random(OsRng))) - .collect::>(); - let naive = coeffs - .iter() - .zip_eq(bases.iter()) - .fold(A::CurveExt::identity(), |acc, (coeff, base)| { - acc + *base * coeff - }); - - assert_eq!(naive, cpu_best_msm(&bases, &coeffs)) - } - - #[test] - fn test_msm() { - test_msm_with::(); - test_msm_with::(); - } - - #[test] - fn test_bn256_from_label() { - impl_cycle_pair_test!(bn256); - } -} diff --git a/src/provider/pedersen.rs b/src/provider/pedersen.rs deleted file mode 100644 index dd16831..0000000 --- a/src/provider/pedersen.rs +++ /dev/null @@ -1,369 +0,0 @@ -//! 
This module provides an implementation of a commitment engine -use core::{ - fmt::Debug, - marker::PhantomData, - ops::{Add, Mul, MulAssign}, -}; -use std::io::Cursor; - -use ff::Field; -use group::{ - prime::{PrimeCurve, PrimeCurveAffine}, - Curve, Group, GroupEncoding, -}; -use halo2curves::serde::SerdeObject; -use rayon::prelude::*; -use serde::{Deserialize, Serialize}; - -use crate::{ - errors::NovaError, - fast_serde, - fast_serde::{FastSerde, SerdeByteError, SerdeByteTypes}, - provider::traits::DlogGroup, - traits::{ - commitment::{CommitmentEngineTrait, CommitmentTrait, Len}, - AbsorbInROTrait, Engine, ROTrait, TranscriptReprTrait, - }, - zip_with, -}; - -/// A type that holds commitment generators -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct CommitmentKey -where - E: Engine, - E::GE: DlogGroup, -{ - pub ck: Vec<::Affine>, -} - -impl Len for CommitmentKey -where - E: Engine, - E::GE: DlogGroup, -{ - fn length(&self) -> usize { - self.ck.len() - } -} - -impl FastSerde for CommitmentKey -where - ::Affine: SerdeObject, - E::GE: DlogGroup, -{ - /// Byte format: - /// - /// [0..4] - Magic number (4 bytes) - /// [4] - Serde type: CommitmentKey (u8) - /// [5] - Number of sections (u8 = 1) - /// [6] - Section 1 type: ck (u8) - /// [7..11] - Section 1 size (u32) - /// [11..] - Section 1 data - fn to_bytes(&self) -> Vec { - let mut out = Vec::new(); - - out.extend_from_slice(&fast_serde::MAGIC_NUMBER); - out.push(fast_serde::SerdeByteTypes::CommitmentKey as u8); - out.push(1); // num_sections - - Self::write_section_bytes( - &mut out, - 1, - &self - .ck - .iter() - .flat_map(|p| p.to_raw_bytes()) - .collect::>(), - ); - - out - } - - fn from_bytes(bytes: &Vec) -> Result { - let mut cursor = Cursor::new(bytes); - - // Validate header - Self::validate_header(&mut cursor, SerdeByteTypes::CommitmentKey, 1)?; - - // Read ck section - let ck = Self::read_section_bytes(&mut cursor, 1)? 
- .chunks( - ::Affine::identity() - .to_raw_bytes() - .len(), - ) - .map(|bytes| { - ::Affine::from_raw_bytes(bytes) - .ok_or(SerdeByteError::G1DecodeError) - }) - .collect::, _>>()?; - - Ok(Self { ck }) - } -} - -/// A type that holds a commitment -#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct Commitment { - pub(crate) comm: E::GE, -} - -/// A type that holds a compressed commitment -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct CompressedCommitment -where - E: Engine, - E::GE: DlogGroup, -{ - pub(crate) comm: ::Compressed, -} - -impl CommitmentTrait for Commitment -where - E: Engine, - E::GE: DlogGroup, -{ - type CompressedCommitment = CompressedCommitment; - - fn compress(&self) -> Self::CompressedCommitment { - CompressedCommitment { - comm: ::to_bytes(&self.comm).into(), - } - } - - fn to_coordinates(&self) -> (E::Base, E::Base, bool) { - self.comm.to_coordinates() - } - - fn decompress(c: &Self::CompressedCommitment) -> Result { - let opt_comm = <::GE as GroupEncoding>::from_bytes(&c.comm.clone().into()); - let Some(comm) = Option::from(opt_comm) else { - return Err(NovaError::DecompressionError); - }; - Ok(Self { comm }) - } -} - -impl Default for Commitment -where - E: Engine, - E::GE: DlogGroup, -{ - fn default() -> Self { - Self { - comm: E::GE::identity(), - } - } -} - -impl TranscriptReprTrait for Commitment -where - E: Engine, - E::GE: DlogGroup, -{ - fn to_transcript_bytes(&self) -> Vec { - let (x, y, is_infinity) = self.comm.to_coordinates(); - let is_infinity_byte = (!is_infinity).into(); - [ - x.to_transcript_bytes(), - y.to_transcript_bytes(), - [is_infinity_byte].to_vec(), - ] - .concat() - } -} - -impl AbsorbInROTrait for Commitment -where - E: Engine, - E::GE: DlogGroup, -{ - fn absorb_in_ro(&self, ro: &mut E::RO) { - let (x, y, is_infinity) = self.comm.to_coordinates(); - ro.absorb(x); - ro.absorb(y); - ro.absorb(if is_infinity { - 
E::Base::ONE - } else { - E::Base::ZERO - }); - } -} - -impl TranscriptReprTrait for CompressedCommitment -where - E: Engine, - E::GE: DlogGroup, -{ - fn to_transcript_bytes(&self) -> Vec { - self.comm.to_transcript_bytes() - } -} - -impl MulAssign for Commitment -where - E: Engine, - E::GE: DlogGroup, -{ - fn mul_assign(&mut self, scalar: E::Scalar) { - *self = Self { - comm: self.comm * scalar, - }; - } -} - -impl<'a, 'b, E> Mul<&'b E::Scalar> for &'a Commitment -where - E: Engine, - E::GE: DlogGroup, -{ - type Output = Commitment; - fn mul(self, scalar: &'b E::Scalar) -> Commitment { - Commitment { - comm: self.comm * scalar, - } - } -} - -impl Mul for Commitment -where - E: Engine, - E::GE: DlogGroup, -{ - type Output = Self; - - fn mul(self, scalar: E::Scalar) -> Self { - Self { - comm: self.comm * scalar, - } - } -} - -impl Add for Commitment -where - E: Engine, - E::GE: DlogGroup, -{ - type Output = Self; - - fn add(self, other: Self) -> Self { - Self { - comm: self.comm + other.comm, - } - } -} - -/// Provides a commitment engine -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct CommitmentEngine { - _p: PhantomData, -} - -impl CommitmentEngineTrait for CommitmentEngine -where - E: Engine, - E::GE: DlogGroup, -{ - type CommitmentKey = CommitmentKey; - type Commitment = Commitment; - - fn setup(label: &'static [u8], n: usize) -> Self::CommitmentKey { - Self::CommitmentKey { - ck: E::GE::from_label(label, n.next_power_of_two()), - } - } - - fn commit(ck: &Self::CommitmentKey, v: &[E::Scalar]) -> Self::Commitment { - assert!(ck.ck.len() >= v.len()); - Commitment { - comm: E::GE::vartime_multiscalar_mul(v, &ck.ck[..v.len()]), - } - } -} - -/// A trait listing properties of a commitment key that can be managed in a -/// divide-and-conquer fashion -pub trait CommitmentKeyExtTrait -where - E: Engine, - E::GE: DlogGroup, -{ - /// Splits the commitment key into two pieces at a specified point - fn split_at(self, n: usize) -> (Self, Self) - where - Self: Sized; - - 
/// Combines two commitment keys into one - fn combine(&self, other: &Self) -> Self; - - /// Folds the two commitment keys into one using the provided weights - fn fold(L: &Self, R: &Self, w1: &E::Scalar, w2: &E::Scalar) -> Self; - - /// Scales the commitment key using the provided scalar - fn scale(&mut self, r: &E::Scalar); - - /// Reinterprets commitments as commitment keys - fn reinterpret_commitments_as_ck( - c: &[<<::CE as CommitmentEngineTrait>::Commitment as CommitmentTrait< - E, - >>::CompressedCommitment], - ) -> Result - where - Self: Sized; -} - -impl CommitmentKeyExtTrait for CommitmentKey -where - E: Engine>, - E::GE: DlogGroup, -{ - fn split_at(mut self, n: usize) -> (Self, Self) { - let right = self.ck.split_off(n); - (self, Self { ck: right }) - } - - fn combine(&self, other: &Self) -> Self { - let ck = { - self.ck - .iter() - .cloned() - .chain(other.ck.iter().cloned()) - .collect::>() - }; - Self { ck } - } - - // combines the left and right halves of `self` using `w1` and `w2` as the - // weights - fn fold(L: &Self, R: &Self, w1: &E::Scalar, w2: &E::Scalar) -> Self { - debug_assert!(L.ck.len() == R.ck.len()); - let ck_curve: Vec = zip_with!(par_iter, (L.ck, R.ck), |l, r| { - E::GE::vartime_multiscalar_mul(&[*w1, *w2], &[*l, *r]) - }) - .collect(); - let mut ck_affine = vec![::Affine::identity(); L.ck.len()]; - E::GE::batch_normalize(&ck_curve, &mut ck_affine); - - Self { ck: ck_affine } - } - - /// Scales each element in `self` by `r` - fn scale(&mut self, r: &E::Scalar) { - let ck_scaled: Vec = self.ck.par_iter().map(|g| *g * r).collect(); - E::GE::batch_normalize(&ck_scaled, &mut self.ck); - } - - /// reinterprets a vector of commitments as a set of generators - fn reinterpret_commitments_as_ck(c: &[CompressedCommitment]) -> Result { - let d = c - .par_iter() - .map(|c| Commitment::::decompress(c).map(|c| c.comm)) - .collect::, NovaError>>()?; - let mut ck = vec![::Affine::identity(); d.len()]; - E::GE::batch_normalize(&d, &mut ck); - Ok(Self 
{ ck }) - } -} diff --git a/src/provider/poseidon.rs b/src/provider/poseidon.rs deleted file mode 100644 index 78956d1..0000000 --- a/src/provider/poseidon.rs +++ /dev/null @@ -1,244 +0,0 @@ -//! Poseidon Constants and Poseidon-based RO used in Nova -use core::marker::PhantomData; - -use bellpepper_core::{ - boolean::{AllocatedBit, Boolean}, - num::AllocatedNum, - ConstraintSystem, SynthesisError, -}; -use ff::{PrimeField, PrimeFieldBits}; -use generic_array::typenum::U24; -use neptune::{ - circuit2::Elt, - poseidon::PoseidonConstants, - sponge::{ - api::{IOPattern, SpongeAPI, SpongeOp}, - circuit::SpongeCircuit, - vanilla::{Mode::Simplex, Sponge, SpongeTrait}, - }, - Strength, -}; -use serde::{Deserialize, Serialize}; - -use crate::traits::{ROCircuitTrait, ROTrait}; - -/// All Poseidon Constants that are used in Nova -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct PoseidonConstantsCircuit(PoseidonConstants); - -impl Default for PoseidonConstantsCircuit { - /// Generate Poseidon constants - fn default() -> Self { - Self(Sponge::::api_constants(Strength::Standard)) - } -} - -/// A Poseidon-based RO to use outside circuits -#[derive(Debug)] -pub struct PoseidonRO -where - Base: PrimeField, - Scalar: PrimeField, -{ - state: Vec, - constants: PoseidonConstantsCircuit, - num_absorbs: usize, - squeezed: bool, - _p: PhantomData, -} - -impl ROTrait for PoseidonRO -where - Base: PrimeField + PrimeFieldBits + Serialize + for<'de> Deserialize<'de>, - Scalar: PrimeField, -{ - type CircuitRO = PoseidonROCircuit; - type Constants = PoseidonConstantsCircuit; - - fn new(constants: PoseidonConstantsCircuit, num_absorbs: usize) -> Self { - Self { - state: Vec::new(), - constants, - num_absorbs, - squeezed: false, - _p: PhantomData, - } - } - - /// Absorb a new number into the state of the oracle - fn absorb(&mut self, e: Base) { - assert!(!self.squeezed, "Cannot absorb after squeezing"); - self.state.push(e); - } - - /// Compute a challenge by hashing the 
current state - fn squeeze(&mut self, num_bits: usize) -> Scalar { - // check if we have squeezed already - assert!(!self.squeezed, "Cannot squeeze again after squeezing"); - self.squeezed = true; - - let mut sponge = Sponge::new_with_constants(&self.constants.0, Simplex); - let acc = &mut (); - let parameter = IOPattern(vec![ - SpongeOp::Absorb(self.num_absorbs as u32), - SpongeOp::Squeeze(1u32), - ]); - - sponge.start(parameter, None, acc); - assert_eq!(self.num_absorbs, self.state.len()); - SpongeAPI::absorb(&mut sponge, self.num_absorbs as u32, &self.state, acc); - let hash = SpongeAPI::squeeze(&mut sponge, 1, acc); - sponge.finish(acc).unwrap(); - - // Only return `num_bits` - let bits = hash[0].to_le_bits(); - let mut res = Scalar::ZERO; - let mut coeff = Scalar::ONE; - for bit in bits[..num_bits].into_iter() { - if *bit { - res += coeff; - } - coeff += coeff; - } - res - } -} - -/// A Poseidon-based RO gadget to use inside the verifier circuit. -#[derive(Debug)] -pub struct PoseidonROCircuit { - // Internal state - state: Vec>, - constants: PoseidonConstantsCircuit, - num_absorbs: usize, - squeezed: bool, -} - -impl ROCircuitTrait for PoseidonROCircuit -where - Scalar: PrimeField + PrimeFieldBits + Serialize + for<'de> Deserialize<'de>, -{ - type NativeRO = PoseidonRO; - type Constants = PoseidonConstantsCircuit; - - /// Initialize the internal state and set the poseidon constants - fn new(constants: PoseidonConstantsCircuit, num_absorbs: usize) -> Self { - Self { - state: Vec::new(), - constants, - num_absorbs, - squeezed: false, - } - } - - /// Absorb a new number into the state of the oracle - fn absorb(&mut self, e: &AllocatedNum) { - assert!(!self.squeezed, "Cannot absorb after squeezing"); - self.state.push(e.clone()); - } - - /// Compute a challenge by hashing the current state - fn squeeze>( - &mut self, - mut cs: CS, - num_bits: usize, - ) -> Result, SynthesisError> { - // check if we have squeezed already - assert!(!self.squeezed, "Cannot squeeze 
again after squeezing"); - self.squeezed = true; - let parameter = IOPattern(vec![ - SpongeOp::Absorb(self.num_absorbs as u32), - SpongeOp::Squeeze(1u32), - ]); - let mut ns = cs.namespace(|| "ns"); - - let hash = { - let mut sponge = SpongeCircuit::new_with_constants(&self.constants.0, Simplex); - let acc = &mut ns; - assert_eq!(self.num_absorbs, self.state.len()); - - sponge.start(parameter, None, acc); - SpongeAPI::absorb( - &mut sponge, - self.num_absorbs as u32, - &(0..self.state.len()) - .map(|i| Elt::Allocated(self.state[i].clone())) - .collect::>>(), - acc, - ); - - let output = SpongeAPI::squeeze(&mut sponge, 1, acc); - sponge.finish(acc).unwrap(); - output - }; - - let hash = Elt::ensure_allocated(&hash[0], &mut ns.namespace(|| "ensure allocated"), true)?; - - // return the hash as a vector of bits, truncated - Ok(hash - .to_bits_le_strict(ns.namespace(|| "poseidon hash to boolean"))? - .iter() - .map(|boolean| match boolean { - Boolean::Is(ref x) => x.clone(), - _ => panic!("Wrong type of input. 
We should have never reached there"), - }) - .collect::>()[..num_bits] - .into()) - } -} - -#[cfg(test)] -mod tests { - use ff::Field; - use rand::rngs::OsRng; - - use super::*; - use crate::{ - bellpepper::solver::SatisfyingAssignment, - constants::NUM_CHALLENGE_BITS, - gadgets::le_bits_to_num, - provider::{Bn256EngineKZG, GrumpkinEngine}, - traits::Engine, - }; - - fn test_poseidon_ro_with() - where - // we can print the field elements we get from E's Base & Scalar fields, - // and compare their byte representations - <::Base as PrimeField>::Repr: std::fmt::Debug, - <::Scalar as PrimeField>::Repr: std::fmt::Debug, - <::Base as PrimeField>::Repr: - PartialEq<<::Scalar as PrimeField>::Repr>, - { - // Check that the number computed inside the circuit is equal to the number - // computed outside the circuit - let mut csprng: OsRng = OsRng; - let constants = PoseidonConstantsCircuit::::default(); - let num_absorbs = 32; - let mut ro: PoseidonRO = - PoseidonRO::new(constants.clone(), num_absorbs); - let mut ro_gadget: PoseidonROCircuit = - PoseidonROCircuit::new(constants, num_absorbs); - let mut cs = SatisfyingAssignment::::new(); - for i in 0..num_absorbs { - let num = E::Scalar::random(&mut csprng); - ro.absorb(num); - let num_gadget = - AllocatedNum::alloc_infallible(cs.namespace(|| format!("data {i}")), || num); - num_gadget - .inputize(&mut cs.namespace(|| format!("input {i}"))) - .unwrap(); - ro_gadget.absorb(&num_gadget); - } - let num = ro.squeeze(NUM_CHALLENGE_BITS); - let num2_bits = ro_gadget.squeeze(&mut cs, NUM_CHALLENGE_BITS).unwrap(); - let num2 = le_bits_to_num(&mut cs, &num2_bits).unwrap(); - assert_eq!(num.to_repr(), num2.get_value().unwrap().to_repr()); - } - - #[test] - fn test_poseidon_ro() { - test_poseidon_ro_with::(); - test_poseidon_ro_with::(); - } -} diff --git a/src/provider/tests/ipa_pc.rs b/src/provider/tests/ipa_pc.rs deleted file mode 100644 index 3007176..0000000 --- a/src/provider/tests/ipa_pc.rs +++ /dev/null @@ -1,130 +0,0 @@ 
-#[cfg(test)] -mod test { - use group::Curve; - use handlebars::Handlebars; - use serde_json::{json, Map, Value}; - - use crate::provider::{ - ipa_pc::EvaluationEngine, - pedersen::{CommitmentKey, CommitmentKeyExtTrait}, - tests::solidity_compatibility_utils::{ - compressed_commitment_to_json, ec_points_to_json, field_elements_to_json, - generate_pcs_solidity_unit_test_data, - }, - GrumpkinEngine, - }; - - static IPA_COMPATIBILITY_UNIT_TESTING_TEMPLATE: &str = " -// SPDX-License-Identifier: Apache-2.0 -pragma solidity ^0.8.16; -import \"@std/Test.sol\"; -import \"src/blocks/grumpkin/Grumpkin.sol\"; -import \"src/blocks/EqPolynomial.sol\"; -import \"src/Utilities.sol\"; -import \"src/blocks/IpaPcs.sol\"; - -contract IpaTest is Test { -function composeIpaInput() public pure returns (InnerProductArgument.IpaInputGrumpkin memory) { -Grumpkin.GrumpkinAffinePoint[] memory ck_v = new Grumpkin.GrumpkinAffinePoint[]({{ len ck_v }}); -{{ #each ck_v }} ck_v[{{ i }}]=Grumpkin.GrumpkinAffinePoint({{ x }}, {{y}});\n {{ /each }} - -Grumpkin.GrumpkinAffinePoint[] memory ck_s = new Grumpkin.GrumpkinAffinePoint[]({{ len ck_s }}); -{{ #each ck_s }} ck_s[{{ i }}]=Grumpkin.GrumpkinAffinePoint({{ x }}, {{y}});\n {{ /each }} - -uint256[] memory point = new uint256[]({{ len point }}); -{{ #each point }} point[{{ i }}]={{ val }};\n {{ /each }} - -uint256[] memory L_vec = new uint256[]({{ len L_vec }}); -{{ #each L_vec }} L_vec[{{ i }}]={{ compressed }};\n {{ /each }} - -uint256[] memory R_vec = new uint256[]({{ len R_vec }}); -{{ #each R_vec }} R_vec[{{ i }}]={{ compressed }};\n {{ /each }} - -uint256 a_hat = {{ a_hat }}; - -// InnerProductInstance -Grumpkin.GrumpkinAffinePoint memory commitment = Grumpkin.GrumpkinAffinePoint({{ commitment_x }}, {{ commitment_y }}); - -uint256 eval = {{ eval }}; - -return InnerProductArgument.IpaInputGrumpkin(ck_v, ck_s, point, L_vec, R_vec, commitment, eval, a_hat); -} - -function testIpaGrumpkinVerification_{{ num_vars }}_Variables() public { 
-InnerProductArgument.IpaInputGrumpkin memory input = composeIpaInput(); -assertTrue(InnerProductArgument.verifyGrumpkin(input, getTranscript())); -} - -function getTranscript() public pure returns (KeccakTranscriptLib.KeccakTranscript memory) { -// b\"TestEval\" in Rust -uint8[] memory label = new uint8[](8); -label[0] = 0x54; -label[1] = 0x65; -label[2] = 0x73; -label[3] = 0x74; -label[4] = 0x45; -label[5] = 0x76; -label[6] = 0x61; -label[7] = 0x6c; - -KeccakTranscriptLib.KeccakTranscript memory keccak_transcript = KeccakTranscriptLib.instantiate(label); -return keccak_transcript; -} -} -"; - - // To generate Solidity unit-test: - // cargo test test_solidity_compatibility_ipa --release -- --ignored --nocapture - // > ipa.t.sol - #[test] - #[ignore] - fn test_solidity_compatibility_ipa() { - let num_vars = 2; - - // Secondary part of verification is IPA over Grumpkin - let (commitment, point, eval, proof, vk) = - generate_pcs_solidity_unit_test_data::<_, EvaluationEngine>(num_vars); - - let num_vars_string = format!("{}", num_vars); - let eval_string = format!("{:?}", eval); - let commitment_x_string = format!("{:?}", commitment.comm.to_affine().x); - let commitment_y_string = format!("{:?}", commitment.comm.to_affine().y); - let proof_a_hat_string = format!("{:?}", proof.a_hat); - - let r_vec = CommitmentKey::::reinterpret_commitments_as_ck(&proof.R_vec) - .expect("can't reinterpred R_vec"); - let l_vec = CommitmentKey::::reinterpret_commitments_as_ck(&proof.L_vec) - .expect("can't reinterpred L_vec"); - - let r_vec_array = compressed_commitment_to_json::(&r_vec.ck); - let l_vec_array = compressed_commitment_to_json::(&l_vec.ck); - let point_array = field_elements_to_json::(&point); - let ckv_array = ec_points_to_json::(&vk.ck_v.ck); - let cks_array = ec_points_to_json::(&vk.ck_s.ck); - - let mut map = Map::new(); - map.insert("num_vars".to_string(), Value::String(num_vars_string)); - map.insert("eval".to_string(), Value::String(eval_string)); - map.insert( - 
"commitment_x".to_string(), - Value::String(commitment_x_string), - ); - map.insert( - "commitment_y".to_string(), - Value::String(commitment_y_string), - ); - map.insert("R_vec".to_string(), Value::Array(r_vec_array)); - map.insert("L_vec".to_string(), Value::Array(l_vec_array)); - map.insert("a_hat".to_string(), Value::String(proof_a_hat_string)); - map.insert("point".to_string(), Value::Array(point_array)); - map.insert("ck_v".to_string(), Value::Array(ckv_array)); - map.insert("ck_s".to_string(), Value::Array(cks_array)); - - let mut reg = Handlebars::new(); - reg.register_template_string("ipa.t.sol", IPA_COMPATIBILITY_UNIT_TESTING_TEMPLATE) - .expect("can't register template"); - - let solidity_unit_test_source = reg.render("ipa.t.sol", &json!(map)).expect("can't render"); - println!("{}", solidity_unit_test_source); - } -} diff --git a/src/provider/tests/mod.rs b/src/provider/tests/mod.rs deleted file mode 100644 index 7184cdd..0000000 --- a/src/provider/tests/mod.rs +++ /dev/null @@ -1,155 +0,0 @@ -mod ipa_pc; - -#[cfg(test)] -pub mod solidity_compatibility_utils { - use std::sync::Arc; - - use group::{ - prime::{PrimeCurve, PrimeCurveAffine}, - GroupEncoding, - }; - use rand::rngs::StdRng; - use serde_json::{Map, Value}; - - use crate::{ - provider::traits::DlogGroup, - spartan::polys::multilinear::MultilinearPolynomial, - traits::{commitment::CommitmentEngineTrait, evaluation::EvaluationEngineTrait, Engine}, - }; - - pub(crate) fn generate_pcs_solidity_unit_test_data>( - num_vars: usize, - ) -> ( - >::Commitment, - Vec, - E::Scalar, - EE::EvaluationArgument, - EE::VerifierKey, - ) { - use rand_core::SeedableRng; - - let mut rng = StdRng::seed_from_u64(num_vars as u64); - - let (poly, point, eval) = crate::provider::util::test_utils::random_poly_with_eval::< - E, - StdRng, - >(num_vars, &mut rng); - - // Mock commitment key. 
- let ck = E::CE::setup(b"test", 1 << num_vars); - let ck_arc = Arc::new(ck.clone()); - // Commits to the provided vector using the provided generators. - let commitment = E::CE::commit(&ck_arc, poly.evaluations()); - - let (proof, vk) = prove_verify_solidity::(ck_arc, &commitment, &poly, &point, &eval); - - (commitment, point, eval, proof, vk) - } - - fn prove_verify_solidity>( - ck: Arc<<::CE as CommitmentEngineTrait>::CommitmentKey>, - commitment: &<::CE as CommitmentEngineTrait>::Commitment, - poly: &MultilinearPolynomial<::Scalar>, - point: &[::Scalar], - eval: &::Scalar, - ) -> (EE::EvaluationArgument, EE::VerifierKey) { - use crate::traits::TranscriptEngineTrait; - - // Generate Prover and verifier key for given commitment key. - let ock = ck.clone(); - let (prover_key, verifier_key) = EE::setup(ck); - - // Generate proof. - let mut prover_transcript = E::TE::new(b"TestEval"); - let proof: EE::EvaluationArgument = EE::prove( - &*ock, - &prover_key, - &mut prover_transcript, - commitment, - poly.evaluations(), - point, - eval, - ) - .unwrap(); - let pcp = prover_transcript.squeeze(b"c").unwrap(); - - // Verify proof. - let mut verifier_transcript = E::TE::new(b"TestEval"); - EE::verify( - &verifier_key, - &mut verifier_transcript, - commitment, - point, - eval, - &proof, - ) - .unwrap(); - let pcv = verifier_transcript.squeeze(b"c").unwrap(); - - // Check if the prover transcript and verifier transcript are kept in the same - // state. 
- assert_eq!(pcp, pcv); - - (proof, verifier_key) - } - - pub(crate) fn field_elements_to_json(field_elements: &[E::Scalar]) -> Vec { - let mut value_vector = vec![]; - field_elements.iter().enumerate().for_each(|(i, fe)| { - let mut value = Map::new(); - value.insert("i".to_string(), Value::String(i.to_string())); - value.insert("val".to_string(), Value::String(format!("{:?}", fe))); - value_vector.push(Value::Object(value)); - }); - value_vector - } - - pub(crate) fn ec_points_to_json(ec_points: &[::Affine]) -> Vec - where - E: Engine, - E::GE: DlogGroup, - { - let mut value_vector = vec![]; - ec_points.iter().enumerate().for_each(|(i, ec_point)| { - let mut value = Map::new(); - let coordinates_info = ec_point.to_curve().to_coordinates(); - let not_infinity = !coordinates_info.2; - assert!(not_infinity); - value.insert("i".to_string(), Value::String(i.to_string())); - value.insert( - "x".to_string(), - Value::String(format!("{:?}", coordinates_info.0)), - ); - value.insert( - "y".to_string(), - Value::String(format!("{:?}", coordinates_info.1)), - ); - value_vector.push(Value::Object(value)); - }); - value_vector - } - - pub(crate) fn compressed_commitment_to_json( - ec_points: &[::Affine], - ) -> Vec - where - E: Engine, - E::GE: DlogGroup, - { - let mut value_vector = vec![]; - ec_points.iter().enumerate().for_each(|(i, ec_point)| { - let mut value = Map::new(); - let compressed_commitment_info = ec_point.to_curve().to_bytes(); - let mut data = compressed_commitment_info.as_ref().to_vec(); - data.reverse(); - - value.insert("i".to_string(), Value::String(i.to_string())); - value.insert( - "compressed".to_string(), - Value::String(format!("0x{}", hex::encode(data))), - ); - value_vector.push(Value::Object(value)); - }); - value_vector - } -} diff --git a/src/provider/traits.rs b/src/provider/traits.rs deleted file mode 100644 index 2a978f5..0000000 --- a/src/provider/traits.rs +++ /dev/null @@ -1,208 +0,0 @@ -use std::{fmt::Debug, ops::Mul}; - -use group::{ - 
prime::{PrimeCurve, PrimeCurveAffine}, - GroupEncoding, -}; -use serde::{Deserialize, Serialize}; - -use crate::traits::{Group, TranscriptReprTrait}; - -/// A trait that defines extensions to the Group trait -pub trait DlogGroup: - Group::ScalarExt> - + Serialize - + for<'de> Deserialize<'de> - + PrimeCurve::ScalarExt, Affine = ::AffineExt> -{ - type ScalarExt; - type AffineExt: Clone - + Debug - + Eq - + Serialize - + for<'de> Deserialize<'de> - + Sync - + Send - // technical bounds, should disappear when associated_type_bounds stabilizes - + Mul - + PrimeCurveAffine; - type Compressed: Clone - + Debug - + Eq - + From<::Repr> - + Into<::Repr> - + Serialize - + for<'de> Deserialize<'de> - + Sync - + Send - + TranscriptReprTrait; - - /// A method to compute a multiexponentation - fn vartime_multiscalar_mul(scalars: &[Self::ScalarExt], bases: &[Self::AffineExt]) -> Self; - - /// Produce a vector of group elements using a static label - fn from_label(label: &'static [u8], n: usize) -> Vec; - - /// Returns the affine coordinates (x, y, infinity) for the point - fn to_coordinates(&self) -> (::Base, ::Base, bool); -} - -/// This implementation behaves in ways specific to the halo2curves suite of -/// curves in: -// - to_coordinates, -// - vartime_multiscalar_mul, where it does not call into accelerated implementations. -// A specific reimplementation exists for the pasta curves in their own module. -#[macro_export] -macro_rules! impl_traits { - ( - $name:ident, - $order_str:literal, - $base_str:literal - ) => { - $crate::impl_traits!($name, $order_str, $base_str, cpu_best_msm); - }; - ( - $name:ident, - $order_str:literal, - $base_str:literal, - $large_msm_method: ident - ) => { - // These compile-time assertions check important assumptions in the memory - // representation of group data that supports the use of Abomonation. 
- static_assertions::assert_eq_size!($name::Affine, [u64; 8]); - static_assertions::assert_eq_size!($name::Point, [u64; 12]); - - impl Group for $name::Point { - type Base = $name::Base; - type Scalar = $name::Scalar; - - fn group_params() -> (Self::Base, Self::Base, BigInt, BigInt) { - let A = $name::Point::a(); - let B = $name::Point::b(); - let order = BigInt::from_str_radix($order_str, 16).unwrap(); - let base = BigInt::from_str_radix($base_str, 16).unwrap(); - - (A, B, order, base) - } - } - - impl DlogGroup for $name::Point { - type ScalarExt = $name::Scalar; - type AffineExt = $name::Affine; - // note: for halo2curves implementations, $name::Compressed == <$name::Point as - // GroupEncoding>::Repr so the blanket impl From for T and impl - // Into apply. - type Compressed = $name::Compressed; - - fn vartime_multiscalar_mul( - scalars: &[Self::ScalarExt], - bases: &[Self::AffineExt], - ) -> Self { - #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))] - if scalars.len() >= 128 { - $large_msm_method(bases, scalars) - } else { - cpu_best_msm(bases, scalars) - } - #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))] - cpu_best_msm(bases, scalars) - } - - fn from_label(label: &'static [u8], n: usize) -> Vec { - let mut shake = Shake256::default(); - shake.update(label); - let mut reader = shake.finalize_xof(); - let mut uniform_bytes_vec = Vec::new(); - for _ in 0..n { - let mut uniform_bytes = [0u8; 32]; - reader.read_exact(&mut uniform_bytes).unwrap(); - uniform_bytes_vec.push(uniform_bytes); - } - let gens_proj: Vec<$name::Point> = (0..n) - .into_par_iter() - .map(|i| { - let hash = $name::Point::hash_to_curve("from_uniform_bytes"); - hash(&uniform_bytes_vec[i]) - }) - .collect(); - - let num_threads = rayon::current_num_threads(); - if gens_proj.len() > num_threads { - let chunk = (gens_proj.len() as f64 / num_threads as f64).ceil() as usize; - (0..num_threads) - .into_par_iter() - .flat_map(|i| { - let start = i * chunk; - let end = if i 
== num_threads - 1 { - gens_proj.len() - } else { - core::cmp::min((i + 1) * chunk, gens_proj.len()) - }; - if end > start { - let mut gens = vec![$name::Affine::identity(); end - start]; - ::batch_normalize(&gens_proj[start..end], &mut gens); - gens - } else { - vec![] - } - }) - .collect() - } else { - let mut gens = vec![$name::Affine::identity(); n]; - ::batch_normalize(&gens_proj, &mut gens); - gens - } - } - - fn to_coordinates(&self) -> (Self::Base, Self::Base, bool) { - let coordinates = self.to_affine().coordinates(); - if coordinates.is_some().unwrap_u8() == 1 && ($name::Point::identity() != *self) { - (*coordinates.unwrap().x(), *coordinates.unwrap().y(), false) - } else { - (Self::Base::zero(), Self::Base::zero(), true) - } - } - } - - impl PrimeFieldExt for $name::Scalar { - fn from_uniform(bytes: &[u8]) -> Self { - let bytes_arr: [u8; 64] = bytes.try_into().unwrap(); - $name::Scalar::from_uniform_bytes(&bytes_arr) - } - } - - impl TranscriptReprTrait for $name::Compressed { - fn to_transcript_bytes(&self) -> Vec { - self.as_ref().to_vec() - } - } - - impl TranscriptReprTrait for $name::Scalar { - fn to_transcript_bytes(&self) -> Vec { - self.to_repr().to_vec() - } - } - - impl TranscriptReprTrait for $name::Affine { - fn to_transcript_bytes(&self) -> Vec { - let (x, y, is_infinity_byte) = { - let coordinates = self.coordinates(); - if coordinates.is_some().unwrap_u8() == 1 - && ($name::Affine::identity() != *self) - { - let c = coordinates.unwrap(); - (*c.x(), *c.y(), u8::from(false)) - } else { - ($name::Base::zero(), $name::Base::zero(), u8::from(false)) - } - }; - - x.to_repr() - .into_iter() - .chain(y.to_repr().into_iter()) - .chain(std::iter::once(is_infinity_byte)) - .collect() - } - } - }; -} diff --git a/src/provider/util/fb_msm.rs b/src/provider/util/fb_msm.rs deleted file mode 100644 index 231c729..0000000 --- a/src/provider/util/fb_msm.rs +++ /dev/null @@ -1,137 +0,0 @@ -/// # Fixed-base Scalar Multiplication -/// -/// This module provides 
an implementation of fixed-base scalar multiplication -/// on elliptic curves. -/// -/// The multiplication is optimized through a windowed method, where scalars are -/// broken into fixed-size windows, pre-computation tables are generated, and -/// results are efficiently combined. -use ff::{PrimeField, PrimeFieldBits}; -use group::{prime::PrimeCurve, Curve}; -use rayon::prelude::*; - -/// Determines the window size for scalar multiplication based on the number of -/// scalars. -/// -/// This is used to balance between pre-computation and number of point -/// additions. -pub(crate) fn get_mul_window_size(num_scalars: usize) -> usize { - if num_scalars < 32 { - 3 - } else { - (num_scalars as f64).ln().ceil() as usize - } -} - -/// Generates a table of multiples of a base point `g` for use in windowed -/// scalar multiplication. -/// -/// This pre-computes multiples of a base point for each window and organizes -/// them into a table for quick lookup during the scalar multiplication process. -/// The table is a vector of vectors, each inner vector corresponding to a -/// window and containing the multiples of `g` for that window. 
-pub(crate) fn get_window_table( - scalar_size: usize, - window: usize, - g: T, -) -> Vec> -where - T: Curve, - T::AffineRepr: Send, -{ - let in_window = 1 << window; - // Number of outer iterations needed to cover the entire scalar - let outerc = (scalar_size + window - 1) / window; - - // Number of multiples of the window's "outer point" needed for each window - // (fewer for the last window) - let last_in_window = 1 << (scalar_size - (outerc - 1) * window); - - let mut multiples_of_g = vec![vec![T::identity(); in_window]; outerc]; - - // Compute the multiples of g for each window - // g_outers = [ 2^{k*window}*g for k in 0..outerc] - let mut g_outer = g; - let mut g_outers = Vec::with_capacity(outerc); - for _ in 0..outerc { - g_outers.push(g_outer); - for _ in 0..window { - g_outer = g_outer.double(); - } - } - multiples_of_g - .par_iter_mut() - .enumerate() - .zip_eq(g_outers) - .for_each(|((outer, multiples_of_g), g_outer)| { - let cur_in_window = if outer == outerc - 1 { - last_in_window - } else { - in_window - }; - - // multiples_of_g = [id, g_outer, 2*g_outer, 3*g_outer, ...], - // where g_outer = 2^{outer*window}*g - let mut g_inner = T::identity(); - for inner in multiples_of_g.iter_mut().take(cur_in_window) { - *inner = g_inner; - g_inner.add_assign(&g_outer); - } - }); - multiples_of_g - .par_iter() - .map(|s| s.iter().map(|s| s.to_affine()).collect()) - .collect() -} - -/// Performs the actual windowed scalar multiplication using a pre-computed -/// table of points. -/// -/// Given a scalar and a table of pre-computed multiples of a base point, this -/// function efficiently computes the scalar multiplication by breaking the -/// scalar into windows and adding the corresponding multiples from the table. 
-fn windowed_mul( - outerc: usize, - window: usize, - multiples_of_g: &[Vec], - scalar: &T::Scalar, -) -> T -where - T: PrimeCurve, - T::Scalar: PrimeFieldBits, -{ - let modulus_size = ::NUM_BITS as usize; - let scalar_val: Vec = scalar.to_le_bits().into_iter().collect(); - - let mut res = T::identity(); - for outer in 0..outerc { - let mut inner = 0usize; - for i in 0..window { - if outer * window + i < modulus_size && scalar_val[outer * window + i] { - inner |= 1 << i; - } - } - res.add_assign(&multiples_of_g[outer][inner]); - } - res -} - -/// Computes multiple scalar multiplications simultaneously using the windowed -/// method. -pub(crate) fn multi_scalar_mul( - scalar_size: usize, - window: usize, - table: &[Vec], - v: &[T::Scalar], -) -> Vec -where - T: PrimeCurve, - T::Scalar: PrimeFieldBits, -{ - let outerc = (scalar_size + window - 1) / window; - assert!(outerc <= table.len()); - - v.par_iter() - .map(|e| windowed_mul::(outerc, window, table, e)) - .collect::>() -} diff --git a/src/provider/util/mod.rs b/src/provider/util/mod.rs deleted file mode 100644 index e45ba7e..0000000 --- a/src/provider/util/mod.rs +++ /dev/null @@ -1,234 +0,0 @@ -//! Utilities for provider module. 
-pub(in crate::provider) mod fb_msm; -pub mod msm { - use halo2curves::{msm::best_multiexp, CurveAffine}; - - // this argument swap is useful until Rust gets named arguments - // and saves significant complexity in macro code - pub fn cpu_best_msm(bases: &[C], scalars: &[C::Scalar]) -> C::Curve { - best_multiexp(scalars, bases) - } -} - -pub mod field { - use ff::{BatchInverter, Field}; - - use crate::errors::NovaError; - - #[inline] - pub fn batch_invert(mut v: Vec) -> Result, NovaError> { - // we only allocate the scratch space if every element of v is nonzero - let mut scratch_space = v - .iter() - .map(|x| { - if !x.is_zero_vartime() { - Ok(*x) - } else { - Err(NovaError::InternalError) - } - }) - .collect::, _>>()?; - let _ = BatchInverter::invert_with_external_scratch(&mut v, &mut scratch_space[..]); - Ok(v) - } -} - -pub mod iterators { - use std::{ - borrow::Borrow, - iter::DoubleEndedIterator, - ops::{AddAssign, MulAssign}, - }; - - use ff::Field; - use rayon::iter::{IndexedParallelIterator, IntoParallelIterator, ParallelIterator}; - use rayon_scan::ScanParallelIterator; - - pub trait DoubleEndedIteratorExt: DoubleEndedIterator { - /// This function employs Horner's scheme and core traits to create a - /// combination of an iterator input with the powers - /// of a provided coefficient. - fn rlc(&mut self, coefficient: &F) -> T - where - T: Clone + for<'a> MulAssign<&'a F> + for<'r> AddAssign<&'r T>, - Self::Item: Borrow, - { - let mut iter = self.rev(); - let Some(fst) = iter.next() else { - panic!("input iterator should not be empty") - }; - - iter.fold(fst.borrow().clone(), |mut acc, item| { - acc *= coefficient; - acc += item.borrow(); - acc - }) - } - } - - impl DoubleEndedIteratorExt for I {} - - pub trait IndexedParallelIteratorExt: IndexedParallelIterator { - /// This function core traits to create a combination of an iterator - /// input with the powers of a provided coefficient. 
- fn rlc(self, coefficient: &F) -> T - where - F: Field, - Self::Item: Borrow, - T: Clone + for<'a> MulAssign<&'a F> + for<'r> AddAssign<&'r T> + Send + Sync, - { - debug_assert!(self.len() > 0); - // generate an iterator of powers of the right length - let v = { - let mut v = vec![*coefficient; self.len()]; - v[0] = F::ONE; - v - }; - // the collect is due to Scan being unindexed - let powers: Vec<_> = v.into_par_iter().scan(|a, b| *a * *b, F::ONE).collect(); - - self.zip_eq(powers.into_par_iter()) - .map(|(pt, val)| { - let mut pt = pt.borrow().clone(); - pt *= &val; - pt - }) - .reduce_with(|mut a, b| { - a += &b; - a - }) - .unwrap() - } - } - - impl IndexedParallelIteratorExt for I {} -} - -#[cfg(test)] -pub mod test_utils { - //! Contains utilities for testing and benchmarking. - use std::sync::Arc; - - use ff::Field; - use rand::rngs::StdRng; - use rand_core::{CryptoRng, RngCore}; - - use crate::{ - spartan::polys::multilinear::MultilinearPolynomial, - traits::{commitment::CommitmentEngineTrait, evaluation::EvaluationEngineTrait, Engine}, - }; - - /// Returns a random polynomial, a point and calculate its evaluation. - pub(crate) fn random_poly_with_eval( - num_vars: usize, - mut rng: &mut R, - ) -> ( - MultilinearPolynomial<::Scalar>, - Vec<::Scalar>, - ::Scalar, - ) { - // Generate random polynomial and point. - let poly = MultilinearPolynomial::random(num_vars, &mut rng); - let point = (0..num_vars) - .map(|_| ::Scalar::random(&mut rng)) - .collect::>(); - - // Calculation evaluation of point over polynomial. - let eval = poly.evaluate(&point); - - (poly, point, eval) - } - - /// Methods used to test the prove and verify flow of - /// [`MultilinearPolynomial`] Commitment Schemes (PCS). - /// - /// Generates a random polynomial and point from a seed to test a - /// proving/verifying flow of one of our [`EvaluationEngine`]. 
- pub(crate) fn prove_verify_from_num_vars>( - num_vars: usize, - ) { - use rand_core::SeedableRng; - - let mut rng = StdRng::seed_from_u64(num_vars as u64); - - let (poly, point, eval) = random_poly_with_eval::(num_vars, &mut rng); - - // Mock commitment key. - let ck = E::CE::setup(b"test", 1 << num_vars); - let ck = Arc::new(ck); - // Commits to the provided vector using the provided generators. - let commitment = E::CE::commit(&ck, poly.evaluations()); - - prove_verify_with::(ck, &commitment, &poly, &point, &eval, true) - } - - fn prove_verify_with>( - ck: Arc<<::CE as CommitmentEngineTrait>::CommitmentKey>, - commitment: &<::CE as CommitmentEngineTrait>::Commitment, - poly: &MultilinearPolynomial<::Scalar>, - point: &[::Scalar], - eval: &::Scalar, - evaluate_bad_proof: bool, - ) { - use std::ops::Add; - - use crate::traits::TranscriptEngineTrait; - - // Generate Prover and verifier key for given commitment key. - let ock = ck.clone(); - let (prover_key, verifier_key) = EE::setup(ck); - - // Generate proof. - let mut prover_transcript = E::TE::new(b"TestEval"); - let proof = EE::prove( - &*ock, - &prover_key, - &mut prover_transcript, - commitment, - poly.evaluations(), - point, - eval, - ) - .unwrap(); - let pcp = prover_transcript.squeeze(b"c").unwrap(); - - // Verify proof. - let mut verifier_transcript = E::TE::new(b"TestEval"); - EE::verify( - &verifier_key, - &mut verifier_transcript, - commitment, - point, - eval, - &proof, - ) - .unwrap(); - let pcv = verifier_transcript.squeeze(b"c").unwrap(); - - // Check if the prover transcript and verifier transcript are kept in the same - // state. - assert_eq!(pcp, pcv); - - if evaluate_bad_proof { - // Generate another point to verify proof. Also produce eval. - let altered_verifier_point = point - .iter() - .map(|s| s.add(::Scalar::ONE)) - .collect::>(); - let altered_verifier_eval = - MultilinearPolynomial::evaluate_with(poly.evaluations(), &altered_verifier_point); - - // Verify proof, should fail. 
- let mut verifier_transcript = E::TE::new(b"TestEval"); - assert!(EE::verify( - &verifier_key, - &mut verifier_transcript, - commitment, - &altered_verifier_point, - &altered_verifier_eval, - &proof, - ) - .is_err()); - } - } -} diff --git a/src/r1cs/mod.rs b/src/r1cs/mod.rs deleted file mode 100644 index 1b1dabc..0000000 --- a/src/r1cs/mod.rs +++ /dev/null @@ -1,921 +0,0 @@ -//! This module defines R1CS related types and a folding scheme for Relaxed R1CS -mod sparse; -pub(crate) mod util; - -use core::cmp::max; - -use ff::Field; -use once_cell::sync::OnceCell; -use rand_core::{CryptoRng, RngCore}; -use rayon::prelude::*; -use serde::{Deserialize, Serialize}; -pub(crate) use sparse::SparseMatrix; - -use crate::{ - constants::{BN_LIMB_WIDTH, BN_N_LIMBS}, - digest::{DigestComputer, SimpleDigestible}, - errors::NovaError, - gadgets::{f_to_nat, nat_to_limbs, scalar_as_base}, - traits::{ - commitment::CommitmentEngineTrait, AbsorbInROTrait, Engine, ROTrait, TranscriptReprTrait, - }, - zip_with, Commitment, CommitmentKey, CE, -}; - -/// A type that holds the shape of the R1CS matrices -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct R1CSShape { - pub(crate) num_cons: usize, - pub(crate) num_vars: usize, - pub(crate) num_io: usize, - pub(crate) A: SparseMatrix, - pub(crate) B: SparseMatrix, - pub(crate) C: SparseMatrix, - #[serde(skip, default = "OnceCell::new")] - pub(crate) digest: OnceCell, -} - -impl SimpleDigestible for R1CSShape {} - -/// A type that holds the result of a R1CS multiplication -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct R1CSResult { - pub(crate) AZ: Vec, - pub(crate) BZ: Vec, - pub(crate) CZ: Vec, -} - -/// A type that holds a witness for a given R1CS instance -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct R1CSWitness { - W: Vec, -} - -/// A type that holds an R1CS instance -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[serde(bound = "")] -pub 
struct R1CSInstance { - pub(crate) comm_W: Commitment, - pub(crate) X: Vec, -} - -/// A type that holds a witness for a given Relaxed R1CS instance -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct RelaxedR1CSWitness { - pub(crate) W: Vec, - pub(crate) E: Vec, -} - -/// A type that holds a Relaxed R1CS instance -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct RelaxedR1CSInstance { - pub(crate) comm_W: Commitment, - pub(crate) comm_E: Commitment, - pub(crate) X: Vec, - pub(crate) u: E::Scalar, -} - -/// A type for functions that hints commitment key sizing by returning the floor -/// of the number of required generators. -pub type CommitmentKeyHint = dyn Fn(&R1CSShape) -> usize; - -/// Generates public parameters for a Rank-1 Constraint System (R1CS). -/// -/// This function takes into consideration the shape of the R1CS matrices and a -/// hint function for the number of generators. It returns a `CommitmentKey`. -/// -/// # Arguments -/// -/// * `S`: The shape of the R1CS matrices. -/// * `ck_floor`: A function that provides a floor for the number of generators. -/// A good function to provide is the `commitment_key_floor` field in the -/// trait `RelaxedR1CSSNARKTrait`. -pub fn commitment_key( - S: &R1CSShape, - ck_floor: &CommitmentKeyHint, -) -> CommitmentKey { - let size = commitment_key_size(S, ck_floor); - E::CE::setup(b"ck", size) -} - -/// Computes the number of generators required for the commitment key -/// corresponding to shape `S`. 
-pub fn commitment_key_size(S: &R1CSShape, ck_floor: &CommitmentKeyHint) -> usize { - let num_cons = S.num_cons; - let num_vars = S.num_vars; - let ck_hint = ck_floor(S); - max(max(num_cons, num_vars), ck_hint) -} - -impl R1CSShape { - /// Create an object of type `R1CSShape` from the explicitly specified R1CS - /// matrices - pub fn new( - num_cons: usize, - num_vars: usize, - num_io: usize, - A: SparseMatrix, - B: SparseMatrix, - C: SparseMatrix, - ) -> Result { - let is_valid = |num_cons: usize, - num_vars: usize, - num_io: usize, - M: &SparseMatrix| - -> Result, NovaError> { - M.iter() - .map(|(row, col, _val)| { - if row >= num_cons || col > num_io + num_vars { - Err(NovaError::InvalidIndex) - } else { - Ok(()) - } - }) - .collect::, NovaError>>() - }; - - is_valid(num_cons, num_vars, num_io, &A)?; - is_valid(num_cons, num_vars, num_io, &B)?; - is_valid(num_cons, num_vars, num_io, &C)?; - - // We require the number of public inputs/outputs to be even - if num_io % 2 != 0 { - return Err(NovaError::InvalidStepCircuitIO); - } - - Ok(Self { - num_cons, - num_vars, - num_io, - A, - B, - C, - digest: OnceCell::new(), - }) - } - - /// Generate a random [`R1CSShape`] with the specified number of - /// constraints, variables, and public inputs/outputs. 
- pub fn random( - num_cons: usize, - num_vars: usize, - num_io: usize, - num_entries: usize, - mut rng: &mut R, - ) -> Self { - assert!(num_cons.is_power_of_two()); - assert!(num_vars.is_power_of_two()); - assert!(num_entries.is_power_of_two()); - assert!(num_io < num_vars); - - let num_rows = num_cons; - let num_cols = num_vars + 1 + num_io; - - let (NA, NB, NC) = { - let N_div_3 = num_entries / 3; - let NC = num_entries - (2 * N_div_3); - (N_div_3, N_div_3, NC) - }; - - let A = SparseMatrix::random(num_rows, num_cols, NA, &mut rng); - let B = SparseMatrix::random(num_rows, num_cols, NB, &mut rng); - let C = SparseMatrix::random(num_rows, num_cols, NC, &mut rng); - - Self { - num_cons, - num_vars, - num_io, - A, - B, - C, - digest: Default::default(), - } - } - - /// Generate a satisfying [`RelaxedR1CSWitness`] and [`RelaxedR1CSInstance`] - /// for this [`R1CSShape`]. - pub fn random_witness_instance( - &self, - commitment_key: &CommitmentKey, - mut rng: &mut R, - ) -> (RelaxedR1CSWitness, RelaxedR1CSInstance) { - // Sample a random witness and compute the error term - let W = (0..self.num_vars) - .map(|_| E::Scalar::random(&mut rng)) - .collect::>(); - let u = E::Scalar::random(&mut rng); - let X = (0..self.num_io) - .map(|_| E::Scalar::random(&mut rng)) - .collect::>(); - - let E = self.compute_E(&W, &u, &X).unwrap(); - - let (comm_W, comm_E) = rayon::join( - || CE::::commit(commitment_key, &W), - || CE::::commit(commitment_key, &E), - ); - - let witness = RelaxedR1CSWitness { W, E }; - let instance = RelaxedR1CSInstance { - comm_W, - comm_E, - u, - X, - }; - - (witness, instance) - } - - /// returned the digest of the `R1CSShape` - pub fn digest(&self) -> E::Scalar { - self.digest - .get_or_try_init(|| DigestComputer::new(self).digest()) - .cloned() - .expect("Failure retrieving digest") - } - - // Checks regularity conditions on the R1CSShape, required in Spartan-class - // SNARKs Returns false if num_cons or num_vars are not powers of two, or if - // num_io 
> num_vars - #[inline] - pub(crate) fn is_regular_shape(&self) -> bool { - let cons_valid = self.num_cons.next_power_of_two() == self.num_cons; - let vars_valid = self.num_vars.next_power_of_two() == self.num_vars; - let io_lt_vars = self.num_io < self.num_vars; - cons_valid && vars_valid && io_lt_vars - } - - pub(crate) fn multiply_vec( - &self, - z: &[E::Scalar], - ) -> Result<(Vec, Vec, Vec), NovaError> { - if z.len() != self.num_io + self.num_vars + 1 { - return Err(NovaError::InvalidWitnessLength); - } - - let (Az, (Bz, Cz)) = rayon::join( - || self.A.multiply_vec(z), - || rayon::join(|| self.B.multiply_vec(z), || self.C.multiply_vec(z)), - ); - - Ok((Az, Bz, Cz)) - } - - pub(crate) fn multiply_witness( - &self, - W: &[E::Scalar], - u: &E::Scalar, - X: &[E::Scalar], - ) -> Result<(Vec, Vec, Vec), NovaError> { - if X.len() != self.num_io || W.len() != self.num_vars { - return Err(NovaError::InvalidWitnessLength); - } - - let (Az, (Bz, Cz)) = rayon::join( - || self.A.multiply_witness(W, u, X), - || { - rayon::join( - || self.B.multiply_witness(W, u, X), - || self.C.multiply_witness(W, u, X), - ) - }, - ); - - Ok((Az, Bz, Cz)) - } - - pub(crate) fn multiply_witness_into( - &self, - W: &[E::Scalar], - u: &E::Scalar, - X: &[E::Scalar], - ABC_Z: &mut R1CSResult, - ) -> Result<(), NovaError> { - if X.len() != self.num_io || W.len() != self.num_vars { - return Err(NovaError::InvalidWitnessLength); - } - - let R1CSResult { AZ, BZ, CZ } = ABC_Z; - - rayon::join( - || self.A.multiply_witness_into(W, u, X, AZ), - || { - rayon::join( - || self.B.multiply_witness_into(W, u, X, BZ), - || self.C.multiply_witness_into(W, u, X, CZ), - ) - }, - ); - - Ok(()) - } - - /// Computes the error term E = Az * Bz - u*Cz. 
- fn compute_E( - &self, - W: &[E::Scalar], - u: &E::Scalar, - X: &[E::Scalar], - ) -> Result, NovaError> { - if X.len() != self.num_io || W.len() != self.num_vars { - return Err(NovaError::InvalidWitnessLength); - } - - let (Az, (Bz, Cz)) = rayon::join( - || self.A.multiply_witness(W, u, X), - || { - rayon::join( - || self.B.multiply_witness(W, u, X), - || self.C.multiply_witness(W, u, X), - ) - }, - ); - - let E = zip_with!( - (Az.into_par_iter(), Bz.into_par_iter(), Cz.into_par_iter()), - |a, b, c| a * b - c * u - ) - .collect::>(); - - Ok(E) - } - - /// Checks if the Relaxed R1CS instance is satisfiable given a witness and - /// its shape - pub fn is_sat_relaxed( - &self, - ck: &CommitmentKey, - U: &RelaxedR1CSInstance, - W: &RelaxedR1CSWitness, - ) -> Result<(), NovaError> { - assert_eq!(W.W.len(), self.num_vars); - assert_eq!(W.E.len(), self.num_cons); - assert_eq!(U.X.len(), self.num_io); - - // verify if Az * Bz - u*Cz = E - let E = self.compute_E(&W.W, &U.u, &U.X)?; - W.E.par_iter() - .zip_eq(E.into_par_iter()) - .enumerate() - .try_for_each(|(i, (we, e))| { - if *we != e { - // constraint failed, retrieve constraint name - Err(NovaError::UnSatIndex(i)) - } else { - Ok(()) - } - })?; - - // verify if comm_E and comm_W are commitments to E and W - let res_comm = { - let (comm_W, comm_E) = - rayon::join(|| CE::::commit(ck, &W.W), || CE::::commit(ck, &W.E)); - U.comm_W == comm_W && U.comm_E == comm_E - }; - - if !res_comm { - return Err(NovaError::UnSat); - } - Ok(()) - } - - /// Checks if the R1CS instance is satisfiable given a witness and its shape - pub fn is_sat( - &self, - ck: &CommitmentKey, - U: &R1CSInstance, - W: &R1CSWitness, - ) -> Result<(), NovaError> { - assert_eq!(W.W.len(), self.num_vars); - assert_eq!(U.X.len(), self.num_io); - - // verify if Az * Bz - u*Cz = 0 - let E = self.compute_E(&W.W, &E::Scalar::ONE, &U.X)?; - E.into_par_iter().enumerate().try_for_each(|(i, e)| { - if e != E::Scalar::ZERO { - Err(NovaError::UnSatIndex(i)) - } else { 
- Ok(()) - } - })?; - - // verify if comm_W is a commitment to W - if U.comm_W != CE::::commit(ck, &W.W) { - return Err(NovaError::UnSat); - } - Ok(()) - } - - /// A method to compute a commitment to the cross-term `T` given a - /// Relaxed R1CS instance-witness pair and an R1CS instance-witness pair - pub fn commit_T( - &self, - ck: &CommitmentKey, - U1: &RelaxedR1CSInstance, - W1: &RelaxedR1CSWitness, - U2: &R1CSInstance, - W2: &R1CSWitness, - ) -> Result<(Vec, Commitment), NovaError> { - let (AZ_1, BZ_1, CZ_1) = tracing::trace_span!("AZ_1, BZ_1, CZ_1") - .in_scope(|| self.multiply_witness(&W1.W, &U1.u, &U1.X))?; - - let (AZ_2, BZ_2, CZ_2) = tracing::trace_span!("AZ_2, BZ_2, CZ_2") - .in_scope(|| self.multiply_witness(&W2.W, &E::Scalar::ONE, &U2.X))?; - - let (AZ_1_circ_BZ_2, AZ_2_circ_BZ_1, u_1_cdot_CZ_2, u_2_cdot_CZ_1) = - tracing::trace_span!("cross terms").in_scope(|| { - let AZ_1_circ_BZ_2 = (0..AZ_1.len()) - .into_par_iter() - .map(|i| AZ_1[i] * BZ_2[i]) - .collect::>(); - let AZ_2_circ_BZ_1 = (0..AZ_2.len()) - .into_par_iter() - .map(|i| AZ_2[i] * BZ_1[i]) - .collect::>(); - let u_1_cdot_CZ_2 = (0..CZ_2.len()) - .into_par_iter() - .map(|i| U1.u * CZ_2[i]) - .collect::>(); - let u_2_cdot_CZ_1 = (0..CZ_1.len()) - .into_par_iter() - .map(|i| CZ_1[i]) - .collect::>(); - (AZ_1_circ_BZ_2, AZ_2_circ_BZ_1, u_1_cdot_CZ_2, u_2_cdot_CZ_1) - }); - - let T = tracing::trace_span!("T").in_scope(|| { - AZ_1_circ_BZ_2 - .par_iter() - .zip_eq(&AZ_2_circ_BZ_1) - .zip_eq(&u_1_cdot_CZ_2) - .zip_eq(&u_2_cdot_CZ_1) - .map(|(((a, b), c), d)| *a + *b - *c - *d) - .collect::>() - }); - - let comm_T = CE::::commit(ck, &T); - - Ok((T, comm_T)) - } - - /// A method to compute a commitment to the cross-term `T` given a - /// Relaxed R1CS instance-witness pair and an R1CS instance-witness pair - /// - /// This is [`R1CSShape::commit_T`] but into a buffer. 
- pub fn commit_T_into( - &self, - ck: &CommitmentKey, - U1: &RelaxedR1CSInstance, - W1: &RelaxedR1CSWitness, - U2: &R1CSInstance, - W2: &R1CSWitness, - T: &mut Vec, - ABC_Z_1: &mut R1CSResult, - ABC_Z_2: &mut R1CSResult, - ) -> Result, NovaError> { - tracing::info_span!("AZ_1, BZ_1, CZ_1") - .in_scope(|| self.multiply_witness_into(&W1.W, &U1.u, &U1.X, ABC_Z_1))?; - - let R1CSResult { - AZ: AZ_1, - BZ: BZ_1, - CZ: CZ_1, - } = ABC_Z_1; - - tracing::info_span!("AZ_2, BZ_2, CZ_2") - .in_scope(|| self.multiply_witness_into(&W2.W, &E::Scalar::ONE, &U2.X, ABC_Z_2))?; - - let R1CSResult { - AZ: AZ_2, - BZ: BZ_2, - CZ: CZ_2, - } = ABC_Z_2; - - // this doesn't allocate memory but has bad temporal cache locality -- should - // test to see which is faster - T.clear(); - tracing::info_span!("T").in_scope(|| { - (0..AZ_1.len()) - .into_par_iter() - .map(|i| { - let AZ_1_circ_BZ_2 = AZ_1[i] * BZ_2[i]; - let AZ_2_circ_BZ_1 = AZ_2[i] * BZ_1[i]; - let u_1_cdot_Cz_2_plus_Cz_1 = U1.u * CZ_2[i] + CZ_1[i]; - AZ_1_circ_BZ_2 + AZ_2_circ_BZ_1 - u_1_cdot_Cz_2_plus_Cz_1 - }) - .collect_into_vec(T) - }); - - Ok(CE::::commit(ck, T)) - } - - /// Pads the `R1CSShape` so that the shape passes `is_regular_shape` - /// Renumbers variables to accommodate padded variables - pub fn pad(&self) -> Self { - // check if the provided R1CSShape is already as required - if self.is_regular_shape() { - return self.clone(); - } - - // equalize the number of variables, constraints, and public IO - let m = max(max(self.num_vars, self.num_cons), self.num_io).next_power_of_two(); - - // check if the number of variables are as expected, then - // we simply set the number of constraints to the next power of two - if self.num_vars == m { - return Self { - num_cons: m, - num_vars: m, - num_io: self.num_io, - A: self.A.clone(), - B: self.B.clone(), - C: self.C.clone(), - digest: OnceCell::new(), - }; - } - - // otherwise, we need to pad the number of variables and renumber variable - // accesses - let num_vars_padded = 
m; - let num_cons_padded = m; - - let apply_pad = |mut M: SparseMatrix| -> SparseMatrix { - M.indices.par_iter_mut().for_each(|c| { - if *c >= self.num_vars { - *c += num_vars_padded - self.num_vars - } - }); - - M.cols += num_vars_padded - self.num_vars; - - let ex = { - let nnz = M.indptr.last().unwrap(); - vec![*nnz; num_cons_padded - self.num_cons] - }; - M.indptr.extend(ex); - M - }; - - let A_padded = apply_pad(self.A.clone()); - let B_padded = apply_pad(self.B.clone()); - let C_padded = apply_pad(self.C.clone()); - - Self { - num_cons: num_cons_padded, - num_vars: num_vars_padded, - num_io: self.num_io, - A: A_padded, - B: B_padded, - C: C_padded, - digest: OnceCell::new(), - } - } -} - -impl R1CSResult { - /// Produces a default `R1CSResult` given an `R1CSShape` - pub fn default(num_cons: usize) -> Self { - Self { - AZ: vec![E::Scalar::ZERO; num_cons], - BZ: vec![E::Scalar::ZERO; num_cons], - CZ: vec![E::Scalar::ZERO; num_cons], - } - } -} - -impl R1CSWitness { - /// A method to create a witness object using a vector of scalars - pub fn new(S: &R1CSShape, W: Vec) -> Result { - if S.num_vars != W.len() { - Err(NovaError::InvalidWitnessLength) - } else { - Ok(Self { W }) - } - } - - /// Commits to the witness using the supplied generators - pub fn commit(&self, ck: &CommitmentKey) -> Commitment { - CE::::commit(ck, &self.W) - } -} - -impl R1CSInstance { - /// A method to create an instance object using constituent elements - pub fn new( - S: &R1CSShape, - comm_W: Commitment, - X: Vec, - ) -> Result { - if S.num_io != X.len() { - Err(NovaError::InvalidInputLength) - } else { - Ok(Self { comm_W, X }) - } - } -} - -impl AbsorbInROTrait for R1CSInstance { - fn absorb_in_ro(&self, ro: &mut E::RO) { - self.comm_W.absorb_in_ro(ro); - for x in &self.X { - ro.absorb(scalar_as_base::(*x)); - } - } -} - -impl RelaxedR1CSWitness { - /// Produces a default `RelaxedR1CSWitness` given an `R1CSShape` - pub fn default(S: &R1CSShape) -> Self { - Self { - W: 
vec![E::Scalar::ZERO; S.num_vars], - E: vec![E::Scalar::ZERO; S.num_cons], - } - } - - /// Initializes a new `RelaxedR1CSWitness` from an `R1CSWitness` - pub fn from_r1cs_witness(S: &R1CSShape, witness: R1CSWitness) -> Self { - Self { - W: witness.W, - E: vec![E::Scalar::ZERO; S.num_cons], - } - } - - /// Commits to the witness using the supplied generators - pub fn commit(&self, ck: &CommitmentKey) -> (Commitment, Commitment) { - (CE::::commit(ck, &self.W), CE::::commit(ck, &self.E)) - } - - /// Folds an incoming `R1CSWitness` into the current one - pub fn fold( - &self, - W2: &R1CSWitness, - T: &[E::Scalar], - r: &E::Scalar, - ) -> Result { - let (W1, E1) = (&self.W, &self.E); - let W2 = &W2.W; - - if W1.len() != W2.len() { - return Err(NovaError::InvalidWitnessLength); - } - - let W = zip_with!((W1.par_iter(), W2), |a, b| *a + *r * *b).collect::>(); - let E = zip_with!((E1.par_iter(), T), |a, b| *a + *r * *b).collect::>(); - Ok(Self { W, E }) - } - - /// Mutably folds an incoming `R1CSWitness` into the current one - pub fn fold_mut( - &mut self, - W2: &R1CSWitness, - T: &[E::Scalar], - r: &E::Scalar, - ) -> Result<(), NovaError> { - if self.W.len() != W2.W.len() { - return Err(NovaError::InvalidWitnessLength); - } - - self.W - .par_iter_mut() - .zip_eq(&W2.W) - .for_each(|(a, b)| *a += *r * *b); - self.E - .par_iter_mut() - .zip_eq(T) - .for_each(|(a, b)| *a += *r * *b); - - Ok(()) - } - - /// Pads the provided witness to the correct length - pub fn pad(&self, S: &R1CSShape) -> Self { - let mut W = self.W.clone(); - W.extend(vec![E::Scalar::ZERO; S.num_vars - W.len()]); - - let mut E = self.E.clone(); - E.extend(vec![E::Scalar::ZERO; S.num_cons - E.len()]); - - Self { W, E } - } -} - -impl RelaxedR1CSInstance { - /// Produces a default `RelaxedR1CSInstance` given `R1CSGens` and - /// `R1CSShape` - pub fn default(_ck: &CommitmentKey, S: &R1CSShape) -> Self { - let (comm_W, comm_E) = (Commitment::::default(), Commitment::::default()); - Self { - comm_W, - comm_E, 
- u: E::Scalar::ZERO, - X: vec![E::Scalar::ZERO; S.num_io], - } - } - - /// Initializes a new `RelaxedR1CSInstance` from an `R1CSInstance` - pub fn from_r1cs_instance( - _ck: &CommitmentKey, - S: &R1CSShape, - instance: R1CSInstance, - ) -> Self { - assert_eq!(S.num_io, instance.X.len()); - - Self { - comm_W: instance.comm_W, - comm_E: Commitment::::default(), - u: E::Scalar::ONE, - X: instance.X, - } - } - - /// Initializes a new `RelaxedR1CSInstance` from an `R1CSInstance` - pub fn from_r1cs_instance_unchecked(comm_W: &Commitment, X: &[E::Scalar]) -> Self { - Self { - comm_W: *comm_W, - comm_E: Commitment::::default(), - u: E::Scalar::ONE, - X: X.to_vec(), - } - } - - /// Folds an incoming `RelaxedR1CSInstance` into the current one - pub fn fold(&self, U2: &R1CSInstance, comm_T: &Commitment, r: &E::Scalar) -> Self { - let (X1, u1, comm_W_1, comm_E_1) = - (&self.X, &self.u, &self.comm_W.clone(), &self.comm_E.clone()); - let (X2, comm_W_2) = (&U2.X, &U2.comm_W); - - // weighted sum of X, comm_W, comm_E, and u - let X = zip_with!((X1.par_iter(), X2), |a, b| *a + *r * *b).collect::>(); - let comm_W = *comm_W_1 + *comm_W_2 * *r; - let comm_E = *comm_E_1 + *comm_T * *r; - let u = *u1 + *r; - - Self { - comm_W, - comm_E, - X, - u, - } - } - - /// Mutably folds an incoming `RelaxedR1CSInstance` into the current one - pub fn fold_mut(&mut self, U2: &R1CSInstance, comm_T: &Commitment, r: &E::Scalar) { - let (X2, comm_W_2) = (&U2.X, &U2.comm_W); - - // weighted sum of X, comm_W, comm_E, and u - self.X.par_iter_mut().zip_eq(X2).for_each(|(a, b)| { - *a += *r * *b; - }); - self.comm_W = self.comm_W + *comm_W_2 * *r; - self.comm_E = self.comm_E + *comm_T * *r; - self.u += *r; - } -} - -impl TranscriptReprTrait for RelaxedR1CSInstance { - fn to_transcript_bytes(&self) -> Vec { - [ - self.comm_W.to_transcript_bytes(), - self.comm_E.to_transcript_bytes(), - self.u.to_transcript_bytes(), - self.X.as_slice().to_transcript_bytes(), - ] - .concat() - } -} - -impl AbsorbInROTrait for 
RelaxedR1CSInstance { - fn absorb_in_ro(&self, ro: &mut E::RO) { - self.comm_W.absorb_in_ro(ro); - self.comm_E.absorb_in_ro(ro); - ro.absorb(scalar_as_base::(self.u)); - - // absorb each element of self.X in bignum format - for x in &self.X { - let limbs: Vec = - nat_to_limbs(&f_to_nat(x), BN_LIMB_WIDTH, BN_N_LIMBS).unwrap(); - for limb in limbs { - ro.absorb(scalar_as_base::(limb)); - } - } - } -} - -/// Empty buffer for `commit_T_into` -pub fn default_T(num_cons: usize) -> Vec { - Vec::with_capacity(num_cons) -} - -#[cfg(test)] -pub(crate) mod tests { - use ff::Field; - use rand_chacha::ChaCha20Rng; - use rand_core::SeedableRng; - - use super::*; - use crate::{ - provider::{Bn256EngineIPA, Bn256EngineKZG}, - r1cs::sparse::SparseMatrix, - traits::Engine, - }; - - pub(crate) fn tiny_r1cs(num_vars: usize) -> R1CSShape { - let one = ::ONE; - let (num_cons, num_vars, num_io, A, B, C) = { - let num_cons = 4; - let num_io = 2; - - // Consider a cubic equation: `x^3 + x + 5 = y`, where `x` and `y` are - // respectively the input and output. 
The R1CS for this problem - // consists of the following constraints: `I0 * I0 - Z0 = 0` - // `Z0 * I0 - Z1 = 0` - // `(Z1 + I0) * 1 - Z2 = 0` - // `(Z2 + 5) * 1 - I1 = 0` - - // Relaxed R1CS is a set of three sparse matrices (A B C), where there is a row - // for every constraint and a column for every entry in z = (vars, - // u, inputs) An R1CS instance is satisfiable iff: - // Az \circ Bz = u \cdot Cz + E, where z = (vars, 1, inputs) - let mut A: Vec<(usize, usize, E::Scalar)> = Vec::new(); - let mut B: Vec<(usize, usize, E::Scalar)> = Vec::new(); - let mut C: Vec<(usize, usize, E::Scalar)> = Vec::new(); - - // constraint 0 entries in (A,B,C) - // `I0 * I0 - Z0 = 0` - A.push((0, num_vars + 1, one)); - B.push((0, num_vars + 1, one)); - C.push((0, 0, one)); - - // constraint 1 entries in (A,B,C) - // `Z0 * I0 - Z1 = 0` - A.push((1, 0, one)); - B.push((1, num_vars + 1, one)); - C.push((1, 1, one)); - - // constraint 2 entries in (A,B,C) - // `(Z1 + I0) * 1 - Z2 = 0` - A.push((2, 1, one)); - A.push((2, num_vars + 1, one)); - B.push((2, num_vars, one)); - C.push((2, 2, one)); - - // constraint 3 entries in (A,B,C) - // `(Z2 + 5) * 1 - I1 = 0` - A.push((3, 2, one)); - A.push((3, num_vars, one + one + one + one + one)); - B.push((3, num_vars, one)); - C.push((3, num_vars + 2, one)); - - (num_cons, num_vars, num_io, A, B, C) - }; - - // create a shape object - let rows = num_cons; - let cols = num_vars + num_io + 1; - - R1CSShape::new( - num_cons, - num_vars, - num_io, - SparseMatrix::new(&A, rows, cols), - SparseMatrix::new(&B, rows, cols), - SparseMatrix::new(&C, rows, cols), - ) - .unwrap() - } - - fn test_pad_tiny_r1cs_with() { - let padded_r1cs = tiny_r1cs::(3).pad(); - assert!(padded_r1cs.is_regular_shape()); - - let expected_r1cs = tiny_r1cs::(4); - - assert_eq!(padded_r1cs, expected_r1cs); - } - - #[test] - fn test_pad_tiny_r1cs() { - test_pad_tiny_r1cs_with::(); - } - - fn test_random_r1cs_with() { - let mut rng = ChaCha20Rng::from_seed([0u8; 32]); - - let 
ck_size: usize = 16_384; - let ck = E::CE::setup(b"ipa", ck_size); - - let cases = [(16, 16, 2, 16), (16, 32, 12, 8), (256, 256, 2, 1024)]; - - for (num_cons, num_vars, num_io, num_entries) in cases { - let S = R1CSShape::::random(num_cons, num_vars, num_io, num_entries, &mut rng); - let (W, U) = S.random_witness_instance(&ck, &mut rng); - S.is_sat_relaxed(&ck, &U, &W).unwrap(); - } - } - - #[test] - fn test_random_r1cs() { - test_random_r1cs_with::(); - } -} diff --git a/src/r1cs/sparse.rs b/src/r1cs/sparse.rs deleted file mode 100644 index 16f5a79..0000000 --- a/src/r1cs/sparse.rs +++ /dev/null @@ -1,380 +0,0 @@ -//! # Sparse Matrices -//! -//! This module defines a custom implementation of CSR/CSC sparse matrices. -//! Specifically, we implement sparse matrix / dense vector multiplication -//! to compute the `A z`, `B z`, and `C z` in Nova. - -use std::{cmp::Ordering, collections::BTreeSet}; - -use ff::PrimeField; -use itertools::Itertools as _; -use rand_core::{CryptoRng, RngCore}; -use rayon::prelude::*; -use ref_cast::RefCast; -use serde::{Deserialize, Serialize}; - -/// CSR format sparse matrix, We follow the names used by scipy. 
-/// Detailed explanation here: -#[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct SparseMatrix { - /// all non-zero values in the matrix - pub data: Vec, - /// column indices - pub indices: Vec, - /// row information - pub indptr: Vec, - /// number of columns - pub cols: usize, -} - -/// Wrapper type for encode rows of [`SparseMatrix`] -#[derive(Debug, Clone, RefCast)] -#[repr(transparent)] -pub struct RowData([usize; 2]); - -/// [`SparseMatrix`]s are often large, and this helps with cloning bottlenecks -impl Clone for SparseMatrix { - fn clone(&self) -> Self { - Self { - data: self.data.par_iter().cloned().collect(), - indices: self.indices.par_iter().cloned().collect(), - indptr: self.indptr.par_iter().cloned().collect(), - cols: self.cols, - } - } -} - -impl SparseMatrix { - /// 0x0 empty matrix - pub fn empty() -> Self { - Self { - data: vec![], - indices: vec![], - indptr: vec![0], - cols: 0, - } - } - - /// Construct from the COO representation; Vec. - /// We assume that the rows are sorted during construction. - pub fn new(matrix: &[(usize, usize, F)], rows: usize, cols: usize) -> Self { - let mut new_matrix = vec![vec![]; rows]; - for (row, col, val) in matrix { - new_matrix[*row].push((*col, *val)); - } - - for row in new_matrix.iter() { - assert!(row.windows(2).all(|w| w[0].0 < w[1].0)); - } - - let mut indptr = vec![0; rows + 1]; - for (i, col) in new_matrix.iter().enumerate() { - indptr[i + 1] = indptr[i] + col.len(); - } - - let mut indices = vec![]; - let mut data = vec![]; - for col in new_matrix { - let (idx, val): (Vec<_>, Vec<_>) = col.into_iter().unzip(); - indices.extend(idx); - data.extend(val); - } - - Self { - data, - indices, - indptr, - cols, - } - } - - /// Samples a new random matrix of size `rows` x `cols` with `num_entries` - /// non-zero entries. 
- pub fn random( - rows: usize, - cols: usize, - num_entries: usize, - mut rng: &mut R, - ) -> Self { - assert!(num_entries <= rows * cols); - - let mut indices = BTreeSet::<(usize, usize)>::new(); - while indices.len() < num_entries { - let row = rng.next_u32() as usize % rows; - let col = rng.next_u32() as usize % cols; - indices.insert((row, col)); - } - - let matrix = indices - .into_iter() - .map(|(row, col)| (row, col, F::random(&mut rng))) - .collect::>(); - - Self::new(&matrix, rows, cols) - } - - /// Returns an iterator into the rows - pub fn iter_rows(&self) -> impl Iterator { - self.indptr - .windows(2) - .map(|ptrs| RowData::ref_cast(ptrs.try_into().unwrap())) - } - - /// Returns a parallel iterator into the rows - pub fn par_iter_rows(&self) -> impl IndexedParallelIterator { - self.indptr - .par_windows(2) - .map(|ptrs| RowData::ref_cast(ptrs.try_into().unwrap())) - } - - /// Retrieves the data for row slice [i..j] from `row`. - /// [`RowData`] **must** be created from unmodified `self` previously to - /// guarentee safety. - pub fn get_row(&self, row: &RowData) -> impl Iterator { - self.data[row.0[0]..row.0[1]] - .iter() - .zip_eq(&self.indices[row.0[0]..row.0[1]]) - } - - /// Retrieves the data for row slice [i..j] from `ptrs`. - /// We assume that `ptrs` is indexed from `indptrs` and do not check if the - /// returned slice is actually a valid row. - pub fn get_row_unchecked(&self, ptrs: &[usize; 2]) -> impl Iterator { - self.data[ptrs[0]..ptrs[1]] - .iter() - .zip_eq(&self.indices[ptrs[0]..ptrs[1]]) - } - - /// Multiply by a dense vector; uses rayon to parallelize. - pub fn multiply_vec(&self, vector: &[F]) -> Vec { - assert_eq!(self.cols, vector.len(), "invalid shape"); - - self.multiply_vec_unchecked(vector) - } - - /// Multiply by a dense vector; uses rayon to parallelize. - /// This does not check that the shape of the matrix/vector are compatible. 
- #[tracing::instrument( - skip_all, - level = "trace", - name = "SparseMatrix::multiply_vec_unchecked" - )] - fn multiply_vec_unchecked(&self, vector: &[F]) -> Vec { - let mut sink: Vec = Vec::with_capacity(self.indptr.len() - 1); - self.multiply_vec_into_unchecked(vector, &mut sink); - sink - } - - fn multiply_vec_into_unchecked(&self, vector: &[F], sink: &mut Vec) { - self.indptr - .par_windows(2) - .map(|ptrs| { - self.get_row_unchecked(ptrs.try_into().unwrap()) - .map(|(val, col_idx)| *val * vector[*col_idx]) - .sum() - }) - .collect_into_vec(sink); - } - - /// Multiply by a witness representing a dense vector; uses rayon to - /// parallelize. - pub fn multiply_witness(&self, W: &[F], u: &F, X: &[F]) -> Vec { - assert_eq!(self.cols, W.len() + X.len() + 1, "invalid shape"); - - self.multiply_witness_unchecked(W, u, X) - } - - /// Multiply by a witness representing a dense vector; uses rayon to - /// parallelize. This does not check that the shape of the matrix/vector - /// are compatible. - #[tracing::instrument( - skip_all, - level = "trace", - name = "SparseMatrix::multiply_vec_unchecked" - )] - fn multiply_witness_unchecked(&self, W: &[F], u: &F, X: &[F]) -> Vec { - // preallocate the result vector - let mut sink = Vec::with_capacity(self.indptr.len() - 1); - self.multiply_witness_into_unchecked(W, u, X, &mut sink); - sink - } - - /// Multiply by a witness representing a dense vector; uses rayon to - /// parallelize. - pub fn multiply_witness_into(&self, W: &[F], u: &F, X: &[F], sink: &mut Vec) { - assert_eq!(self.cols, W.len() + X.len() + 1, "invalid shape"); - - self.multiply_witness_into_unchecked(W, u, X, sink); - } - - /// Multiply by a witness representing a dense vector; uses rayon to - /// parallelize. This does not check that the shape of the matrix/vector - /// are compatible. 
- fn multiply_witness_into_unchecked(&self, W: &[F], u: &F, X: &[F], sink: &mut Vec) { - let num_vars = W.len(); - self.indptr - .par_windows(2) - .map(|ptrs| { - self.get_row_unchecked(ptrs.try_into().unwrap()).fold( - F::ZERO, - |acc, (val, col_idx)| { - let val = match col_idx.cmp(&num_vars) { - Ordering::Less => *val * W[*col_idx], - Ordering::Equal => *val * *u, - Ordering::Greater => *val * X[*col_idx - num_vars - 1], - }; - acc + val - }, - ) - }) - .collect_into_vec(sink); - } - - /// number of non-zero entries - pub fn len(&self) -> usize { - *self.indptr.last().unwrap() - } - - /// empty matrix - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - - /// returns a custom iterator - pub fn iter(&self) -> Iter<'_, F> { - let mut row = 0; - while self.indptr[row + 1] == 0 { - row += 1; - } - Iter { - matrix: self, - row, - i: 0, - nnz: *self.indptr.last().unwrap(), - } - } - - pub fn num_rows(&self) -> usize { - self.indptr.len() - 1 - } - - pub fn num_cols(&self) -> usize { - self.cols - } -} - -/// Iterator for sparse matrix -#[derive(Debug)] -pub struct Iter<'a, F: PrimeField> { - matrix: &'a SparseMatrix, - row: usize, - i: usize, - nnz: usize, -} - -impl<'a, F: PrimeField> Iterator for Iter<'a, F> { - type Item = (usize, usize, F); - - fn next(&mut self) -> Option { - // are we at the end? 
- if self.i == self.nnz { - return None; - } - - // compute current item - let curr_item = ( - self.row, - self.matrix.indices[self.i], - self.matrix.data[self.i], - ); - - // advance the iterator - self.i += 1; - // edge case at the end - if self.i == self.nnz { - return Some(curr_item); - } - // if `i` has moved to next row - while self.i >= self.matrix.indptr[self.row + 1] { - self.row += 1; - } - - Some(curr_item) - } -} - -// #[cfg(test)] -// mod tests { -// #[cfg(not(target_arch = "wasm32"))] -// use proptest::{ -// prelude::*, -// strategy::{BoxedStrategy, Just, Strategy}, -// }; - -// use super::SparseMatrix; -// #[cfg(not(target_arch = "wasm32"))] -// use crate::r1cs::util::FWrap; -// use crate::{ -// provider::PallasEngine, -// traits::{Engine, Group}, -// }; - -// type G = ::GE; -// type Fr = ::Scalar; - -// #[test] -// fn test_matrix_creation() { -// let matrix_data = vec![ -// (0, 1, Fr::from(2)), -// (1, 2, Fr::from(3)), -// (2, 0, Fr::from(4)), -// ]; -// let sparse_matrix = SparseMatrix::::new(&matrix_data, 3, 3); - -// assert_eq!( -// sparse_matrix.data, -// vec![Fr::from(2), Fr::from(3), Fr::from(4)] -// ); -// assert_eq!(sparse_matrix.indices, vec![1, 2, 0]); -// assert_eq!(sparse_matrix.indptr, vec![0, 1, 2, 3]); -// } - -// #[test] -// fn test_matrix_vector_multiplication() { -// let matrix_data = vec![ -// (0, 1, Fr::from(2)), -// (0, 2, Fr::from(7)), -// (1, 2, Fr::from(3)), -// (2, 0, Fr::from(4)), -// ]; -// let sparse_matrix = SparseMatrix::::new(&matrix_data, 3, 3); -// let vector = vec![Fr::from(1), Fr::from(2), Fr::from(3)]; - -// let result = sparse_matrix.multiply_vec(&vector); - -// assert_eq!(result, vec![Fr::from(25), Fr::from(9), Fr::from(4)]); -// } - -// #[cfg(not(target_arch = "wasm32"))] -// fn coo_strategy() -> BoxedStrategy)>> { -// let coo_strategy = -// any::>().prop_flat_map(|f| (0usize..100, 0usize..100, -// Just(f))); proptest::collection::vec(coo_strategy, 10).boxed() -// } - -// #[cfg(not(target_arch = "wasm32"))] -// 
proptest! { -// #[test] -// fn test_matrix_iter(mut coo_matrix in coo_strategy()) { -// // process the randomly generated coo matrix -// coo_matrix.sort_by_key(|(row, col, _val)| (*row, *col)); -// coo_matrix.dedup_by_key(|(row, col, _val)| (*row, *col)); -// let coo_matrix = coo_matrix.into_iter().map(|(row, col, val)| { -// (row, col, val.0) }).collect::>(); - -// let matrix = SparseMatrix::new(&coo_matrix, 100, 100); - -// prop_assert_eq!(coo_matrix, matrix.iter().collect::>()); -// } -// } -// } diff --git a/src/spartan/batched.rs b/src/spartan/batched.rs deleted file mode 100644 index 28b6b7c..0000000 --- a/src/spartan/batched.rs +++ /dev/null @@ -1,650 +0,0 @@ -//! This module implements `BatchedRelaxedR1CSSNARKTrait` using Spartan that is -//! generic over the polynomial commitment and evaluation argument (i.e., a PCS) -//! This version of Spartan does not use preprocessing so the verifier keeps the -//! entire description of R1CS matrices. This is essentially optimal for the -//! verifier when using an IPA-based polynomial commitment scheme. This batched -//! implementation batches the outer and inner sumchecks of the Spartan SNARK. 
- -use core::slice; -use std::{iter, sync::Arc}; - -use ff::Field; -use itertools::Itertools; -use once_cell::sync::OnceCell; -use rayon::prelude::*; -use serde::{Deserialize, Serialize}; - -use super::{ - compute_eval_table_sparse, - math::Math, - polys::{eq::EqPolynomial, multilinear::MultilinearPolynomial}, - powers, - snark::batch_eval_reduce, - sumcheck::SumcheckProof, - PolyEvalInstance, PolyEvalWitness, -}; -use crate::{ - digest::{DigestComputer, SimpleDigestible}, - errors::NovaError, - r1cs::{R1CSShape, RelaxedR1CSInstance, RelaxedR1CSWitness, SparseMatrix}, - spartan::{ - polys::{multilinear::SparsePolynomial, power::PowPolynomial}, - snark::batch_eval_verify, - }, - traits::{ - evaluation::EvaluationEngineTrait, - snark::{BatchedRelaxedR1CSSNARKTrait, DigestHelperTrait, RelaxedR1CSSNARKTrait}, - Engine, TranscriptEngineTrait, - }, - zip_with, CommitmentKey, -}; - -/// A succinct proof of knowledge of a witness to a batch of relaxed R1CS -/// instances The proof is produced using Spartan's combination of the sum-check -/// and the commitment to a vector viewed as a polynomial commitment -#[derive(Clone, Debug, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct BatchedRelaxedR1CSSNARK> { - sc_proof_outer: SumcheckProof, - // Claims ([Azᵢ(τᵢ)], [Bzᵢ(τᵢ)], [Czᵢ(τᵢ)]) - claims_outer: Vec<(E::Scalar, E::Scalar, E::Scalar)>, - // [Eᵢ(r_x)] - evals_E: Vec, - sc_proof_inner: SumcheckProof, - // [Wᵢ(r_y[1..])] - evals_W: Vec, - sc_proof_batch: SumcheckProof, - // [Wᵢ(r_z), Eᵢ(r_z)] - evals_batch: Vec, - eval_arg: EE::EvaluationArgument, -} - -/// A type that represents the prover's key -#[derive(Debug, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct ProverKey> { - pub pk_ee: EE::ProverKey, - pub vk_digest: E::Scalar, // digest of the verifier's key -} - -/// A type that represents the verifier's key -#[derive(Debug, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct VerifierKey> { - pub vk_ee: EE::VerifierKey, - S: Vec>, - #[serde(skip, 
default = "OnceCell::new")] - pub digest: OnceCell, -} - -impl> VerifierKey { - fn new(shapes: Vec>, vk_ee: EE::VerifierKey) -> Self { - Self { - vk_ee, - S: shapes, - digest: OnceCell::new(), - } - } -} - -impl> SimpleDigestible for VerifierKey {} - -impl> DigestHelperTrait for VerifierKey { - /// Returns the digest of the verifier's key. - fn digest(&self) -> E::Scalar { - self.digest - .get_or_try_init(|| { - let dc = DigestComputer::::new(self); - dc.digest() - }) - .cloned() - .expect("Failure to retrieve digest!") - } -} - -impl> BatchedRelaxedR1CSSNARKTrait - for BatchedRelaxedR1CSSNARK -{ - type ProverKey = ProverKey; - - type VerifierKey = VerifierKey; - - fn initialize_pk( - ck: Arc>, - vk_digest: E::Scalar, - ) -> Result { - // NOTE: We do not use the verifier key in this context - // TODO: This currently samples a `ck_c` element, does this need to - // be truly secret, if so, retrieve from an SRS. - let (pk_ee, _vk) = EE::setup(ck); - - Ok(ProverKey { pk_ee, vk_digest }) - } - - fn setup( - ck: Arc>, - S: Vec<&R1CSShape>, - ) -> Result<(Self::ProverKey, Self::VerifierKey), NovaError> { - let (pk_ee, vk_ee) = EE::setup(ck); - - let S = S.iter().map(|s| s.pad()).collect(); - - let vk = VerifierKey::new(S, vk_ee); - - let pk = ProverKey { - pk_ee, - vk_digest: vk.digest(), - }; - - Ok((pk, vk)) - } - - fn prove( - ck: &CommitmentKey, - pk: &Self::ProverKey, - S: Vec<&R1CSShape>, - U: &[RelaxedR1CSInstance], - W: &[RelaxedR1CSWitness], - ) -> Result { - let num_instances = U.len(); - // Pad shapes and ensure their sizes are correct - let S = S.iter().map(|s| s.pad()).collect::>(); - - // Pad (W,E) for each instance - let W = zip_with!(iter, (W, S), |w, s| w.pad(s)).collect::>>(); - - let mut transcript = E::TE::new(b"BatchedRelaxedR1CSSNARK"); - - transcript.absorb(b"vk", &pk.vk_digest); - if num_instances > 1 { - let num_instances_field = E::Scalar::from(num_instances as u64); - transcript.absorb(b"n", &num_instances_field); - } - transcript.absorb(b"U", 
&U); - - let (polys_W, polys_E): (Vec<_>, Vec<_>) = W.into_iter().map(|w| (w.W, w.E)).unzip(); - - // Append public inputs to W: Z = [W, u, X] - let polys_Z = zip_with!(iter, (polys_W, U), |w, u| [ - w.clone(), - vec![u.u], - u.X.clone() - ] - .concat()) - .collect::>>(); - - let (num_rounds_x, num_rounds_y): (Vec<_>, Vec<_>) = S - .iter() - .map(|s| (s.num_cons.log_2(), s.num_vars.log_2() + 1)) - .unzip(); - let num_rounds_x_max = *num_rounds_x.iter().max().unwrap(); - let num_rounds_y_max = *num_rounds_y.iter().max().unwrap(); - - // Generate tau polynomial corresponding to eq(τ, τ², τ⁴ , …) - // for a random challenge τ - let tau = transcript.squeeze(b"t")?; - let all_taus = PowPolynomial::squares(&tau, num_rounds_x_max); - - let polys_tau = num_rounds_x - .iter() - .map(|&num_rounds_x| PowPolynomial::evals_with_powers(&all_taus, num_rounds_x)) - .map(MultilinearPolynomial::new) - .collect::>(); - - // Compute MLEs of Az, Bz, Cz, uCz + E - let (polys_Az, polys_Bz, polys_Cz): (Vec<_>, Vec<_>, Vec<_>) = - zip_with!(par_iter, (S, polys_Z), |s, poly_Z| { - let (poly_Az, poly_Bz, poly_Cz) = s.multiply_vec(poly_Z)?; - Ok((poly_Az, poly_Bz, poly_Cz)) - }) - .collect::, NovaError>>()? 
- .into_iter() - .multiunzip(); - - let polys_uCz_E = zip_with!(par_iter, (U, polys_E, polys_Cz), |u, poly_E, poly_Cz| { - zip_with!(par_iter, (poly_Cz, poly_E), |cz, e| u.u * cz + e).collect::>() - }) - .collect::>(); - - let comb_func_outer = |poly_A_comp: &E::Scalar, - poly_B_comp: &E::Scalar, - poly_C_comp: &E::Scalar, - poly_D_comp: &E::Scalar| - -> E::Scalar { - *poly_A_comp * (*poly_B_comp * *poly_C_comp - *poly_D_comp) - }; - - // Sample challenge for random linear-combination of outer claims - let outer_r = transcript.squeeze(b"out_r")?; - let outer_r_powers = powers(&outer_r, num_instances); - - // Verify outer sumcheck: Az * Bz - uCz_E for each instance - let (sc_proof_outer, r_x, claims_outer) = - SumcheckProof::prove_cubic_with_additive_term_batch( - &vec![E::Scalar::ZERO; num_instances], - &num_rounds_x, - polys_tau, - polys_Az - .into_iter() - .map(MultilinearPolynomial::new) - .collect(), - polys_Bz - .into_iter() - .map(MultilinearPolynomial::new) - .collect(), - polys_uCz_E - .into_iter() - .map(MultilinearPolynomial::new) - .collect(), - &outer_r_powers, - comb_func_outer, - &mut transcript, - )?; - - let r_x = num_rounds_x - .iter() - .map(|&num_rounds| r_x[(num_rounds_x_max - num_rounds)..].to_vec()) - .collect::>(); - - // Extract evaluations of Az, Bz from Sumcheck and Cz, E at r_x - let (evals_Az_Bz_Cz, evals_E): (Vec<_>, Vec<_>) = zip_with!( - par_iter, - (claims_outer[1], claims_outer[2], polys_Cz, polys_E, r_x), - |eval_Az, eval_Bz, poly_Cz, poly_E, r_x| { - let (eval_Cz, eval_E) = rayon::join( - || MultilinearPolynomial::evaluate_with(poly_Cz, r_x), - || MultilinearPolynomial::evaluate_with(poly_E, r_x), - ); - ((*eval_Az, *eval_Bz, eval_Cz), eval_E) - } - ) - .unzip(); - - evals_Az_Bz_Cz.iter().zip_eq(evals_E.iter()).for_each( - |(&(eval_Az, eval_Bz, eval_Cz), &eval_E)| { - transcript.absorb( - b"claims_outer", - &[eval_Az, eval_Bz, eval_Cz, eval_E].as_slice(), - ) - }, - ); - - let inner_r = transcript.squeeze(b"in_r")?; - let 
inner_r_square = inner_r.square(); - let inner_r_cube = inner_r_square * inner_r; - let inner_r_powers = powers(&inner_r_cube, num_instances); - - let claims_inner_joint = evals_Az_Bz_Cz - .iter() - .map(|(eval_Az, eval_Bz, eval_Cz)| { - *eval_Az + inner_r * eval_Bz + inner_r_square * eval_Cz - }) - .collect::>(); - - let polys_ABCs = { - let inner = |M_evals_As: Vec, - M_evals_Bs: Vec, - M_evals_Cs: Vec| - -> Vec { - zip_with!( - into_par_iter, - (M_evals_As, M_evals_Bs, M_evals_Cs), - |eval_A, eval_B, eval_C| eval_A + inner_r * eval_B + inner_r_square * eval_C - ) - .collect::>() - }; - - zip_with!(par_iter, (S, r_x), |s, r_x| { - let evals_rx = EqPolynomial::evals_from_points(r_x); - let (eval_A, eval_B, eval_C) = compute_eval_table_sparse(s, &evals_rx); - MultilinearPolynomial::new(inner(eval_A, eval_B, eval_C)) - }) - .collect::>() - }; - - let polys_Z = polys_Z - .into_iter() - .zip_eq(num_rounds_y.iter()) - .map(|(mut z, &num_rounds_y)| { - z.resize(1 << num_rounds_y, E::Scalar::ZERO); - MultilinearPolynomial::new(z) - }) - .collect::>(); - - let comb_func = |poly_A_comp: &E::Scalar, poly_B_comp: &E::Scalar| -> E::Scalar { - *poly_A_comp * *poly_B_comp - }; - - let (sc_proof_inner, r_y, _claims_inner): ( - SumcheckProof, - Vec, - (Vec<_>, Vec<_>), - ) = SumcheckProof::prove_quad_batch( - &claims_inner_joint, - &num_rounds_y, - polys_ABCs, - polys_Z, - &inner_r_powers, - comb_func, - &mut transcript, - )?; - - let r_y = num_rounds_y - .iter() - .map(|num_rounds| { - let (_, r_y_hi) = r_y.split_at(num_rounds_y_max - num_rounds); - r_y_hi - }) - .collect::>(); - - let evals_W = zip_with!(par_iter, (polys_W, r_y), |poly, r_y| { - MultilinearPolynomial::evaluate_with(poly, &r_y[1..]) - }) - .collect::>(); - - // Create evaluation instances for W(r_y[1..]) and E(r_x) - let (w_vec, u_vec) = { - let mut w_vec = Vec::with_capacity(2 * num_instances); - let mut u_vec = Vec::with_capacity(2 * num_instances); - w_vec.extend(polys_W.into_iter().map(|poly| PolyEvalWitness 
{ p: poly })); - u_vec.extend(zip_with!(iter, (evals_W, U, r_y), |eval, u, r_y| { - PolyEvalInstance { - c: u.comm_W, - x: r_y[1..].to_vec(), - e: *eval, - } - })); - - w_vec.extend(polys_E.into_iter().map(|poly| PolyEvalWitness { p: poly })); - u_vec.extend(zip_with!( - (evals_E.iter(), U.iter(), r_x), - |eval_E, u, r_x| PolyEvalInstance { - c: u.comm_E, - x: r_x, - e: *eval_E, - } - )); - (w_vec, u_vec) - }; - - let (batched_u, batched_w, sc_proof_batch, claims_batch_left) = - batch_eval_reduce(u_vec, &w_vec, &mut transcript)?; - - let eval_arg = EE::prove( - ck, - &pk.pk_ee, - &mut transcript, - &batched_u.c, - &batched_w.p, - &batched_u.x, - &batched_u.e, - )?; - - Ok(Self { - sc_proof_outer, - claims_outer: evals_Az_Bz_Cz, - evals_E, - sc_proof_inner, - evals_W, - sc_proof_batch, - evals_batch: claims_batch_left, - eval_arg, - }) - } - - fn verify( - &self, - vk: &Self::VerifierKey, - U: &[RelaxedR1CSInstance], - ) -> Result<(), NovaError> { - let num_instances = U.len(); - let mut transcript = E::TE::new(b"BatchedRelaxedR1CSSNARK"); - - transcript.absorb(b"vk", &vk.digest()); - if num_instances > 1 { - let num_instances_field = E::Scalar::from(num_instances as u64); - transcript.absorb(b"n", &num_instances_field); - } - transcript.absorb(b"U", &U); - - let num_instances = U.len(); - - let (num_rounds_x, num_rounds_y): (Vec<_>, Vec<_>) = - vk.S.iter() - .map(|s| (s.num_cons.log_2(), s.num_vars.log_2() + 1)) - .unzip(); - let num_rounds_x_max = *num_rounds_x.iter().max().unwrap(); - let num_rounds_y_max = *num_rounds_y.iter().max().unwrap(); - - // Define τ polynomials of the appropriate size for each instance - let tau = transcript.squeeze(b"t")?; - let all_taus = PowPolynomial::squares(&tau, num_rounds_x_max); - - let polys_tau = num_rounds_x - .iter() - .map(|&num_rounds_x| PowPolynomial::evals_with_powers(&all_taus, num_rounds_x)) - .map(MultilinearPolynomial::new) - .collect::>(); - - // Sample challenge for random linear-combination of outer claims - let 
outer_r = transcript.squeeze(b"out_r")?; - let outer_r_powers = powers(&outer_r, num_instances); - - let (claim_outer_final, r_x) = self.sc_proof_outer.verify_batch( - &vec![E::Scalar::ZERO; num_instances], - &num_rounds_x, - &outer_r_powers, - 3, - &mut transcript, - )?; - - // Since each instance has a different number of rounds, the Sumcheck - // prover skips the first num_rounds_x_max - num_rounds_x rounds. - // The evaluation point for each instance is therefore r_x[num_rounds_x_max - - // num_rounds_x..] - let r_x = num_rounds_x - .iter() - .map(|num_rounds| r_x[(num_rounds_x_max - num_rounds)..].to_vec()) - .collect::>(); - - // Extract evaluations into a vector [(Azᵢ, Bzᵢ, Czᵢ, Eᵢ)] - let ABCE_evals = || self.claims_outer.iter().zip_eq(self.evals_E.iter()); - - // Add evaluations of Az, Bz, Cz, E to transcript - for ((claim_Az, claim_Bz, claim_Cz), eval_E) in ABCE_evals() { - transcript.absorb( - b"claims_outer", - &[*claim_Az, *claim_Bz, *claim_Cz, *eval_E].as_slice(), - ) - } - - let chis_r_x = r_x - .par_iter() - .map(|r_x| EqPolynomial::evals_from_points(r_x)) - .collect::>(); - - // Evaluate τ(rₓ) for each instance - let evals_tau = zip_with!(iter, (polys_tau, chis_r_x), |poly_tau, er_x| { - MultilinearPolynomial::evaluate_with_chis(poly_tau.evaluations(), er_x) - }); - - // Compute expected claim for all instances ∑ᵢ rⁱ⋅τ(rₓ)⋅(Azᵢ⋅Bzᵢ − uᵢ⋅Czᵢ − Eᵢ) - let claim_outer_final_expected = zip_with!( - (ABCE_evals(), U.iter(), evals_tau, outer_r_powers.iter()), - |ABCE_eval, u, eval_tau, r| { - let ((claim_Az, claim_Bz, claim_Cz), eval_E) = ABCE_eval; - *r * eval_tau * (*claim_Az * claim_Bz - u.u * claim_Cz - eval_E) - } - ) - .sum::(); - - if claim_outer_final != claim_outer_final_expected { - return Err(NovaError::InvalidSumcheckProof); - } - - let inner_r = transcript.squeeze(b"in_r")?; - let inner_r_square = inner_r.square(); - let inner_r_cube = inner_r_square * inner_r; - let inner_r_powers = powers(&inner_r_cube, num_instances); - - // Compute inner 
claims Mzᵢ = (Azᵢ + r⋅Bzᵢ + r²⋅Czᵢ), - // which are batched by Sumcheck into one claim: ∑ᵢ r³ⁱ⋅Mzᵢ - let claims_inner = self - .claims_outer - .iter() - .map(|(claim_Az, claim_Bz, claim_Cz)| { - *claim_Az + inner_r * claim_Bz + inner_r_square * claim_Cz - }) - .collect::>(); - - let (claim_inner_final, r_y) = self.sc_proof_inner.verify_batch( - &claims_inner, - &num_rounds_y, - &inner_r_powers, - 2, - &mut transcript, - )?; - let r_y: Vec> = num_rounds_y - .iter() - .map(|num_rounds| r_y[(num_rounds_y_max - num_rounds)..].to_vec()) - .collect(); - - // Compute evaluations of Zᵢ = [Wᵢ, uᵢ, Xᵢ] at r_y - // Zᵢ(r_y) = (1−r_y[0])⋅W(r_y[1..]) + r_y[0]⋅MLE([uᵢ, Xᵢ])(r_y[1..]) - let evals_Z = zip_with!(iter, (self.evals_W, U, r_y), |eval_W, U, r_y| { - let eval_X = { - // constant term - let poly_X = iter::once(U.u).chain(U.X.iter().cloned()).collect(); - SparsePolynomial::new(r_y.len() - 1, poly_X).evaluate(&r_y[1..]) - }; - (E::Scalar::ONE - r_y[0]) * eval_W + r_y[0] * eval_X - }) - .collect::>(); - - // compute evaluations of R1CS matrices M(r_x, r_y) = eq(r_y)ᵀ⋅M⋅eq(r_x) - let multi_evaluate = |M_vec: &[&SparseMatrix], - chi_r_x: &[E::Scalar], - r_y: &[E::Scalar]| - -> Vec { - let evaluate_with_table = - |M: &SparseMatrix, T_x: &[E::Scalar], T_y: &[E::Scalar]| -> E::Scalar { - M.par_iter_rows() - .enumerate() - .map(|(row_idx, row)| { - M.get_row(row) - .map(|(val, col_idx)| T_x[row_idx] * T_y[*col_idx] * val) - .sum::() - }) - .sum() - }; - - let T_x = chi_r_x; - let T_y = EqPolynomial::evals_from_points(r_y); - - M_vec - .par_iter() - .map(|&M_vec| evaluate_with_table(M_vec, T_x, &T_y)) - .collect() - }; - - // Compute inner claim ∑ᵢ r³ⁱ⋅(Aᵢ(r_x, r_y) + r⋅Bᵢ(r_x, r_y) + r²⋅Cᵢ(r_x, - // r_y))⋅Zᵢ(r_y) - let claim_inner_final_expected = zip_with!( - iter, - (vk.S, chis_r_x, r_y, evals_Z, inner_r_powers), - |S, r_x, r_y, eval_Z, r_i| { - let evals = multi_evaluate(&[&S.A, &S.B, &S.C], r_x, r_y); - let eval = evals[0] + inner_r * evals[1] + inner_r_square * evals[2]; - 
eval * r_i * eval_Z - } - ) - .sum::(); - - if claim_inner_final != claim_inner_final_expected { - return Err(NovaError::InvalidSumcheckProof); - } - - // Create evaluation instances for W(r_y[1..]) and E(r_x) - let u_vec = { - let mut u_vec = Vec::with_capacity(2 * num_instances); - u_vec.extend(zip_with!(iter, (self.evals_W, U, r_y), |eval, u, r_y| { - PolyEvalInstance { - c: u.comm_W, - x: r_y[1..].to_vec(), - e: *eval, - } - })); - - u_vec.extend(zip_with!(iter, (self.evals_E, U, r_x), |eval, u, r_x| { - PolyEvalInstance { - c: u.comm_E, - x: r_x.to_vec(), - e: *eval, - } - })); - u_vec - }; - - let batched_u = batch_eval_verify( - u_vec, - &mut transcript, - &self.sc_proof_batch, - &self.evals_batch, - )?; - - // verify - EE::verify( - &vk.vk_ee, - &mut transcript, - &batched_u.c, - &batched_u.x, - &batched_u.e, - &self.eval_arg, - )?; - - Ok(()) - } -} - -impl> RelaxedR1CSSNARKTrait - for BatchedRelaxedR1CSSNARK -{ - type ProverKey = ProverKey; - - type VerifierKey = VerifierKey; - - fn ck_floor() -> Box Fn(&'a R1CSShape) -> usize> { - >::ck_floor() - } - - fn initialize_pk( - ck: Arc>, - vk_digest: E::Scalar, - ) -> Result { - >::initialize_pk(ck, vk_digest) - } - - fn setup( - ck: Arc>, - S: &R1CSShape, - ) -> Result<(Self::ProverKey, Self::VerifierKey), NovaError> { - >::setup(ck, vec![S]) - } - - fn prove( - ck: &CommitmentKey, - pk: &Self::ProverKey, - S: &R1CSShape, - U: &RelaxedR1CSInstance, - W: &RelaxedR1CSWitness, - ) -> Result { - let slice_U = slice::from_ref(U); - let slice_W = slice::from_ref(W); - >::prove(ck, pk, vec![S], slice_U, slice_W) - } - - fn verify(&self, vk: &Self::VerifierKey, U: &RelaxedR1CSInstance) -> Result<(), NovaError> { - let slice = slice::from_ref(U); - >::verify(self, vk, slice) - } -} diff --git a/src/spartan/batched_ppsnark.rs b/src/spartan/batched_ppsnark.rs deleted file mode 100644 index 8ee9439..0000000 --- a/src/spartan/batched_ppsnark.rs +++ /dev/null @@ -1,1413 +0,0 @@ -//! 
batched pp snark - -use core::slice; -use std::sync::Arc; - -use ff::Field; -use itertools::{chain, Itertools as _}; -use once_cell::sync::*; -use rayon::prelude::*; -use serde::{Deserialize, Serialize}; - -use crate::{ - digest::{DigestComputer, SimpleDigestible}, - errors::NovaError, - r1cs::{R1CSShape, RelaxedR1CSInstance, RelaxedR1CSWitness}, - spartan::{ - math::Math, - polys::{ - eq::EqPolynomial, - identity::IdentityPolynomial, - masked_eq::MaskedEqPolynomial, - multilinear::{MultilinearPolynomial, SparsePolynomial}, - power::PowPolynomial, - univariate::{CompressedUniPoly, UniPoly}, - }, - powers, - ppsnark::{R1CSShapeSparkCommitment, R1CSShapeSparkRepr}, - sumcheck::{ - engine::{ - InnerSumcheckInstance, MemorySumcheckInstance, OuterSumcheckInstance, - SumcheckEngine, WitnessBoundSumcheck, - }, - SumcheckProof, - }, - PolyEvalInstance, PolyEvalWitness, - }, - traits::{ - commitment::{CommitmentEngineTrait, CommitmentTrait, Len}, - evaluation::EvaluationEngineTrait, - snark::{BatchedRelaxedR1CSSNARKTrait, DigestHelperTrait, RelaxedR1CSSNARKTrait}, - Engine, TranscriptEngineTrait, - }, - zip_with, zip_with_for_each, Commitment, CommitmentKey, CompressedCommitment, -}; - -/// A type that represents the prover's key -#[derive(Debug, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct ProverKey> { - pk_ee: EE::ProverKey, - S_repr: Vec>, - S_comm: Vec>, - vk_digest: E::Scalar, // digest of verifier's key -} - -/// A type that represents the verifier's key -#[derive(Debug, Deserialize, Serialize)] -#[serde(bound = "")] -pub struct VerifierKey> { - vk_ee: EE::VerifierKey, - S_comm: Vec>, - num_vars: Vec, - #[serde(skip, default = "OnceCell::new")] - digest: OnceCell, -} -impl> VerifierKey { - fn new( - num_vars: Vec, - S_comm: Vec>, - vk_ee: EE::VerifierKey, - ) -> Self { - Self { - num_vars, - S_comm, - vk_ee, - digest: Default::default(), - } - } -} - -impl> SimpleDigestible for VerifierKey {} - -impl> DigestHelperTrait for VerifierKey { - /// Returns the 
digest of the verifier's key - fn digest(&self) -> E::Scalar { - self.digest - .get_or_try_init(|| { - let dc = DigestComputer::new(self); - dc.digest() - }) - .cloned() - .expect("Failure to retrieve digest!") - } -} - -/// A succinct proof of knowledge of a witness to a relaxed R1CS instance -/// The proof is produced using Spartan's combination of the sum-check and -/// the commitment to a vector viewed as a polynomial commitment -#[derive(Clone, Debug, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct BatchedRelaxedR1CSSNARK> { - // commitment to oracles: the first three are for Az, Bz, Cz, - // and the last two are for memory reads - comms_Az_Bz_Cz: Vec<[CompressedCommitment; 3]>, - comms_L_row_col: Vec<[CompressedCommitment; 2]>, - // commitments to aid the memory checks - // [t_plus_r_inv_row, w_plus_r_inv_row, t_plus_r_inv_col, w_plus_r_inv_col] - comms_mem_oracles: Vec<[CompressedCommitment; 4]>, - - // claims about Az, Bz, and Cz polynomials - evals_Az_Bz_Cz_at_tau: Vec<[E::Scalar; 3]>, - - // sum-check - sc: SumcheckProof, - - // claims from the end of sum-check - evals_Az_Bz_Cz_W_E: Vec<[E::Scalar; 5]>, - evals_L_row_col: Vec<[E::Scalar; 2]>, - // [t_plus_r_inv_row, w_plus_r_inv_row, t_plus_r_inv_col, w_plus_r_inv_col] - evals_mem_oracle: Vec<[E::Scalar; 4]>, - // [val_A, val_B, val_C, row, col, ts_row, ts_col] - evals_mem_preprocessed: Vec<[E::Scalar; 7]>, - - // a PCS evaluation argument - eval_arg: EE::EvaluationArgument, -} - -impl> BatchedRelaxedR1CSSNARKTrait - for BatchedRelaxedR1CSSNARK -{ - type ProverKey = ProverKey; - type VerifierKey = VerifierKey; - - fn ck_floor() -> Box Fn(&'a R1CSShape) -> usize> { - Box::new(|shape: &R1CSShape| -> usize { - // the commitment key should be large enough to commit to the R1CS matrices - std::cmp::max( - shape.A.len() + shape.B.len() + shape.C.len(), - std::cmp::max(shape.num_cons, 2 * shape.num_vars), - ) - }) - } - - fn initialize_pk( - ck: Arc>, - vk_digest: E::Scalar, - ) -> Result { - 
todo!("unimplemented for batched_ppsnark"); - } - - fn setup( - ck: Arc>, - S: Vec<&R1CSShape>, - ) -> Result<(Self::ProverKey, Self::VerifierKey), NovaError> { - for s in S.iter() { - // check the provided commitment key meets minimal requirements - if ck.length() < >::ck_floor()(s) { - // return Err(NovaError::InvalidCommitmentKeyLength); - return Err(NovaError::InternalError); - } - } - let (pk_ee, vk_ee) = EE::setup(ck.clone()); - - let S = S.iter().map(|s| s.pad()).collect::>(); - let S_repr = S.iter().map(R1CSShapeSparkRepr::new).collect::>(); - let S_comm = S_repr - .iter() - .map(|s_repr| s_repr.commit(&*ck)) - .collect::>(); - let num_vars = S.iter().map(|s| s.num_vars).collect::>(); - let vk = VerifierKey::new(num_vars, S_comm.clone(), vk_ee); - let pk = ProverKey { - pk_ee, - S_repr, - S_comm, - vk_digest: vk.digest(), - }; - Ok((pk, vk)) - } - - fn prove( - ck: &CommitmentKey, - pk: &Self::ProverKey, - S: Vec<&R1CSShape>, - U: &[RelaxedR1CSInstance], - W: &[RelaxedR1CSWitness], - ) -> Result { - // Pad shapes so that num_vars = num_cons = Nᵢ and check the sizes are correct - let S = S.par_iter().map(|s| s.pad()).collect::>(); - - // N[i] = max(|Aᵢ|+|Bᵢ|+|Cᵢ|, 2*num_varsᵢ, num_consᵢ) - let Nis = pk.S_repr.iter().map(|s| s.N).collect::>(); - assert!(Nis.iter().all(|&Ni| Ni.is_power_of_two())); - let N_max = *Nis.iter().max().unwrap(); - - let num_instances = U.len(); - - // Pad [(Wᵢ,Eᵢ)] to the next power of 2 (not to Ni) - let W = - zip_with!(par_iter, (W, S), |w, s| w.pad(s)).collect::>>(); - - // number of rounds of sum-check - let num_rounds_sc = N_max.log_2(); - - // Initialize transcript with vk || [Uᵢ] - let mut transcript = E::TE::new(b"BatchedRelaxedR1CSSNARK"); - transcript.absorb(b"vk", &pk.vk_digest); - if num_instances > 1 { - let num_instances_field = E::Scalar::from(num_instances as u64); - transcript.absorb(b"n", &num_instances_field); - } - transcript.absorb(b"U", &U); - - // Append public inputs to Wᵢ: Zᵢ = [Wᵢ, uᵢ, Xᵢ] - let polys_Z = 
zip_with!(par_iter, (W, U, Nis), |W, U, Ni| { - // poly_Z will be resized later, so we preallocate the correct capacity - let mut poly_Z = Vec::with_capacity(*Ni); - poly_Z.extend(W.W.iter().chain([&U.u]).chain(U.X.iter())); - poly_Z - }) - .collect::>>(); - - // Move polys_W and polys_E, as well as U.u out of U - let (comms_W_E, us): (Vec<_>, Vec<_>) = - U.iter().map(|U| ([U.comm_W, U.comm_E], U.u)).unzip(); - let (polys_W, polys_E): (Vec<_>, Vec<_>) = W.into_iter().map(|w| (w.W, w.E)).unzip(); - - // Compute [Az, Bz, Cz] - let mut polys_Az_Bz_Cz = zip_with!(par_iter, (polys_Z, S), |z, s| { - let (Az, Bz, Cz) = s.multiply_vec(z)?; - Ok([Az, Bz, Cz]) - }) - .collect::, NovaError>>()?; - - // Commit to [Az, Bz, Cz] and add to transcript - let comms_Az_Bz_Cz = polys_Az_Bz_Cz - .par_iter() - .map(|[Az, Bz, Cz]| { - let (comm_Az, (comm_Bz, comm_Cz)) = rayon::join( - || E::CE::commit(ck, Az), - || rayon::join(|| E::CE::commit(ck, Bz), || E::CE::commit(ck, Cz)), - ); - [comm_Az, comm_Bz, comm_Cz] - }) - .collect::>(); - comms_Az_Bz_Cz - .iter() - .for_each(|comms| transcript.absorb(b"c", &comms.as_slice())); - - // Compute eq(tau) for each instance in log2(Ni) variables - let tau = transcript.squeeze(b"t")?; - let all_taus = PowPolynomial::squares(&tau, N_max.log_2()); - - let (polys_tau, coords_tau): (Vec<_>, Vec<_>) = Nis - .par_iter() - .map(|&N_i| { - let log_Ni = N_i.log_2(); - let eqp: EqPolynomial<_> = all_taus[..log_Ni].iter().cloned().collect(); - let evals = eqp.evals(); - let coords = eqp.r; - (evals, coords) - }) - .unzip(); - - // Pad [Az, Bz, Cz] to Ni - polys_Az_Bz_Cz - .par_iter_mut() - .zip_eq(Nis.par_iter()) - .for_each(|(az_bz_cz, &Ni)| { - az_bz_cz - .par_iter_mut() - .for_each(|mz| mz.resize(Ni, E::Scalar::ZERO)) - }); - - // Evaluate and commit to [Az(tau), Bz(tau), Cz(tau)] - let evals_Az_Bz_Cz_at_tau = zip_with!( - par_iter, - (polys_Az_Bz_Cz, coords_tau), - |ABCs, tau_coords| { - let [Az, Bz, Cz] = ABCs; - let (eval_Az, (eval_Bz, eval_Cz)) = 
rayon::join( - || MultilinearPolynomial::evaluate_with(Az, tau_coords), - || { - rayon::join( - || MultilinearPolynomial::evaluate_with(Bz, tau_coords), - || MultilinearPolynomial::evaluate_with(Cz, tau_coords), - ) - }, - ); - [eval_Az, eval_Bz, eval_Cz] - } - ) - .collect::>(); - - // absorb the claimed evaluations into the transcript - for evals in evals_Az_Bz_Cz_at_tau.iter() { - transcript.absorb(b"e", &evals.as_slice()); - } - - // Pad Zᵢ, E to Nᵢ - let polys_Z = polys_Z - .into_par_iter() - .zip_eq(Nis.par_iter()) - .map(|(mut poly_Z, &Ni)| { - poly_Z.resize(Ni, E::Scalar::ZERO); - poly_Z - }) - .collect::>(); - - // Pad both W,E to have the same size. This is inefficient for W since the - // second half is empty, but it makes it easier to handle the batching - // at the end. - let polys_E = polys_E - .into_par_iter() - .zip_eq(Nis.par_iter()) - .map(|(mut poly_E, &Ni)| { - poly_E.resize(Ni, E::Scalar::ZERO); - poly_E - }) - .collect::>(); - - let polys_W = polys_W - .into_par_iter() - .zip_eq(Nis.par_iter()) - .map(|(mut poly_W, &Ni)| { - poly_W.resize(Ni, E::Scalar::ZERO); - poly_W - }) - .collect::>(); - - // (2) send commitments to the following two oracles - // L_row(i) = eq(tau, row(i)) for all i in [0..Nᵢ] - // L_col(i) = z(col(i)) for all i in [0..Nᵢ] - let polys_L_row_col = zip_with!( - par_iter, - (S, Nis, polys_Z, polys_tau), - |S, Ni, poly_Z, poly_tau| { - let mut L_row = vec![poly_tau[0]; *Ni]; // we place mem_row[0] since resized row is appended with 0s - let mut L_col = vec![poly_Z[Ni - 1]; *Ni]; // we place mem_col[Ni-1] since resized col is appended with Ni-1 - - for (i, (val_r, val_c)) in - S.A.iter() - .chain(S.B.iter()) - .chain(S.C.iter()) - .map(|(r, c, _)| (poly_tau[r], poly_Z[c])) - .enumerate() - { - L_row[i] = val_r; - L_col[i] = val_c; - } - - [L_row, L_col] - } - ) - .collect::>(); - - let comms_L_row_col = polys_L_row_col - .par_iter() - .map(|[L_row, L_col]| { - let (comm_L_row, comm_L_col) = - rayon::join(|| E::CE::commit(ck, 
L_row), || E::CE::commit(ck, L_col)); - [comm_L_row, comm_L_col] - }) - .collect::>(); - - // absorb commitments to L_row and L_col in the transcript - for comms in comms_L_row_col.iter() { - transcript.absorb(b"e", &comms.as_slice()); - } - - // For each instance, batch Mz = Az + c*Bz + c^2*Cz - let c = transcript.squeeze(b"c")?; - - let polys_Mz: Vec<_> = polys_Az_Bz_Cz - .par_iter() - .map(|polys_Az_Bz_Cz| { - let poly_vec: Vec<&Vec<_>> = polys_Az_Bz_Cz.iter().collect(); - let w = PolyEvalWitness::::batch(&poly_vec[..], &c); - w.p - }) - .collect(); - - let evals_Mz: Vec<_> = zip_with!( - iter, - (comms_Az_Bz_Cz, evals_Az_Bz_Cz_at_tau), - |comm_Az_Bz_Cz, evals_Az_Bz_Cz_at_tau| { - let u = PolyEvalInstance::::batch( - comm_Az_Bz_Cz.as_slice(), - vec![], // ignored by the function - evals_Az_Bz_Cz_at_tau.as_slice(), - &c, - ); - u.e - } - ) - .collect(); - - // we now need to prove three claims for each instance - // (outer) - // 0 = \sum_x poly_tau(x) * (poly_Az(x) * poly_Bz(x) - poly_uCz_E(x)) - // eval_Az_at_tau + c * eval_Bz_at_tau + c^2 * eval_Cz_at_tau = - // (Az+c*Bz+c^2*Cz)(tau) (inner) - // eval_Az_at_tau + c * eval_Bz_at_tau + c^2 * eval_Cz_at_tau = \sum_y - // L_row(y) * (val_A(y) + c * val_B(y) + c^2 * val_C(y)) * L_col(y) - // (mem) - // L_row(i) = eq(tau, row(i)) - // L_col(i) = z(col(i)) - let outer_sc_inst = zip_with!( - ( - polys_Az_Bz_Cz.par_iter(), - polys_E.par_iter(), - polys_Mz.into_par_iter(), - polys_tau.par_iter(), - evals_Mz.par_iter(), - us.par_iter() - ), - |poly_ABC, poly_E, poly_Mz, poly_tau, eval_Mz, u| { - let [poly_Az, poly_Bz, poly_Cz] = poly_ABC; - let poly_uCz_E = - zip_with!(par_iter, (poly_Cz, poly_E), |cz, e| *u * cz + e).collect(); - OuterSumcheckInstance::new( - poly_tau.clone(), - poly_Az.clone(), - poly_Bz.clone(), - poly_uCz_E, - poly_Mz, // Mz = Az + c * Bz + c^2 * Cz - eval_Mz, // eval_Az_at_tau + c * eval_Az_at_tau + c^2 * eval_Cz_at_tau - ) - } - ) - .collect::>(); - - let inner_sc_inst = zip_with!( - par_iter, - 
(pk.S_repr, evals_Mz, polys_L_row_col), - |s_repr, eval_Mz, poly_L| { - let [poly_L_row, poly_L_col] = poly_L; - let c_square = c.square(); - let val = zip_with!( - par_iter, - (s_repr.val_A, s_repr.val_B, s_repr.val_C), - |v_a, v_b, v_c| *v_a + c * *v_b + c_square * *v_c - ) - .collect::>(); - - InnerSumcheckInstance::new( - *eval_Mz, - MultilinearPolynomial::new(poly_L_row.clone()), - MultilinearPolynomial::new(poly_L_col.clone()), - MultilinearPolynomial::new(val), - ) - } - ) - .collect::>(); - - // a third sum-check instance to prove the read-only memory claim - // we now need to prove that L_row and L_col are well-formed - let (mem_sc_inst, comms_mem_oracles, polys_mem_oracles) = { - let gamma = transcript.squeeze(b"g")?; - let r = transcript.squeeze(b"r")?; - - // We start by computing oracles and auxiliary polynomials to help prove the - // claim oracles correspond to [t_plus_r_inv_row, w_plus_r_inv_row, - // t_plus_r_inv_col, w_plus_r_inv_col] - let (comms_mem_oracles, polys_mem_oracles, mem_aux) = pk - .S_repr - .iter() - .zip_eq(polys_tau.iter()) - .zip_eq(polys_Z.iter()) - .zip_eq(polys_L_row_col.iter()) - .try_fold( - (Vec::new(), Vec::new(), Vec::new()), - |(mut comms, mut polys, mut aux), - (((s_repr, poly_tau), poly_Z), [L_row, L_col])| { - let (comm, poly, a) = MemorySumcheckInstance::::compute_oracles( - ck, - &r, - &gamma, - poly_tau, - &s_repr.row, - L_row, - &s_repr.ts_row, - poly_Z, - &s_repr.col, - L_col, - &s_repr.ts_col, - )?; - - comms.push(comm); - polys.push(poly); - aux.push(a); - - Ok::<_, NovaError>((comms, polys, aux)) - }, - )?; - - // Commit to oracles - for comms in comms_mem_oracles.iter() { - transcript.absorb(b"l", &comms.as_slice()); - } - - // Sample new random variable for eq polynomial - let rho = transcript.squeeze(b"r")?; - let all_rhos = PowPolynomial::squares(&rho, N_max.log_2()); - - let instances = zip_with!( - ( - pk.S_repr.par_iter(), - Nis.par_iter(), - polys_mem_oracles.par_iter(), - mem_aux.into_par_iter() - ), - 
|s_repr, Ni, polys_mem_oracles, polys_aux| { - MemorySumcheckInstance::::new( - polys_mem_oracles.clone(), - polys_aux, - PowPolynomial::evals_with_powers(&all_rhos, Ni.log_2()), - s_repr.ts_row.clone(), - s_repr.ts_col.clone(), - ) - } - ) - .collect::>(); - (instances, comms_mem_oracles, polys_mem_oracles) - }; - - let witness_sc_inst = zip_with!(par_iter, (polys_W, S), |poly_W, S| { - WitnessBoundSumcheck::new(tau, poly_W.clone(), S.num_vars) - }) - .collect::>(); - - // Run batched Sumcheck for the 3 claims for all instances. - // Note that the polynomials for claims relating to instance i have size Ni. - let (sc, rand_sc, claims_outer, claims_inner, claims_mem, claims_witness) = - Self::prove_helper( - num_rounds_sc, - mem_sc_inst, - outer_sc_inst, - inner_sc_inst, - witness_sc_inst, - &mut transcript, - )?; - - let (evals_Az_Bz_Cz_W_E, evals_L_row_col, evals_mem_oracle, evals_mem_preprocessed) = { - let evals_Az_Bz = claims_outer - .into_iter() - .map(|claims| [claims[0][0], claims[0][1]]) - .collect::>(); - - let evals_L_row_col = claims_inner - .into_iter() - .map(|claims| { - // [L_row, L_col] - [claims[0][0], claims[0][1]] - }) - .collect::>(); - - let (evals_mem_oracle, evals_mem_ts): (Vec<_>, Vec<_>) = claims_mem - .into_iter() - .map(|claims| { - ( - // [t_plus_r_inv_row, w_plus_r_inv_row, t_plus_r_inv_col, w_plus_r_inv_col] - [claims[0][0], claims[0][1], claims[1][0], claims[1][1]], - // [ts_row, ts_col] - [claims[0][2], claims[1][2]], - ) - }) - .unzip(); - - let evals_W = claims_witness - .into_iter() - .map(|claims| claims[0][0]) - .collect::>(); - - let (evals_Cz_E, evals_mem_val_row_col): (Vec<_>, Vec<_>) = zip_with!( - iter, - (polys_Az_Bz_Cz, polys_E, pk.S_repr), - |ABCzs, poly_E, s_repr| { - let [_, _, Cz] = ABCzs; - let log_Ni = s_repr.N.log_2(); - let (_, rand_sc) = rand_sc.split_at(num_rounds_sc - log_Ni); - let rand_sc_evals = EqPolynomial::evals_from_points(rand_sc); - let e = [ - Cz, - poly_E, - &s_repr.val_A, - &s_repr.val_B, - 
&s_repr.val_C, - &s_repr.row, - &s_repr.col, - ] - .into_iter() - .map(|p| { - // Manually compute evaluation to avoid recomputing rand_sc_evals - zip_with!(par_iter, (p, rand_sc_evals), |p, eq| *p * eq).sum() - }) - .collect::>(); - ([e[0], e[1]], [e[2], e[3], e[4], e[5], e[6]]) - } - ) - .unzip(); - - let evals_Az_Bz_Cz_W_E = zip_with!( - (evals_Az_Bz.into_iter(), evals_Cz_E.into_iter(), evals_W), - |Az_Bz, Cz_E, W| { - let [Az, Bz] = Az_Bz; - let [Cz, E] = Cz_E; - [Az, Bz, Cz, W, E] - } - ) - .collect::>(); - - // [val_A, val_B, val_C, row, col, ts_row, ts_col] - let evals_mem_preprocessed = zip_with!( - (evals_mem_val_row_col.into_iter(), evals_mem_ts), - |eval_mem_val_row_col, eval_mem_ts| { - let [val_A, val_B, val_C, row, col] = eval_mem_val_row_col; - let [ts_row, ts_col] = eval_mem_ts; - [val_A, val_B, val_C, row, col, ts_row, ts_col] - } - ) - .collect::>(); - ( - evals_Az_Bz_Cz_W_E, - evals_L_row_col, - evals_mem_oracle, - evals_mem_preprocessed, - ) - }; - - let evals_vec = zip_with!( - iter, - ( - evals_Az_Bz_Cz_W_E, - evals_L_row_col, - evals_mem_oracle, - evals_mem_preprocessed - ), - |Az_Bz_Cz_W_E, L_row_col, mem_oracles, mem_preprocessed| { - chain![Az_Bz_Cz_W_E, L_row_col, mem_oracles, mem_preprocessed] - .cloned() - .collect::>() - } - ) - .collect::>(); - - let comms_vec = zip_with!( - iter, - ( - comms_Az_Bz_Cz, - comms_W_E, - comms_L_row_col, - comms_mem_oracles, - pk.S_comm - ), - |Az_Bz_Cz, comms_W_E, L_row_col, mem_oracles, S_comm| { - chain![ - Az_Bz_Cz, - comms_W_E, - L_row_col, - mem_oracles, - [ - &S_comm.comm_val_A, - &S_comm.comm_val_B, - &S_comm.comm_val_C, - &S_comm.comm_row, - &S_comm.comm_col, - &S_comm.comm_ts_row, - &S_comm.comm_ts_col, - ] - ] - } - ) - .flatten() - .cloned() - .collect::>(); - - let w_vec = zip_with!( - ( - polys_Az_Bz_Cz.into_iter(), - polys_W.into_iter(), - polys_E.into_iter(), - polys_L_row_col.into_iter(), - polys_mem_oracles.into_iter(), - pk.S_repr.iter() - ), - |Az_Bz_Cz, W, E, L_row_col, mem_oracles, 
S_repr| { - chain![ - Az_Bz_Cz, - [W, E], - L_row_col, - mem_oracles, - [ - S_repr.val_A.clone(), - S_repr.val_B.clone(), - S_repr.val_C.clone(), - S_repr.row.clone(), - S_repr.col.clone(), - S_repr.ts_row.clone(), - S_repr.ts_col.clone(), - ] - ] - } - ) - .flatten() - .map(|p| PolyEvalWitness:: { p }) - .collect::>(); - - for evals in evals_vec.iter() { - transcript.absorb(b"e", &evals.as_slice()); // comm_vec is already - // in the transcript - } - let evals_vec = evals_vec.into_iter().flatten().collect::>(); - - let c = transcript.squeeze(b"c")?; - - // Compute number of variables for each polynomial - let num_vars_u = w_vec.iter().map(|w| w.p.len().log_2()).collect::>(); - let u_batch = - PolyEvalInstance::::batch_diff_size(&comms_vec, &evals_vec, &num_vars_u, rand_sc, c); - let w_batch = - PolyEvalWitness::::batch_diff_size(&w_vec.iter().by_ref().collect::>(), c); - - let eval_arg = EE::prove( - ck, - &pk.pk_ee, - &mut transcript, - &u_batch.c, - &w_batch.p, - &u_batch.x, - &u_batch.e, - )?; - - let comms_Az_Bz_Cz = comms_Az_Bz_Cz - .into_iter() - .map(|comms| comms.map(|comm| comm.compress())) - .collect(); - let comms_L_row_col = comms_L_row_col - .into_iter() - .map(|comms| comms.map(|comm| comm.compress())) - .collect(); - let comms_mem_oracles = comms_mem_oracles - .into_iter() - .map(|comms| comms.map(|comm| comm.compress())) - .collect(); - - Ok(Self { - comms_Az_Bz_Cz, - comms_L_row_col, - comms_mem_oracles, - evals_Az_Bz_Cz_at_tau, - sc, - evals_Az_Bz_Cz_W_E, - evals_L_row_col, - evals_mem_oracle, - evals_mem_preprocessed, - eval_arg, - }) - } - - fn verify( - &self, - vk: &Self::VerifierKey, - U: &[RelaxedR1CSInstance], - ) -> Result<(), NovaError> { - let num_instances = U.len(); - let num_claims_per_instance = 10; - - // number of rounds of sum-check - let num_rounds = vk.S_comm.iter().map(|s| s.N.log_2()).collect::>(); - let num_rounds_max = *num_rounds.iter().max().unwrap(); - - let mut transcript = E::TE::new(b"BatchedRelaxedR1CSSNARK"); - - 
transcript.absorb(b"vk", &vk.digest()); - if num_instances > 1 { - let num_instances_field = E::Scalar::from(num_instances as u64); - transcript.absorb(b"n", &num_instances_field); - } - transcript.absorb(b"U", &U); - - // Decompress commitments - let comms_Az_Bz_Cz = self - .comms_Az_Bz_Cz - .iter() - .map(|comms| { - comms - .iter() - .map(Commitment::::decompress) - .collect::, _>>() - }) - .collect::, _>>()?; - - let comms_L_row_col = self - .comms_L_row_col - .iter() - .map(|comms| { - comms - .iter() - .map(Commitment::::decompress) - .collect::, _>>() - }) - .collect::, _>>()?; - - let comms_mem_oracles = self - .comms_mem_oracles - .iter() - .map(|comms| { - comms - .iter() - .map(Commitment::::decompress) - .collect::, _>>() - }) - .collect::, _>>()?; - - // Add commitments [Az, Bz, Cz] to the transcript - comms_Az_Bz_Cz - .iter() - .for_each(|comms| transcript.absorb(b"c", &comms.as_slice())); - - let tau = transcript.squeeze(b"t")?; - let tau_coords = PowPolynomial::new(&tau, num_rounds_max).coordinates(); - - // absorb the claimed evaluations into the transcript - self.evals_Az_Bz_Cz_at_tau.iter().for_each(|evals| { - transcript.absorb(b"e", &evals.as_slice()); - }); - - // absorb commitments to L_row and L_col in the transcript - for comms in comms_L_row_col.iter() { - transcript.absorb(b"e", &comms.as_slice()); - } - - // Batch at tau for each instance - let c = transcript.squeeze(b"c")?; - - // Compute eval_Mz = eval_Az_at_tau + c * eval_Bz_at_tau + c^2 * eval_Cz_at_tau - let evals_Mz: Vec<_> = zip_with!( - iter, - (comms_Az_Bz_Cz, self.evals_Az_Bz_Cz_at_tau), - |comm_Az_Bz_Cz, evals_Az_Bz_Cz_at_tau| { - let u = PolyEvalInstance::::batch( - comm_Az_Bz_Cz.as_slice(), - tau_coords.clone(), - evals_Az_Bz_Cz_at_tau.as_slice(), - &c, - ); - u.e - } - ) - .collect(); - - let gamma = transcript.squeeze(b"g")?; - let r = transcript.squeeze(b"r")?; - - for comms in comms_mem_oracles.iter() { - transcript.absorb(b"l", &comms.as_slice()); - } - - let rho = 
transcript.squeeze(b"r")?; - - let s = transcript.squeeze(b"r")?; - let s_powers = powers(&s, num_instances * num_claims_per_instance); - - let (claim_sc_final, rand_sc) = { - // Gather all claims into a single vector - let claims = evals_Mz - .iter() - .flat_map(|&eval_Mz| { - let mut claims = vec![E::Scalar::ZERO; num_claims_per_instance]; - claims[7] = eval_Mz; - claims[8] = eval_Mz; - claims.into_iter() - }) - .collect::>(); - - // Number of rounds for each claim - let num_rounds_by_claim = num_rounds - .iter() - .flat_map(|num_rounds_i| vec![*num_rounds_i; num_claims_per_instance].into_iter()) - .collect::>(); - - self.sc - .verify_batch(&claims, &num_rounds_by_claim, &s_powers, 3, &mut transcript)? - }; - - // Truncated sumcheck randomness for each instance - let rand_sc_i = num_rounds - .iter() - .map(|num_rounds| rand_sc[(num_rounds_max - num_rounds)..].to_vec()) - .collect::>(); - - let claim_sc_final_expected = zip_with!( - ( - vk.num_vars.iter(), - rand_sc_i.iter(), - U.iter(), - self.evals_Az_Bz_Cz_W_E.iter().cloned(), - self.evals_L_row_col.iter().cloned(), - self.evals_mem_oracle.iter().cloned(), - self.evals_mem_preprocessed.iter().cloned() - ), - |num_vars, - rand_sc, - U, - evals_Az_Bz_Cz_W_E, - evals_L_row_col, - eval_mem_oracle, - eval_mem_preprocessed| { - let [Az, Bz, Cz, W, E] = evals_Az_Bz_Cz_W_E; - let [L_row, L_col] = evals_L_row_col; - let [t_plus_r_inv_row, w_plus_r_inv_row, t_plus_r_inv_col, w_plus_r_inv_col] = - eval_mem_oracle; - let [val_A, val_B, val_C, row, col, ts_row, ts_col] = eval_mem_preprocessed; - - let num_rounds_i = rand_sc.len(); - let num_vars_log = num_vars.log_2(); - - let eq_rho = PowPolynomial::new(&rho, num_rounds_i).evaluate(rand_sc); - - let (eq_tau, eq_masked_tau) = { - let eq_tau: EqPolynomial<_> = PowPolynomial::new(&tau, num_rounds_i).into(); - - let eq_tau_at_rand = eq_tau.evaluate(rand_sc); - let eq_masked_tau = - MaskedEqPolynomial::new(&eq_tau, num_vars_log).evaluate(rand_sc); - - (eq_tau_at_rand, 
eq_masked_tau) - }; - - // Evaluate identity polynomial - let id = IdentityPolynomial::new(num_rounds_i).evaluate(rand_sc); - - let Z = { - // rand_sc was padded, so we now remove the padding - let (factor, rand_sc_unpad) = { - let l = num_rounds_i - (num_vars_log + 1); - - let (rand_sc_lo, rand_sc_hi) = rand_sc.split_at(l); - - let factor = rand_sc_lo - .iter() - .fold(E::Scalar::ONE, |acc, r_p| acc * (E::Scalar::ONE - r_p)); - - (factor, rand_sc_hi) - }; - - let X = { - // constant term - let poly_X = std::iter::once(U.u).chain(U.X.iter().cloned()).collect(); - SparsePolynomial::new(num_vars_log, poly_X).evaluate(&rand_sc_unpad[1..]) - }; - - // W was evaluated as if it was padded to logNi variables, - // so we don't multiply it by (1-rand_sc_unpad[0]) - W + factor * rand_sc_unpad[0] * X - }; - - let t_plus_r_row = { - let addr_row = id; - let val_row = eq_tau; - let t = addr_row + gamma * val_row; - t + r - }; - - let w_plus_r_row = { - let addr_row = row; - let val_row = L_row; - let w = addr_row + gamma * val_row; - w + r - }; - - let t_plus_r_col = { - let addr_col = id; - let val_col = Z; - let t = addr_col + gamma * val_col; - t + r - }; - - let w_plus_r_col = { - let addr_col = col; - let val_col = L_col; - let w = addr_col + gamma * val_col; - w + r - }; - - let claims_mem = [ - t_plus_r_inv_row - w_plus_r_inv_row, - t_plus_r_inv_col - w_plus_r_inv_col, - eq_rho * (t_plus_r_inv_row * t_plus_r_row - ts_row), - eq_rho * (w_plus_r_inv_row * w_plus_r_row - E::Scalar::ONE), - eq_rho * (t_plus_r_inv_col * t_plus_r_col - ts_col), - eq_rho * (w_plus_r_inv_col * w_plus_r_col - E::Scalar::ONE), - ]; - - let claims_outer = [ - eq_tau * (Az * Bz - U.u * Cz - E), - eq_tau * (Az + c * Bz + c * c * Cz), - ]; - let claims_inner = [L_row * L_col * (val_A + c * val_B + c * c * val_C)]; - - let claims_witness = [eq_masked_tau * W]; - - chain![claims_mem, claims_outer, claims_inner, claims_witness] - } - ) - .flatten() - .zip_eq(s_powers) - .fold(E::Scalar::ZERO, |acc, 
(claim, s)| acc + s * claim); - - if claim_sc_final_expected != claim_sc_final { - return Err(NovaError::InvalidSumcheckProof); - } - - let evals_vec = zip_with!( - iter, - ( - self.evals_Az_Bz_Cz_W_E, - self.evals_L_row_col, - self.evals_mem_oracle, - self.evals_mem_preprocessed - ), - |Az_Bz_Cz_W_E, L_row_col, mem_oracles, mem_preprocessed| { - chain![Az_Bz_Cz_W_E, L_row_col, mem_oracles, mem_preprocessed] - .cloned() - .collect::>() - } - ) - .collect::>(); - - // Add all Sumcheck evaluations to the transcript - for evals in evals_vec.iter() { - transcript.absorb(b"e", &evals.as_slice()); // comm_vec is already - // in the transcript - } - - let c = transcript.squeeze(b"c")?; - - // Compute batched polynomial evaluation instance at rand_sc - let u = { - let num_evals = evals_vec[0].len(); - - let evals_vec = evals_vec.into_iter().flatten().collect::>(); - - let num_vars = num_rounds - .iter() - .flat_map(|num_rounds| vec![*num_rounds; num_evals].into_iter()) - .collect::>(); - - let comms_vec = zip_with!( - ( - comms_Az_Bz_Cz.into_iter(), - U.iter(), - comms_L_row_col.into_iter(), - comms_mem_oracles.into_iter(), - vk.S_comm.iter() - ), - |Az_Bz_Cz, U, L_row_col, mem_oracles, S_comm| { - chain![ - Az_Bz_Cz, - [U.comm_W, U.comm_E], - L_row_col, - mem_oracles, - [ - S_comm.comm_val_A, - S_comm.comm_val_B, - S_comm.comm_val_C, - S_comm.comm_row, - S_comm.comm_col, - S_comm.comm_ts_row, - S_comm.comm_ts_col, - ] - ] - } - ) - .flatten() - .collect::>(); - - PolyEvalInstance::::batch_diff_size(&comms_vec, &evals_vec, &num_vars, rand_sc, c) - }; - - // verify - EE::verify(&vk.vk_ee, &mut transcript, &u.c, &u.x, &u.e, &self.eval_arg)?; - - Ok(()) - } -} - -impl> BatchedRelaxedR1CSSNARK { - /// Runs the batched Sumcheck protocol for the claims of multiple instance - /// of possibly different sizes. - /// - /// # Details - /// - /// In order to avoid padding all polynomials to the same maximum size, we - /// adopt the following strategy. 
- /// - /// Let n be the number of variables for the largest instance, - /// and let m be the number of variables for a shorter one. - /// Let P(X_{0},...,X_{m-1}) be one of the MLEs of the short instance, which - /// has been committed to by taking the MSM of its evaluations with the - /// first 2^m basis points of the commitment key. - /// - /// This Sumcheck prover will interpret it as the polynomial - /// P'(X_{0},...,X_{n-1}) = P(X_{n-m},...,X_{n-1}), - /// whose MLE evaluations over {0,1}^m is equal to 2^{n-m} repetitions of - /// the evaluations of P. - /// - /// In order to account for these "imagined" repetitions, the initial claims - /// for this short instances are scaled by 2^{n-m}. - /// - /// For the first n-m rounds, the univariate polynomials relating to this - /// shorter claim will be constant, and equal to the initial claims, - /// scaled by 2^{n-m-i}, where i is the round number. By definition, P' - /// does not depend on X_i, so binding P' to r_i has no effect on the - /// evaluations. The Sumcheck prover will then interpret the polynomial - /// P' as having half as many repetitions in the next round. - /// - /// When we get to round n-m, the Sumcheck proceeds as usual since the - /// polynomials are the expected size for the round. - /// - /// Note that at the end of the protocol, the prover returns the evaluation - /// u' = P'(r_{0},...,r_{n-1}) = P(r_{n-m},...,r_{n-1}) - /// However, the polynomial we actually committed to over {0,1}^n is - /// P''(X_{0},...,X_{n-1}) = L_0(X_{0},...,X_{n-m-1}) * - /// P(X_{n-m},...,X_{n-1}) The SNARK prover/verifier will need to - /// rescale the evaluation by the first Lagrange polynomial - /// u'' = L_0(r_{0},...,r_{n-m-1}) * u' - /// in order batch all evaluations with a single PCS call. 
- fn prove_helper( - num_rounds: usize, - mut mem: Vec, - mut outer: Vec, - mut inner: Vec, - mut witness: Vec, - transcript: &mut E::TE, - ) -> Result< - ( - SumcheckProof, - Vec, - Vec>>, - Vec>>, - Vec>>, - Vec>>, - ), - NovaError, - > - where - T1: SumcheckEngine, - T2: SumcheckEngine, - T3: SumcheckEngine, - T4: SumcheckEngine, - { - // sanity checks - let num_instances = mem.len(); - assert_eq!(outer.len(), num_instances); - assert_eq!(inner.len(), num_instances); - assert_eq!(witness.len(), num_instances); - - for inst in mem.iter_mut() { - assert!(inst.size().is_power_of_two()); - } - for inst in outer.iter() { - assert!(inst.size().is_power_of_two()); - } - for inst in inner.iter() { - assert!(inst.size().is_power_of_two()); - } - for inst in witness.iter() { - assert!(inst.size().is_power_of_two()); - } - - let degree = mem[0].degree(); - assert!(mem.iter().all(|inst| inst.degree() == degree)); - assert!(outer.iter().all(|inst| inst.degree() == degree)); - assert!(inner.iter().all(|inst| inst.degree() == degree)); - assert!(witness.iter().all(|inst| inst.degree() == degree)); - - // Collect all claims from the instances. If the instances is defined over `m` - // variables, which is less that the total number of rounds `n`, - // the individual claims σ are scaled by 2^{n-m}. - let claims = zip_with!( - iter, - (mem, outer, inner, witness), - |mem, outer, inner, witness| { - Self::scaled_claims(mem, num_rounds) - .into_iter() - .chain(Self::scaled_claims(outer, num_rounds)) - .chain(Self::scaled_claims(inner, num_rounds)) - .chain(Self::scaled_claims(witness, num_rounds)) - } - ) - .flatten() - .collect::>(); - - // Sample a challenge for the random linear combination of all scaled claims - let s = transcript.squeeze(b"r")?; - let coeffs = powers(&s, claims.len()); - - // At the start of each round, the running claim is equal to the random linear - // combination of the Sumcheck claims, evaluated over the bound - // polynomials. 
Initially, it is equal to the random linear combination - // of the scaled input claims. - let mut running_claim = zip_with!(iter, (claims, coeffs), |c_1, c_2| *c_1 * c_2).sum(); - - // Keep track of the verifier challenges r, and the univariate polynomials sent - // by the prover in each round - let mut r: Vec = Vec::new(); - let mut cubic_polys: Vec> = Vec::new(); - - for i in 0..num_rounds { - // At the start of round i, there input polynomials are defined over at most n-i - // variables. - let remaining_variables = num_rounds - i; - - // For each claim j, compute the evaluations of its univariate polynomial - // S_j(X_i) at X = 0, 2, 3. The polynomial is such that - // S_{j-1}(r_{j-1}) = S_j(0) + S_j(1). If the number of variable m - // of the claim is m < n-i, then the polynomial is constants and - // equal to the initial claim σ_j scaled by 2^{n-m-i-1}. - let evals = zip_with!( - par_iter, - (mem, outer, inner, witness), - |mem, outer, inner, witness| { - let ((evals_mem, evals_outer), (evals_inner, evals_witness)) = rayon::join( - || { - rayon::join( - || Self::get_evals(mem, remaining_variables), - || Self::get_evals(outer, remaining_variables), - ) - }, - || { - rayon::join( - || Self::get_evals(inner, remaining_variables), - || Self::get_evals(witness, remaining_variables), - ) - }, - ); - evals_mem - .into_par_iter() - .chain(evals_outer.into_par_iter()) - .chain(evals_inner.into_par_iter()) - .chain(evals_witness.into_par_iter()) - } - ) - .flatten() - .collect::>(); - - assert_eq!(evals.len(), claims.len()); - - // Random linear combination of the univariate evaluations at X_i = 0, 2, 3 - let evals_combined_0 = (0..evals.len()).map(|i| evals[i][0] * coeffs[i]).sum(); - let evals_combined_2 = (0..evals.len()).map(|i| evals[i][1] * coeffs[i]).sum(); - let evals_combined_3 = (0..evals.len()).map(|i| evals[i][2] * coeffs[i]).sum(); - - let evals = vec![ - evals_combined_0, - running_claim - evals_combined_0, - evals_combined_2, - evals_combined_3, - ]; - 
// Coefficient representation of S(X_i) - let poly = UniPoly::from_evals(&evals); - - // append the prover's message to the transcript - transcript.absorb(b"p", &poly); - - // derive the verifier's challenge for the next round - let r_i = transcript.squeeze(b"c")?; - r.push(r_i); - - // Bind the variable X_i of polynomials across all claims to r_i. - // If the claim is defined over m variables and m < n-i, then - // binding has no effect on the polynomial. - zip_with_for_each!( - par_iter_mut, - (mem, outer, inner, witness), - |mem, outer, inner, witness| { - rayon::join( - || { - rayon::join( - || Self::bind(mem, remaining_variables, &r_i), - || Self::bind(outer, remaining_variables, &r_i), - ) - }, - || { - rayon::join( - || Self::bind(inner, remaining_variables, &r_i), - || Self::bind(witness, remaining_variables, &r_i), - ) - }, - ); - } - ); - - running_claim = poly.evaluate(&r_i); - cubic_polys.push(poly.compress()); - } - - // Collect evaluations at (r_{n-m}, ..., r_{n-1}) of polynomials over all - // claims, where m is the initial number of variables the individual - // claims are defined over. - let claims_outer = outer.into_iter().map(|inst| inst.final_claims()).collect(); - let claims_inner = inner.into_iter().map(|inst| inst.final_claims()).collect(); - let claims_mem = mem.into_iter().map(|inst| inst.final_claims()).collect(); - let claims_witness = witness - .into_iter() - .map(|inst| inst.final_claims()) - .collect(); - - Ok(( - SumcheckProof::new(cubic_polys), - r, - claims_outer, - claims_inner, - claims_mem, - claims_witness, - )) - } - - /// In round i, computes the evaluations at X_i = 0, 2, 3 of the univariate - /// polynomials S(X_i) for each claim in the instance. - /// Let `n` be the total number of Sumcheck rounds, and assume the instance - /// is defined over `m` variables. We define `remaining_variables` as - /// n-i. 
If m < n-i, then the polynomials in the instance are not - /// defined over X_i, so the univariate polynomial is constant and equal - /// to 2^{n-m-i-1}*σ, where σ is the initial claim. - fn get_evals>( - inst: &T, - remaining_variables: usize, - ) -> Vec> { - let num_instance_variables = inst.size().log_2(); // m - if num_instance_variables < remaining_variables { - let deg = inst.degree(); - - // The evaluations at X_i = 0, 2, 3 are all equal to the scaled claim - Self::scaled_claims(inst, remaining_variables - 1) - .into_iter() - .map(|scaled_claim| vec![scaled_claim; deg]) - .collect() - } else { - inst.evaluation_points() - } - } - - /// In round i after receiving challenge r_i, we partially evaluate all - /// polynomials in the instance at X_i = r_i. If the instance is defined - /// over m variables m which is less than n-i, then the polynomials do - /// not depend on X_i, so binding them to r_i has no effect. - fn bind>(inst: &mut T, remaining_variables: usize, r: &E::Scalar) { - let num_instance_variables = inst.size().log_2(); // m - if remaining_variables <= num_instance_variables { - inst.bound(r) - } - } - - /// Given an instance defined over m variables, the sum over n = - /// `remaining_variables` is equal to the initial claim scaled by - /// 2^{n-m}, when m ≤ n. 
- fn scaled_claims>(inst: &T, remaining_variables: usize) -> Vec { - let num_instance_variables = inst.size().log_2(); // m - let num_repetitions = 1 << (remaining_variables - num_instance_variables); - let scaling = E::Scalar::from(num_repetitions as u64); - inst.initial_claims() - .iter() - .map(|claim| scaling * claim) - .collect() - } -} - -impl> RelaxedR1CSSNARKTrait - for BatchedRelaxedR1CSSNARK -{ - type ProverKey = ProverKey; - - type VerifierKey = VerifierKey; - - fn ck_floor() -> Box Fn(&'a R1CSShape) -> usize> { - >::ck_floor() - } - - fn initialize_pk( - ck: Arc>, - vk_digest: E::Scalar, - ) -> Result { - >::initialize_pk(ck, vk_digest) - } - - fn setup( - ck: Arc>, - S: &R1CSShape, - ) -> Result<(Self::ProverKey, Self::VerifierKey), NovaError> { - >::setup(ck, vec![S]) - } - - fn prove( - ck: &CommitmentKey, - pk: &Self::ProverKey, - S: &R1CSShape, - U: &RelaxedR1CSInstance, - W: &RelaxedR1CSWitness, - ) -> Result { - let slice_U = slice::from_ref(U); - let slice_W = slice::from_ref(W); - - >::prove(ck, pk, vec![S], slice_U, slice_W) - } - - fn verify(&self, vk: &Self::VerifierKey, U: &RelaxedR1CSInstance) -> Result<(), NovaError> { - let slice = slice::from_ref(U); - >::verify(self, vk, slice) - } -} diff --git a/src/spartan/math.rs b/src/spartan/math.rs deleted file mode 100644 index 853b72e..0000000 --- a/src/spartan/math.rs +++ /dev/null @@ -1,15 +0,0 @@ -pub trait Math { - fn log_2(self) -> usize; -} - -impl Math for usize { - fn log_2(self) -> usize { - assert_ne!(self, 0); - - if self.is_power_of_two() { - (1usize.leading_zeros() - self.leading_zeros()) as Self - } else { - (0usize.leading_zeros() - self.leading_zeros()) as Self - } - } -} diff --git a/src/spartan/polys/eq.rs b/src/spartan/polys/eq.rs deleted file mode 100644 index 7d5e0c6..0000000 --- a/src/spartan/polys/eq.rs +++ /dev/null @@ -1,124 +0,0 @@ -//! `EqPolynomial`: Represents multilinear extension of equality polynomials, -//! evaluated based on binary input values. 
- -use ff::PrimeField; -use rayon::prelude::{IndexedParallelIterator, IntoParallelRefMutIterator, ParallelIterator}; - -/// Represents the multilinear extension polynomial (MLE) of the equality -/// polynomial $eq(x,e)$, denoted as $\tilde{eq}(x, e)$. -/// -/// The polynomial is defined by the formula: -/// $$ -/// \tilde{eq}(x, e) = \prod_{i=1}^m(e_i * x_i + (1 - e_i) * (1 - x_i)) -/// $$ -/// -/// Each element in the vector `r` corresponds to a component $e_i$, -/// representing a bit from the binary representation of an input value $e$. -/// This polynomial evaluates to 1 if every component $x_i$ equals its -/// corresponding $e_i$, and 0 otherwise. -/// -/// For instance, for e = 6 (with a binary representation of 0b110), the vector -/// r would be [1, 1, 0]. -#[derive(Debug)] -pub struct EqPolynomial { - pub(in crate::spartan) r: Vec, -} - -impl EqPolynomial { - /// Creates a new `EqPolynomial` from a vector of Scalars `r`. - /// - /// Each Scalar in `r` corresponds to a bit from the binary representation - /// of an input value `e`. - pub const fn new(r: Vec) -> Self { - Self { r } - } - - /// Evaluates the `EqPolynomial` at a given point `rx`. - /// - /// This function computes the value of the polynomial at the point - /// specified by `rx`. It expects `rx` to have the same length as the - /// internal vector `r`. - /// - /// Panics if `rx` and `r` have different lengths. - pub fn evaluate(&self, rx: &[Scalar]) -> Scalar { - assert_eq!(self.r.len(), rx.len()); - (0..rx.len()) - .map(|i| self.r[i] * rx[i] + (Scalar::ONE - self.r[i]) * (Scalar::ONE - rx[i])) - .product() - } - - /// Evaluates the `EqPolynomial` at all the `2^|r|` points in its domain. - /// - /// Returns a vector of Scalars, each corresponding to the polynomial - /// evaluation at a specific point. 
- #[must_use = "this returns an expensive vector and leaves self unchanged"] - pub fn evals(&self) -> Vec { - Self::evals_from_points(&self.r) - } - - /// Evaluates the `EqPolynomial` from the `2^|r|` points in its domain, - /// without creating an intermediate polynomial representation. - /// - /// Returns a vector of Scalars, each corresponding to the polynomial - /// evaluation at a specific point. - pub fn evals_from_points(r: &[Scalar]) -> Vec { - let ell = r.len(); - let mut evals: Vec = vec![Scalar::ZERO; (2_usize).pow(ell as u32)]; - let mut size = 1; - evals[0] = Scalar::ONE; - - for r in r.iter().rev() { - let (evals_left, evals_right) = evals.split_at_mut(size); - let (evals_right, _) = evals_right.split_at_mut(size); - - evals_left - .par_iter_mut() - .zip_eq(evals_right.par_iter_mut()) - .for_each(|(x, y)| { - *y = *x * r; - *x -= &*y; - }); - - size *= 2; - } - - evals - } -} - -impl FromIterator for EqPolynomial { - fn from_iter>(iter: I) -> Self { - let r: Vec<_> = iter.into_iter().collect(); - Self { r } - } -} - -#[cfg(test)] -mod tests { - - use super::*; - use crate::provider; - - fn test_eq_polynomial_with() { - let eq_poly = EqPolynomial::::new(vec![F::ONE, F::ZERO, F::ONE]); - let y = eq_poly.evaluate(vec![F::ONE, F::ONE, F::ONE].as_slice()); - assert_eq!(y, F::ZERO); - - let y = eq_poly.evaluate(vec![F::ONE, F::ZERO, F::ONE].as_slice()); - assert_eq!(y, F::ONE); - - let eval_list = eq_poly.evals(); - for (i, &coeff) in eval_list.iter().enumerate().take((2_usize).pow(3)) { - if i == 5 { - assert_eq!(coeff, F::ONE); - } else { - assert_eq!(coeff, F::ZERO); - } - } - } - - #[test] - fn test_eq_polynomial() { - test_eq_polynomial_with::(); - } -} diff --git a/src/spartan/polys/identity.rs b/src/spartan/polys/identity.rs deleted file mode 100644 index f2c6068..0000000 --- a/src/spartan/polys/identity.rs +++ /dev/null @@ -1,30 +0,0 @@ -use core::marker::PhantomData; - -use ff::PrimeField; - -pub struct IdentityPolynomial { - ell: usize, - _p: 
PhantomData, -} - -impl IdentityPolynomial { - pub fn new(ell: usize) -> Self { - Self { - ell, - _p: PhantomData, - } - } - - pub fn evaluate(&self, r: &[Scalar]) -> Scalar { - assert_eq!(self.ell, r.len()); - let mut power_of_two = 1_u64; - (0..self.ell) - .rev() - .map(|i| { - let result = Scalar::from(power_of_two) * r[i]; - power_of_two *= 2; - result - }) - .sum() - } -} diff --git a/src/spartan/polys/masked_eq.rs b/src/spartan/polys/masked_eq.rs deleted file mode 100644 index b882af8..0000000 --- a/src/spartan/polys/masked_eq.rs +++ /dev/null @@ -1,150 +0,0 @@ -//! `MaskedEqPolynomial`: Represents the `eq` polynomial over n variables, where -//! the first 2^m entries are 0. - -use ff::PrimeField; -use itertools::zip_eq; - -use crate::spartan::polys::eq::EqPolynomial; - -/// Represents the multilinear extension polynomial (MLE) of the equality -/// polynomial $eqₘ(x,r)$ over n variables, where the first 2^m evaluations are -/// 0. -/// -/// The polynomial is defined by the formula: -/// eqₘ(x,r) = eq(x,r) - ( ∏_{0 ≤ i < n-m} (1−rᵢ)(1−xᵢ) )⋅( ∏_{n-m ≤ i < n} -/// (1−rᵢ)(1−xᵢ) + rᵢ⋅xᵢ ) -#[derive(Debug)] -pub struct MaskedEqPolynomial<'a, Scalar> { - eq: &'a EqPolynomial, - num_masked_vars: usize, -} - -impl<'a, Scalar: PrimeField> MaskedEqPolynomial<'a, Scalar> { - /// Creates a new `MaskedEqPolynomial` from a vector of Scalars `r` of size - /// n, with the number of masked variables m = `num_masked_vars`. - pub const fn new(eq: &'a EqPolynomial, num_masked_vars: usize) -> Self { - MaskedEqPolynomial { - eq, - num_masked_vars, - } - } - - /// Evaluates the `MaskedEqPolynomial` at a given point `rx`. - /// - /// This function computes the value of the polynomial at the point - /// specified by `rx`. It expects `rx` to have the same length as the - /// internal vector `r`. - /// - /// Panics if `rx` and `r` have different lengths. 
- pub fn evaluate(&self, rx: &[Scalar]) -> Scalar { - let r = &self.eq.r; - assert_eq!(r.len(), rx.len()); - let split_idx = r.len() - self.num_masked_vars; - - let (r_lo, r_hi) = r.split_at(split_idx); - let (rx_lo, rx_hi) = rx.split_at(split_idx); - let eq_lo = zip_eq(r_lo, rx_lo) - .map(|(r, rx)| *r * rx + (Scalar::ONE - r) * (Scalar::ONE - rx)) - .product::(); - let eq_hi = zip_eq(r_hi, rx_hi) - .map(|(r, rx)| *r * rx + (Scalar::ONE - r) * (Scalar::ONE - rx)) - .product::(); - let mask_lo = zip_eq(r_lo, rx_lo) - .map(|(r, rx)| (Scalar::ONE - r) * (Scalar::ONE - rx)) - .product::(); - - (eq_lo - mask_lo) * eq_hi - } - - /// Evaluates the `MaskedEqPolynomial` at all the `2^|r|` points in its - /// domain. - /// - /// Returns a vector of Scalars, each corresponding to the polynomial - /// evaluation at a specific point. - pub fn evals(&self) -> Vec { - Self::evals_from_points(&self.eq.r, self.num_masked_vars) - } - - /// Evaluates the `MaskedEqPolynomial` from the `2^|r|` points in its - /// domain, without creating an intermediate polynomial representation. - /// - /// Returns a vector of Scalars, each corresponding to the polynomial - /// evaluation at a specific point. 
- fn evals_from_points(r: &[Scalar], num_masked_vars: usize) -> Vec { - let mut evals = EqPolynomial::evals_from_points(r); - - // replace the first 2^m evaluations with 0 - let num_masked_evals = 1 << num_masked_vars; - evals[..num_masked_evals] - .iter_mut() - .for_each(|e| *e = Scalar::ZERO); - - evals - } -} - -#[cfg(test)] -mod tests { - use rand_chacha::ChaCha20Rng; - use rand_core::{CryptoRng, RngCore, SeedableRng}; - - use super::*; - use crate::{provider, spartan::polys::eq::EqPolynomial}; - - fn test_masked_eq_polynomial_with( - num_vars: usize, - num_masked_vars: usize, - mut rng: &mut R, - ) { - let num_masked_evals = 1 << num_masked_vars; - - // random point - let r = std::iter::from_fn(|| Some(F::random(&mut rng))) - .take(num_vars) - .collect::>(); - // evaluation point - let rx = std::iter::from_fn(|| Some(F::random(&mut rng))) - .take(num_vars) - .collect::>(); - - let poly_eq = EqPolynomial::new(r); - let poly_eq_evals = poly_eq.evals(); - - let masked_eq_poly = MaskedEqPolynomial::new(&poly_eq, num_masked_vars); - let masked_eq_poly_evals = masked_eq_poly.evals(); - - // ensure the first 2^m entries are 0 - assert_eq!( - masked_eq_poly_evals[..num_masked_evals], - vec![F::ZERO; num_masked_evals] - ); - // ensure the remaining evaluations match eq(r) - assert_eq!( - masked_eq_poly_evals[num_masked_evals..], - poly_eq_evals[num_masked_evals..] 
- ); - - // compute the evaluation at rx succinctly - let masked_eq_eval = masked_eq_poly.evaluate(&rx); - - // compute the evaluation as a MLE - let rx_evals = EqPolynomial::evals_from_points(&rx); - let expected_masked_eq_eval = zip_eq(rx_evals, masked_eq_poly_evals) - .map(|(rx, r)| rx * r) - .sum(); - - assert_eq!(masked_eq_eval, expected_masked_eq_eval); - } - - #[test] - fn test_masked_eq_polynomial() { - let mut rng = ChaCha20Rng::from_seed([0u8; 32]); - let num_vars = 5; - let num_masked_vars = 2; - test_masked_eq_polynomial_with::( - num_vars, - num_masked_vars, - &mut rng, - ); - } -} diff --git a/src/spartan/polys/multilinear.rs b/src/spartan/polys/multilinear.rs deleted file mode 100644 index d7f3436..0000000 --- a/src/spartan/polys/multilinear.rs +++ /dev/null @@ -1,336 +0,0 @@ -//! Main components: -//! - `MultilinearPolynomial`: Dense representation of multilinear polynomials, -//! represented by evaluations over all possible binary inputs. -//! - `SparsePolynomial`: Efficient representation of sparse multilinear -//! polynomials, storing only non-zero evaluations. - -use std::ops::{Add, Index}; - -use ff::PrimeField; -use itertools::Itertools as _; -use rand_core::{CryptoRng, RngCore}; -use rayon::prelude::{ - IndexedParallelIterator, IntoParallelRefIterator, IntoParallelRefMutIterator, ParallelIterator, -}; -use serde::{Deserialize, Serialize}; - -use crate::spartan::{math::Math, polys::eq::EqPolynomial}; - -/// A multilinear extension of a polynomial $Z(\cdot)$, denote it as -/// $\tilde{Z}(x_1, ..., x_m)$ where the degree of each variable is at most one. -/// -/// This is the dense representation of a multilinear poynomial. -/// Let it be $\mathbb{G}(\cdot): \mathbb{F}^m \rightarrow \mathbb{F}$, it can -/// be represented uniquely by the list of evaluations of $\mathbb{G}(\cdot)$ -/// over the Boolean hypercube $\{0, 1\}^m$. -/// -/// For example, a 3 variables multilinear polynomial can be represented by -/// evaluation at points $[0, 2^3-1]$. 
-/// -/// The implementation follows -/// $$ -/// \tilde{Z}(x_1, ..., x_m) = \sum_{e\in {0,1}^m}Z(e) \cdot \prod_{i=1}^m(x_i -/// \cdot e_i + (1-x_i) \cdot (1-e_i)) $$ -/// -/// Vector $Z$ indicates $Z(e)$ where $e$ ranges from $0$ to $2^m-1$. -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] -pub struct MultilinearPolynomial { - num_vars: usize, // the number of variables in the multilinear polynomial - pub(crate) Z: Vec, /* evaluations of the polynomial in all the 2^num_vars Boolean - * inputs */ -} - -impl MultilinearPolynomial { - /// Creates a new `MultilinearPolynomial` from the given evaluations. - /// - /// # Panics - /// The number of evaluations must be a power of two. - pub fn new(Z: Vec) -> Self { - let num_vars = Z.len().log_2(); - assert_eq!(Z.len(), 1 << num_vars); - Self { num_vars, Z } - } - - /// evaluations of the polynomial in all the 2^num_vars Boolean inputs - pub fn evaluations(&self) -> &[Scalar] { - &self.Z[..] - } - - /// Returns the number of variables in the multilinear polynomial - pub const fn get_num_vars(&self) -> usize { - self.num_vars - } - - /// Returns the total number of evaluations. - pub fn len(&self) -> usize { - self.Z.len() - } - - /// Returns true if no evaluations. - pub fn is_empty(&self) -> bool { - self.Z.len() == 0 - } - - /// Returns a random polynomial - pub fn random(num_vars: usize, mut rng: &mut R) -> Self { - Self::new( - std::iter::from_fn(|| Some(Scalar::random(&mut rng))) - .take(1 << num_vars) - .collect(), - ) - } - - /// Binds the polynomial's top variable using the given scalar. - /// - /// This operation modifies the polynomial in-place. 
- pub fn bind_poly_var_top(&mut self, r: &Scalar) { - assert!(self.num_vars > 0); - - let n = self.len() / 2; - - let (left, right) = self.Z.split_at_mut(n); - - left.par_iter_mut() - .zip_eq(right.par_iter()) - .for_each(|(a, b)| { - *a += *r * (*b - *a); - }); - - self.Z.resize(n, Scalar::ZERO); - self.num_vars -= 1; - } - - /// Evaluates the polynomial at the given point. - /// Returns Z(r) in O(n) time. - /// - /// The point must have a value for each variable. - pub fn evaluate(&self, r: &[Scalar]) -> Scalar { - // r must have a value for each variable - assert_eq!(r.len(), self.get_num_vars()); - Self::evaluate_with(&self.Z, r) - } - - /// Evaluates the polynomial with the given evaluations and point. - pub fn evaluate_with(Z: &[Scalar], r: &[Scalar]) -> Scalar { - let chis = EqPolynomial::evals_from_points(r); - Self::evaluate_with_chis(Z, &chis) - } - - /// Evaluates the polynomial with the given evaluations and chi coefficients - pub fn evaluate_with_chis(Z: &[Scalar], chis: &[Scalar]) -> Scalar { - zip_with!(par_iter, (chis, Z), |a, b| *a * b).sum() - } -} - -impl Index for MultilinearPolynomial { - type Output = Scalar; - - #[inline(always)] - fn index(&self, _index: usize) -> &Scalar { - &(self.Z[_index]) - } -} - -/// Sparse multilinear polynomial, which means the $Z(\cdot)$ is zero at most -/// points. 
In our context, sparse polynomials are non-zeros over the hypercube -/// at locations that map to "small" integers We exploit this property to -/// implement a time-optimal algorithm -pub(crate) struct SparsePolynomial { - num_vars: usize, - Z: Vec, -} - -impl SparsePolynomial { - pub fn new(num_vars: usize, Z: Vec) -> Self { - Self { num_vars, Z } - } - - // a time-optimal algorithm to evaluate sparse polynomials - pub fn evaluate(&self, r: &[Scalar]) -> Scalar { - assert_eq!(self.num_vars, r.len()); - - let num_vars_z = self.Z.len().next_power_of_two().log_2(); - let chis = EqPolynomial::evals_from_points(&r[self.num_vars - 1 - num_vars_z..]); - #[allow(clippy::disallowed_methods)] - let eval_partial: Scalar = self - .Z - .iter() - .zip(chis.iter()) - .map(|(z, chi)| *z * *chi) - .sum(); - - let common = (0..self.num_vars - 1 - num_vars_z) - .map(|i| (Scalar::ONE - r[i])) - .product::(); - - common * eval_partial - } -} - -/// Adds another multilinear polynomial to `self`. -/// Assumes the two polynomials have the same number of variables. -impl Add for MultilinearPolynomial { - type Output = Result; - - fn add(self, other: Self) -> Self::Output { - if self.get_num_vars() != other.get_num_vars() { - return Err("The two polynomials must have the same number of variables"); - } - - let sum: Vec = zip_with!(into_iter, (self.Z, other.Z), |a, b| a + b).collect(); - - Ok(Self::new(sum)) - } -} - -#[cfg(test)] -mod tests { - use rand_chacha::ChaCha20Rng; - use rand_core::SeedableRng; - - use super::*; - use crate::provider::bn256_grumpkin::bn256; - - fn make_mlp(len: usize, value: F) -> MultilinearPolynomial { - MultilinearPolynomial { - num_vars: len.count_ones() as usize, - Z: vec![value; len], - } - } - - // fn test_multilinear_polynomial_with() { - // // Let the polynomial has 3 variables, p(x_1, x_2, x_3) = (x_1 + x_2) * - // x_3 // Evaluations of the polynomial at boolean cube are [0, 0, 0, 1, - // 0, 1, 0, 2]. 
- - // let TWO = F::from(2); - - // let Z = vec![ - // F::ZERO, - // F::ZERO, - // F::ZERO, - // F::ONE, - // F::ZERO, - // F::ONE, - // F::ZERO, - // TWO, - // ]; - // let m_poly = MultilinearPolynomial::::new(Z.clone()); - // assert_eq!(m_poly.get_num_vars(), 3); - - // let x = vec![F::ONE, F::ONE, F::ONE]; - // assert_eq!(m_poly.evaluate(x.as_slice()), TWO); - - // let y = MultilinearPolynomial::::evaluate_with(Z.as_slice(), - // x.as_slice()); assert_eq!(y, TWO); - // } - - // fn test_sparse_polynomial_with() { - // // Let the polynomial have 4 variables, but is non-zero at only 3 - // locations (out // of 2^4 = 16) over the hypercube - // let mut Z = vec![F::ONE, F::ONE, F::from(2)]; - // let m_poly = SparsePolynomial::::new(4, Z.clone()); - - // Z.resize(16, F::ZERO); // append with zeros to make it a dense polynomial - // let m_poly_dense = MultilinearPolynomial::new(Z); - - // // evaluation point - // let x = vec![F::from(5), F::from(8), F::from(5), F::from(3)]; - - // // check evaluations - // assert_eq!( - // m_poly.evaluate(x.as_slice()), - // m_poly_dense.evaluate(x.as_slice()) - // ); - // } - - fn test_mlp_add_with() { - let mlp1 = make_mlp(4, F::from(3)); - let mlp2 = make_mlp(4, F::from(7)); - - let mlp3 = mlp1.add(mlp2).unwrap(); - - assert_eq!(mlp3.Z, vec![F::from(10); 4]); - } - - #[test] - fn test_mlp_add() { - test_mlp_add_with::(); - } - - fn test_evaluation_with() { - let num_evals = 4; - let mut evals: Vec = Vec::with_capacity(num_evals); - for _ in 0..num_evals { - evals.push(F::from(8)); - } - let dense_poly: MultilinearPolynomial = MultilinearPolynomial::new(evals.clone()); - - // Evaluate at 3: - // (0, 0) = 1 - // (0, 1) = 1 - // (1, 0) = 1 - // (1, 1) = 1 - // g(x_0,x_1) => c_0*(1 - x_0)(1 - x_1) + c_1*(1-x_0)(x_1) + c_2*(x_0)(1-x_1) + - // c_3*(x_0)(x_1) g(3, 4) = 8*(1 - 3)(1 - 4) + 8*(1-3)(4) + 8*(3)(1-4) + - // 8*(3)(4) = 48 + -64 + -72 + 96 = 8 g(5, 10) = 8*(1 - 5)(1 - 10) + - // 8*(1 - 5)(10) + 8*(5)(1-10) + 8*(5)(10) = 96 + -16 + 
-72 + 96 = 8 - assert_eq!( - dense_poly.evaluate(vec![F::from(3), F::from(4)].as_slice()), - F::from(8) - ); - assert_eq!( - dense_poly.evaluate(vec![F::from(5), F::from(10)].as_slice()), - F::from(8) - ); - } - - #[test] - fn test_evaluation() { - test_evaluation_with::(); - } - - /// This binds the variables of a multilinear polynomial to a provided - /// sequence of values. - /// - /// Assuming `bind_poly_var_top` defines the "top" variable of the - /// polynomial, this aims to test whether variables should be provided - /// to the `evaluate` function in topmost-first (big endian) of - /// topmost-last (lower endian) order. - fn bind_sequence( - poly: &MultilinearPolynomial, - values: &[F], - ) -> MultilinearPolynomial { - // Assert that the size of the polynomial being evaluated is a power of 2 - // greater than (1 << values.len()) - assert!(poly.Z.len().is_power_of_two()); - assert!(poly.Z.len() >= 1 << values.len()); - - let mut tmp = poly.clone(); - for v in values.iter() { - tmp.bind_poly_var_top(v); - } - tmp - } - - fn bind_and_evaluate_with() { - for i in 0..50 { - // Initialize a random polynomial - let n = 7; - let mut rng = ChaCha20Rng::from_seed([i as u8; 32]); - let poly = MultilinearPolynomial::random(n, &mut rng); - - // draw a random point - let pt: Vec<_> = std::iter::from_fn(|| Some(F::random(&mut rng))) - .take(n) - .collect(); - // this shows the order in which coordinates are evaluated - assert_eq!(poly.evaluate(&pt), bind_sequence(&poly, &pt).Z[0]) - } - } - - #[test] - fn test_bind_and_evaluate() { - bind_and_evaluate_with::(); - } -} diff --git a/src/spartan/polys/power.rs b/src/spartan/polys/power.rs deleted file mode 100644 index 04aba49..0000000 --- a/src/spartan/polys/power.rs +++ /dev/null @@ -1,75 +0,0 @@ -//! 
`PowPolynomial`: Represents multilinear extension of power polynomials - -use std::iter::successors; - -use ff::PrimeField; - -use crate::spartan::polys::eq::EqPolynomial; - -/// Represents the multilinear extension polynomial (MLE) of the equality -/// polynomial $pow(x,t)$, denoted as $\tilde{pow}(x, t)$. -/// -/// The polynomial is defined by the formula: -/// $$ -/// \tilde{power}(x, t) = \prod_{i=1}^m(1 + (t^{2^i} - 1) * x_i) -/// $$ -pub struct PowPolynomial { - eq: EqPolynomial, -} - -impl PowPolynomial { - /// Creates a new `PowPolynomial` from a Scalars `t`. - pub fn new(t: &Scalar, ell: usize) -> Self { - // t_pow = [t^{2^0}, t^{2^1}, ..., t^{2^{ell-1}}] - let t_pow = Self::squares(t, ell); - - Self { - eq: EqPolynomial::new(t_pow), - } - } - - /// Create powers the following powers of `t`: - /// [t^{2^0}, t^{2^1}, ..., t^{2^{ell-1}}] - pub fn squares(t: &Scalar, ell: usize) -> Vec { - successors(Some(*t), |p: &Scalar| Some(p.square())) - .take(ell) - .collect::>() - } - - /// Creates the evals corresponding to a `PowPolynomial` from an - /// already-existing vector of powers. `t_pow.len() > ell` must be true. - pub(crate) fn evals_with_powers(powers: &[Scalar], ell: usize) -> Vec { - let t_pow = powers[..ell].to_vec(); - EqPolynomial::evals_from_points(&t_pow) - } - - /// Evaluates the `PowPolynomial` at a given point `rx`. - /// - /// This function computes the value of the polynomial at the point - /// specified by `rx`. It expects `rx` to have the same length as the - /// internal vector `t_pow`. - /// - /// Panics if `rx` and `t_pow` have different lengths. - pub fn evaluate(&self, rx: &[Scalar]) -> Scalar { - self.eq.evaluate(rx) - } - - pub fn coordinates(self) -> Vec { - self.eq.r - } - - /// Evaluates the `PowPolynomial` at all the `2^|t_pow|` points in its - /// domain. - /// - /// Returns a vector of Scalars, each corresponding to the polynomial - /// evaluation at a specific point. 
- pub fn evals(&self) -> Vec { - self.eq.evals() - } -} - -impl From> for EqPolynomial { - fn from(polynomial: PowPolynomial) -> Self { - polynomial.eq - } -} diff --git a/src/spartan/polys/univariate.rs b/src/spartan/polys/univariate.rs deleted file mode 100644 index 35a25d9..0000000 --- a/src/spartan/polys/univariate.rs +++ /dev/null @@ -1,415 +0,0 @@ -//! Main components: -//! - `UniPoly`: an univariate dense polynomial in coefficient form (big -//! endian), -//! - `CompressedUniPoly`: a univariate dense polynomial, compressed (omitted -//! linear term), in coefficient form (little endian), -use std::{ - cmp::Ordering, - ops::{AddAssign, Index, IndexMut, MulAssign, SubAssign}, -}; - -use ff::PrimeField; -use rayon::prelude::{IntoParallelIterator, IntoParallelRefMutIterator, ParallelIterator}; -use ref_cast::RefCast; -use serde::{Deserialize, Serialize}; - -use crate::{ - provider::util::iterators::DoubleEndedIteratorExt as _, - traits::{Group, TranscriptReprTrait}, -}; - -// ax^2 + bx + c stored as vec![c, b, a] -// ax^3 + bx^2 + cx + d stored as vec![d, c, b, a] -#[derive(Debug, Clone, PartialEq, Eq, RefCast)] -#[repr(transparent)] -pub struct UniPoly { - pub coeffs: Vec, -} - -// ax^2 + bx + c stored as vec![c, a] -// ax^3 + bx^2 + cx + d stored as vec![d, c, a] -#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct CompressedUniPoly { - coeffs_except_linear_term: Vec, -} - -impl UniPoly { - pub fn new(coeffs: Vec) -> Self { - let mut res = Self { coeffs }; - res.truncate_leading_zeros(); - res - } - - fn zero() -> Self { - Self::new(Vec::new()) - } - - /// Divide self by another polynomial, and returns the - /// quotient and remainder. 
- pub fn divide_with_q_and_r(&self, divisor: &Self) -> Option<(Self, Self)> { - if self.is_zero() { - Some((Self::zero(), Self::zero())) - } else if divisor.is_zero() { - None - } else if self.degree() < divisor.degree() { - Some((Self::zero(), self.clone())) - } else { - // Now we know that self.degree() >= divisor.degree(); - let mut quotient = vec![Scalar::ZERO; self.degree() - divisor.degree() + 1]; - let mut remainder: Self = self.clone(); - // Can unwrap here because we know self is not zero. - let divisor_leading_inv = divisor.leading_coefficient().unwrap().invert().unwrap(); - while !remainder.is_zero() && remainder.degree() >= divisor.degree() { - let cur_q_coeff = *remainder.leading_coefficient().unwrap() * divisor_leading_inv; - let cur_q_degree = remainder.degree() - divisor.degree(); - quotient[cur_q_degree] = cur_q_coeff; - - for (i, div_coeff) in divisor.coeffs.iter().enumerate() { - remainder.coeffs[cur_q_degree + i] -= &(cur_q_coeff * div_coeff); - } - while let Some(true) = remainder.coeffs.last().map(|c| c == &Scalar::ZERO) { - remainder.coeffs.pop(); - } - } - Some((Self::new(quotient), remainder)) - } - } - - /// Divides f(x) by x-a and returns quotient polynomial with no reminder - /// This is a common use case for polynomial divisions in KZG-based PCS. - pub fn divide_minus_u(&self, u: Scalar) -> Self { - if self.is_zero() { - Self::zero() - } else { - // On input f(x) and u compute the witness polynomial used to prove - // that f(u) = v. The main part of this is to compute the - // division (f(x) - f(u)) / (x - u), but we don't use a general - // division algorithm, we make use of the fact that the division - // never has a remainder, and that the denominator is always a linear - // polynomial. The cost is (d-1) mults + (d-1) adds in E::Scalar, where - // d is the degree of f. - // - // We use the fact that if we compute the quotient of f(x)/(x-u), - // there will be a remainder, but it'll be v = f(u). 
Put another way - // the quotient of f(x)/(x-u) and (f(x) - f(v))/(x-u) is the - // same. One advantage is that computing f(u) could be decoupled - // from kzg_open, it could be done later or separate from computing W. - - let d = self.coeffs.len(); - - // Compute h(x) = f(x)/(x - u) - let mut h = vec![Scalar::ZERO; d]; - for i in (1..d).rev() { - h[i - 1] = self.coeffs[i] + h[i] * u; - } - Self::new(h) - } - } - - fn is_zero(&self) -> bool { - self.coeffs.is_empty() || self.coeffs.iter().all(|c| c == &Scalar::ZERO) - } - - fn truncate_leading_zeros(&mut self) { - while self.coeffs.last().map_or(false, |c| c == &Scalar::ZERO) { - self.coeffs.pop(); - } - } - - fn leading_coefficient(&self) -> Option<&Scalar> { - self.coeffs.last() - } - - pub fn from_evals(evals: &[Scalar]) -> Self { - // we only support degree-2 or degree-3 univariate polynomials - assert!(evals.len() == 3 || evals.len() == 4); - let two_inv = Scalar::from(2).invert().unwrap(); - let coeffs = if evals.len() == 3 { - // ax^2 + bx + c - let c = evals[0]; - let a = two_inv * (evals[2] - evals[1] - evals[1] + c); - let b = evals[1] - c - a; - vec![c, b, a] - } else { - // ax^3 + bx^2 + cx + d - let six_inv = Scalar::from(6).invert().unwrap(); - - let d = evals[0]; - let a = six_inv - * (evals[3] - evals[2] - evals[2] - evals[2] + evals[1] + evals[1] + evals[1] - - evals[0]); - let b = two_inv - * (evals[0] + evals[0] - evals[1] - evals[1] - evals[1] - evals[1] - evals[1] - + evals[2] - + evals[2] - + evals[2] - + evals[2] - - evals[3]); - let c = evals[1] - d - a - b; - vec![d, c, b, a] - }; - - Self { coeffs } - } - - pub fn degree(&self) -> usize { - self.coeffs.len() - 1 - } - - pub fn eval_at_zero(&self) -> Scalar { - self.coeffs[0] - } - - pub fn eval_at_one(&self) -> Scalar { - (0..self.coeffs.len()) - .into_par_iter() - .map(|i| self.coeffs[i]) - .sum() - } - - pub fn evaluate(&self, r: &Scalar) -> Scalar { - self.coeffs.iter().rlc(r) - } - - pub fn compress(&self) -> CompressedUniPoly { - let 
coeffs_except_linear_term = [&self.coeffs[..1], &self.coeffs[2..]].concat(); - assert_eq!(coeffs_except_linear_term.len() + 1, self.coeffs.len()); - CompressedUniPoly { - coeffs_except_linear_term, - } - } - - #[cfg(test)] - /// Returns a random polynomial - pub fn random(num_vars: usize, mut rng: &mut R) -> Self { - Self::new( - std::iter::from_fn(|| Some(Scalar::random(&mut rng))) - .take(num_vars) - .collect(), - ) - } -} - -impl CompressedUniPoly { - // we require eval(0) + eval(1) = hint, so we can solve for the linear term as: - // linear_term = hint - 2 * constant_term - deg2 term - deg3 term - pub fn decompress(&self, hint: &Scalar) -> UniPoly { - let mut linear_term = - *hint - self.coeffs_except_linear_term[0] - self.coeffs_except_linear_term[0]; - for i in 1..self.coeffs_except_linear_term.len() { - linear_term -= self.coeffs_except_linear_term[i]; - } - - let mut coeffs: Vec = Vec::new(); - coeffs.push(self.coeffs_except_linear_term[0]); - coeffs.push(linear_term); - coeffs.extend(&self.coeffs_except_linear_term[1..]); - assert_eq!(self.coeffs_except_linear_term.len() + 1, coeffs.len()); - UniPoly { coeffs } - } -} - -impl TranscriptReprTrait for UniPoly { - fn to_transcript_bytes(&self) -> Vec { - let coeffs = self.compress().coeffs_except_linear_term; - coeffs - .iter() - .flat_map(|&t| t.to_repr().as_ref().to_vec()) - .collect::>() - } -} - -impl Index for UniPoly { - type Output = Scalar; - - fn index(&self, index: usize) -> &Self::Output { - &self.coeffs[index] - } -} - -impl IndexMut for UniPoly { - fn index_mut(&mut self, index: usize) -> &mut Self::Output { - &mut self.coeffs[index] - } -} - -impl AddAssign<&Scalar> for UniPoly { - fn add_assign(&mut self, rhs: &Scalar) { - self.coeffs.par_iter_mut().for_each(|c| *c += rhs); - } -} - -impl MulAssign<&Scalar> for UniPoly { - fn mul_assign(&mut self, rhs: &Scalar) { - self.coeffs.par_iter_mut().for_each(|c| *c *= rhs); - } -} - -impl AddAssign<&Self> for UniPoly { - fn add_assign(&mut self, rhs: 
&Self) { - let ordering = self.coeffs.len().cmp(&rhs.coeffs.len()); - #[allow(clippy::disallowed_methods)] - for (lhs, rhs) in self.coeffs.iter_mut().zip(&rhs.coeffs) { - *lhs += rhs; - } - if matches!(ordering, Ordering::Less) { - self.coeffs - .extend(rhs.coeffs[self.coeffs.len()..].iter().cloned()); - } - if matches!(ordering, Ordering::Equal) { - self.truncate_leading_zeros(); - } - } -} - -impl SubAssign<&Self> for UniPoly { - fn sub_assign(&mut self, rhs: &Self) { - let ordering = self.coeffs.len().cmp(&rhs.coeffs.len()); - #[allow(clippy::disallowed_methods)] - for (lhs, rhs) in self.coeffs.iter_mut().zip(&rhs.coeffs) { - *lhs -= rhs; - } - if matches!(ordering, Ordering::Less) { - self.coeffs - .extend(rhs.coeffs[self.coeffs.len()..].iter().cloned()); - } - if matches!(ordering, Ordering::Equal) { - self.truncate_leading_zeros(); - } - } -} - -impl AsRef> for UniPoly { - fn as_ref(&self) -> &Vec { - &self.coeffs - } -} - -#[cfg(test)] -mod tests { - use rand::SeedableRng; - use rand_chacha::ChaCha20Rng; - - use super::*; - use crate::provider::bn256_grumpkin; - - fn test_from_evals_quad_with() { - // polynomial is 2x^2 + 3x + 1 - let e0 = F::ONE; - let e1 = F::from(6); - let e2 = F::from(15); - let evals = vec![e0, e1, e2]; - let poly = UniPoly::from_evals(&evals); - - assert_eq!(poly.eval_at_zero(), e0); - assert_eq!(poly.eval_at_one(), e1); - assert_eq!(poly.coeffs.len(), 3); - assert_eq!(poly.coeffs[0], F::ONE); - assert_eq!(poly.coeffs[1], F::from(3)); - assert_eq!(poly.coeffs[2], F::from(2)); - - let hint = e0 + e1; - let compressed_poly = poly.compress(); - let decompressed_poly = compressed_poly.decompress(&hint); - for i in 0..decompressed_poly.coeffs.len() { - assert_eq!(decompressed_poly.coeffs[i], poly.coeffs[i]); - } - - let e3 = F::from(28); - assert_eq!(poly.evaluate(&F::from(3)), e3); - } - - #[test] - fn test_from_evals_quad() { - test_from_evals_quad_with::(); - } - - fn test_from_evals_cubic_with() { - // polynomial is x^3 + 2x^2 + 3x + 1 
- let e0 = F::ONE; - let e1 = F::from(7); - let e2 = F::from(23); - let e3 = F::from(55); - let evals = vec![e0, e1, e2, e3]; - let poly = UniPoly::from_evals(&evals); - - assert_eq!(poly.eval_at_zero(), e0); - assert_eq!(poly.eval_at_one(), e1); - assert_eq!(poly.coeffs.len(), 4); - - assert_eq!(poly.coeffs[1], F::from(3)); - assert_eq!(poly.coeffs[2], F::from(2)); - assert_eq!(poly.coeffs[3], F::from(1)); - - let hint = e0 + e1; - let compressed_poly = poly.compress(); - let decompressed_poly = compressed_poly.decompress(&hint); - for i in 0..decompressed_poly.coeffs.len() { - assert_eq!(decompressed_poly.coeffs[i], poly.coeffs[i]); - } - - let e4 = F::from(109); - assert_eq!(poly.evaluate(&F::from(4)), e4); - } - - #[test] - fn test_from_evals_cubic() { - test_from_evals_cubic_with::(); - } - - /// Perform a naive n^2 multiplication of `self` by `other`. - pub fn naive_mul(ours: &UniPoly, other: &UniPoly) -> UniPoly { - if ours.is_zero() || other.is_zero() { - UniPoly::zero() - } else { - let mut result = vec![F::ZERO; ours.degree() + other.degree() + 1]; - for (i, self_coeff) in ours.coeffs.iter().enumerate() { - for (j, other_coeff) in other.coeffs.iter().enumerate() { - result[i + j] += &(*self_coeff * other_coeff); - } - } - UniPoly::new(result) - } - } - - fn divide_polynomials_random() { - let rng = &mut ChaCha20Rng::from_seed([0u8; 32]); - - for a_degree in 0..50 { - for b_degree in 0..50 { - let dividend = UniPoly::::random(a_degree, rng); - let divisor = UniPoly::::random(b_degree, rng); - - if let Some((quotient, remainder)) = - UniPoly::divide_with_q_and_r(÷nd, &divisor) - { - let mut prod = naive_mul(&divisor, "ient); - prod += &remainder; - assert_eq!(dividend, prod) - } - } - } - } - - #[test] - fn test_divide_minus_u() { - fn test_inner() { - let rng = &mut ChaCha20Rng::from_seed([0u8; 32]); - let dividend = UniPoly::::random(50, rng); - let u = Fr::random(rng); - let divisor = UniPoly::new(vec![-u, Fr::ONE]); - - let (q1, _) = 
dividend.divide_with_q_and_r(&divisor).unwrap(); - let q2 = dividend.divide_minus_u(u); - - assert_eq!(q1, q2); - } - - test_inner::(); - } - - #[test] - fn test_divide_polynomials_random() { - divide_polynomials_random::(); - } -} diff --git a/src/spartan/ppsnark.rs b/src/spartan/ppsnark.rs deleted file mode 100644 index 64c27c4..0000000 --- a/src/spartan/ppsnark.rs +++ /dev/null @@ -1,1097 +0,0 @@ -//! This module implements `RelaxedR1CSSNARK` traits using a spark-based -//! approach to prove evaluations of sparse multilinear polynomials involved in -//! Spartan's sum-check protocol, thereby providing a preprocessing SNARK -//! The verifier in this preprocessing SNARK maintains a commitment to R1CS -//! matrices. This is beneficial when using a polynomial commitment scheme in -//! which the verifier's costs is succinct. This code includes experimental -//! optimizations to reduce runtimes and proof sizes. -use core::cmp::max; -use std::sync::Arc; - -use ff::Field; -use itertools::Itertools as _; -use once_cell::sync::OnceCell; -use rayon::prelude::*; -use serde::{Deserialize, Serialize}; - -use super::polys::{masked_eq::MaskedEqPolynomial, multilinear::SparsePolynomial}; -use crate::{ - digest::{DigestComputer, SimpleDigestible}, - errors::NovaError, - r1cs::{R1CSShape, RelaxedR1CSInstance, RelaxedR1CSWitness}, - spartan::{ - math::Math, - polys::{ - eq::EqPolynomial, - identity::IdentityPolynomial, - multilinear::MultilinearPolynomial, - power::PowPolynomial, - univariate::{CompressedUniPoly, UniPoly}, - }, - powers, - sumcheck::{ - engine::{ - InnerSumcheckInstance, MemorySumcheckInstance, OuterSumcheckInstance, - SumcheckEngine, WitnessBoundSumcheck, - }, - SumcheckProof, - }, - PolyEvalInstance, PolyEvalWitness, - }, - traits::{ - commitment::{CommitmentEngineTrait, CommitmentTrait, Len}, - evaluation::EvaluationEngineTrait, - snark::{DigestHelperTrait, RelaxedR1CSSNARKTrait}, - Engine, TranscriptEngineTrait, TranscriptReprTrait, - }, - zip_with, Commitment, 
CommitmentKey, CompressedCommitment, -}; - -fn padded(v: &[E::Scalar], n: usize, e: &E::Scalar) -> Vec { - let mut v_padded = vec![*e; n]; - v_padded[..v.len()].copy_from_slice(v); - v_padded -} - -/// A type that holds `R1CSShape` in a form amenable to memory checking -#[derive(Debug, Clone, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct R1CSShapeSparkRepr { - pub(in crate::spartan) N: usize, // size of the vectors - - // dense representation - pub(in crate::spartan) row: Vec, - pub(in crate::spartan) col: Vec, - pub(in crate::spartan) val_A: Vec, - pub(in crate::spartan) val_B: Vec, - pub(in crate::spartan) val_C: Vec, - - // timestamp polynomials - pub(in crate::spartan) ts_row: Vec, - pub(in crate::spartan) ts_col: Vec, -} - -/// A type that holds a commitment to a sparse polynomial -#[derive(Debug, Clone, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct R1CSShapeSparkCommitment { - pub(in crate::spartan) N: usize, // size of each vector - - // commitments to the dense representation - pub(in crate::spartan) comm_row: Commitment, - pub(in crate::spartan) comm_col: Commitment, - pub(in crate::spartan) comm_val_A: Commitment, - pub(in crate::spartan) comm_val_B: Commitment, - pub(in crate::spartan) comm_val_C: Commitment, - - // commitments to the timestamp polynomials - pub(in crate::spartan) comm_ts_row: Commitment, - pub(in crate::spartan) comm_ts_col: Commitment, -} - -impl TranscriptReprTrait for R1CSShapeSparkCommitment { - fn to_transcript_bytes(&self) -> Vec { - [ - self.comm_row, - self.comm_col, - self.comm_val_A, - self.comm_val_B, - self.comm_val_C, - self.comm_ts_row, - self.comm_ts_col, - ] - .as_slice() - .to_transcript_bytes() - } -} - -impl R1CSShapeSparkRepr { - /// represents `R1CSShape` in a Spark-friendly format amenable to memory - /// checking - pub fn new(S: &R1CSShape) -> Self { - let N = { - let total_nz = S.A.len() + S.B.len() + S.C.len(); - max(total_nz, max(2 * S.num_vars, S.num_cons)).next_power_of_two() - }; - 
- // we make col lookup into the last entry of z, so we commit to zeros - let (mut row, mut col, mut val_A, mut val_B, mut val_C) = ( - vec![0; N], - vec![N - 1; N], - vec![E::Scalar::ZERO; N], - vec![E::Scalar::ZERO; N], - vec![E::Scalar::ZERO; N], - ); - - for (i, entry) in S.A.iter().enumerate() { - let (r, c, v) = entry; - row[i] = r; - col[i] = c; - val_A[i] = v; - } - - let b_offset = S.A.len(); - for (i, entry) in S.B.iter().enumerate() { - let (r, c, v) = entry; - row[b_offset + i] = r; - col[b_offset + i] = c; - val_B[b_offset + i] = v; - } - - let c_offset = S.A.len() + S.B.len(); - for (i, entry) in S.C.iter().enumerate() { - let (r, c, v) = entry; - row[c_offset + i] = r; - col[c_offset + i] = c; - val_C[c_offset + i] = v; - } - - // timestamp calculation routine - let timestamp_calc = - |num_ops: usize, num_cells: usize, addr_trace: &[usize]| -> Vec { - let mut ts = vec![0usize; num_cells]; - - assert!(num_ops >= addr_trace.len()); - for addr in addr_trace { - assert!(*addr < num_cells); - ts[*addr] += 1; - } - ts - }; - - // timestamp polynomials for row - let (ts_row, ts_col) = - rayon::join(|| timestamp_calc(N, N, &row), || timestamp_calc(N, N, &col)); - - // a routine to turn a vector of usize into a vector scalars - let to_vec_scalar = |v: &[usize]| -> Vec { - v.iter() - .map(|x| E::Scalar::from(*x as u64)) - .collect::>() - }; - - Self { - N, - - // dense representation - row: to_vec_scalar(&row), - col: to_vec_scalar(&col), - val_A, - val_B, - val_C, - - // timestamp polynomials - ts_row: to_vec_scalar(&ts_row), - ts_col: to_vec_scalar(&ts_col), - } - } - - pub(in crate::spartan) fn commit(&self, ck: &CommitmentKey) -> R1CSShapeSparkCommitment { - let comm_vec: Vec> = [ - &self.row, - &self.col, - &self.val_A, - &self.val_B, - &self.val_C, - &self.ts_row, - &self.ts_col, - ] - .par_iter() - .map(|v| E::CE::commit(ck, v)) - .collect(); - - R1CSShapeSparkCommitment { - N: self.row.len(), - comm_row: comm_vec[0], - comm_col: comm_vec[1], - 
comm_val_A: comm_vec[2], - comm_val_B: comm_vec[3], - comm_val_C: comm_vec[4], - comm_ts_row: comm_vec[5], - comm_ts_col: comm_vec[6], - } - } - - // computes evaluation oracles - fn evaluation_oracles( - &self, - S: &R1CSShape, - r_x: &E::Scalar, - z: &[E::Scalar], - ) -> ( - Vec, - Vec, - Vec, - Vec, - ) { - let mem_row = PowPolynomial::new(r_x, self.N.log_2()).evals(); - let mem_col = padded::(z, self.N, &E::Scalar::ZERO); - - let (L_row, L_col) = { - let mut L_row = vec![mem_row[0]; self.N]; // we place mem_row[0] since resized row is appended with 0s - let mut L_col = vec![mem_col[self.N - 1]; self.N]; // we place mem_col[N-1] since resized col is appended with N-1 - - for (i, (val_r, val_c)) in - S.A.iter() - .chain(S.B.iter()) - .chain(S.C.iter()) - .map(|(r, c, _)| (mem_row[r], mem_col[c])) - .enumerate() - { - L_row[i] = val_r; - L_col[i] = val_c; - } - (L_row, L_col) - }; - - (mem_row, mem_col, L_row, L_col) - } -} - -/// A type that represents the prover's key -#[derive(Debug, Clone)] -pub struct ProverKey> { - pk_ee: EE::ProverKey, - S_repr: R1CSShapeSparkRepr, - S_comm: R1CSShapeSparkCommitment, - vk_digest: E::Scalar, // digest of verifier's key -} - -/// A type that represents the verifier's key -#[derive(Debug, Clone, Serialize)] -#[serde(bound = "EE::VerifierKey: Serialize")] -pub struct VerifierKey> { - num_cons: usize, - num_vars: usize, - vk_ee: EE::VerifierKey, - S_comm: R1CSShapeSparkCommitment, - #[serde(skip, default = "OnceCell::new")] - digest: OnceCell, -} - -impl> SimpleDigestible for VerifierKey where - EE::VerifierKey: Serialize -{ -} - -/// A succinct proof of knowledge of a witness to a relaxed R1CS instance -/// The proof is produced using Spartan's combination of the sum-check and -/// the commitment to a vector viewed as a polynomial commitment -#[derive(Clone, Debug, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct RelaxedR1CSSNARK> { - // commitment to oracles: the first three are for Az, Bz, Cz, - // and the last two 
are for memory reads - comm_Az: CompressedCommitment, - comm_Bz: CompressedCommitment, - comm_Cz: CompressedCommitment, - comm_L_row: CompressedCommitment, - comm_L_col: CompressedCommitment, - - // commitments to aid the memory checks - comm_t_plus_r_inv_row: CompressedCommitment, - comm_w_plus_r_inv_row: CompressedCommitment, - comm_t_plus_r_inv_col: CompressedCommitment, - comm_w_plus_r_inv_col: CompressedCommitment, - - // claims about Az, Bz, and Cz polynomials - eval_Az_at_tau: E::Scalar, - eval_Bz_at_tau: E::Scalar, - eval_Cz_at_tau: E::Scalar, - - // sum-check - sc: SumcheckProof, - - // claims from the end of sum-check - eval_Az: E::Scalar, - eval_Bz: E::Scalar, - eval_Cz: E::Scalar, - eval_E: E::Scalar, - eval_L_row: E::Scalar, - eval_L_col: E::Scalar, - eval_val_A: E::Scalar, - eval_val_B: E::Scalar, - eval_val_C: E::Scalar, - - eval_W: E::Scalar, - - eval_t_plus_r_inv_row: E::Scalar, - eval_row: E::Scalar, // address - eval_w_plus_r_inv_row: E::Scalar, - eval_ts_row: E::Scalar, - - eval_t_plus_r_inv_col: E::Scalar, - eval_col: E::Scalar, // address - eval_w_plus_r_inv_col: E::Scalar, - eval_ts_col: E::Scalar, - - // a PCS evaluation argument - eval_arg: EE::EvaluationArgument, -} - -impl> RelaxedR1CSSNARK { - fn prove_helper( - mem: &mut T1, - outer: &mut T2, - inner: &mut T3, - witness: &mut T4, - transcript: &mut E::TE, - ) -> Result< - ( - SumcheckProof, - Vec, - Vec>, - Vec>, - Vec>, - Vec>, - ), - NovaError, - > - where - T1: SumcheckEngine, - T2: SumcheckEngine, - T3: SumcheckEngine, - T4: SumcheckEngine, - { - // sanity checks - assert_eq!(mem.size(), outer.size()); - assert_eq!(mem.size(), inner.size()); - assert_eq!(mem.size(), witness.size()); - assert_eq!(mem.degree(), outer.degree()); - assert_eq!(mem.degree(), inner.degree()); - assert_eq!(mem.degree(), witness.degree()); - - // these claims are already added to the transcript, so we do not need to add - let claims = mem - .initial_claims() - .into_iter() - .chain(outer.initial_claims()) - 
.chain(inner.initial_claims()) - .chain(witness.initial_claims()) - .collect::>(); - - let s = transcript.squeeze(b"r")?; - let coeffs = powers(&s, claims.len()); - - // compute the joint claim - let claim = zip_with!(iter, (claims, coeffs), |c_1, c_2| *c_1 * c_2).sum(); - - let mut e = claim; - let mut r: Vec = Vec::new(); - let mut cubic_polys: Vec> = Vec::new(); - let num_rounds = mem.size().log_2(); - for _ in 0..num_rounds { - let ((evals_mem, evals_outer), (evals_inner, evals_witness)) = rayon::join( - || rayon::join(|| mem.evaluation_points(), || outer.evaluation_points()), - || rayon::join(|| inner.evaluation_points(), || witness.evaluation_points()), - ); - - let evals: Vec> = evals_mem - .into_iter() - .chain(evals_outer.into_iter()) - .chain(evals_inner.into_iter()) - .chain(evals_witness.into_iter()) - .collect::>>(); - assert_eq!(evals.len(), claims.len()); - - let evals_combined_0 = (0..evals.len()).map(|i| evals[i][0] * coeffs[i]).sum(); - let evals_combined_2 = (0..evals.len()).map(|i| evals[i][1] * coeffs[i]).sum(); - let evals_combined_3 = (0..evals.len()).map(|i| evals[i][2] * coeffs[i]).sum(); - - let evals = vec![ - evals_combined_0, - e - evals_combined_0, - evals_combined_2, - evals_combined_3, - ]; - let poly = UniPoly::from_evals(&evals); - - // append the prover's message to the transcript - transcript.absorb(b"p", &poly); - - // derive the verifier's challenge for the next round - let r_i = transcript.squeeze(b"c")?; - r.push(r_i); - - let _ = rayon::join( - || rayon::join(|| mem.bound(&r_i), || outer.bound(&r_i)), - || rayon::join(|| inner.bound(&r_i), || witness.bound(&r_i)), - ); - - e = poly.evaluate(&r_i); - cubic_polys.push(poly.compress()); - } - - let mem_claims = mem.final_claims(); - let outer_claims = outer.final_claims(); - let inner_claims = inner.final_claims(); - let witness_claims = witness.final_claims(); - - Ok(( - SumcheckProof::new(cubic_polys), - r, - mem_claims, - outer_claims, - inner_claims, - witness_claims, - )) 
- } -} - -impl> VerifierKey { - fn new( - num_cons: usize, - num_vars: usize, - S_comm: R1CSShapeSparkCommitment, - vk_ee: EE::VerifierKey, - ) -> Self { - Self { - num_cons, - num_vars, - S_comm, - vk_ee, - digest: Default::default(), - } - } -} -impl> DigestHelperTrait for VerifierKey { - /// Returns the digest of the verifier's key - fn digest(&self) -> E::Scalar { - self.digest - .get_or_try_init(|| { - let dc = DigestComputer::new(self); - dc.digest() - }) - .cloned() - .expect("Failure to retrieve digest!") - } -} - -impl> RelaxedR1CSSNARKTrait for RelaxedR1CSSNARK { - type ProverKey = ProverKey; - type VerifierKey = VerifierKey; - - fn ck_floor() -> Box Fn(&'a R1CSShape) -> usize> { - Box::new(|shape: &R1CSShape| -> usize { - // the commitment key should be large enough to commit to the R1CS matrices - shape.A.len() + shape.B.len() + shape.C.len() - }) - } - - fn initialize_pk( - _ck: Arc>, - _vk_digest: ::Scalar, - ) -> Result { - todo!("not implemented for nova snarks"); - } - - fn setup( - ck: Arc>, - S: &R1CSShape, - ) -> Result<(Self::ProverKey, Self::VerifierKey), NovaError> { - // check the provided commitment key meets minimal requirements - if ck.length() < Self::ck_floor()(S) { - return Err(NovaError::InvalidCommitmentKeyLength); - } - let (pk_ee, vk_ee) = EE::setup(ck.clone()); - - // pad the R1CS matrices - let S = S.pad(); - - let S_repr = R1CSShapeSparkRepr::new(&S); - let S_comm = S_repr.commit(&*ck); - - let vk = VerifierKey::new(S.num_cons, S.num_vars, S_comm.clone(), vk_ee); - - let pk = ProverKey { - pk_ee, - S_repr, - S_comm, - vk_digest: vk.digest(), - }; - - Ok((pk, vk)) - } - - /// produces a succinct proof of satisfiability of a `RelaxedR1CS` instance - #[tracing::instrument(skip_all, name = "PPSNARK::prove")] - fn prove( - ck: &CommitmentKey, - pk: &Self::ProverKey, - S: &R1CSShape, - U: &RelaxedR1CSInstance, - W: &RelaxedR1CSWitness, - ) -> Result { - // pad the R1CSShape - let S = S.pad(); - // sanity check that R1CSShape has all 
required size characteristics - assert!(S.is_regular_shape()); - - let W = W.pad(&S); // pad the witness - let mut transcript = E::TE::new(b"RelaxedR1CSSNARK"); - - // append the verifier key (which includes commitment to R1CS matrices) and the - // RelaxedR1CSInstance to the transcript - transcript.absorb(b"vk", &pk.vk_digest); - transcript.absorb(b"U", U); - - // compute the full satisfying assignment by concatenating W.W, U.u, and U.X - let z = [W.W.clone(), vec![U.u], U.X.clone()].concat(); - - // compute Az, Bz, Cz - let (mut Az, mut Bz, mut Cz) = S.multiply_vec(&z)?; - - // commit to Az, Bz, Cz - let (comm_Az, (comm_Bz, comm_Cz)) = rayon::join( - || E::CE::commit(ck, &Az), - || rayon::join(|| E::CE::commit(ck, &Bz), || E::CE::commit(ck, &Cz)), - ); - - transcript.absorb(b"c", &[comm_Az, comm_Bz, comm_Cz].as_slice()); - - // number of rounds of sum-check - let num_rounds_sc = pk.S_repr.N.log_2(); - let tau = transcript.squeeze(b"t")?; - let tau_coords = PowPolynomial::new(&tau, num_rounds_sc).coordinates(); - - // (1) send commitments to Az, Bz, and Cz along with their evaluations at tau - let (Az, Bz, Cz, W, E) = { - Az.resize(pk.S_repr.N, E::Scalar::ZERO); - Bz.resize(pk.S_repr.N, E::Scalar::ZERO); - Cz.resize(pk.S_repr.N, E::Scalar::ZERO); - let E = padded::(&W.E, pk.S_repr.N, &E::Scalar::ZERO); - let W = padded::(&W.W, pk.S_repr.N, &E::Scalar::ZERO); - - (Az, Bz, Cz, W, E) - }; - let chis_taus = EqPolynomial::evals_from_points(&tau_coords); - let (eval_Az_at_tau, eval_Bz_at_tau, eval_Cz_at_tau) = { - let evals_at_tau = [&Az, &Bz, &Cz] - .into_par_iter() - .map(|p| MultilinearPolynomial::evaluate_with_chis(p, &chis_taus)) - .collect::>(); - (evals_at_tau[0], evals_at_tau[1], evals_at_tau[2]) - }; - - // (2) send commitments to the following two oracles - // L_row(i) = eq(tau, row(i)) for all i - // L_col(i) = z(col(i)) for all i - let (mem_row, mem_col, L_row, L_col) = pk.S_repr.evaluation_oracles(&S, &tau, &z); - let (comm_L_row, comm_L_col) = - 
rayon::join(|| E::CE::commit(ck, &L_row), || E::CE::commit(ck, &L_col)); - - // since all the three polynomials are opened at tau, - // we can combine them into a single polynomial opened at tau - let eval_vec = vec![eval_Az_at_tau, eval_Bz_at_tau, eval_Cz_at_tau]; - - // absorb the claimed evaluations into the transcript - transcript.absorb(b"e", &eval_vec.as_slice()); - // absorb commitments to L_row and L_col in the transcript - transcript.absorb(b"e", &vec![comm_L_row, comm_L_col].as_slice()); - let comm_vec = vec![comm_Az, comm_Bz, comm_Cz]; - let poly_vec = vec![&Az, &Bz, &Cz]; - let c = transcript.squeeze(b"c")?; - let w: PolyEvalWitness = PolyEvalWitness::batch(&poly_vec, &c); - let u: PolyEvalInstance = - PolyEvalInstance::batch(&comm_vec, tau_coords.clone(), &eval_vec, &c); - - // we now need to prove four claims - // (1) 0 = \sum_x poly_tau(x) * (poly_Az(x) * poly_Bz(x) - poly_uCz_E(x)), and - // eval_Az_at_tau + r * eval_Bz_at_tau + r^2 * eval_Cz_at_tau = - // (Az+r*Bz+r^2*Cz)(tau) (2) eval_Az_at_tau + c * eval_Bz_at_tau + c^2 * - // eval_Cz_at_tau = \sum_y L_row(y) * (val_A(y) + c * val_B(y) + c^2 * val_C(y)) - // * L_col(y) (3) L_row(i) = eq(tau, row(i)) and L_col(i) = z(col(i)) - // (4) Check that the witness polynomial W is well-formed e.g., it is padded - // with only zeros - let gamma = transcript.squeeze(b"g")?; - let r = transcript.squeeze(b"r")?; - - let ((mut outer_sc_inst, mut inner_sc_inst), mem_res) = rayon::join( - || { - // a sum-check instance to prove the first claim - let outer_sc_inst = OuterSumcheckInstance::new( - PowPolynomial::new(&tau, num_rounds_sc).evals(), - Az.clone(), - Bz.clone(), - (0..Cz.len()) - .map(|i| U.u * Cz[i] + E[i]) - .collect::>(), - w.p.clone(), // Mz = Az + r * Bz + r^2 * Cz - &u.e, // eval_Az_at_tau + r * eval_Az_at_tau + r^2 * eval_Cz_at_tau - ); - - // a sum-check instance to prove the second claim - let val = zip_with!( - par_iter, - (pk.S_repr.val_A, pk.S_repr.val_B, pk.S_repr.val_C), - |v_a, v_b, v_c| 
*v_a + c * *v_b + c * c * *v_c - ) - .collect::>(); - let inner_sc_inst = InnerSumcheckInstance { - claim: eval_Az_at_tau + c * eval_Bz_at_tau + c * c * eval_Cz_at_tau, - poly_L_row: MultilinearPolynomial::new(L_row.clone()), - poly_L_col: MultilinearPolynomial::new(L_col.clone()), - poly_val: MultilinearPolynomial::new(val), - }; - - (outer_sc_inst, inner_sc_inst) - }, - || { - // a third sum-check instance to prove the read-only memory claim - // we now need to prove that L_row and L_col are well-formed - - // hash the tuples of (addr,val) memory contents and read responses into a - // single field element using `hash_func` - - let (comm_mem_oracles, mem_oracles, mem_aux) = - MemorySumcheckInstance::::compute_oracles( - ck, - &r, - &gamma, - &mem_row, - &pk.S_repr.row, - &L_row, - &pk.S_repr.ts_row, - &mem_col, - &pk.S_repr.col, - &L_col, - &pk.S_repr.ts_col, - )?; - // absorb the commitments - transcript.absorb(b"l", &comm_mem_oracles.as_slice()); - - let rho = transcript.squeeze(b"r")?; - let poly_eq = - MultilinearPolynomial::new(PowPolynomial::new(&rho, num_rounds_sc).evals()); - - Ok::<_, NovaError>(( - MemorySumcheckInstance::new( - mem_oracles.clone(), - mem_aux, - poly_eq.Z, - pk.S_repr.ts_row.clone(), - pk.S_repr.ts_col.clone(), - ), - comm_mem_oracles, - mem_oracles, - )) - }, - ); - - let (mut mem_sc_inst, comm_mem_oracles, mem_oracles) = mem_res?; - - let mut witness_sc_inst = WitnessBoundSumcheck::new(tau, W.clone(), S.num_vars); - - let (sc, rand_sc, claims_mem, claims_outer, claims_inner, claims_witness) = - Self::prove_helper( - &mut mem_sc_inst, - &mut outer_sc_inst, - &mut inner_sc_inst, - &mut witness_sc_inst, - &mut transcript, - )?; - - // claims from the end of the sum-check - let eval_Az = claims_outer[0][0]; - let eval_Bz = claims_outer[0][1]; - - let eval_L_row = claims_inner[0][0]; - let eval_L_col = claims_inner[0][1]; - - let eval_t_plus_r_inv_row = claims_mem[0][0]; - let eval_w_plus_r_inv_row = claims_mem[0][1]; - let eval_ts_row = 
claims_mem[0][2]; - - let eval_t_plus_r_inv_col = claims_mem[1][0]; - let eval_w_plus_r_inv_col = claims_mem[1][1]; - let eval_ts_col = claims_mem[1][2]; - let eval_W = claims_witness[0][0]; - - // compute the remaining claims that did not come for free from the sum-check - // prover - let (eval_Cz, eval_E, eval_val_A, eval_val_B, eval_val_C, eval_row, eval_col) = { - let e = [ - &Cz, - &E, - &pk.S_repr.val_A, - &pk.S_repr.val_B, - &pk.S_repr.val_C, - &pk.S_repr.row, - &pk.S_repr.col, - ] - .into_par_iter() - .map(|p| MultilinearPolynomial::evaluate_with(p, &rand_sc)) - .collect::>(); - (e[0], e[1], e[2], e[3], e[4], e[5], e[6]) - }; - - // all the evaluations are at rand_sc, we can fold them into one claim - let eval_vec = vec![ - eval_W, - eval_Az, - eval_Bz, - eval_Cz, - eval_E, - eval_L_row, - eval_L_col, - eval_val_A, - eval_val_B, - eval_val_C, - eval_t_plus_r_inv_row, - eval_row, - eval_w_plus_r_inv_row, - eval_ts_row, - eval_t_plus_r_inv_col, - eval_col, - eval_w_plus_r_inv_col, - eval_ts_col, - ]; - - let comm_vec = [ - U.comm_W, - comm_Az, - comm_Bz, - comm_Cz, - U.comm_E, - comm_L_row, - comm_L_col, - pk.S_comm.comm_val_A, - pk.S_comm.comm_val_B, - pk.S_comm.comm_val_C, - comm_mem_oracles[0], - pk.S_comm.comm_row, - comm_mem_oracles[1], - pk.S_comm.comm_ts_row, - comm_mem_oracles[2], - pk.S_comm.comm_col, - comm_mem_oracles[3], - pk.S_comm.comm_ts_col, - ]; - let poly_vec = [ - &W, - &Az, - &Bz, - &Cz, - &E, - &L_row, - &L_col, - &pk.S_repr.val_A, - &pk.S_repr.val_B, - &pk.S_repr.val_C, - mem_oracles[0].as_ref(), - &pk.S_repr.row, - mem_oracles[1].as_ref(), - &pk.S_repr.ts_row, - mem_oracles[2].as_ref(), - &pk.S_repr.col, - mem_oracles[3].as_ref(), - &pk.S_repr.ts_col, - ]; - transcript.absorb(b"e", &eval_vec.as_slice()); // comm_vec is already in the transcript - let c = transcript.squeeze(b"c")?; - let w: PolyEvalWitness = PolyEvalWitness::batch(&poly_vec, &c); - let u: PolyEvalInstance = - PolyEvalInstance::batch(&comm_vec, rand_sc.clone(), &eval_vec, 
&c); - - let eval_arg = EE::prove(ck, &pk.pk_ee, &mut transcript, &u.c, &w.p, &rand_sc, &u.e)?; - - Ok(Self { - comm_Az: comm_Az.compress(), - comm_Bz: comm_Bz.compress(), - comm_Cz: comm_Cz.compress(), - comm_L_row: comm_L_row.compress(), - comm_L_col: comm_L_col.compress(), - - comm_t_plus_r_inv_row: comm_mem_oracles[0].compress(), - comm_w_plus_r_inv_row: comm_mem_oracles[1].compress(), - comm_t_plus_r_inv_col: comm_mem_oracles[2].compress(), - comm_w_plus_r_inv_col: comm_mem_oracles[3].compress(), - - eval_Az_at_tau, - eval_Bz_at_tau, - eval_Cz_at_tau, - - sc, - - eval_Az, - eval_Bz, - eval_Cz, - eval_E, - eval_L_row, - eval_L_col, - eval_val_A, - eval_val_B, - eval_val_C, - - eval_W, - - eval_t_plus_r_inv_row, - eval_row, - eval_w_plus_r_inv_row, - eval_ts_row, - - eval_col, - eval_t_plus_r_inv_col, - eval_w_plus_r_inv_col, - eval_ts_col, - - eval_arg, - }) - } - - /// verifies a proof of satisfiability of a `RelaxedR1CS` instance - fn verify(&self, vk: &Self::VerifierKey, U: &RelaxedR1CSInstance) -> Result<(), NovaError> { - let mut transcript = E::TE::new(b"RelaxedR1CSSNARK"); - - // append the verifier key (including commitment to R1CS matrices) and the - // RelaxedR1CSInstance to the transcript - transcript.absorb(b"vk", &vk.digest()); - transcript.absorb(b"U", U); - - let comm_Az = Commitment::::decompress(&self.comm_Az)?; - let comm_Bz = Commitment::::decompress(&self.comm_Bz)?; - let comm_Cz = Commitment::::decompress(&self.comm_Cz)?; - let comm_L_row = Commitment::::decompress(&self.comm_L_row)?; - let comm_L_col = Commitment::::decompress(&self.comm_L_col)?; - let comm_t_plus_r_inv_row = Commitment::::decompress(&self.comm_t_plus_r_inv_row)?; - let comm_w_plus_r_inv_row = Commitment::::decompress(&self.comm_w_plus_r_inv_row)?; - let comm_t_plus_r_inv_col = Commitment::::decompress(&self.comm_t_plus_r_inv_col)?; - let comm_w_plus_r_inv_col = Commitment::::decompress(&self.comm_w_plus_r_inv_col)?; - - transcript.absorb(b"c", &[comm_Az, comm_Bz, 
comm_Cz].as_slice()); - - let num_rounds_sc = vk.S_comm.N.log_2(); - let tau = transcript.squeeze(b"t")?; - let tau_coords = PowPolynomial::new(&tau, num_rounds_sc).coordinates(); - - // add claims about Az, Bz, and Cz to be checked later - // since all the three polynomials are opened at tau, - // we can combine them into a single polynomial opened at tau - let eval_vec = vec![ - self.eval_Az_at_tau, - self.eval_Bz_at_tau, - self.eval_Cz_at_tau, - ]; - - transcript.absorb(b"e", &eval_vec.as_slice()); - - transcript.absorb(b"e", &vec![comm_L_row, comm_L_col].as_slice()); - let comm_vec = vec![comm_Az, comm_Bz, comm_Cz]; - let c = transcript.squeeze(b"c")?; - let u: PolyEvalInstance = - PolyEvalInstance::batch(&comm_vec, tau_coords.clone(), &eval_vec, &c); - let claim = u.e; - - let gamma = transcript.squeeze(b"g")?; - - let r = transcript.squeeze(b"r")?; - - transcript.absorb( - b"l", - &vec![ - comm_t_plus_r_inv_row, - comm_w_plus_r_inv_row, - comm_t_plus_r_inv_col, - comm_w_plus_r_inv_col, - ] - .as_slice(), - ); - - let rho = transcript.squeeze(b"r")?; - - let num_claims = 10; - let s = transcript.squeeze(b"r")?; - let coeffs = powers(&s, num_claims); - let claim = (coeffs[7] + coeffs[8]) * claim; // rest are zeros - - // verify sc - let (claim_sc_final, rand_sc) = self.sc.verify(claim, num_rounds_sc, 3, &mut transcript)?; - - // verify claim_sc_final - let claim_sc_final_expected = { - let rand_eq_bound_rand_sc = PowPolynomial::new(&rho, num_rounds_sc).evaluate(&rand_sc); - let eq_tau: EqPolynomial<_> = PowPolynomial::new(&tau, num_rounds_sc).into(); - - let taus_bound_rand_sc = eq_tau.evaluate(&rand_sc); - let taus_masked_bound_rand_sc = - MaskedEqPolynomial::new(&eq_tau, vk.num_vars.log_2()).evaluate(&rand_sc); - - let eval_t_plus_r_row = { - let eval_addr_row = IdentityPolynomial::new(num_rounds_sc).evaluate(&rand_sc); - let eval_val_row = taus_bound_rand_sc; - let eval_t = eval_addr_row + gamma * eval_val_row; - eval_t + r - }; - - let eval_w_plus_r_row = { 
- let eval_addr_row = self.eval_row; - let eval_val_row = self.eval_L_row; - let eval_w = eval_addr_row + gamma * eval_val_row; - eval_w + r - }; - - let eval_t_plus_r_col = { - let eval_addr_col = IdentityPolynomial::new(num_rounds_sc).evaluate(&rand_sc); - - // memory contents is z, so we compute eval_Z from eval_W and eval_X - let eval_val_col = { - // rand_sc was padded, so we now remove the padding - let (factor, rand_sc_unpad) = { - let l = vk.S_comm.N.log_2() - (2 * vk.num_vars).log_2(); - - let mut factor = E::Scalar::ONE; - for r_p in rand_sc.iter().take(l) { - factor *= E::Scalar::ONE - r_p - } - - let rand_sc_unpad = rand_sc[l..].to_vec(); - - (factor, rand_sc_unpad) - }; - - let eval_X = { - // public IO is (u, X) - let X = vec![U.u] - .into_iter() - .chain(U.X.iter().cloned()) - .collect::>(); - - // evaluate the sparse polynomial at rand_sc_unpad[1..] - let poly_X = SparsePolynomial::new(rand_sc_unpad.len() - 1, X); - poly_X.evaluate(&rand_sc_unpad[1..]) - }; - - self.eval_W + factor * rand_sc_unpad[0] * eval_X - }; - let eval_t = eval_addr_col + gamma * eval_val_col; - eval_t + r - }; - - let eval_w_plus_r_col = { - let eval_addr_col = self.eval_col; - let eval_val_col = self.eval_L_col; - let eval_w = eval_addr_col + gamma * eval_val_col; - eval_w + r - }; - - let claim_mem_final_expected: E::Scalar = coeffs[0] - * (self.eval_t_plus_r_inv_row - self.eval_w_plus_r_inv_row) - + coeffs[1] * (self.eval_t_plus_r_inv_col - self.eval_w_plus_r_inv_col) - + coeffs[2] - * (rand_eq_bound_rand_sc - * (self.eval_t_plus_r_inv_row * eval_t_plus_r_row - self.eval_ts_row)) - + coeffs[3] - * (rand_eq_bound_rand_sc - * (self.eval_w_plus_r_inv_row * eval_w_plus_r_row - E::Scalar::ONE)) - + coeffs[4] - * (rand_eq_bound_rand_sc - * (self.eval_t_plus_r_inv_col * eval_t_plus_r_col - self.eval_ts_col)) - + coeffs[5] - * (rand_eq_bound_rand_sc - * (self.eval_w_plus_r_inv_col * eval_w_plus_r_col - E::Scalar::ONE)); - - let claim_outer_final_expected = coeffs[6] - * 
taus_bound_rand_sc - * (self.eval_Az * self.eval_Bz - U.u * self.eval_Cz - self.eval_E) - + coeffs[7] - * taus_bound_rand_sc - * (self.eval_Az + c * self.eval_Bz + c * c * self.eval_Cz); - let claim_inner_final_expected = coeffs[8] - * self.eval_L_row - * self.eval_L_col - * (self.eval_val_A + c * self.eval_val_B + c * c * self.eval_val_C); - - let claim_witness_final_expected = coeffs[9] * taus_masked_bound_rand_sc * self.eval_W; - - claim_mem_final_expected - + claim_outer_final_expected - + claim_inner_final_expected - + claim_witness_final_expected - }; - - if claim_sc_final_expected != claim_sc_final { - return Err(NovaError::InvalidSumcheckProof); - } - - let eval_vec = vec![ - self.eval_W, - self.eval_Az, - self.eval_Bz, - self.eval_Cz, - self.eval_E, - self.eval_L_row, - self.eval_L_col, - self.eval_val_A, - self.eval_val_B, - self.eval_val_C, - self.eval_t_plus_r_inv_row, - self.eval_row, - self.eval_w_plus_r_inv_row, - self.eval_ts_row, - self.eval_t_plus_r_inv_col, - self.eval_col, - self.eval_w_plus_r_inv_col, - self.eval_ts_col, - ]; - - let comm_vec = [ - U.comm_W, - comm_Az, - comm_Bz, - comm_Cz, - U.comm_E, - comm_L_row, - comm_L_col, - vk.S_comm.comm_val_A, - vk.S_comm.comm_val_B, - vk.S_comm.comm_val_C, - comm_t_plus_r_inv_row, - vk.S_comm.comm_row, - comm_w_plus_r_inv_row, - vk.S_comm.comm_ts_row, - comm_t_plus_r_inv_col, - vk.S_comm.comm_col, - comm_w_plus_r_inv_col, - vk.S_comm.comm_ts_col, - ]; - transcript.absorb(b"e", &eval_vec.as_slice()); // comm_vec is already in the transcript - let c = transcript.squeeze(b"c")?; - let u: PolyEvalInstance = - PolyEvalInstance::batch(&comm_vec, rand_sc.clone(), &eval_vec, &c); - - // verify - EE::verify( - &vk.vk_ee, - &mut transcript, - &u.c, - &rand_sc, - &u.e, - &self.eval_arg, - )?; - - Ok(()) - } -} - -// #[cfg(test)] -// mod tests { -// use ff::Field; -// use pasta_curves::Fq as Scalar; - -// use super::*; -// use crate::provider::PallasEngine; - -// #[test] -// fn test_padded() { -// let mut rng = 
rand::thread_rng(); -// let e = Scalar::random(&mut rng); -// let v: Vec = (0..10).map(|_| Scalar::random(&mut -// rng)).collect(); let n = 20; - -// let result = padded::(&v, n, &e); - -// assert_eq!(result.len(), n); -// assert_eq!(&result[..10], &v[..]); -// assert!(result[10..].iter().all(|&i| i == e)); -// } -// } diff --git a/src/spartan/snark.rs b/src/spartan/snark.rs deleted file mode 100644 index 1973615..0000000 --- a/src/spartan/snark.rs +++ /dev/null @@ -1,560 +0,0 @@ -//! This module implements `RelaxedR1CSSNARKTrait` using Spartan that is generic -//! over the polynomial commitment and evaluation argument (i.e., a PCS) -//! This version of Spartan does not use preprocessing so the verifier keeps the -//! entire description of R1CS matrices. This is essentially optimal for the -//! verifier when using an IPA-based polynomial commitment scheme. - -use std::sync::Arc; - -use ff::Field; -use itertools::Itertools as _; -use once_cell::sync::OnceCell; -use rayon::prelude::*; -use serde::{Deserialize, Serialize}; - -use crate::{ - digest::{DigestComputer, SimpleDigestible}, - errors::NovaError, - r1cs::{R1CSShape, RelaxedR1CSInstance, RelaxedR1CSWitness, SparseMatrix}, - spartan::{ - compute_eval_table_sparse, - polys::{ - eq::EqPolynomial, - multilinear::{MultilinearPolynomial, SparsePolynomial}, - power::PowPolynomial, - }, - powers, - sumcheck::SumcheckProof, - PolyEvalInstance, PolyEvalWitness, - }, - traits::{ - evaluation::EvaluationEngineTrait, - snark::{DigestHelperTrait, RelaxedR1CSSNARKTrait}, - Engine, TranscriptEngineTrait, - }, - CommitmentKey, -}; - -/// A type that represents the prover's key -#[derive(Debug, Clone)] -pub struct ProverKey> { - pub pk_ee: EE::ProverKey, - pub vk_digest: E::Scalar, // digest of the verifier's key -} - -/// A type that represents the verifier's key -#[derive(Debug, Clone, Serialize)] -#[serde(bound = "")] -pub struct VerifierKey> { - vk_ee: EE::VerifierKey, - S: R1CSShape, - #[serde(skip, default = 
"OnceCell::new")] - digest: OnceCell, -} - -impl> SimpleDigestible for VerifierKey {} - -impl> VerifierKey { - fn new(shape: R1CSShape, vk_ee: EE::VerifierKey) -> Self { - Self { - vk_ee, - S: shape, - digest: OnceCell::new(), - } - } -} - -impl> DigestHelperTrait for VerifierKey { - /// Returns the digest of the verifier's key. - fn digest(&self) -> E::Scalar { - self.digest - .get_or_try_init(|| { - let dc = DigestComputer::::new(self); - dc.digest() - }) - .cloned() - .expect("Failure to retrieve digest!") - } -} - -/// A succinct proof of knowledge of a witness to a relaxed R1CS instance -/// The proof is produced using Spartan's combination of the sum-check and -/// the commitment to a vector viewed as a polynomial commitment -#[derive(Clone, Debug, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct RelaxedR1CSSNARK> { - sc_proof_outer: SumcheckProof, - claims_outer: (E::Scalar, E::Scalar, E::Scalar), - eval_E: E::Scalar, - sc_proof_inner: SumcheckProof, - eval_W: E::Scalar, - sc_proof_batch: SumcheckProof, - evals_batch: Vec, - eval_arg: EE::EvaluationArgument, -} - -impl> RelaxedR1CSSNARKTrait for RelaxedR1CSSNARK { - type ProverKey = ProverKey; - type VerifierKey = VerifierKey; - - fn initialize_pk( - ck: Arc>, - vk_digest: ::Scalar, - ) -> Result { - todo!("not implemented for nova snarks"); - } - - fn setup( - ck: Arc>, - S: &R1CSShape, - ) -> Result<(Self::ProverKey, Self::VerifierKey), NovaError> { - let (pk_ee, vk_ee) = EE::setup(ck); - - let S = S.pad(); - - let vk: VerifierKey = VerifierKey::new(S, vk_ee); - - let pk = ProverKey { - pk_ee, - vk_digest: vk.digest(), - }; - - Ok((pk, vk)) - } - - /// produces a succinct proof of satisfiability of a `RelaxedR1CS` instance - #[tracing::instrument(skip_all, name = "SNARK::prove")] - fn prove( - ck: &CommitmentKey, - pk: &Self::ProverKey, - S: &R1CSShape, - U: &RelaxedR1CSInstance, - W: &RelaxedR1CSWitness, - ) -> Result { - // pad the R1CSShape - let S = S.pad(); - // sanity check that R1CSShape 
has all required size characteristics - assert!(S.is_regular_shape()); - - let W = W.pad(&S); // pad the witness - let mut transcript = E::TE::new(b"RelaxedR1CSSNARK"); - - // append the digest of vk (which includes R1CS matrices) and the - // RelaxedR1CSInstance to the transcript - transcript.absorb(b"vk", &pk.vk_digest); - transcript.absorb(b"U", U); - - // compute the full satisfying assignment by concatenating W.W, U.u, and U.X - let mut z = [W.W.clone(), vec![U.u], U.X.clone()].concat(); - - let (num_rounds_x, num_rounds_y) = ( - usize::try_from(S.num_cons.ilog2()).unwrap(), - (usize::try_from(S.num_vars.ilog2()).unwrap() + 1), - ); - - // outer sum-check - let tau: EqPolynomial<_> = - PowPolynomial::new(&transcript.squeeze(b"t")?, num_rounds_x).into(); - - let mut poly_tau = MultilinearPolynomial::new(tau.evals()); - let (mut poly_Az, mut poly_Bz, poly_Cz, mut poly_uCz_E) = { - let (poly_Az, poly_Bz, poly_Cz) = S.multiply_vec(&z)?; - let poly_uCz_E = (0..S.num_cons) - .into_par_iter() - .map(|i| U.u * poly_Cz[i] + W.E[i]) - .collect::>(); - ( - MultilinearPolynomial::new(poly_Az), - MultilinearPolynomial::new(poly_Bz), - MultilinearPolynomial::new(poly_Cz), - MultilinearPolynomial::new(poly_uCz_E), - ) - }; - - let comb_func_outer = |poly_A_comp: &E::Scalar, - poly_B_comp: &E::Scalar, - poly_C_comp: &E::Scalar, - poly_D_comp: &E::Scalar| - -> E::Scalar { - *poly_A_comp * (*poly_B_comp * *poly_C_comp - *poly_D_comp) - }; - let (sc_proof_outer, r_x, claims_outer) = SumcheckProof::prove_cubic_with_additive_term( - &E::Scalar::ZERO, // claim is zero - num_rounds_x, - &mut poly_tau, - &mut poly_Az, - &mut poly_Bz, - &mut poly_uCz_E, - comb_func_outer, - &mut transcript, - )?; - - // claims from the end of sum-check - let (claim_Az, claim_Bz): (E::Scalar, E::Scalar) = (claims_outer[1], claims_outer[2]); - let chis_r_x = EqPolynomial::evals_from_points(&r_x); - - let claim_Cz = MultilinearPolynomial::evaluate_with_chis(poly_Cz.evaluations(), &chis_r_x); - let eval_E 
= MultilinearPolynomial::evaluate_with_chis(&W.E, &chis_r_x); - transcript.absorb( - b"claims_outer", - &[claim_Az, claim_Bz, claim_Cz, eval_E].as_slice(), - ); - - // inner sum-check - let r = transcript.squeeze(b"r")?; - let claim_inner_joint = claim_Az + r * claim_Bz + r * r * claim_Cz; - - let poly_ABC = { - // compute the initial evaluation table for R(\tau, x) - let evals_rx = EqPolynomial::evals_from_points(&r_x.clone()); - - let (evals_A, evals_B, evals_C) = compute_eval_table_sparse(&S, &evals_rx); - - assert_eq!(evals_A.len(), evals_B.len()); - assert_eq!(evals_A.len(), evals_C.len()); - (0..evals_A.len()) - .into_par_iter() - .map(|i| evals_A[i] + r * evals_B[i] + r * r * evals_C[i]) - .collect::>() - }; - - let poly_z = { - z.resize(S.num_vars * 2, E::Scalar::ZERO); - z - }; - - let comb_func = |poly_A_comp: &E::Scalar, poly_B_comp: &E::Scalar| -> E::Scalar { - *poly_A_comp * *poly_B_comp - }; - let (sc_proof_inner, r_y, _claims_inner) = SumcheckProof::prove_quad( - &claim_inner_joint, - num_rounds_y, - &mut MultilinearPolynomial::new(poly_ABC), - &mut MultilinearPolynomial::new(poly_z), - comb_func, - &mut transcript, - )?; - - // Add additional claims about W and E polynomials to the list from CC - // We will reduce a vector of claims of evaluations at different points into - // claims about them at the same point. For example, eval_W =? - // W(r_y[1..]) and eval_E =? E(r_x) into two claims: eval_W_prime =? - // W(rz) and eval_E_prime =? E(rz) We can them combine the two into one: - // eval_W_prime + gamma * eval_E_prime =? (W + gamma*E)(rz), where gamma - // is a public challenge Since commitments to W and E are homomorphic, - // the verifier can compute a commitment to the batched polynomial. 
- let eval_W = MultilinearPolynomial::evaluate_with(&W.W, &r_y[1..]); - - let w_vec = vec![PolyEvalWitness { p: W.W }, PolyEvalWitness { p: W.E }]; - let u_vec = vec![ - PolyEvalInstance { - c: U.comm_W, - x: r_y[1..].to_vec(), - e: eval_W, - }, - PolyEvalInstance { - c: U.comm_E, - x: r_x, - e: eval_E, - }, - ]; - - let (batched_u, batched_w, sc_proof_batch, claims_batch_left) = - batch_eval_reduce(u_vec, &w_vec, &mut transcript)?; - - let eval_arg = EE::prove( - ck, - &pk.pk_ee, - &mut transcript, - &batched_u.c, - &batched_w.p, - &batched_u.x, - &batched_u.e, - )?; - - Ok(Self { - sc_proof_outer, - claims_outer: (claim_Az, claim_Bz, claim_Cz), - eval_E, - sc_proof_inner, - eval_W, - sc_proof_batch, - evals_batch: claims_batch_left, - eval_arg, - }) - } - - /// verifies a proof of satisfiability of a `RelaxedR1CS` instance - fn verify(&self, vk: &Self::VerifierKey, U: &RelaxedR1CSInstance) -> Result<(), NovaError> { - let mut transcript = E::TE::new(b"RelaxedR1CSSNARK"); - - // append the digest of R1CS matrices and the RelaxedR1CSInstance to the - // transcript - transcript.absorb(b"vk", &vk.digest()); - transcript.absorb(b"U", U); - - let (num_rounds_x, num_rounds_y) = ( - usize::try_from(vk.S.num_cons.ilog2()).unwrap(), - (usize::try_from(vk.S.num_vars.ilog2()).unwrap() + 1), - ); - - // outer sum-check - let tau: EqPolynomial<_> = - PowPolynomial::new(&transcript.squeeze(b"t")?, num_rounds_x).into(); - - let (claim_outer_final, r_x) = - self.sc_proof_outer - .verify(E::Scalar::ZERO, num_rounds_x, 3, &mut transcript)?; - - // verify claim_outer_final - let (claim_Az, claim_Bz, claim_Cz) = self.claims_outer; - let taus_bound_rx = tau.evaluate(&r_x); - let claim_outer_final_expected = - taus_bound_rx * (claim_Az * claim_Bz - U.u * claim_Cz - self.eval_E); - if claim_outer_final != claim_outer_final_expected { - return Err(NovaError::InvalidSumcheckProof); - } - - transcript.absorb( - b"claims_outer", - &[ - self.claims_outer.0, - self.claims_outer.1, - 
self.claims_outer.2, - self.eval_E, - ] - .as_slice(), - ); - - // inner sum-check - let r = transcript.squeeze(b"r")?; - let claim_inner_joint = - self.claims_outer.0 + r * self.claims_outer.1 + r * r * self.claims_outer.2; - - let (claim_inner_final, r_y) = - self.sc_proof_inner - .verify(claim_inner_joint, num_rounds_y, 2, &mut transcript)?; - - // verify claim_inner_final - let eval_Z = { - let eval_X = { - // public IO is (u, X) - let X = vec![U.u] - .into_iter() - .chain(U.X.iter().cloned()) - .collect::>(); - SparsePolynomial::new(usize::try_from(vk.S.num_vars.ilog2()).unwrap(), X) - .evaluate(&r_y[1..]) - }; - (E::Scalar::ONE - r_y[0]) * self.eval_W + r_y[0] * eval_X - }; - - // compute evaluations of R1CS matrices - let multi_evaluate = |M_vec: &[&SparseMatrix], - r_x: &[E::Scalar], - r_y: &[E::Scalar]| - -> Vec { - let evaluate_with_table = - |M: &SparseMatrix, T_x: &[E::Scalar], T_y: &[E::Scalar]| -> E::Scalar { - M.par_iter_rows() - .enumerate() - .map(|(row_idx, row)| { - M.get_row(row) - .map(|(val, col_idx)| T_x[row_idx] * T_y[*col_idx] * val) - .sum::() - }) - .sum() - }; - - let (T_x, T_y) = rayon::join( - || EqPolynomial::evals_from_points(r_x), - || EqPolynomial::evals_from_points(r_y), - ); - - (0..M_vec.len()) - .into_par_iter() - .map(|i| evaluate_with_table(M_vec[i], &T_x, &T_y)) - .collect() - }; - - let evals = multi_evaluate(&[&vk.S.A, &vk.S.B, &vk.S.C], &r_x, &r_y); - - let claim_inner_final_expected = (evals[0] + r * evals[1] + r * r * evals[2]) * eval_Z; - if claim_inner_final != claim_inner_final_expected { - return Err(NovaError::InvalidSumcheckProof); - } - - // add claims about W and E polynomials - let u_vec: Vec> = vec![ - PolyEvalInstance { - c: U.comm_W, - x: r_y[1..].to_vec(), - e: self.eval_W, - }, - PolyEvalInstance { - c: U.comm_E, - x: r_x, - e: self.eval_E, - }, - ]; - - let batched_u = batch_eval_verify( - u_vec, - &mut transcript, - &self.sc_proof_batch, - &self.evals_batch, - )?; - - // verify - EE::verify( - &vk.vk_ee, 
- &mut transcript, - &batched_u.c, - &batched_u.x, - &batched_u.e, - &self.eval_arg, - )?; - - Ok(()) - } -} - -/// Reduces a batch of polynomial evaluation claims using Sumcheck -/// to a single claim at the same point. -/// -/// # Details -/// -/// We are given as input a list of instance/witness pairs -/// u = [(Cᵢ, xᵢ, eᵢ)], w = [Pᵢ], such that -/// - nᵢ = |xᵢ| -/// - Cᵢ = Commit(Pᵢ) -/// - eᵢ = Pᵢ(xᵢ) -/// - |Pᵢ| = 2^nᵢ -/// -/// We allow the polynomial Pᵢ to have different sizes, by appropriately scaling -/// the claims and resulting evaluations from Sumcheck. -pub(in crate::spartan) fn batch_eval_reduce( - u_vec: Vec>, - w_vec: &[PolyEvalWitness], - transcript: &mut E::TE, -) -> Result< - ( - PolyEvalInstance, - PolyEvalWitness, - SumcheckProof, - Vec, - ), - NovaError, -> { - let num_claims = u_vec.len(); - assert_eq!(w_vec.len(), num_claims); - - // Compute nᵢ and n = maxᵢ{nᵢ} - let num_rounds = u_vec.iter().map(|u| u.x.len()).collect::>(); - - // Check polynomials match number of variables, i.e. 
|Pᵢ| = 2^nᵢ - zip_with_for_each!(iter, (w_vec, num_rounds), |w, num_vars| assert_eq!( - w.p.len(), - 1 << num_vars - )); - - // generate a challenge, and powers of it for random linear combination - let rho = transcript.squeeze(b"r")?; - let powers_of_rho = powers(&rho, num_claims); - - let (claims, u_xs, comms): (Vec<_>, Vec<_>, Vec<_>) = - u_vec.into_iter().map(|u| (u.e, u.x, u.c)).multiunzip(); - - // Create clones of polynomials to be given to Sumcheck - // Pᵢ(X) - let polys_P: Vec> = w_vec - .iter() - .map(|w| MultilinearPolynomial::new(w.p.clone())) - .collect(); - // eq(xᵢ, X) - let polys_eq: Vec> = u_xs - .into_iter() - .map(|ux| MultilinearPolynomial::new(EqPolynomial::evals_from_points(&ux))) - .collect(); - - // For each i, check eᵢ = ∑ₓ Pᵢ(x)eq(xᵢ,x), where x ∈ {0,1}^nᵢ - let comb_func = |poly_P: &E::Scalar, poly_eq: &E::Scalar| -> E::Scalar { *poly_P * *poly_eq }; - let (sc_proof_batch, r, claims_batch) = SumcheckProof::prove_quad_batch( - &claims, - &num_rounds, - polys_P, - polys_eq, - &powers_of_rho, - comb_func, - transcript, - )?; - - let (claims_batch_left, _): (Vec, Vec) = claims_batch; - - transcript.absorb(b"l", &claims_batch_left.as_slice()); - - // we now combine evaluation claims at the same point r into one - let gamma = transcript.squeeze(b"g")?; - - let u_joint = - PolyEvalInstance::batch_diff_size(&comms, &claims_batch_left, &num_rounds, r, gamma); - - // P = ∑ᵢ γⁱ⋅Pᵢ - let w_joint = - PolyEvalWitness::batch_diff_size(&w_vec.iter().by_ref().collect::>(), gamma); - - Ok((u_joint, w_joint, sc_proof_batch, claims_batch_left)) -} - -/// Verifies a batch of polynomial evaluation claims using Sumcheck -/// reducing them to a single claim at the same point. 
-pub(in crate::spartan) fn batch_eval_verify( - u_vec: Vec>, - transcript: &mut E::TE, - sc_proof_batch: &SumcheckProof, - evals_batch: &[E::Scalar], -) -> Result, NovaError> { - let num_claims = u_vec.len(); - assert_eq!(evals_batch.len(), num_claims); - - // generate a challenge - let rho = transcript.squeeze(b"r")?; - let powers_of_rho = powers(&rho, num_claims); - - // Compute nᵢ and n = maxᵢ{nᵢ} - let num_rounds = u_vec.iter().map(|u| u.x.len()).collect::>(); - let num_rounds_max = *num_rounds.iter().max().unwrap(); - - let claims = u_vec.iter().map(|u| u.e).collect::>(); - - let (claim_batch_final, r) = - sc_proof_batch.verify_batch(&claims, &num_rounds, &powers_of_rho, 2, transcript)?; - - let claim_batch_final_expected = { - let evals_r = u_vec.iter().map(|u| { - let (_, r_hi) = r.split_at(num_rounds_max - u.x.len()); - EqPolynomial::new(r_hi.to_vec()).evaluate(&u.x) - }); - - zip_with!( - (evals_r, evals_batch.iter(), powers_of_rho.iter()), - |e_i, p_i, rho_i| e_i * *p_i * rho_i - ) - .sum() - }; - - if claim_batch_final != claim_batch_final_expected { - return Err(NovaError::InvalidSumcheckProof); - } - - transcript.absorb(b"l", &evals_batch); - - // we now combine evaluation claims at the same point r into one - let gamma = transcript.squeeze(b"g")?; - - let comms = u_vec.into_iter().map(|u| u.c).collect::>(); - - let u_joint = PolyEvalInstance::batch_diff_size(&comms, evals_batch, &num_rounds, r, gamma); - - Ok(u_joint) -} diff --git a/src/spartan/sumcheck/engine.rs b/src/spartan/sumcheck/engine.rs deleted file mode 100644 index 5e41d03..0000000 --- a/src/spartan/sumcheck/engine.rs +++ /dev/null @@ -1,630 +0,0 @@ -use ff::Field; -use rayon::prelude::*; - -use crate::{ - provider::util::field::batch_invert, - spartan::{ - math::Math, - polys::{ - eq::EqPolynomial, masked_eq::MaskedEqPolynomial, multilinear::MultilinearPolynomial, - power::PowPolynomial, - }, - sumcheck::SumcheckProof, - }, - traits::commitment::CommitmentEngineTrait, - Commitment, 
CommitmentKey, Engine, NovaError, -}; - -/// Defines a trait for implementing sum-check in a generic manner -pub trait SumcheckEngine: Send + Sync { - /// returns the initial claims - fn initial_claims(&self) -> Vec; - - /// degree of the sum-check polynomial - fn degree(&self) -> usize; - - /// the size of the polynomials - fn size(&self) -> usize; - - /// returns evaluation points at 0, 2, d-1 (where d is the degree of the - /// sum-check polynomial) - fn evaluation_points(&self) -> Vec>; - - /// bounds a variable in the constituent polynomials - fn bound(&mut self, r: &E::Scalar); - - /// returns the final claims - fn final_claims(&self) -> Vec>; -} - -/// The [`WitnessBoundSumcheck`] ensures that the witness polynomial W defined -/// over n = log(N) variables, is zero outside of the first `num_vars = 2^m` -/// entries. -/// -/// # Details -/// -/// The `W` polynomial is padded with zeros to size N = 2^n. -/// The `masked_eq` polynomials is defined as with regards to a random challenge -/// `tau` as the eq(tau) polynomial, where the first 2^m evaluations to 0. -/// -/// The instance is given by -/// `0 = ∑_{0≤i<2^n} masked_eq[i] * W[i]`. -/// It is equivalent to the expression -/// `0 = ∑_{2^m≤i<2^n} eq[i] * W[i]` -/// Since `eq` is random, the instance is only satisfied if `W[2^{m}..] = 0`. 
-pub(in crate::spartan) struct WitnessBoundSumcheck { - poly_W: MultilinearPolynomial, - poly_masked_eq: MultilinearPolynomial, -} - -impl WitnessBoundSumcheck { - pub fn new(tau: E::Scalar, poly_W_padded: Vec, num_vars: usize) -> Self { - let num_vars_log = num_vars.log_2(); - // When num_vars = num_rounds, we shouldn't have to prove anything - // but we still want this instance to compute the evaluation of W - let num_rounds = poly_W_padded.len().log_2(); - assert!(num_vars_log < num_rounds); - - let tau_coords = PowPolynomial::new(&tau, num_rounds).coordinates(); - let poly_masked_eq_evals = - MaskedEqPolynomial::new(&EqPolynomial::new(tau_coords), num_vars_log).evals(); - - Self { - poly_W: MultilinearPolynomial::new(poly_W_padded), - poly_masked_eq: MultilinearPolynomial::new(poly_masked_eq_evals), - } - } -} -impl SumcheckEngine for WitnessBoundSumcheck { - fn initial_claims(&self) -> Vec { - vec![E::Scalar::ZERO] - } - - fn degree(&self) -> usize { - 3 - } - - fn size(&self) -> usize { - assert_eq!(self.poly_W.len(), self.poly_masked_eq.len()); - self.poly_W.len() - } - - fn evaluation_points(&self) -> Vec> { - let comb_func = |poly_A_comp: &E::Scalar, - poly_B_comp: &E::Scalar, - _: &E::Scalar| - -> E::Scalar { *poly_A_comp * *poly_B_comp }; - - let (eval_point_0, eval_point_2, eval_point_3) = - SumcheckProof::::compute_eval_points_cubic( - &self.poly_masked_eq, - &self.poly_W, - &self.poly_W, // unused - &comb_func, - ); - - vec![vec![eval_point_0, eval_point_2, eval_point_3]] - } - - fn bound(&mut self, r: &E::Scalar) { - [&mut self.poly_W, &mut self.poly_masked_eq] - .par_iter_mut() - .for_each(|poly| poly.bind_poly_var_top(r)); - } - - fn final_claims(&self) -> Vec> { - vec![vec![self.poly_W[0], self.poly_masked_eq[0]]] - } -} - -pub(in crate::spartan) struct MemorySumcheckInstance { - // row - w_plus_r_row: MultilinearPolynomial, - t_plus_r_row: MultilinearPolynomial, - t_plus_r_inv_row: MultilinearPolynomial, - w_plus_r_inv_row: MultilinearPolynomial, 
- ts_row: MultilinearPolynomial, - - // col - w_plus_r_col: MultilinearPolynomial, - t_plus_r_col: MultilinearPolynomial, - t_plus_r_inv_col: MultilinearPolynomial, - w_plus_r_inv_col: MultilinearPolynomial, - ts_col: MultilinearPolynomial, - - // eq - poly_eq: MultilinearPolynomial, - - // zero polynomial - poly_zero: MultilinearPolynomial, -} - -impl MemorySumcheckInstance { - /// Computes witnesses for `MemoryInstanceSumcheck` - /// - /// # Description - /// We use the logUp protocol to prove that - /// ∑ TS[i]/(T[i] + r) - 1/(W[i] + r) = 0 - /// where - /// T_row[i] = mem_row[i] * gamma + i - /// = eq(tau)[i] * gamma + i - /// W_row[i] = L_row[i] * gamma + addr_row[i] - /// = eq(tau)[row[i]] * gamma + addr_row[i] - /// T_col[i] = mem_col[i] * gamma + i - /// = z[i] * gamma + i - /// W_col[i] = addr_col[i] * gamma + addr_col[i] - /// = z[col[i]] * gamma + addr_col[i] - /// and - /// `TS_row`, `TS_col` are integer-valued vectors representing the number - /// of reads to each memory cell of `L_row`, `L_col` - /// - /// The function returns oracles for the polynomials TS[i]/(T[i] + r), - /// 1/(W[i] + r), as well as auxiliary polynomials T[i] + r, W[i] + r - pub fn compute_oracles( - ck: &CommitmentKey, - r: &E::Scalar, - gamma: &E::Scalar, - mem_row: &[E::Scalar], - addr_row: &[E::Scalar], - L_row: &[E::Scalar], - ts_row: &[E::Scalar], - mem_col: &[E::Scalar], - addr_col: &[E::Scalar], - L_col: &[E::Scalar], - ts_col: &[E::Scalar], - ) -> Result<([Commitment; 4], [Vec; 4], [Vec; 4]), NovaError> { - // hash the tuples of (addr,val) memory contents and read responses into a - // single field element using `hash_func` - let hash_func_vec = |mem: &[E::Scalar], - addr: &[E::Scalar], - lookups: &[E::Scalar]| - -> (Vec, Vec) { - let hash_func = - |addr: &E::Scalar, val: &E::Scalar| -> E::Scalar { *val * gamma + *addr }; - assert_eq!(addr.len(), lookups.len()); - rayon::join( - || { - (0..mem.len()) - .map(|i| hash_func(&E::Scalar::from(i as u64), &mem[i])) - 
.collect::>() - }, - || { - (0..addr.len()) - .map(|i| hash_func(&addr[i], &lookups[i])) - .collect::>() - }, - ) - }; - - let ((T_row, W_row), (T_col, W_col)) = rayon::join( - || hash_func_vec(mem_row, addr_row, L_row), - || hash_func_vec(mem_col, addr_col, L_col), - ); - - // compute vectors TS[i]/(T[i] + r) and 1/(W[i] + r) - let helper = |T: &[E::Scalar], - W: &[E::Scalar], - TS: &[E::Scalar], - r: &E::Scalar| - -> ( - ( - Result, NovaError>, - Result, NovaError>, - ), - (Vec, Vec), - ) { - rayon::join( - || { - rayon::join( - || { - let inv = - batch_invert(T.par_iter().map(|e| *e + *r).collect::>())?; - - // compute inv[i] * TS[i] in parallel - Ok( - zip_with!((inv.into_par_iter(), TS.par_iter()), |e1, e2| e1 * *e2) - .collect::>(), - ) - }, - || batch_invert(W.par_iter().map(|e| *e + *r).collect::>()), - ) - }, - || { - rayon::join( - || T.par_iter().map(|e| *e + *r).collect(), - || W.par_iter().map(|e| *e + *r).collect(), - ) - }, - ) - }; - - let ( - ((t_plus_r_inv_row, w_plus_r_inv_row), (t_plus_r_row, w_plus_r_row)), - ((t_plus_r_inv_col, w_plus_r_inv_col), (t_plus_r_col, w_plus_r_col)), - ) = rayon::join( - || helper(&T_row, &W_row, ts_row, r), - || helper(&T_col, &W_col, ts_col, r), - ); - - let t_plus_r_inv_row = t_plus_r_inv_row?; - let w_plus_r_inv_row = w_plus_r_inv_row?; - let t_plus_r_inv_col = t_plus_r_inv_col?; - let w_plus_r_inv_col = w_plus_r_inv_col?; - - let ( - (comm_t_plus_r_inv_row, comm_w_plus_r_inv_row), - (comm_t_plus_r_inv_col, comm_w_plus_r_inv_col), - ) = rayon::join( - || { - rayon::join( - || E::CE::commit(ck, &t_plus_r_inv_row), - || E::CE::commit(ck, &w_plus_r_inv_row), - ) - }, - || { - rayon::join( - || E::CE::commit(ck, &t_plus_r_inv_col), - || E::CE::commit(ck, &w_plus_r_inv_col), - ) - }, - ); - - let comm_vec = [ - comm_t_plus_r_inv_row, - comm_w_plus_r_inv_row, - comm_t_plus_r_inv_col, - comm_w_plus_r_inv_col, - ]; - - let poly_vec = [ - t_plus_r_inv_row, - w_plus_r_inv_row, - t_plus_r_inv_col, - w_plus_r_inv_col, - ]; - 
- let aux_poly_vec = [t_plus_r_row, w_plus_r_row, t_plus_r_col, w_plus_r_col]; - - Ok((comm_vec, poly_vec, aux_poly_vec)) - } - - pub fn new( - polys_oracle: [Vec; 4], - polys_aux: [Vec; 4], - poly_eq: Vec, - ts_row: Vec, - ts_col: Vec, - ) -> Self { - let [t_plus_r_inv_row, w_plus_r_inv_row, t_plus_r_inv_col, w_plus_r_inv_col] = polys_oracle; - let [t_plus_r_row, w_plus_r_row, t_plus_r_col, w_plus_r_col] = polys_aux; - - let zero = vec![E::Scalar::ZERO; poly_eq.len()]; - - Self { - w_plus_r_row: MultilinearPolynomial::new(w_plus_r_row), - t_plus_r_row: MultilinearPolynomial::new(t_plus_r_row), - t_plus_r_inv_row: MultilinearPolynomial::new(t_plus_r_inv_row), - w_plus_r_inv_row: MultilinearPolynomial::new(w_plus_r_inv_row), - ts_row: MultilinearPolynomial::new(ts_row), - w_plus_r_col: MultilinearPolynomial::new(w_plus_r_col), - t_plus_r_col: MultilinearPolynomial::new(t_plus_r_col), - t_plus_r_inv_col: MultilinearPolynomial::new(t_plus_r_inv_col), - w_plus_r_inv_col: MultilinearPolynomial::new(w_plus_r_inv_col), - ts_col: MultilinearPolynomial::new(ts_col), - poly_eq: MultilinearPolynomial::new(poly_eq), - poly_zero: MultilinearPolynomial::new(zero), - } - } -} - -impl SumcheckEngine for MemorySumcheckInstance { - fn initial_claims(&self) -> Vec { - vec![E::Scalar::ZERO; 6] - } - - fn degree(&self) -> usize { - 3 - } - - fn size(&self) -> usize { - // sanity checks - assert_eq!(self.w_plus_r_row.len(), self.t_plus_r_row.len()); - assert_eq!(self.w_plus_r_row.len(), self.ts_row.len()); - assert_eq!(self.w_plus_r_row.len(), self.w_plus_r_col.len()); - assert_eq!(self.w_plus_r_row.len(), self.t_plus_r_col.len()); - assert_eq!(self.w_plus_r_row.len(), self.ts_col.len()); - - self.w_plus_r_row.len() - } - - fn evaluation_points(&self) -> Vec> { - let comb_func = |poly_A_comp: &E::Scalar, - poly_B_comp: &E::Scalar, - _poly_C_comp: &E::Scalar| - -> E::Scalar { *poly_A_comp - *poly_B_comp }; - - let comb_func2 = |poly_A_comp: &E::Scalar, - poly_B_comp: &E::Scalar, - 
poly_C_comp: &E::Scalar, - _poly_D_comp: &E::Scalar| - -> E::Scalar { - *poly_A_comp * (*poly_B_comp * *poly_C_comp - E::Scalar::ONE) - }; - - let comb_func3 = |poly_A_comp: &E::Scalar, - poly_B_comp: &E::Scalar, - poly_C_comp: &E::Scalar, - poly_D_comp: &E::Scalar| - -> E::Scalar { - *poly_A_comp * (*poly_B_comp * *poly_C_comp - *poly_D_comp) - }; - - // inv related evaluation points - // 0 = ∑ TS[i]/(T[i] + r) - 1/(W[i] + r) - let (eval_inv_0_row, eval_inv_2_row, eval_inv_3_row) = - SumcheckProof::::compute_eval_points_cubic( - &self.t_plus_r_inv_row, - &self.w_plus_r_inv_row, - &self.poly_zero, - &comb_func, - ); - - let (eval_inv_0_col, eval_inv_2_col, eval_inv_3_col) = - SumcheckProof::::compute_eval_points_cubic( - &self.t_plus_r_inv_col, - &self.w_plus_r_inv_col, - &self.poly_zero, - &comb_func, - ); - - // row related evaluation points - // 0 = ∑ eq[i] * (inv_T[i] * (T[i] + r) - TS[i])) - let (eval_T_0_row, eval_T_2_row, eval_T_3_row) = - SumcheckProof::::compute_eval_points_cubic_with_additive_term( - &self.poly_eq, - &self.t_plus_r_inv_row, - &self.t_plus_r_row, - &self.ts_row, - &comb_func3, - ); - // 0 = ∑ eq[i] * (inv_W[i] * (T[i] + r) - 1)) - let (eval_W_0_row, eval_W_2_row, eval_W_3_row) = - SumcheckProof::::compute_eval_points_cubic_with_additive_term( - &self.poly_eq, - &self.w_plus_r_inv_row, - &self.w_plus_r_row, - &self.poly_zero, - &comb_func2, - ); - - // column related evaluation points - let (eval_T_0_col, eval_T_2_col, eval_T_3_col) = - SumcheckProof::::compute_eval_points_cubic_with_additive_term( - &self.poly_eq, - &self.t_plus_r_inv_col, - &self.t_plus_r_col, - &self.ts_col, - &comb_func3, - ); - let (eval_W_0_col, eval_W_2_col, eval_W_3_col) = - SumcheckProof::::compute_eval_points_cubic_with_additive_term( - &self.poly_eq, - &self.w_plus_r_inv_col, - &self.w_plus_r_col, - &self.poly_zero, - &comb_func2, - ); - - vec![ - vec![eval_inv_0_row, eval_inv_2_row, eval_inv_3_row], - vec![eval_inv_0_col, eval_inv_2_col, eval_inv_3_col], - 
vec![eval_T_0_row, eval_T_2_row, eval_T_3_row], - vec![eval_W_0_row, eval_W_2_row, eval_W_3_row], - vec![eval_T_0_col, eval_T_2_col, eval_T_3_col], - vec![eval_W_0_col, eval_W_2_col, eval_W_3_col], - ] - } - - fn bound(&mut self, r: &E::Scalar) { - [ - &mut self.t_plus_r_row, - &mut self.t_plus_r_inv_row, - &mut self.w_plus_r_row, - &mut self.w_plus_r_inv_row, - &mut self.ts_row, - &mut self.t_plus_r_col, - &mut self.t_plus_r_inv_col, - &mut self.w_plus_r_col, - &mut self.w_plus_r_inv_col, - &mut self.ts_col, - &mut self.poly_eq, - ] - .par_iter_mut() - .for_each(|poly| poly.bind_poly_var_top(r)); - } - - fn final_claims(&self) -> Vec> { - let poly_row_final = vec![ - self.t_plus_r_inv_row[0], - self.w_plus_r_inv_row[0], - self.ts_row[0], - ]; - - let poly_col_final = vec![ - self.t_plus_r_inv_col[0], - self.w_plus_r_inv_col[0], - self.ts_col[0], - ]; - - vec![poly_row_final, poly_col_final] - } -} - -pub(in crate::spartan) struct OuterSumcheckInstance { - poly_tau: MultilinearPolynomial, - poly_Az: MultilinearPolynomial, - poly_Bz: MultilinearPolynomial, - poly_uCz_E: MultilinearPolynomial, - - poly_Mz: MultilinearPolynomial, - eval_Mz_at_tau: E::Scalar, - - poly_zero: MultilinearPolynomial, -} - -impl OuterSumcheckInstance { - pub fn new( - tau: Vec, - Az: Vec, - Bz: Vec, - uCz_E: Vec, - Mz: Vec, - eval_Mz_at_tau: &E::Scalar, - ) -> Self { - let zero = vec![E::Scalar::ZERO; tau.len()]; - Self { - poly_tau: MultilinearPolynomial::new(tau), - poly_Az: MultilinearPolynomial::new(Az), - poly_Bz: MultilinearPolynomial::new(Bz), - poly_uCz_E: MultilinearPolynomial::new(uCz_E), - poly_Mz: MultilinearPolynomial::new(Mz), - eval_Mz_at_tau: *eval_Mz_at_tau, - poly_zero: MultilinearPolynomial::new(zero), - } - } -} - -impl SumcheckEngine for OuterSumcheckInstance { - fn initial_claims(&self) -> Vec { - vec![E::Scalar::ZERO, self.eval_Mz_at_tau] - } - - fn degree(&self) -> usize { - 3 - } - - fn size(&self) -> usize { - assert_eq!(self.poly_tau.len(), self.poly_Az.len()); - 
assert_eq!(self.poly_tau.len(), self.poly_Bz.len()); - assert_eq!(self.poly_tau.len(), self.poly_uCz_E.len()); - assert_eq!(self.poly_tau.len(), self.poly_Mz.len()); - self.poly_tau.len() - } - - fn evaluation_points(&self) -> Vec> { - let comb_func = |poly_A_comp: &E::Scalar, - poly_B_comp: &E::Scalar, - poly_C_comp: &E::Scalar, - poly_D_comp: &E::Scalar| - -> E::Scalar { - *poly_A_comp * (*poly_B_comp * *poly_C_comp - *poly_D_comp) - }; - - let (eval_point_h_0, eval_point_h_2, eval_point_h_3) = - SumcheckProof::::compute_eval_points_cubic_with_additive_term( - &self.poly_tau, - &self.poly_Az, - &self.poly_Bz, - &self.poly_uCz_E, - &comb_func, - ); - - let comb_func2 = |poly_A_comp: &E::Scalar, - poly_B_comp: &E::Scalar, - _poly_C_comp: &E::Scalar| - -> E::Scalar { *poly_A_comp * *poly_B_comp }; - - let (eval_point_e_0, eval_point_e_2, eval_point_e_3) = - SumcheckProof::::compute_eval_points_cubic( - &self.poly_tau, - &self.poly_Mz, - &self.poly_zero, - &comb_func2, - ); - - vec![ - vec![eval_point_h_0, eval_point_h_2, eval_point_h_3], - vec![eval_point_e_0, eval_point_e_2, eval_point_e_3], - ] - } - - fn bound(&mut self, r: &E::Scalar) { - [ - &mut self.poly_tau, - &mut self.poly_Az, - &mut self.poly_Bz, - &mut self.poly_uCz_E, - &mut self.poly_Mz, - ] - .par_iter_mut() - .for_each(|poly| poly.bind_poly_var_top(r)); - } - - fn final_claims(&self) -> Vec> { - vec![vec![self.poly_Az[0], self.poly_Bz[0]]] - } -} - -pub(in crate::spartan) struct InnerSumcheckInstance { - pub(in crate::spartan) claim: E::Scalar, - pub(in crate::spartan) poly_L_row: MultilinearPolynomial, - pub(in crate::spartan) poly_L_col: MultilinearPolynomial, - pub(in crate::spartan) poly_val: MultilinearPolynomial, -} -impl InnerSumcheckInstance { - pub fn new( - claim: E::Scalar, - poly_L_row: MultilinearPolynomial, - poly_L_col: MultilinearPolynomial, - poly_val: MultilinearPolynomial, - ) -> Self { - Self { - claim, - poly_L_row, - poly_L_col, - poly_val, - } - } -} -impl SumcheckEngine for 
InnerSumcheckInstance { - fn initial_claims(&self) -> Vec { - vec![self.claim] - } - - fn degree(&self) -> usize { - 3 - } - - fn size(&self) -> usize { - assert_eq!(self.poly_L_row.len(), self.poly_val.len()); - assert_eq!(self.poly_L_row.len(), self.poly_L_col.len()); - self.poly_L_row.len() - } - - fn evaluation_points(&self) -> Vec> { - let (poly_A, poly_B, poly_C) = (&self.poly_L_row, &self.poly_L_col, &self.poly_val); - let comb_func = |poly_A_comp: &E::Scalar, - poly_B_comp: &E::Scalar, - poly_C_comp: &E::Scalar| - -> E::Scalar { *poly_A_comp * *poly_B_comp * *poly_C_comp }; - - let (eval_point_0, eval_point_2, eval_point_3) = - SumcheckProof::::compute_eval_points_cubic(poly_A, poly_B, poly_C, &comb_func); - - vec![vec![eval_point_0, eval_point_2, eval_point_3]] - } - - fn bound(&mut self, r: &E::Scalar) { - [ - &mut self.poly_L_row, - &mut self.poly_L_col, - &mut self.poly_val, - ] - .par_iter_mut() - .for_each(|poly| poly.bind_poly_var_top(r)); - } - - fn final_claims(&self) -> Vec> { - vec![vec![self.poly_L_row[0], self.poly_L_col[0]]] - } -} diff --git a/src/spartan/sumcheck/mod.rs b/src/spartan/sumcheck/mod.rs deleted file mode 100644 index 3a0cde4..0000000 --- a/src/spartan/sumcheck/mod.rs +++ /dev/null @@ -1,632 +0,0 @@ -use ff::Field; -use itertools::Itertools as _; -use rayon::prelude::*; -use serde::{Deserialize, Serialize}; - -use crate::{ - errors::NovaError, - spartan::polys::{ - multilinear::MultilinearPolynomial, - univariate::{CompressedUniPoly, UniPoly}, - }, - traits::{Engine, TranscriptEngineTrait}, -}; - -pub(in crate::spartan) mod engine; - -#[derive(Clone, Debug, Serialize, Deserialize)] -#[serde(bound = "")] -pub(crate) struct SumcheckProof { - compressed_polys: Vec>, -} - -impl SumcheckProof { - pub fn new(compressed_polys: Vec>) -> Self { - Self { compressed_polys } - } - - pub fn verify( - &self, - claim: E::Scalar, - num_rounds: usize, - degree_bound: usize, - transcript: &mut E::TE, - ) -> Result<(E::Scalar, Vec), NovaError> { - 
let mut e = claim; - let mut r: Vec = Vec::new(); - - // verify that there is a univariate polynomial for each round - if self.compressed_polys.len() != num_rounds { - return Err(NovaError::InvalidSumcheckProof); - } - - for i in 0..self.compressed_polys.len() { - let poly = self.compressed_polys[i].decompress(&e); - - // verify degree bound - if poly.degree() != degree_bound { - return Err(NovaError::InvalidSumcheckProof); - } - - // we do not need to check if poly(0) + poly(1) = e, as - // decompress() call above already ensures that holds - debug_assert_eq!(poly.eval_at_zero() + poly.eval_at_one(), e); - - // append the prover's message to the transcript - transcript.absorb(b"p", &poly); - - // derive the verifier's challenge for the next round - let r_i = transcript.squeeze(b"c")?; - - r.push(r_i); - - // evaluate the claimed degree-ell polynomial at r_i - e = poly.evaluate(&r_i); - } - - Ok((e, r)) - } - - pub fn verify_batch( - &self, - claims: &[E::Scalar], - num_rounds: &[usize], - coeffs: &[E::Scalar], - degree_bound: usize, - transcript: &mut E::TE, - ) -> Result<(E::Scalar, Vec), NovaError> { - let num_instances = claims.len(); - assert_eq!(num_rounds.len(), num_instances); - assert_eq!(coeffs.len(), num_instances); - - // n = maxᵢ{nᵢ} - let num_rounds_max = *num_rounds.iter().max().unwrap(); - - // Random linear combination of claims, - // where each claim is scaled by 2^{n-nᵢ} to account for the padding. 
- // - // claim = ∑ᵢ coeffᵢ⋅2^{n-nᵢ}⋅cᵢ - let claim = zip_with!( - ( - zip_with!(iter, (claims, num_rounds), |claim, num_rounds| { - let scaling_factor = 1 << (num_rounds_max - num_rounds); - E::Scalar::from(scaling_factor as u64) * claim - }), - coeffs.iter() - ), - |scaled_claim, coeff| scaled_claim * coeff - ) - .sum(); - - self.verify(claim, num_rounds_max, degree_bound, transcript) - } - - #[inline] - fn compute_eval_points_quad( - poly_A: &MultilinearPolynomial, - poly_B: &MultilinearPolynomial, - comb_func: &F, - ) -> (E::Scalar, E::Scalar) - where - F: Fn(&E::Scalar, &E::Scalar) -> E::Scalar + Sync, - { - let len = poly_A.len() / 2; - (0..len) - .into_par_iter() - .map(|i| { - // eval 0: bound_func is A(low) - let eval_point_0 = comb_func(&poly_A[i], &poly_B[i]); - - // eval 2: bound_func is -A(low) + 2*A(high) - let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i]; - let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i]; - let eval_point_2 = comb_func(&poly_A_bound_point, &poly_B_bound_point); - (eval_point_0, eval_point_2) - }) - .reduce( - || (E::Scalar::ZERO, E::Scalar::ZERO), - |a, b| (a.0 + b.0, a.1 + b.1), - ) - } - - pub fn prove_quad( - claim: &E::Scalar, - num_rounds: usize, - poly_A: &mut MultilinearPolynomial, - poly_B: &mut MultilinearPolynomial, - comb_func: F, - transcript: &mut E::TE, - ) -> Result<(Self, Vec, Vec), NovaError> - where - F: Fn(&E::Scalar, &E::Scalar) -> E::Scalar + Sync, - { - let mut r: Vec = Vec::new(); - let mut polys: Vec> = Vec::new(); - let mut claim_per_round = *claim; - for _ in 0..num_rounds { - let poly = { - let (eval_point_0, eval_point_2) = - Self::compute_eval_points_quad(poly_A, poly_B, &comb_func); - - let evals = vec![eval_point_0, claim_per_round - eval_point_0, eval_point_2]; - UniPoly::from_evals(&evals) - }; - - // append the prover's message to the transcript - transcript.absorb(b"p", &poly); - - // derive the verifier's challenge for the next round - let r_i = 
transcript.squeeze(b"c")?; - r.push(r_i); - polys.push(poly.compress()); - - // Set up next round - claim_per_round = poly.evaluate(&r_i); - - // bind all tables to the verifier's challenge - rayon::join( - || poly_A.bind_poly_var_top(&r_i), - || poly_B.bind_poly_var_top(&r_i), - ); - } - - Ok(( - Self { - compressed_polys: polys, - }, - r, - vec![poly_A[0], poly_B[0]], - )) - } - - pub fn prove_quad_batch( - claims: &[E::Scalar], - num_rounds: &[usize], - mut poly_A_vec: Vec>, - mut poly_B_vec: Vec>, - coeffs: &[E::Scalar], - comb_func: F, - transcript: &mut E::TE, - ) -> Result<(Self, Vec, (Vec, Vec)), NovaError> - where - F: Fn(&E::Scalar, &E::Scalar) -> E::Scalar + Sync, - { - let num_claims = claims.len(); - - assert_eq!(num_rounds.len(), num_claims); - assert_eq!(poly_A_vec.len(), num_claims); - assert_eq!(poly_B_vec.len(), num_claims); - assert_eq!(coeffs.len(), num_claims); - - for (i, &num_rounds) in num_rounds.iter().enumerate() { - let expected_size = 1 << num_rounds; - - // Direct indexing with the assumption that the index will always be in bounds - let a = &poly_A_vec[i]; - let b = &poly_B_vec[i]; - - for (l, polyname) in [(a.len(), "poly_A_vec"), (b.len(), "poly_B_vec")].iter() { - assert_eq!( - *l, expected_size, - "Mismatch in size for {} at index {}", - polyname, i - ); - } - } - - let num_rounds_max = *num_rounds.iter().max().unwrap(); - let mut e = zip_with!( - iter, - (claims, num_rounds, coeffs), - |claim, num_rounds, coeff| { - let scaled_claim = - E::Scalar::from((1 << (num_rounds_max - num_rounds)) as u64) * claim; - scaled_claim * coeff - } - ) - .sum(); - let mut r: Vec = Vec::new(); - let mut quad_polys: Vec> = Vec::new(); - - for current_round in 0..num_rounds_max { - let remaining_rounds = num_rounds_max - current_round; - let evals: Vec<(E::Scalar, E::Scalar)> = zip_with!( - par_iter, - (num_rounds, claims, poly_A_vec, poly_B_vec), - |num_rounds, claim, poly_A, poly_B| { - if remaining_rounds <= *num_rounds { - 
Self::compute_eval_points_quad(poly_A, poly_B, &comb_func) - } else { - let remaining_variables = remaining_rounds - num_rounds - 1; - let scaled_claim = - E::Scalar::from((1 << remaining_variables) as u64) * claim; - (scaled_claim, scaled_claim) - } - } - ) - .collect(); - - let evals_combined_0 = (0..evals.len()).map(|i| evals[i].0 * coeffs[i]).sum(); - let evals_combined_2 = (0..evals.len()).map(|i| evals[i].1 * coeffs[i]).sum(); - - let evals = vec![evals_combined_0, e - evals_combined_0, evals_combined_2]; - let poly = UniPoly::from_evals(&evals); - - // append the prover's message to the transcript - transcript.absorb(b"p", &poly); - - // derive the verifier's challenge for the next round - let r_i = transcript.squeeze(b"c")?; - r.push(r_i); - - // bound all tables to the verifier's challenge - zip_with_for_each!( - ( - num_rounds.par_iter(), - poly_A_vec.par_iter_mut(), - poly_B_vec.par_iter_mut() - ), - |num_rounds, poly_A, poly_B| { - if remaining_rounds <= *num_rounds { - let _ = rayon::join( - || poly_A.bind_poly_var_top(&r_i), - || poly_B.bind_poly_var_top(&r_i), - ); - } - } - ); - - e = poly.evaluate(&r_i); - quad_polys.push(poly.compress()); - } - poly_A_vec.iter().for_each(|p| assert_eq!(p.len(), 1)); - poly_B_vec.iter().for_each(|p| assert_eq!(p.len(), 1)); - - let poly_A_final = poly_A_vec - .into_iter() - .map(|poly| poly[0]) - .collect::>(); - let poly_B_final = poly_B_vec - .into_iter() - .map(|poly| poly[0]) - .collect::>(); - - let eval_expected = zip_with!( - iter, - (poly_A_final, poly_B_final, coeffs), - |eA, eB, coeff| comb_func(eA, eB) * coeff - ) - .sum::(); - assert_eq!(e, eval_expected); - - let claims_prod = (poly_A_final, poly_B_final); - - Ok((Self::new(quad_polys), r, claims_prod)) - } - - #[inline] - fn compute_eval_points_cubic( - poly_A: &MultilinearPolynomial, - poly_B: &MultilinearPolynomial, - poly_C: &MultilinearPolynomial, - comb_func: &F, - ) -> (E::Scalar, E::Scalar, E::Scalar) - where - F: Fn(&E::Scalar, &E::Scalar, 
&E::Scalar) -> E::Scalar + Sync, - { - let len = poly_A.len() / 2; - (0..len) - .into_par_iter() - .map(|i| { - // eval 0: bound_func is A(low) - let eval_point_0 = comb_func(&poly_A[i], &poly_B[i], &poly_C[i]); - - let poly_A_right_term = poly_A[len + i] - poly_A[i]; - let poly_B_right_term = poly_B[len + i] - poly_B[i]; - let poly_C_right_term = poly_C[len + i] - poly_C[i]; - - // eval 2: bound_func is -A(low) + 2*A(high) - let poly_A_bound_point = poly_A[len + i] + poly_A_right_term; - let poly_B_bound_point = poly_B[len + i] + poly_B_right_term; - let poly_C_bound_point = poly_C[len + i] + poly_C_right_term; - let eval_point_2 = comb_func( - &poly_A_bound_point, - &poly_B_bound_point, - &poly_C_bound_point, - ); - - // eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with - // bound_func applied to eval(2) - let poly_A_bound_point = poly_A_bound_point + poly_A_right_term; - let poly_B_bound_point = poly_B_bound_point + poly_B_right_term; - let poly_C_bound_point = poly_C_bound_point + poly_C_right_term; - let eval_point_3 = comb_func( - &poly_A_bound_point, - &poly_B_bound_point, - &poly_C_bound_point, - ); - (eval_point_0, eval_point_2, eval_point_3) - }) - .reduce( - || (E::Scalar::ZERO, E::Scalar::ZERO, E::Scalar::ZERO), - |a, b| (a.0 + b.0, a.1 + b.1, a.2 + b.2), - ) - } - - #[inline] - fn compute_eval_points_cubic_with_additive_term( - poly_A: &MultilinearPolynomial, - poly_B: &MultilinearPolynomial, - poly_C: &MultilinearPolynomial, - poly_D: &MultilinearPolynomial, - comb_func: &F, - ) -> (E::Scalar, E::Scalar, E::Scalar) - where - F: Fn(&E::Scalar, &E::Scalar, &E::Scalar, &E::Scalar) -> E::Scalar + Sync, - { - let len = poly_A.len() / 2; - (0..len) - .into_par_iter() - .map(|i| { - // eval 0: bound_func is A(low) - let eval_point_0 = comb_func(&poly_A[i], &poly_B[i], &poly_C[i], &poly_D[i]); - - let poly_A_right_term = poly_A[len + i] - poly_A[i]; - let poly_B_right_term = poly_B[len + i] - poly_B[i]; - let poly_C_right_term = 
poly_C[len + i] - poly_C[i]; - let poly_D_right_term = poly_D[len + i] - poly_D[i]; - - // eval 2: bound_func is -A(low) + 2*A(high) - let poly_A_bound_point = poly_A[len + i] + poly_A_right_term; - let poly_B_bound_point = poly_B[len + i] + poly_B_right_term; - let poly_C_bound_point = poly_C[len + i] + poly_C_right_term; - let poly_D_bound_point = poly_D[len + i] + poly_D_right_term; - let eval_point_2 = comb_func( - &poly_A_bound_point, - &poly_B_bound_point, - &poly_C_bound_point, - &poly_D_bound_point, - ); - - // eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with - // bound_func applied to eval(2) - let poly_A_bound_point = poly_A_bound_point + poly_A_right_term; - let poly_B_bound_point = poly_B_bound_point + poly_B_right_term; - let poly_C_bound_point = poly_C_bound_point + poly_C_right_term; - let poly_D_bound_point = poly_D_bound_point + poly_D_right_term; - let eval_point_3 = comb_func( - &poly_A_bound_point, - &poly_B_bound_point, - &poly_C_bound_point, - &poly_D_bound_point, - ); - (eval_point_0, eval_point_2, eval_point_3) - }) - .reduce( - || (E::Scalar::ZERO, E::Scalar::ZERO, E::Scalar::ZERO), - |a, b| (a.0 + b.0, a.1 + b.1, a.2 + b.2), - ) - } - - pub fn prove_cubic_with_additive_term( - claim: &E::Scalar, - num_rounds: usize, - poly_A: &mut MultilinearPolynomial, - poly_B: &mut MultilinearPolynomial, - poly_C: &mut MultilinearPolynomial, - poly_D: &mut MultilinearPolynomial, - comb_func: F, - transcript: &mut E::TE, - ) -> Result<(Self, Vec, Vec), NovaError> - where - F: Fn(&E::Scalar, &E::Scalar, &E::Scalar, &E::Scalar) -> E::Scalar + Sync, - { - let mut r: Vec = Vec::new(); - let mut polys: Vec> = Vec::new(); - let mut claim_per_round = *claim; - - for _ in 0..num_rounds { - let poly = { - // Make an iterator returning the contributions to the evaluations - let (eval_point_0, eval_point_2, eval_point_3) = - Self::compute_eval_points_cubic_with_additive_term( - poly_A, poly_B, poly_C, poly_D, &comb_func, - ); - - let evals = 
vec![ - eval_point_0, - claim_per_round - eval_point_0, - eval_point_2, - eval_point_3, - ]; - UniPoly::from_evals(&evals) - }; - - // append the prover's message to the transcript - transcript.absorb(b"p", &poly); - - // derive the verifier's challenge for the next round - let r_i = transcript.squeeze(b"c")?; - r.push(r_i); - polys.push(poly.compress()); - - // Set up next round - claim_per_round = poly.evaluate(&r_i); - - // bound all tables to the verifier's challenge - rayon::join( - || { - rayon::join( - || poly_A.bind_poly_var_top(&r_i), - || poly_B.bind_poly_var_top(&r_i), - ) - }, - || { - rayon::join( - || poly_C.bind_poly_var_top(&r_i), - || poly_D.bind_poly_var_top(&r_i), - ) - }, - ); - } - - Ok(( - Self { - compressed_polys: polys, - }, - r, - vec![poly_A[0], poly_B[0], poly_C[0], poly_D[0]], - )) - } - - pub fn prove_cubic_with_additive_term_batch( - claims: &[E::Scalar], - num_rounds: &[usize], - mut poly_A_vec: Vec>, - mut poly_B_vec: Vec>, - mut poly_C_vec: Vec>, - mut poly_D_vec: Vec>, - coeffs: &[E::Scalar], - comb_func: F, - transcript: &mut E::TE, - ) -> Result<(Self, Vec, Vec>), NovaError> - where - F: Fn(&E::Scalar, &E::Scalar, &E::Scalar, &E::Scalar) -> E::Scalar + Sync, - { - let num_instances = claims.len(); - assert_eq!(num_rounds.len(), num_instances); - assert_eq!(coeffs.len(), num_instances); - assert_eq!(poly_A_vec.len(), num_instances); - assert_eq!(poly_B_vec.len(), num_instances); - assert_eq!(poly_C_vec.len(), num_instances); - assert_eq!(poly_D_vec.len(), num_instances); - - for (i, &num_rounds) in num_rounds.iter().enumerate() { - let expected_size = 1 << num_rounds; - - // Direct indexing with the assumption that the index will always be in bounds - let a = &poly_A_vec[i]; - let b = &poly_B_vec[i]; - let c = &poly_C_vec[i]; - let d = &poly_D_vec[i]; - - for (l, polyname) in [ - (a.len(), "poly_A"), - (b.len(), "poly_B"), - (c.len(), "poly_C"), - (d.len(), "poly_D"), - ] - .iter() - { - assert_eq!( - *l, expected_size, - 
"Mismatch in size for {} at index {}", - polyname, i - ); - } - } - - let num_rounds_max = *num_rounds.iter().max().unwrap(); - - let mut r: Vec = Vec::new(); - let mut polys: Vec> = Vec::new(); - let mut claim_per_round = zip_with!( - iter, - (claims, num_rounds, coeffs), - |claim, num_rounds, coeff| { - let scaled_claim = - E::Scalar::from((1 << (num_rounds_max - num_rounds)) as u64) * claim; - scaled_claim * *coeff - } - ) - .sum(); - - for current_round in 0..num_rounds_max { - let remaining_rounds = num_rounds_max - current_round; - let evals: Vec<(E::Scalar, E::Scalar, E::Scalar)> = zip_with!( - par_iter, - (num_rounds, claims, poly_A_vec, poly_B_vec, poly_C_vec, poly_D_vec), - |num_rounds, claim, poly_A, poly_B, poly_C, poly_D| { - if remaining_rounds <= *num_rounds { - Self::compute_eval_points_cubic_with_additive_term( - poly_A, poly_B, poly_C, poly_D, &comb_func, - ) - } else { - let remaining_variables = remaining_rounds - num_rounds - 1; - let scaled_claim = - E::Scalar::from((1 << remaining_variables) as u64) * claim; - (scaled_claim, scaled_claim, scaled_claim) - } - } - ) - .collect(); - - let evals_combined_0 = (0..num_instances).map(|i| evals[i].0 * coeffs[i]).sum(); - let evals_combined_2 = (0..num_instances).map(|i| evals[i].1 * coeffs[i]).sum(); - let evals_combined_3 = (0..num_instances).map(|i| evals[i].2 * coeffs[i]).sum(); - - let evals = vec![ - evals_combined_0, - claim_per_round - evals_combined_0, - evals_combined_2, - evals_combined_3, - ]; - let poly = UniPoly::from_evals(&evals); - - // append the prover's message to the transcript - transcript.absorb(b"p", &poly); - - // derive the verifier's challenge for the next round - let r_i = transcript.squeeze(b"c")?; - r.push(r_i); - - polys.push(poly.compress()); - - // Set up next round - claim_per_round = poly.evaluate(&r_i); - - // bound all the tables to the verifier's challenge - - zip_with_for_each!( - ( - num_rounds.par_iter(), - poly_A_vec.par_iter_mut(), - 
poly_B_vec.par_iter_mut(), - poly_C_vec.par_iter_mut(), - poly_D_vec.par_iter_mut() - ), - |num_rounds, poly_A, poly_B, poly_C, poly_D| { - if remaining_rounds <= *num_rounds { - let _ = rayon::join( - || { - rayon::join( - || poly_A.bind_poly_var_top(&r_i), - || poly_B.bind_poly_var_top(&r_i), - ) - }, - || { - rayon::join( - || poly_C.bind_poly_var_top(&r_i), - || poly_D.bind_poly_var_top(&r_i), - ) - }, - ); - } - } - ); - } - - let poly_A_final = poly_A_vec.into_iter().map(|poly| poly[0]).collect(); - let poly_B_final = poly_B_vec.into_iter().map(|poly| poly[0]).collect(); - let poly_C_final = poly_C_vec.into_iter().map(|poly| poly[0]).collect(); - let poly_D_final = poly_D_vec.into_iter().map(|poly| poly[0]).collect(); - - Ok(( - Self { - compressed_polys: polys, - }, - r, - vec![poly_A_final, poly_B_final, poly_C_final, poly_D_final], - )) - } -} diff --git a/src/supernova/circuit.rs b/src/supernova/circuit.rs deleted file mode 100644 index f9a3c05..0000000 --- a/src/supernova/circuit.rs +++ /dev/null @@ -1,838 +0,0 @@ -//! Supernova implementation support arbitrary argumented circuits and running -//! instances. There are two Verification Circuits for each argumented circuit: -//! The primary and the secondary. Each of them is over a cycle curve but -//! only the primary executes the next step of the computation. -//! Each circuit takes as input 2 hashes. -//! Each circuit folds the last invocation of the other into the respective -//! running instance, specified by `augmented_circuit_index` -//! -//! The augmented circuit F' for `SuperNova` that includes everything from Nova -//! and additionally checks: -//! 1. Ui[] are contained in X[0] hash pre-image. -//! 2. R1CS Instance u is folded into Ui[augmented_circuit_index] correctly; -//! just like Nova IVC. -//! 3. (optional by F logic) F circuit might check `program_counter_{i}` -//! invoked current F circuit is legal or not. -//! 3. F circuit produce `program_counter_{i+1}` and sent to next round to -//! 
optionally constraint the next F' argumented circuit. -use std::marker::PhantomData; - -use bellpepper::gadgets::{boolean_utils::conditionally_select_slice, Assignment}; -use bellpepper_core::{ - boolean::{AllocatedBit, Boolean}, - num::AllocatedNum, - ConstraintSystem, SynthesisError, -}; -use ff::{Field, PrimeField}; -use itertools::Itertools as _; -use serde::{Deserialize, Serialize}; - -use crate::{ - constants::{NIO_NOVA_FOLD, NUM_HASH_BITS}, - gadgets::{ - alloc_num_equals, alloc_scalar_as_base, alloc_zero, - conditionally_select_alloc_relaxed_r1cs, - conditionally_select_vec_allocated_relaxed_r1cs_instance, le_bits_to_num, AllocatedPoint, - AllocatedR1CSInstance, AllocatedRelaxedR1CSInstance, - }, - r1cs::{R1CSInstance, RelaxedR1CSInstance}, - supernova::{ - num_ro_inputs, - utils::{get_from_vec_alloc_relaxed_r1cs, get_selector_vec_from_index}, - }, - traits::{commitment::CommitmentTrait, Engine, ROCircuitTrait, ROConstantsCircuit}, - zip_with, Commitment, -}; - -// NOTE: This trait below is actually useful outside of this if you want to -// implement a step circuit on your own type. We use it in our prover code. -// However, there is a conflicting "StepCircuit" in -// `crate::traits::circuit::StepCircuit` which I deleted. We should likely have -// a supertrait here for NIVC that provides the circuit index because we only -// want that when we are using NIVC. Program counter should be able to be put to -// `None` otherwise, or we could handle that slightly differently too - -/// A helper trait for a step of the incremental computation for `SuperNova` -/// (i.e., circuit for F) -- to be implemented by applications. 
-pub trait StepCircuit: Send + Sync + Clone { - /// Return the the number of inputs or outputs of each step - /// (this method is called only at circuit synthesis time) - /// `synthesize` and `output` methods are expected to take as - /// input a vector of size equal to arity and output a vector of size equal - /// to arity - fn arity(&self) -> usize; - - /// Return this `StepCircuit`'s assigned index, for use when enforcing the - /// program counter. - fn circuit_index(&self) -> usize; - - /// Synthesize the circuit for a computation step and return variable - /// that corresponds to the output of the step `pc_{i+1}` and `z_{i+1}` - fn synthesize>( - &self, - cs: &mut CS, - pc: Option<&AllocatedNum>, - z: &[AllocatedNum], - ) -> Result<(Option>, Vec>), SynthesisError>; -} - -// NOTES: This seems to just enforce that when we call a circuit at a given -// step, it matches the set program counter. - -/// A helper trait for a step of the incremental computation for `SuperNova` -/// (i.e., circuit for F) -- automatically implemented for `StepCircuit` and -/// used internally to enforce that the circuit selected by the program counter -/// is used at each step. -pub trait EnforcingStepCircuit: Send + Sync + Clone + StepCircuit { - /// Delegate synthesis to `StepCircuit::synthesize`, and additionally, - /// enforce the constraint that program counter `pc`, if supplied, is - /// equal to the circuit's assigned index. 
- fn enforcing_synthesize>( - &self, - cs: &mut CS, - pc: Option<&AllocatedNum>, - z: &[AllocatedNum], - ) -> Result<(Option>, Vec>), SynthesisError> { - if let Some(pc) = pc { - let circuit_index = F::from(self.circuit_index() as u64); - - // pc * 1 = circuit_index - cs.enforce( - || "pc matches circuit index", - |lc| lc + pc.get_variable(), - |lc| lc + CS::one(), - |lc| lc + (circuit_index, CS::one()), - ); - } - self.synthesize(cs, pc, z) - } -} - -impl> EnforcingStepCircuit for S {} - -/// A trivial step circuit that simply returns the input -/// NOTE: Should only be used as secondary circuit!!! -#[derive(Clone, Debug, Default)] -pub struct TrivialCircuit { - _p: PhantomData, -} - -impl StepCircuit for TrivialCircuit -where - F: PrimeField, -{ - fn arity(&self) -> usize { - 1 - } - - /// This will not interfere with other circuit indices in the primary - /// circuit. - fn circuit_index(&self) -> usize { - 0 - } - - fn synthesize>( - &self, - _cs: &mut CS, - program_counter: Option<&AllocatedNum>, - z: &[AllocatedNum], - ) -> Result<(Option>, Vec>), SynthesisError> { - Ok((program_counter.cloned(), z.to_vec())) - } -} - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct SuperNovaAugmentedCircuitParams { - limb_width: usize, - n_limbs: usize, - is_primary_circuit: bool, // A boolean indicating if this is the primary circuit -} - -impl SuperNovaAugmentedCircuitParams { - pub const fn new(limb_width: usize, n_limbs: usize, is_primary_circuit: bool) -> Self { - Self { - limb_width, - n_limbs, - is_primary_circuit, - } - } - - pub fn get_n_limbs(&self) -> usize { - self.n_limbs - } -} - -#[derive(Debug)] -pub struct SuperNovaAugmentedCircuitInputs<'a, E: Engine> { - pp_digest: E::Scalar, - i: E::Base, - /// Input to the circuit for the base case - z0: &'a [E::Base], - /// Input to the circuit for the non-base case - zi: Option<&'a [E::Base]>, - /// List of `RelaxedR1CSInstance`. - /// `None` if this is the base case. 
- /// Elements are `None` if the circuit at that index was not yet executed. - U: Option<&'a [Option>]>, - /// R1CS proof to be folded into U - u: Option<&'a R1CSInstance>, - /// Nova folding proof for accumulating u into U[j] - T: Option<&'a Commitment>, - /// Index of the current circuit - program_counter: Option, - /// Index j of circuit being folded into U[j] - last_augmented_circuit_index: E::Base, -} - -impl<'a, E: Engine> SuperNovaAugmentedCircuitInputs<'a, E> { - /// Create new inputs/witness for the verification circuit - pub fn new( - pp_digest: E::Scalar, - i: E::Base, - z0: &'a [E::Base], - zi: Option<&'a [E::Base]>, - U: Option<&'a [Option>]>, - u: Option<&'a R1CSInstance>, - T: Option<&'a Commitment>, - program_counter: Option, - last_augmented_circuit_index: E::Base, - ) -> Self { - Self { - pp_digest, - i, - z0, - zi, - U, - u, - T, - program_counter, - last_augmented_circuit_index, - } - } -} - -/// The augmented circuit F' in `SuperNova` that includes a step circuit F -/// and the circuit for the verifier in `SuperNova`'s non-interactive folding -/// scheme, `SuperNova` NIFS will fold strictly r1cs instance u with respective -/// relaxed r1cs instance `U[last_augmented_circuit_index]` -pub struct SuperNovaAugmentedCircuit<'a, E: Engine, SC: EnforcingStepCircuit> { - params: &'a SuperNovaAugmentedCircuitParams, - ro_consts: ROConstantsCircuit, - inputs: Option>, - step_circuit: &'a SC, // The function that is applied for each step - num_augmented_circuits: usize, // number of overall augmented circuits -} - -impl<'a, E: Engine, SC: EnforcingStepCircuit> SuperNovaAugmentedCircuit<'a, E, SC> { - /// Create a new verification circuit for the input relaxed r1cs instances - pub const fn new( - params: &'a SuperNovaAugmentedCircuitParams, - inputs: Option>, - step_circuit: &'a SC, - ro_consts: ROConstantsCircuit, - num_augmented_circuits: usize, - ) -> Self { - Self { - params, - inputs, - step_circuit, - ro_consts, - num_augmented_circuits, - } - } - - 
/// Allocate all witnesses from the augmented function's non-deterministic - /// inputs. Optional entries are allocated as their default values. - fn alloc_witness::Base>>( - &self, - mut cs: CS, - arity: usize, - num_augmented_circuits: usize, - ) -> Result< - ( - AllocatedNum, - AllocatedNum, - Vec>, - Vec>, - Vec>, - AllocatedR1CSInstance, - AllocatedPoint, - Option>, - Vec, - ), - SynthesisError, - > { - let last_augmented_circuit_index = - AllocatedNum::alloc(cs.namespace(|| "last_augmented_circuit_index"), || { - Ok(self.inputs.get()?.last_augmented_circuit_index) - })?; - - // Allocate the params - let params = alloc_scalar_as_base::( - cs.namespace(|| "params"), - self.inputs.as_ref().map(|inputs| inputs.pp_digest), - )?; - - // Allocate i - let i = AllocatedNum::alloc(cs.namespace(|| "i"), || Ok(self.inputs.get()?.i))?; - - // Allocate program_counter only on primary circuit - let program_counter = if self.params.is_primary_circuit { - Some(AllocatedNum::alloc( - cs.namespace(|| "program_counter"), - || { - Ok(self - .inputs - .get()? - .program_counter - .expect("program_counter missing")) - }, - )?) - } else { - None - }; - - // Allocate z0 - let z_0 = (0..arity) - .map(|i| { - AllocatedNum::alloc(cs.namespace(|| format!("z0_{i}")), || { - Ok(self.inputs.get()?.z0[i]) - }) - }) - .collect::>, _>>()?; - - // Allocate zi. 
If inputs.zi is not provided (base case) allocate default value - // 0 - let zero = vec![E::Base::ZERO; arity]; - let z_i = (0..arity) - .map(|i| { - AllocatedNum::alloc(cs.namespace(|| format!("zi_{i}")), || { - Ok(self.inputs.get()?.zi.unwrap_or(&zero)[i]) - }) - }) - .collect::>, _>>()?; - - // Allocate the running instances - let U = (0..num_augmented_circuits) - .map(|i| { - AllocatedRelaxedR1CSInstance::alloc( - cs.namespace(|| format!("Allocate U {:?}", i)), - self.inputs - .as_ref() - .and_then(|inputs| inputs.U.and_then(|U| U[i].as_ref())), - self.params.limb_width, - self.params.n_limbs, - ) - }) - .collect::>, _>>()?; - - // Allocate the r1cs instance to be folded in - let u = AllocatedR1CSInstance::alloc( - cs.namespace(|| "allocate instance u to fold"), - self.inputs.as_ref().and_then(|inputs| inputs.u), - )?; - - // Allocate T - let T = AllocatedPoint::alloc( - cs.namespace(|| "allocate T"), - self.inputs - .as_ref() - .and_then(|inputs| inputs.T.map(|T| T.to_coordinates())), - )?; - T.check_on_curve(cs.namespace(|| "check T on curve"))?; - - // Compute instance selector - let last_augmented_circuit_selector = get_selector_vec_from_index( - cs.namespace(|| "instance selector"), - &last_augmented_circuit_index, - num_augmented_circuits, - )?; - - Ok(( - params, - i, - z_0, - z_i, - U, - u, - T, - program_counter, - last_augmented_circuit_selector, - )) - } - - /// Synthesizes base case and returns the new relaxed `R1CSInstance` - fn synthesize_base_case::Base>>( - &self, - mut cs: CS, - u: AllocatedR1CSInstance, - last_augmented_circuit_selector: &[Boolean], - ) -> Result>, SynthesisError> { - let mut cs = cs.namespace(|| "alloc U_i default"); - - // Allocate a default relaxed r1cs instance - let default = AllocatedRelaxedR1CSInstance::default( - cs.namespace(|| "Allocate primary U_default".to_string()), - self.params.limb_width, - self.params.n_limbs, - )?; - - // The primary circuit just initialize single AllocatedRelaxedR1CSInstance - let U_default 
= if self.params.is_primary_circuit { - vec![default] - } else { - // The secondary circuit convert the incoming R1CS instance on index which match - // last_augmented_circuit_index - let incoming_r1cs = AllocatedRelaxedR1CSInstance::from_r1cs_instance( - cs.namespace(|| "Allocate incoming_r1cs"), - u, - self.params.limb_width, - self.params.n_limbs, - )?; - - last_augmented_circuit_selector - .iter() - .enumerate() - .map(|(i, equal_bit)| { - // If index match last_augmented_circuit_index, then return incoming_r1cs, - // otherwise return the default one - conditionally_select_alloc_relaxed_r1cs( - cs.namespace(|| format!("select on index namespace {:?}", i)), - &incoming_r1cs, - &default, - equal_bit, - ) - }) - .collect::>, _>>()? - }; - Ok(U_default) - } - - /// Synthesizes non base case and returns the new relaxed `R1CSInstance` - /// And a boolean indicating if all checks pass - fn synthesize_non_base_case::Base>>( - &self, - mut cs: CS, - params: &AllocatedNum, - i: &AllocatedNum, - z_0: &[AllocatedNum], - z_i: &[AllocatedNum], - U: &[AllocatedRelaxedR1CSInstance], - u: &AllocatedR1CSInstance, - T: &AllocatedPoint, - arity: usize, - last_augmented_circuit_selector: &[Boolean], - program_counter: &Option>, - ) -> Result< - ( - Vec>, - AllocatedBit, - ), - SynthesisError, - > { - // Check that u.x[0] = Hash(params, i, program_counter, z0, zi, U[]) - let mut ro = E::ROCircuit::new( - self.ro_consts.clone(), - num_ro_inputs( - self.num_augmented_circuits, - self.params.get_n_limbs(), - arity, - self.params.is_primary_circuit, - ), - ); - ro.absorb(params); - ro.absorb(i); - - if self.params.is_primary_circuit { - let Some(program_counter) = program_counter.as_ref() else { - return Err(SynthesisError::AssignmentMissing); - }; - ro.absorb(program_counter) - } - - for e in z_0 { - ro.absorb(e); - } - for e in z_i { - ro.absorb(e); - } - - U.iter().enumerate().try_for_each(|(i, U)| { - U.absorb_in_ro(cs.namespace(|| format!("absorb U {:?}", i)), &mut ro) - })?; - - 
let hash_bits = ro.squeeze(cs.namespace(|| "Input hash"), NUM_HASH_BITS)?; - let hash = le_bits_to_num(cs.namespace(|| "bits to hash"), &hash_bits)?; - let check_pass: AllocatedBit = alloc_num_equals( - cs.namespace(|| "check consistency of u.X[0] with H(params, U, i, z0, zi)"), - &u.X[0], - &hash, - )?; - - // Run NIFS Verifier - let U_to_fold = get_from_vec_alloc_relaxed_r1cs( - cs.namespace(|| "U to fold"), - U, - last_augmented_circuit_selector, - )?; - let U_fold = U_to_fold.fold_with_r1cs( - cs.namespace(|| "compute fold of U and u"), - params, - u, - T, - self.ro_consts.clone(), - self.params.limb_width, - self.params.n_limbs, - )?; - - // update AllocatedRelaxedR1CSInstance on index match augmented circuit index - let U_next: Vec> = zip_with!( - (U.iter(), last_augmented_circuit_selector.iter()), - |U, equal_bit| { - conditionally_select_alloc_relaxed_r1cs( - cs.namespace(|| "select on index namespace"), - &U_fold, - U, - equal_bit, - ) - } - ) - .collect::>, _>>()?; - - Ok((U_next, check_pass)) - } - - pub fn synthesize::Base>>( - self, - cs: &mut CS, - ) -> Result<(Option>, Vec>), SynthesisError> { - let arity = self.step_circuit.arity(); - let num_augmented_circuits = if self.params.is_primary_circuit { - // primary circuit only fold single running instance with secondary output - // strict r1cs instance - 1 - } else { - // secondary circuit contains the logic to choose one of multiple augments - // running instance to fold - self.num_augmented_circuits - }; - - if self.inputs.is_some() { - // Check arity of z0 - let z0_len = self.inputs.as_ref().map_or(0, |inputs| inputs.z0.len()); - if self.step_circuit.arity() != z0_len { - return Err(SynthesisError::IncompatibleLengthVector(format!( - "z0_len {:?} != arity length {:?}", - z0_len, - self.step_circuit.arity() - ))); - } - - // The primary curve should always fold the circuit with index 0 - let last_augmented_circuit_index = self - .inputs - .get() - .map_or(E::Base::ZERO, |inputs| 
inputs.last_augmented_circuit_index); - if self.params.is_primary_circuit && last_augmented_circuit_index != E::Base::ZERO { - return Err(SynthesisError::IncompatibleLengthVector( - "primary circuit running instance only valid on index 0".to_string(), - )); - } - } - - // Allocate witnesses - let (params, i, z_0, z_i, U, u, T, program_counter, last_augmented_circuit_selector) = self - .alloc_witness( - cs.namespace(|| "allocate the circuit witness"), - arity, - num_augmented_circuits, - )?; - - // Compute variable indicating if this is the base case - let zero = alloc_zero(cs.namespace(|| "zero")); - let is_base_case = - alloc_num_equals(cs.namespace(|| "Check if base case"), &i.clone(), &zero)?; - - // Synthesize the circuit for the non-base case and get the new running - // instances along with a boolean indicating if all checks have passed - // must use return `last_augmented_circuit_index_checked` since it got range - // checked - let (U_next_non_base, check_non_base_pass) = self.synthesize_non_base_case( - cs.namespace(|| "synthesize non base case"), - ¶ms, - &i, - &z_0, - &z_i, - &U, - &u, - &T, - arity, - &last_augmented_circuit_selector, - &program_counter, - )?; - - // Synthesize the circuit for the base case and get the new running instances - let U_next_base = self.synthesize_base_case( - cs.namespace(|| "base case"), - u.clone(), - &last_augmented_circuit_selector, - )?; - - // Either check_non_base_pass=true or we are in the base case - let should_be_false = AllocatedBit::nor( - cs.namespace(|| "check_non_base_pass nor base_case"), - &check_non_base_pass, - &is_base_case, - )?; - cs.enforce( - || "check_non_base_pass nor base_case = false", - |lc| lc + should_be_false.get_variable(), - |lc| lc + CS::one(), - |lc| lc, - ); - - // Compute the U_next - let U_next = conditionally_select_vec_allocated_relaxed_r1cs_instance( - cs.namespace(|| "U_next"), - &U_next_base[..], - &U_next_non_base[..], - &Boolean::from(is_base_case.clone()), - )?; - - // Compute i 
+ 1 - let i_next = AllocatedNum::alloc(cs.namespace(|| "i + 1"), || { - Ok(*i.get_value().get()? + E::Base::ONE) - })?; - cs.enforce( - || "check i + 1", - |lc| lc + i.get_variable() + CS::one(), - |lc| lc + CS::one(), - |lc| lc + i_next.get_variable(), - ); - - // Compute z_{i+1} - let z_input = conditionally_select_slice( - cs.namespace(|| "select input to F"), - &z_0, - &z_i, - &Boolean::from(is_base_case), - )?; - - let (program_counter_new, z_next) = self.step_circuit.enforcing_synthesize( - &mut cs.namespace(|| "F"), - program_counter.as_ref(), - &z_input, - )?; - - if z_next.len() != arity { - return Err(SynthesisError::IncompatibleLengthVector( - "z_next".to_string(), - )); - } - - // To check correct folding sequencing we are just going to make a hash. - // The next RunningInstance folding can take the pre-image of this hash as - // witness and check. - - // "Finally, there is a subtle sizing issue in the above description: in each - // step, because Ui+1 is produced as the public IO of F0 - // program_counter+1, it must be contained in the public IO of instance - // ui+1. In the next iteration, because ui+1 is folded - // into Ui+1[program_counter+1], this means that Ui+1[program_counter+1] is at - // least as large as Ui by the properties of the folding scheme. This - // means that the list of running instances grows in each step. To - // alleviate this issue, we have each F0j only produce a hash - // of its outputs as public output. In the subsequent step, the next augmented - // function takes as non-deterministic input a preimage to this hash." 
pg.16 - - // https://eprint.iacr.org/2022/1758.pdf - - // Compute the new hash H(params, i+1, program_counter, z0, z_{i+1}, U_next) - let mut ro = E::ROCircuit::new( - self.ro_consts.clone(), - num_ro_inputs( - self.num_augmented_circuits, - self.params.get_n_limbs(), - self.step_circuit.arity(), - self.params.is_primary_circuit, - ), - ); - ro.absorb(¶ms); - ro.absorb(&i_next); - // optionally absorb program counter if exist - if program_counter.is_some() { - ro.absorb( - program_counter_new - .as_ref() - .expect("new program counter missing"), - ) - } - for e in &z_0 { - ro.absorb(e); - } - for e in &z_next { - ro.absorb(e); - } - U_next.iter().enumerate().try_for_each(|(i, U)| { - U.absorb_in_ro(cs.namespace(|| format!("absorb U_new {:?}", i)), &mut ro) - })?; - - let hash_bits = ro.squeeze(cs.namespace(|| "output hash bits"), NUM_HASH_BITS)?; - let hash = le_bits_to_num(cs.namespace(|| "convert hash to num"), &hash_bits)?; - - // We are cycling of curve implementation, so primary/secondary will rotate hash - // in IO for the others to check bypass unmodified hash of other circuit - // as next X[0] and output the computed the computed hash as next X[1] - u.X[1].inputize(cs.namespace(|| "bypass unmodified hash of the other circuit"))?; - hash.inputize(cs.namespace(|| "output new hash of this circuit"))?; - - Ok((program_counter_new, z_next)) - } -} - -#[cfg(test)] -mod tests { - use expect_test::{expect, Expect}; - - use super::*; - use crate::{ - bellpepper::{ - r1cs::{NovaShape, NovaWitness}, - solver::SatisfyingAssignment, - test_shape_cs::TestShapeCS, - }, - constants::{BN_LIMB_WIDTH, BN_N_LIMBS}, - gadgets::scalar_as_base, - provider::{poseidon::PoseidonConstantsCircuit, Bn256EngineIPA, GrumpkinEngine}, - supernova::circuit::TrivialCircuit, - traits::{snark::default_ck_hint, CurveCycleEquipped, Dual}, - }; - - // In the following we use 1 to refer to the primary, and 2 to refer to the - // secondary circuit - fn test_supernova_recursive_circuit_with( - 
primary_params: &SuperNovaAugmentedCircuitParams, - secondary_params: &SuperNovaAugmentedCircuitParams, - ro_consts1: ROConstantsCircuit>, - ro_consts2: ROConstantsCircuit, - num_constraints_primary: &Expect, - num_constraints_secondary: &Expect, - num_augmented_circuits: usize, - ) where - E1: CurveCycleEquipped, - { - let tc1 = TrivialCircuit::default(); - // Initialize the shape and ck for the primary - let circuit1: SuperNovaAugmentedCircuit< - '_, - Dual, - TrivialCircuit< as Engine>::Base>, - > = SuperNovaAugmentedCircuit::new( - primary_params, - None, - &tc1, - ro_consts1.clone(), - num_augmented_circuits, - ); - let mut cs: TestShapeCS = TestShapeCS::new(); - let _ = circuit1.synthesize(&mut cs); - let (shape1, ck1) = cs.r1cs_shape_and_key(&*default_ck_hint()); - - num_constraints_primary.assert_eq(&cs.num_constraints().to_string()); - - let tc2 = TrivialCircuit::default(); - // Initialize the shape and ck for the secondary - let circuit2: SuperNovaAugmentedCircuit<'_, E1, TrivialCircuit<::Base>> = - SuperNovaAugmentedCircuit::new( - secondary_params, - None, - &tc2, - ro_consts2.clone(), - num_augmented_circuits, - ); - let mut cs: TestShapeCS> = TestShapeCS::new(); - let _ = circuit2.synthesize(&mut cs); - let (shape2, ck2) = cs.r1cs_shape_and_key(&*default_ck_hint()); - - num_constraints_secondary.assert_eq(&cs.num_constraints().to_string()); - - // Execute the base case for the primary - let zero1 = < as Engine>::Base as Field>::ZERO; - let mut cs1 = SatisfyingAssignment::::new(); - let vzero1 = vec![zero1]; - let inputs1: SuperNovaAugmentedCircuitInputs<'_, Dual> = - SuperNovaAugmentedCircuitInputs::new( - scalar_as_base::(zero1), // pass zero for testing - zero1, - &vzero1, - None, - None, - None, - None, - Some(zero1), - zero1, - ); - let circuit1: SuperNovaAugmentedCircuit< - '_, - Dual, - TrivialCircuit< as Engine>::Base>, - > = SuperNovaAugmentedCircuit::new( - primary_params, - Some(inputs1), - &tc1, - ro_consts1, - num_augmented_circuits, - ); 
- let _ = circuit1.synthesize(&mut cs1); - let (inst1, witness1) = cs1.r1cs_instance_and_witness(&shape1, &ck1).unwrap(); - // Make sure that this is satisfiable - shape1.is_sat(&ck1, &inst1, &witness1).unwrap(); - - // Execute the base case for the secondary - let zero2 = <::Base as Field>::ZERO; - let mut cs2 = SatisfyingAssignment::>::new(); - let vzero2 = vec![zero2]; - let inputs2: SuperNovaAugmentedCircuitInputs<'_, E1> = SuperNovaAugmentedCircuitInputs::new( - scalar_as_base::>(zero2), // pass zero for testing - zero2, - &vzero2, - None, - None, - Some(&inst1), - None, - Some(zero2), - zero2, - ); - let circuit2: SuperNovaAugmentedCircuit<'_, E1, TrivialCircuit<::Base>> = - SuperNovaAugmentedCircuit::new( - secondary_params, - Some(inputs2), - &tc2, - ro_consts2, - num_augmented_circuits, - ); - let _ = circuit2.synthesize(&mut cs2); - let (inst2, witness2) = cs2.r1cs_instance_and_witness(&shape2, &ck2).unwrap(); - // Make sure that it is satisfiable - shape2.is_sat(&ck2, &inst2, &witness2).unwrap(); - } - - #[test] - fn test_supernova_recursive_circuit_grumpkin() { - let params1 = SuperNovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, true); - let params2 = SuperNovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, false); - let ro_consts1: ROConstantsCircuit = PoseidonConstantsCircuit::default(); - let ro_consts2: ROConstantsCircuit = PoseidonConstantsCircuit::default(); - - test_supernova_recursive_circuit_with::( - ¶ms1, - ¶ms2, - ro_consts1, - ro_consts2, - &expect!["10004"], - &expect!["10573"], - 1, - ); - // TODO: extend to num_augmented_circuits >= 2 - } -} diff --git a/src/supernova/mod.rs b/src/supernova/mod.rs deleted file mode 100644 index 90b3740..0000000 --- a/src/supernova/mod.rs +++ /dev/null @@ -1,1296 +0,0 @@ -#![doc = include_str!("./Readme.md")] - -use std::{ops::Index, sync::Arc}; - -use bellpepper_core::{ConstraintSystem, SynthesisError}; -use ff::Field; -use itertools::Itertools as _; -use once_cell::sync::OnceCell; -use 
rayon::prelude::*; -use serde::{Deserialize, Serialize}; -use tracing::debug; - -use crate::{ - bellpepper::{ - r1cs::{NovaShape, NovaWitness}, - shape_cs::ShapeCS, - solver::SatisfyingAssignment, - }, - constants::{BN_LIMB_WIDTH, BN_N_LIMBS, NUM_HASH_BITS}, - digest::{DigestComputer, SimpleDigestible}, - errors::NovaError, - nifs::NIFS, - r1cs::{ - self, commitment_key_size, CommitmentKeyHint, R1CSInstance, R1CSResult, R1CSShape, - R1CSWitness, RelaxedR1CSInstance, RelaxedR1CSWitness, - }, - scalar_as_base, - traits::{ - commitment::{CommitmentEngineTrait, CommitmentTrait}, - AbsorbInROTrait, CurveCycleEquipped, Dual, Engine, ROConstants, ROConstantsCircuit, - ROTrait, - }, - Commitment, CommitmentKey, R1CSWithArity, -}; - -mod circuit; // declare the module first -pub use circuit::{StepCircuit, SuperNovaAugmentedCircuitParams, TrivialCircuit}; -use circuit::{SuperNovaAugmentedCircuit, SuperNovaAugmentedCircuitInputs}; -use error::SuperNovaError; - -/// A struct that manages all the digests of the primary circuits of a SuperNova -/// instance -#[derive(Debug, PartialEq, Eq, Serialize)] -pub struct CircuitDigests { - digests: Vec, -} - -impl SimpleDigestible for CircuitDigests {} - -impl std::ops::Deref for CircuitDigests { - type Target = Vec; - - fn deref(&self) -> &Self::Target { - &self.digests - } -} - -impl CircuitDigests { - /// Construct a new [`CircuitDigests`] - pub fn new(digests: Vec) -> Self { - Self { digests } - } - - /// Return the [`CircuitDigests`]' digest. 
- pub fn digest(&self) -> E::Scalar { - let dc: DigestComputer<'_, ::Scalar, Self> = DigestComputer::new(self); - dc.digest().expect("Failure in computing digest") - } -} - -/// A vector of [`R1CSWithArity`] adjoined to a set of [`PublicParams`] -#[derive(Debug, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct PublicParams -where - E1: CurveCycleEquipped, -{ - /// The internal circuit shapes - circuit_shapes: Vec>, - - ro_consts_primary: ROConstants, - ro_consts_circuit_primary: ROConstantsCircuit>, - ck_primary: Arc>, // This is shared between all circuit params - augmented_circuit_params_primary: SuperNovaAugmentedCircuitParams, - - ro_consts_secondary: ROConstants>, - ro_consts_circuit_secondary: ROConstantsCircuit, - ck_secondary: Arc>>, - circuit_shape_secondary: R1CSWithArity>, - augmented_circuit_params_secondary: SuperNovaAugmentedCircuitParams, - - /// Digest constructed from this `PublicParams`' parameters - #[serde(skip, default = "OnceCell::new")] - digest: OnceCell, -} - -/// Auxiliary [`PublicParams`] information about the commitment keys and -/// secondary circuit. This is used as a helper struct when reconstructing -/// [`PublicParams`] downstream in lurk. 
-#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct AuxParams -where - E1: CurveCycleEquipped, -{ - pub ro_consts_primary: ROConstants, - pub ro_consts_circuit_primary: ROConstantsCircuit>, - pub ck_primary: Arc>, // This is shared between all circuit params - pub augmented_circuit_params_primary: SuperNovaAugmentedCircuitParams, - - pub ro_consts_secondary: ROConstants>, - pub ro_consts_circuit_secondary: ROConstantsCircuit, - pub ck_secondary: Arc>>, - pub circuit_shape_secondary: R1CSWithArity>, - pub augmented_circuit_params_secondary: SuperNovaAugmentedCircuitParams, - - pub digest: E1::Scalar, -} - -use std::io::Cursor; - -use crate::{ - fast_serde, - fast_serde::{FastSerde, SerdeByteError, SerdeByteTypes}, -}; - -impl FastSerde for AuxParams -where - E1: CurveCycleEquipped, - >::CommitmentKey: FastSerde, - <::CE as CommitmentEngineTrait>::CommitmentKey: - FastSerde, -{ - /// Byte format: - /// [0..4] - Magic number (4 bytes) - /// [4] - Serde type: AuxParams (u8) - /// [5] - Number of sections (u8 = 8) - /// Sections (repeated 8 times): - /// [N] - Section type (u8) - /// [N+1..5] - Section size (u32) - /// [N+5..] 
- Section data (variable length) - /// Section types: - /// 1: ro_consts_primary (bincode) - /// 2: ro_consts_circuit_primary (bincode) - /// 3: ck_primary (FastSerde) - /// 4: ro_consts_secondary (bincode) - /// 5: ro_consts_circuit_secondary (bincode) - /// 6: ck_secondary (FastSerde) - /// 7: circuit_shape_secondary (json) - /// 8: digest (bincode) - fn to_bytes(&self) -> Vec { - let mut out = Vec::new(); - - // Write header - out.extend_from_slice(&fast_serde::MAGIC_NUMBER); - out.push(fast_serde::SerdeByteTypes::AuxParams as u8); - out.push(8); // num_sections - - // Write sections - Self::write_section_bytes( - &mut out, - 1, - &bincode::serialize(&self.ro_consts_primary).unwrap(), - ); - Self::write_section_bytes( - &mut out, - 2, - &bincode::serialize(&self.ro_consts_circuit_primary).unwrap(), - ); - Self::write_section_bytes(&mut out, 3, &self.ck_primary.to_bytes()); - Self::write_section_bytes( - &mut out, - 4, - &bincode::serialize(&self.ro_consts_secondary).unwrap(), - ); - Self::write_section_bytes( - &mut out, - 5, - &bincode::serialize(&self.ro_consts_circuit_secondary).unwrap(), - ); - Self::write_section_bytes(&mut out, 6, &self.ck_secondary.to_bytes()); - Self::write_section_bytes( - &mut out, - 7, - &bincode::serialize(&self.circuit_shape_secondary).unwrap(), - ); - Self::write_section_bytes(&mut out, 8, &bincode::serialize(&self.digest).unwrap()); - - out - } - - fn from_bytes(bytes: &Vec) -> Result { - let mut cursor = Cursor::new(bytes); - - // Validate header - Self::validate_header(&mut cursor, SerdeByteTypes::AuxParams, 8)?; - - // Read all sections - let ro_consts_primary = bincode::deserialize(&Self::read_section_bytes(&mut cursor, 1)?)?; - let ro_consts_circuit_primary = - bincode::deserialize(&Self::read_section_bytes(&mut cursor, 2)?)?; - let ck_primary = Arc::new( - >::CommitmentKey::from_bytes( - &Self::read_section_bytes(&mut cursor, 3)?, - )?, - ); - let ro_consts_secondary = bincode::deserialize(&Self::read_section_bytes(&mut 
cursor, 4)?)?; - let ro_consts_circuit_secondary = - bincode::deserialize(&Self::read_section_bytes(&mut cursor, 5)?)?; - let ck_secondary = Arc::new(<::CE as CommitmentEngineTrait< - E1::Secondary, - >>::CommitmentKey::from_bytes( - &Self::read_section_bytes(&mut cursor, 6)? - )?); - let circuit_shape_secondary = - bincode::deserialize(&Self::read_section_bytes(&mut cursor, 7)?)?; - let digest = bincode::deserialize(&Self::read_section_bytes(&mut cursor, 8)?)?; - - // NOTE: This does not check the digest. Maybe we should. - Ok(Self { - ro_consts_primary, - ro_consts_circuit_primary, - ck_primary, - augmented_circuit_params_primary: SuperNovaAugmentedCircuitParams::new( - BN_LIMB_WIDTH, - BN_N_LIMBS, - true, - ), - ro_consts_secondary, - ro_consts_circuit_secondary, - ck_secondary, - circuit_shape_secondary, - augmented_circuit_params_secondary: SuperNovaAugmentedCircuitParams::new( - BN_LIMB_WIDTH, - BN_N_LIMBS, - false, - ), - digest, - }) - } -} - -impl Index for PublicParams -where - E1: CurveCycleEquipped, -{ - type Output = R1CSWithArity; - - fn index(&self, index: usize) -> &Self::Output { - &self.circuit_shapes[index] - } -} - -impl SimpleDigestible for PublicParams where E1: CurveCycleEquipped {} - -impl PublicParams -where - E1: CurveCycleEquipped, -{ - /// Construct a new [`PublicParams`] - /// - /// # Note - /// - /// Public parameters set up a number of bases for the homomorphic - /// commitment scheme of Nova. - /// - /// Some final compressing SNARKs, like variants of Spartan, use computation - /// commitments that require larger sizes for these parameters. These - /// SNARKs provide a hint for these values by implementing - /// `RelaxedR1CSSNARKTrait::commitment_key_floor()`, which can be passed to - /// this function. - /// - /// If you're not using such a SNARK, pass `&(|_| 0)` instead. - /// - /// # Arguments - /// - /// * `non_uniform_circuit`: The non-uniform circuit of type `NC`. 
- /// * `ck_hint1`: A `CommitmentKeyHint` for `E1`, which is a function that - /// provides a hint for the number of generators required in the - /// commitment scheme for the primary circuit. - /// * `ck_hint2`: A `CommitmentKeyHint` for `E2`, similar to `ck_hint1`, but - /// for the secondary circuit. - pub fn setup>( - non_uniform_circuit: &NC, - ck_hint1: &CommitmentKeyHint, - ck_hint2: &CommitmentKeyHint>, - ) -> Self { - let num_circuits = non_uniform_circuit.num_circuits(); - - let augmented_circuit_params_primary = - SuperNovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, true); - let ro_consts_primary: ROConstants = ROConstants::::default(); - // ro_consts_circuit_primary are parameterized by E2 because the type alias uses - // E2::Base = E1::Scalar - let ro_consts_circuit_primary: ROConstantsCircuit> = - ROConstantsCircuit::>::default(); - - let circuit_shapes = get_circuit_shapes(non_uniform_circuit); - - let ck_primary = Self::compute_primary_ck(&circuit_shapes, ck_hint1); - let ck_primary = Arc::new(ck_primary); - - let augmented_circuit_params_secondary = - SuperNovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, false); - let ro_consts_secondary = ROConstants::>::default(); - let c_secondary = non_uniform_circuit.secondary_circuit(); - let F_arity_secondary = c_secondary.arity(); - let ro_consts_circuit_secondary: ROConstantsCircuit = - ROConstantsCircuit::::default(); - - let circuit_secondary: SuperNovaAugmentedCircuit<'_, E1, NC::C2> = - SuperNovaAugmentedCircuit::new( - &augmented_circuit_params_secondary, - None, - &c_secondary, - ro_consts_circuit_secondary.clone(), - num_circuits, - ); - let mut cs: ShapeCS> = ShapeCS::new(); - circuit_secondary - .synthesize(&mut cs) - .expect("circuit synthesis failed"); - let (r1cs_shape_secondary, ck_secondary) = cs.r1cs_shape_and_key(ck_hint2); - let ck_secondary = Arc::new(ck_secondary); - let circuit_shape_secondary = R1CSWithArity::new(r1cs_shape_secondary, F_arity_secondary); - - let pp 
= Self { - circuit_shapes, - ro_consts_primary, - ro_consts_circuit_primary, - ck_primary, - augmented_circuit_params_primary, - ro_consts_secondary, - ro_consts_circuit_secondary, - ck_secondary, - circuit_shape_secondary, - augmented_circuit_params_secondary, - digest: OnceCell::new(), - }; - - // make sure to initialize the `OnceCell` and compute the digest - // and avoid paying for unexpected performance costs later - pp.digest(); - pp - } - - /// Breaks down an instance of [`PublicParams`] into the circuit params and - /// auxiliary params. - pub fn into_parts(self) -> (Vec>, AuxParams) { - let digest = self.digest(); - - let Self { - circuit_shapes, - ro_consts_primary, - ro_consts_circuit_primary, - ck_primary, - augmented_circuit_params_primary, - ro_consts_secondary, - ro_consts_circuit_secondary, - ck_secondary, - circuit_shape_secondary, - augmented_circuit_params_secondary, - digest: _digest, - } = self; - - let aux_params = AuxParams { - ro_consts_primary, - ro_consts_circuit_primary, - ck_primary, - augmented_circuit_params_primary, - ro_consts_secondary, - ro_consts_circuit_secondary, - ck_secondary, - circuit_shape_secondary, - augmented_circuit_params_secondary, - digest, - }; - - (circuit_shapes, aux_params) - } - - /// Returns just the [`AuxParams`] portion of [`PublicParams`] from a - /// reference to [`PublicParams`]. 
- pub fn aux_params(&self) -> AuxParams { - AuxParams { - ro_consts_primary: self.ro_consts_primary.clone(), - ro_consts_circuit_primary: self.ro_consts_circuit_primary.clone(), - ck_primary: self.ck_primary.clone(), - augmented_circuit_params_primary: self.augmented_circuit_params_primary.clone(), - ro_consts_secondary: self.ro_consts_secondary.clone(), - ro_consts_circuit_secondary: self.ro_consts_circuit_secondary.clone(), - ck_secondary: self.ck_secondary.clone(), - circuit_shape_secondary: self.circuit_shape_secondary.clone(), - augmented_circuit_params_secondary: self.augmented_circuit_params_secondary.clone(), - digest: self.digest(), - } - } - - /// Create a [`PublicParams`] from a vector of raw [`R1CSWithArity`] and - /// auxiliary params. - pub fn from_parts(circuit_shapes: Vec>, aux_params: AuxParams) -> Self { - let pp = Self { - circuit_shapes, - ro_consts_primary: aux_params.ro_consts_primary, - ro_consts_circuit_primary: aux_params.ro_consts_circuit_primary, - ck_primary: aux_params.ck_primary, - augmented_circuit_params_primary: aux_params.augmented_circuit_params_primary, - ro_consts_secondary: aux_params.ro_consts_secondary, - ro_consts_circuit_secondary: aux_params.ro_consts_circuit_secondary, - ck_secondary: aux_params.ck_secondary, - circuit_shape_secondary: aux_params.circuit_shape_secondary, - augmented_circuit_params_secondary: aux_params.augmented_circuit_params_secondary, - digest: OnceCell::new(), - }; - assert_eq!( - aux_params.digest, - pp.digest(), - "param data is invalid; aux_params contained the incorrect digest" - ); - pp - } - - /// Create a [`PublicParams`] from a vector of raw [`R1CSWithArity`] and - /// auxiliary params. We don't check that the `aux_params.digest` is a - /// valid digest for the created params. 
- pub fn from_parts_unchecked( - circuit_shapes: Vec>, - aux_params: AuxParams, - ) -> Self { - Self { - circuit_shapes, - ro_consts_primary: aux_params.ro_consts_primary, - ro_consts_circuit_primary: aux_params.ro_consts_circuit_primary, - ck_primary: aux_params.ck_primary, - augmented_circuit_params_primary: aux_params.augmented_circuit_params_primary, - ro_consts_secondary: aux_params.ro_consts_secondary, - ro_consts_circuit_secondary: aux_params.ro_consts_circuit_secondary, - ck_secondary: aux_params.ck_secondary, - circuit_shape_secondary: aux_params.circuit_shape_secondary, - augmented_circuit_params_secondary: aux_params.augmented_circuit_params_secondary, - digest: aux_params.digest.into(), - } - } - - /// Compute primary and secondary commitment keys sized to handle the - /// largest of the circuits in the provided `R1CSWithArity`. - fn compute_primary_ck( - circuit_params: &[R1CSWithArity], - ck_hint1: &CommitmentKeyHint, - ) -> CommitmentKey { - let size_primary = circuit_params - .iter() - .map(|circuit| commitment_key_size(&circuit.r1cs_shape, ck_hint1)) - .max() - .unwrap(); - - E1::CE::setup(b"ck", size_primary) - } - - /// Return the [`PublicParams`]' digest. 
- pub fn digest(&self) -> E1::Scalar { - self.digest - .get_or_try_init(|| { - let dc: DigestComputer<'_, ::Scalar, Self> = - DigestComputer::new(self); - dc.digest() - }) - .cloned() - .expect("Failure in retrieving digest") - } - - /// Returns the number of constraints and variables of inner circuit based - /// on index - pub fn num_constraints_and_variables(&self, index: usize) -> (usize, usize) { - ( - self.circuit_shapes[index].r1cs_shape.num_cons, - self.circuit_shapes[index].r1cs_shape.num_vars, - ) - } - - /// Returns the number of constraints and variables of the secondary circuit - pub fn num_constraints_and_variables_secondary(&self) -> (usize, usize) { - ( - self.circuit_shape_secondary.r1cs_shape.num_cons, - self.circuit_shape_secondary.r1cs_shape.num_vars, - ) - } - - /// All of the primary circuit digests of this [`PublicParams`] - pub fn circuit_param_digests(&self) -> CircuitDigests { - let digests = self - .circuit_shapes - .iter() - .map(|cp| cp.digest()) - .collect::>(); - CircuitDigests { digests } - } - - /// Returns all the primary R1CS Shapes - fn primary_r1cs_shapes(&self) -> Vec<&R1CSShape> { - self.circuit_shapes - .iter() - .map(|cs| &cs.r1cs_shape) - .collect::>() - } -} - -pub fn get_circuit_shapes>( - non_uniform_circuit: &NC, -) -> Vec> { - let num_circuits = non_uniform_circuit.num_circuits(); - let augmented_circuit_params_primary = - SuperNovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, true); - - // ro_consts_circuit_primary are parameterized by E2 because the type alias uses - // E2::Base = E1::Scalar - let ro_consts_circuit_primary: ROConstantsCircuit> = - ROConstantsCircuit::>::default(); - - (0..num_circuits) - .map(|i| { - let c_primary = non_uniform_circuit.primary_circuit(i); - let F_arity = c_primary.arity(); - // Initialize ck for the primary - let circuit_primary: SuperNovaAugmentedCircuit<'_, Dual, NC::C1> = - SuperNovaAugmentedCircuit::new( - &augmented_circuit_params_primary, - None, - &c_primary, - 
ro_consts_circuit_primary.clone(), - num_circuits, - ); - let mut cs: ShapeCS = ShapeCS::new(); - circuit_primary - .synthesize(&mut cs) - .expect("circuit synthesis failed"); - - // We use the largest commitment_key for all instances - let r1cs_shape_primary = cs.r1cs_shape(); - R1CSWithArity::new(r1cs_shape_primary, F_arity) - }) - .collect::>() -} - -/// A resource buffer for SuperNova's [`RecursiveSNARK`] for storing scratch -/// values that are computed by `prove_step`, which allows the reuse of memory -/// allocations and avoids unnecessary new allocations in the critical section. -#[derive(Clone, Debug, Serialize, Deserialize)] -#[serde(bound = "")] -struct ResourceBuffer { - l_w: Option>, - l_u: Option>, - - ABC_Z_1: R1CSResult, - ABC_Z_2: R1CSResult, - - /// buffer for `commit_T` - T: Vec, -} - -/// A SNARK that proves the correct execution of an non-uniform incremental -/// computation -#[derive(Clone, Debug, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct RecursiveSNARK -where - E1: CurveCycleEquipped, -{ - // Cached digest of the public parameters - pp_digest: E1::Scalar, - num_augmented_circuits: usize, - - // Number of iterations performed up to now - i: usize, - - // Inputs and outputs of the primary circuits - z0_primary: Vec, - zi_primary: Vec, - - // Proven circuit index, and current program counter - proven_circuit_index: usize, - program_counter: E1::Scalar, - - /// Buffer for memory needed by the primary fold-step - buffer_primary: ResourceBuffer, - /// Buffer for memory needed by the secondary fold-step - buffer_secondary: ResourceBuffer>, - - // Relaxed instances for the primary circuits - // Entries are `None` if the circuit has not been executed yet - r_W_primary: Vec>>, - r_U_primary: Vec>>, - - // Inputs and outputs of the secondary circuit - z0_secondary: Vec< as Engine>::Scalar>, - zi_secondary: Vec< as Engine>::Scalar>, - // Relaxed instance for the secondary circuit - r_W_secondary: RelaxedR1CSWitness>, - r_U_secondary: 
RelaxedR1CSInstance>, - // Proof for the secondary circuit to be accumulated into r_secondary in the next iteration - l_w_secondary: R1CSWitness>, - l_u_secondary: R1CSInstance>, -} - -impl RecursiveSNARK -where - E1: CurveCycleEquipped, -{ - /// iterate base step to get new instance of recursive SNARK - #[allow(clippy::too_many_arguments)] - pub fn new>( - pp: &PublicParams, - non_uniform_circuit: &C0, - c_primary: &C0::C1, - c_secondary: &C0::C2, - z0_primary: &[E1::Scalar], - z0_secondary: &[ as Engine>::Scalar], - ) -> Result { - let num_augmented_circuits = non_uniform_circuit.num_circuits(); - let circuit_index = non_uniform_circuit.initial_circuit_index(); - - let r1cs_secondary = &pp.circuit_shape_secondary.r1cs_shape; - - // check the length of the secondary initial input - if z0_secondary.len() != pp.circuit_shape_secondary.F_arity { - return Err(SuperNovaError::NovaError( - NovaError::InvalidStepOutputLength, - )); - } - - // check the arity of all the primary circuits match the initial input length - // pp.circuit_shapes.iter().try_for_each(|circuit| { - // if circuit.F_arity != z0_primary.len() { - // return Err(SuperNovaError::NovaError( - // NovaError::InvalidStepOutputLength, - // )); - // } - // Ok(()) - // })?; - - // base case for the primary - let mut cs_primary = SatisfyingAssignment::::new(); - let program_counter = E1::Scalar::from(circuit_index as u64); - let inputs_primary: SuperNovaAugmentedCircuitInputs<'_, Dual> = - SuperNovaAugmentedCircuitInputs::new( - scalar_as_base::(pp.digest()), - E1::Scalar::ZERO, - z0_primary, - None, // zi = None for basecase - None, // U = [None], since no previous proofs have been computed - None, // u = None since we are not verifying a secondary circuit - None, // T = None since there is not proof to fold - Some(program_counter), // pc = initial_program_counter for primary circuit - E1::Scalar::ZERO, // u_index is always zero for the primary circuit - ); - - let circuit_primary: 
SuperNovaAugmentedCircuit<'_, Dual, C0::C1> = - SuperNovaAugmentedCircuit::new( - &pp.augmented_circuit_params_primary, - Some(inputs_primary), - c_primary, - pp.ro_consts_circuit_primary.clone(), - num_augmented_circuits, - ); - - let (zi_primary_pc_next, zi_primary) = - circuit_primary.synthesize(&mut cs_primary).map_err(|err| { - debug!("err {:?}", err); - NovaError::from(err) - })?; - if zi_primary.len() != pp[circuit_index].F_arity { - return Err(SuperNovaError::NovaError( - NovaError::InvalidStepOutputLength, - )); - } - let (u_primary, w_primary) = cs_primary - .r1cs_instance_and_witness(&pp[circuit_index].r1cs_shape, &pp.ck_primary) - .map_err(|err| { - debug!("err {:?}", err); - err - })?; - - // base case for the secondary - let mut cs_secondary = SatisfyingAssignment::>::new(); - let u_primary_index = as Engine>::Scalar::from(circuit_index as u64); - let inputs_secondary: SuperNovaAugmentedCircuitInputs<'_, E1> = - SuperNovaAugmentedCircuitInputs::new( - pp.digest(), - as Engine>::Scalar::ZERO, - z0_secondary, - None, // zi = None for basecase - None, // U = Empty list of accumulators for the primary circuits - Some(&u_primary), // Proof for first iteration of current primary circuit - None, // T = None, since we just copy u_primary rather than fold it - None, // program_counter is always None for secondary circuit - u_primary_index, // index of the circuit proof u_primary - ); - - let circuit_secondary: SuperNovaAugmentedCircuit<'_, E1, C0::C2> = - SuperNovaAugmentedCircuit::new( - &pp.augmented_circuit_params_secondary, - Some(inputs_secondary), - c_secondary, - pp.ro_consts_circuit_secondary.clone(), - num_augmented_circuits, - ); - let (_, zi_secondary) = circuit_secondary - .synthesize(&mut cs_secondary) - .map_err(NovaError::from)?; - if zi_secondary.len() != pp.circuit_shape_secondary.F_arity { - return Err(NovaError::InvalidStepOutputLength.into()); - } - let (u_secondary, w_secondary) = cs_secondary - .r1cs_instance_and_witness(r1cs_secondary, 
&pp.ck_secondary) - .map_err(|_| SuperNovaError::NovaError(NovaError::UnSat))?; - - // IVC proof for the primary circuit - let l_w_primary = w_primary; - let l_u_primary = u_primary; - let r_W_primary = - RelaxedR1CSWitness::from_r1cs_witness(&pp[circuit_index].r1cs_shape, l_w_primary); - - let r_U_primary = RelaxedR1CSInstance::from_r1cs_instance( - &*pp.ck_primary, - &pp[circuit_index].r1cs_shape, - l_u_primary, - ); - - // IVC proof of the secondary circuit - let l_w_secondary = w_secondary; - let l_u_secondary = u_secondary; - - // Initialize relaxed instance/witness pair for the secondary circuit proofs - let r_W_secondary = RelaxedR1CSWitness::>::default(r1cs_secondary); - let r_U_secondary = RelaxedR1CSInstance::default(&*pp.ck_secondary, r1cs_secondary); - - // Outputs of the two circuits and next program counter thus far. - let zi_primary = zi_primary - .iter() - .map(|v| { - v.get_value() - .ok_or(NovaError::from(SynthesisError::AssignmentMissing).into()) - }) - .collect::::Scalar>, SuperNovaError>>()?; - let zi_primary_pc_next = zi_primary_pc_next - .expect("zi_primary_pc_next missing") - .get_value() - .ok_or::(NovaError::from(SynthesisError::AssignmentMissing).into())?; - let zi_secondary = zi_secondary - .iter() - .map(|v| { - v.get_value() - .ok_or(NovaError::from(SynthesisError::AssignmentMissing).into()) - }) - .collect:: as Engine>::Scalar>, SuperNovaError>>()?; - - // handle the base case by initialize U_next in next round - let r_W_primary_initial_list = (0..num_augmented_circuits) - .map(|i| (i == circuit_index).then(|| r_W_primary.clone())) - .collect::>>>(); - - let r_U_primary_initial_list = (0..num_augmented_circuits) - .map(|i| (i == circuit_index).then(|| r_U_primary.clone())) - .collect::>>>(); - - // find the largest length r1cs shape for the buffer size - let max_num_cons = pp - .circuit_shapes - .iter() - .map(|circuit| circuit.r1cs_shape.num_cons) - .max() - .unwrap(); - - let buffer_primary = ResourceBuffer { - l_w: None, - l_u: 
None, - ABC_Z_1: R1CSResult::default(max_num_cons), - ABC_Z_2: R1CSResult::default(max_num_cons), - T: r1cs::default_T::(max_num_cons), - }; - - let buffer_secondary = ResourceBuffer { - l_w: None, - l_u: None, - ABC_Z_1: R1CSResult::default(r1cs_secondary.num_cons), - ABC_Z_2: R1CSResult::default(r1cs_secondary.num_cons), - T: r1cs::default_T::>(r1cs_secondary.num_cons), - }; - - Ok(Self { - pp_digest: pp.digest(), - num_augmented_circuits, - i: 0_usize, // after base case, next iteration start from 1 - z0_primary: z0_primary.to_vec(), - zi_primary, - - proven_circuit_index: circuit_index, - program_counter: zi_primary_pc_next, - - buffer_primary, - buffer_secondary, - - r_W_primary: r_W_primary_initial_list, - r_U_primary: r_U_primary_initial_list, - z0_secondary: z0_secondary.to_vec(), - zi_secondary, - r_W_secondary, - r_U_secondary, - l_w_secondary, - l_u_secondary, - }) - } - - /// Inputs of the primary circuits - pub fn z0_primary(&self) -> &Vec { - &self.z0_primary - } - - /// Outputs of the primary circuits - pub fn zi_primary(&self) -> &Vec { - &self.zi_primary - } - - /// executing a step of the incremental computation - #[allow(clippy::too_many_arguments)] - #[tracing::instrument(skip_all, name = "supernova::RecursiveSNARK::prove_step")] - pub fn prove_step< - C1: StepCircuit, - C2: StepCircuit< as Engine>::Scalar>, - >( - &mut self, - pp: &PublicParams, - c_primary: &C1, - c_secondary: &C2, - ) -> Result<(), SuperNovaError> { - // First step was already done in the constructor - if self.i == 0 { - self.i = 1; - return Ok(()); - } - - // save the inputs before proceeding to the `i+1`th step - let r_U_primary_i = self.r_U_primary.clone(); - // Create single-entry accumulator list for the secondary circuit to hand to - // SuperNovaAugmentedCircuitInputs - let r_U_secondary_i = vec![Some(self.r_U_secondary.clone())]; - let l_u_secondary_i = self.l_u_secondary.clone(); - - let circuit_index = c_primary.circuit_index(); - assert_eq!(self.program_counter, 
E1::Scalar::from(circuit_index as u64)); - - // fold the secondary circuit's instance - let (nifs_secondary, _) = NIFS::prove_mut( - &*pp.ck_secondary, - &pp.ro_consts_secondary, - &scalar_as_base::(self.pp_digest), - &pp.circuit_shape_secondary.r1cs_shape, - &mut self.r_U_secondary, - &mut self.r_W_secondary, - &self.l_u_secondary, - &self.l_w_secondary, - &mut self.buffer_secondary.T, - &mut self.buffer_secondary.ABC_Z_1, - &mut self.buffer_secondary.ABC_Z_2, - ) - .map_err(SuperNovaError::NovaError)?; - - let mut cs_primary = SatisfyingAssignment::::with_capacity( - pp[circuit_index].r1cs_shape.num_io + 1, - pp[circuit_index].r1cs_shape.num_vars, - ); - let T = Commitment::>::decompress(&nifs_secondary.comm_T) - .map_err(SuperNovaError::NovaError)?; - let inputs_primary: SuperNovaAugmentedCircuitInputs<'_, Dual> = - SuperNovaAugmentedCircuitInputs::new( - scalar_as_base::(self.pp_digest), - E1::Scalar::from(self.i as u64), - &self.z0_primary, - Some(&self.zi_primary), - Some(&r_U_secondary_i), - Some(&l_u_secondary_i), - Some(&T), - Some(self.program_counter), - E1::Scalar::ZERO, - ); - - let circuit_primary: SuperNovaAugmentedCircuit<'_, Dual, C1> = - SuperNovaAugmentedCircuit::new( - &pp.augmented_circuit_params_primary, - Some(inputs_primary), - c_primary, - pp.ro_consts_circuit_primary.clone(), - self.num_augmented_circuits, - ); - - let (zi_primary_pc_next, zi_primary) = circuit_primary - .synthesize(&mut cs_primary) - .map_err(NovaError::from)?; - if zi_primary.len() != pp[circuit_index].F_arity { - return Err(SuperNovaError::NovaError( - NovaError::InvalidInitialInputLength, - )); - } - - let (l_u_primary, l_w_primary) = cs_primary - .r1cs_instance_and_witness(&pp[circuit_index].r1cs_shape, &pp.ck_primary) - .map_err(SuperNovaError::NovaError)?; - - let (r_U_primary, r_W_primary) = if let (Some(Some(r_U_primary)), Some(Some(r_W_primary))) = ( - self.r_U_primary.get_mut(circuit_index), - self.r_W_primary.get_mut(circuit_index), - ) { - (r_U_primary, 
r_W_primary) - } else { - self.r_U_primary[circuit_index] = Some(RelaxedR1CSInstance::default( - &*pp.ck_primary, - &pp[circuit_index].r1cs_shape, - )); - self.r_W_primary[circuit_index] = - Some(RelaxedR1CSWitness::default(&pp[circuit_index].r1cs_shape)); - ( - self.r_U_primary[circuit_index].as_mut().unwrap(), - self.r_W_primary[circuit_index].as_mut().unwrap(), - ) - }; - - let (nifs_primary, _) = NIFS::prove_mut( - &*pp.ck_primary, - &pp.ro_consts_primary, - &self.pp_digest, - &pp[circuit_index].r1cs_shape, - r_U_primary, - r_W_primary, - &l_u_primary, - &l_w_primary, - &mut self.buffer_primary.T, - &mut self.buffer_primary.ABC_Z_1, - &mut self.buffer_primary.ABC_Z_2, - ) - .map_err(SuperNovaError::NovaError)?; - - let mut cs_secondary = SatisfyingAssignment::>::with_capacity( - pp.circuit_shape_secondary.r1cs_shape.num_io + 1, - pp.circuit_shape_secondary.r1cs_shape.num_vars, - ); - let binding = Commitment::::decompress(&nifs_primary.comm_T) - .map_err(SuperNovaError::NovaError)?; - let inputs_secondary: SuperNovaAugmentedCircuitInputs<'_, E1> = - SuperNovaAugmentedCircuitInputs::new( - self.pp_digest, - as Engine>::Scalar::from(self.i as u64), - &self.z0_secondary, - Some(&self.zi_secondary), - Some(&r_U_primary_i), - Some(&l_u_primary), - Some(&binding), - None, // pc is always None for secondary circuit - as Engine>::Scalar::from(circuit_index as u64), - ); - - let circuit_secondary: SuperNovaAugmentedCircuit<'_, E1, C2> = - SuperNovaAugmentedCircuit::new( - &pp.augmented_circuit_params_secondary, - Some(inputs_secondary), - c_secondary, - pp.ro_consts_circuit_secondary.clone(), - self.num_augmented_circuits, - ); - let (_, zi_secondary) = circuit_secondary - .synthesize(&mut cs_secondary) - .map_err(NovaError::from)?; - if zi_secondary.len() != pp.circuit_shape_secondary.F_arity { - return Err(SuperNovaError::NovaError( - NovaError::InvalidInitialInputLength, - )); - } - - let (l_u_secondary_next, l_w_secondary_next) = cs_secondary - 
.r1cs_instance_and_witness(&pp.circuit_shape_secondary.r1cs_shape, &pp.ck_secondary)?; - - // update the running instances and witnesses - let zi_primary = zi_primary - .iter() - .map(|v| { - v.get_value() - .ok_or(NovaError::from(SynthesisError::AssignmentMissing).into()) - }) - .collect::::Scalar>, SuperNovaError>>()?; - let zi_primary_pc_next = zi_primary_pc_next - .expect("zi_primary_pc_next missing") - .get_value() - .ok_or::(NovaError::from(SynthesisError::AssignmentMissing).into())?; - let zi_secondary = zi_secondary - .iter() - .map(|v| { - v.get_value() - .ok_or(NovaError::from(SynthesisError::AssignmentMissing).into()) - }) - .collect:: as Engine>::Scalar>, SuperNovaError>>()?; - - if zi_primary.len() != pp[circuit_index].F_arity - || zi_secondary.len() != pp.circuit_shape_secondary.F_arity - { - return Err(SuperNovaError::NovaError( - NovaError::InvalidStepOutputLength, - )); - } - - self.l_w_secondary = l_w_secondary_next; - self.l_u_secondary = l_u_secondary_next; - self.i += 1; - self.zi_primary = zi_primary; - self.zi_secondary = zi_secondary; - self.proven_circuit_index = circuit_index; - self.program_counter = zi_primary_pc_next; - Ok(()) - } - - /// verify recursive snark - pub fn verify( - &self, - pp: &PublicParams, - z0_primary: &[E1::Scalar], - z0_secondary: &[ as Engine>::Scalar], - ) -> Result<(Vec, Vec< as Engine>::Scalar>), SuperNovaError> { - // number of steps cannot be zero - if self.i == 0 { - debug!("must verify on valid RecursiveSNARK where i > 0"); - return Err(SuperNovaError::NovaError(NovaError::ProofVerifyError)); - } - - // Check lengths of r_primary - if self.r_U_primary.len() != self.num_augmented_circuits - || self.r_W_primary.len() != self.num_augmented_circuits - { - debug!("r_primary length mismatch"); - return Err(SuperNovaError::NovaError(NovaError::ProofVerifyError)); - } - - // Check that there are no missing instance/witness pairs - self.r_U_primary - .iter() - .zip_eq(self.r_W_primary.iter()) - .enumerate() - 
.try_for_each(|(i, (u, w))| match (u, w) { - (Some(_), Some(_)) | (None, None) => Ok(()), - _ => { - debug!("r_primary[{:?}]: mismatched instance/witness pair", i); - Err(SuperNovaError::NovaError(NovaError::ProofVerifyError)) - } - })?; - - let circuit_index = self.proven_circuit_index; - - // check we have an instance/witness pair for the circuit_index - if self.r_U_primary[circuit_index].is_none() { - debug!( - "r_primary[{:?}]: instance/witness pair is missing", - circuit_index - ); - return Err(SuperNovaError::NovaError(NovaError::ProofVerifyError)); - } - - // check the (relaxed) R1CS instances public outputs. - { - for (i, r_U_primary_i) in self.r_U_primary.iter().enumerate() { - if let Some(u) = r_U_primary_i { - if u.X.len() != 2 { - debug!( - "r_U_primary[{:?}] got instance length {:?} != 2", - i, - u.X.len(), - ); - return Err(SuperNovaError::NovaError(NovaError::ProofVerifyError)); - } - } - } - - if self.l_u_secondary.X.len() != 2 { - debug!( - "l_U_secondary got instance length {:?} != 2", - self.l_u_secondary.X.len(), - ); - return Err(SuperNovaError::NovaError(NovaError::ProofVerifyError)); - } - - if self.r_U_secondary.X.len() != 2 { - debug!( - "r_U_secondary got instance length {:?} != 2", - self.r_U_secondary.X.len(), - ); - return Err(SuperNovaError::NovaError(NovaError::ProofVerifyError)); - } - } - - let hash_primary = { - let num_absorbs = num_ro_inputs( - self.num_augmented_circuits, - pp.augmented_circuit_params_primary.get_n_limbs(), - pp[circuit_index].F_arity, - true, // is_primary - ); - - let mut hasher = - as Engine>::RO::new(pp.ro_consts_secondary.clone(), num_absorbs); - hasher.absorb(self.pp_digest); - hasher.absorb(E1::Scalar::from(self.i as u64)); - hasher.absorb(self.program_counter); - - for e in z0_primary { - hasher.absorb(*e); - } - for e in &self.zi_primary { - hasher.absorb(*e); - } - - self.r_U_secondary.absorb_in_ro(&mut hasher); - hasher.squeeze(NUM_HASH_BITS) - }; - - let hash_secondary = { - let num_absorbs = 
num_ro_inputs( - self.num_augmented_circuits, - pp.augmented_circuit_params_secondary.get_n_limbs(), - pp.circuit_shape_secondary.F_arity, - false, // is_primary - ); - let mut hasher = ::RO::new(pp.ro_consts_primary.clone(), num_absorbs); - hasher.absorb(scalar_as_base::(self.pp_digest)); - hasher.absorb( as Engine>::Scalar::from(self.i as u64)); - - for e in z0_secondary { - hasher.absorb(*e); - } - for e in &self.zi_secondary { - hasher.absorb(*e); - } - - self.r_U_primary.iter().enumerate().for_each(|(i, U)| { - U.as_ref() - .unwrap_or(&RelaxedR1CSInstance::default( - &*pp.ck_primary, - &pp[i].r1cs_shape, - )) - .absorb_in_ro(&mut hasher); - }); - hasher.squeeze(NUM_HASH_BITS) - }; - - if hash_primary != self.l_u_secondary.X[0] { - debug!( - "hash_primary {:?} not equal l_u_secondary.X[0] {:?}", - hash_primary, self.l_u_secondary.X[0] - ); - return Err(SuperNovaError::NovaError(NovaError::ProofVerifyError)); - } - if hash_secondary != scalar_as_base::>(self.l_u_secondary.X[1]) { - debug!( - "hash_secondary {:?} not equal l_u_secondary.X[1] {:?}", - hash_secondary, self.l_u_secondary.X[1] - ); - return Err(SuperNovaError::NovaError(NovaError::ProofVerifyError)); - } - - // check the satisfiability of all instance/witness pairs - let (res_r_primary, (res_r_secondary, res_l_secondary)) = rayon::join( - || { - self.r_U_primary - .par_iter() - .zip_eq(self.r_W_primary.par_iter()) - .enumerate() - .try_for_each(|(i, (u, w))| { - if let (Some(u), Some(w)) = (u, w) { - pp[i].r1cs_shape.is_sat_relaxed(&pp.ck_primary, u, w)? 
- } - Ok(()) - }) - }, - || { - rayon::join( - || { - pp.circuit_shape_secondary.r1cs_shape.is_sat_relaxed( - &pp.ck_secondary, - &self.r_U_secondary, - &self.r_W_secondary, - ) - }, - || { - pp.circuit_shape_secondary.r1cs_shape.is_sat( - &pp.ck_secondary, - &self.l_u_secondary, - &self.l_w_secondary, - ) - }, - ) - }, - ); - - res_r_primary.map_err(|err| match err { - NovaError::UnSatIndex(i) => SuperNovaError::UnSatIndex("r_primary", i), - e => SuperNovaError::NovaError(e), - })?; - res_r_secondary.map_err(|err| match err { - NovaError::UnSatIndex(i) => SuperNovaError::UnSatIndex("r_secondary", i), - e => SuperNovaError::NovaError(e), - })?; - res_l_secondary.map_err(|err| match err { - NovaError::UnSatIndex(i) => SuperNovaError::UnSatIndex("l_secondary", i), - e => SuperNovaError::NovaError(e), - })?; - - Ok((self.zi_primary.clone(), self.zi_secondary.clone())) - } -} - -/// SuperNova helper trait, for implementors that provide sets of sub-circuits -/// to be proved via NIVC. `C1` must be a type (likely an `Enum`) for which a -/// potentially-distinct instance can be supplied for each `index` below -/// `self.num_circuits()`. -pub trait NonUniformCircuit -where - E1: CurveCycleEquipped, -{ - /// The type of the step-circuits on the primary - type C1: StepCircuit; - /// The type of the step-circuits on the secondary - type C2: StepCircuit< as Engine>::Scalar>; - - /// Initial circuit index, defaults to zero. - fn initial_circuit_index(&self) -> usize { - 0 - } - - /// How many circuits are provided? - fn num_circuits(&self) -> usize; - - /// Return a new instance of the primary circuit at `index`. - fn primary_circuit(&self, circuit_index: usize) -> Self::C1; - - /// Return a new instance of the secondary circuit. - fn secondary_circuit(&self) -> Self::C2; -} - -/// Compute the circuit digest of a supernova [`StepCircuit`]. -/// -/// Note for callers: This function should be called with its performance -/// characteristics in mind. 
It will synthesize and digest the full `circuit` -/// given. -pub fn circuit_digest>( - circuit: &C, - num_augmented_circuits: usize, -) -> E1::Scalar { - let augmented_circuit_params = - SuperNovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, true); - - // ro_consts_circuit are parameterized by E2 because the type alias uses - // E2::Base = E1::Scalar - let ro_consts_circuit = ROConstantsCircuit::>::default(); - - // Initialize ck for the primary - let augmented_circuit: SuperNovaAugmentedCircuit<'_, Dual, C> = - SuperNovaAugmentedCircuit::new( - &augmented_circuit_params, - None, - circuit, - ro_consts_circuit, - num_augmented_circuits, - ); - let mut cs: ShapeCS = ShapeCS::new(); - let _ = augmented_circuit.synthesize(&mut cs); - - let F_arity = circuit.arity(); - let circuit_params = R1CSWithArity::new(cs.r1cs_shape(), F_arity); - circuit_params.digest() -} - -/// Compute the number of absorbs for the random-oracle computing the circuit -/// output X = H(vk, i, pc, z0, zi, U) -fn num_ro_inputs(num_circuits: usize, num_limbs: usize, arity: usize, is_primary: bool) -> usize { - let num_circuits = if is_primary { 1 } else { num_circuits }; - - // [W(x,y,∞), E(x,y,∞), u] + [X0, X1] * #num_limb - let instance_size = 3 + 3 + 1 + 2 * num_limbs; - - 2 // params, i - + usize::from(is_primary) // optional program counter - + 2 * arity // z0, zi - + num_circuits * instance_size -} - -pub mod error; -pub mod snark; -mod utils; - -#[cfg(test)] -mod test; diff --git a/src/supernova/snark.rs b/src/supernova/snark.rs deleted file mode 100644 index d01f9b5..0000000 --- a/src/supernova/snark.rs +++ /dev/null @@ -1,663 +0,0 @@ -//! 
This module defines a final compressing SNARK for supernova proofs - -use ff::PrimeField; -use serde::{Deserialize, Serialize}; - -use super::{error::SuperNovaError, PublicParams, RecursiveSNARK}; -use crate::{ - constants::NUM_HASH_BITS, - errors::NovaError, - r1cs::{R1CSInstance, RelaxedR1CSWitness}, - scalar_as_base, - traits::{ - snark::{BatchedRelaxedR1CSSNARKTrait, RelaxedR1CSSNARKTrait}, - AbsorbInROTrait, CurveCycleEquipped, Dual, Engine, ROTrait, - }, - RelaxedR1CSInstance, NIFS, -}; - -/// A type that holds the prover key for `CompressedSNARK` -#[derive(Debug, Serialize, Deserialize)] -pub struct ProverKey -where - E1: CurveCycleEquipped, - S1: BatchedRelaxedR1CSSNARKTrait, - S2: RelaxedR1CSSNARKTrait>, -{ - pub pk_primary: S1::ProverKey, - pub pk_secondary: S2::ProverKey, -} - -/// A type that holds the verifier key for `CompressedSNARK` -#[derive(Debug, Serialize, Deserialize)] -pub struct VerifierKey -where - E1: CurveCycleEquipped, - S1: BatchedRelaxedR1CSSNARKTrait, - S2: RelaxedR1CSSNARKTrait>, -{ - pub vk_primary: S1::VerifierKey, - pub vk_secondary: S2::VerifierKey, -} - -/// A SNARK that proves the knowledge of a valid `RecursiveSNARK` -#[derive(Clone, Debug, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct CompressedSNARK -where - E1: CurveCycleEquipped, - S1: BatchedRelaxedR1CSSNARKTrait, - S2: RelaxedR1CSSNARKTrait>, -{ - r_U_primary: Vec>, - r_W_snark_primary: S1, - - r_U_secondary: RelaxedR1CSInstance>, - l_u_secondary: R1CSInstance>, - nifs_secondary: NIFS>, - f_W_snark_secondary: S2, - - num_steps: usize, - program_counter: E1::Scalar, - - zn_primary: Vec, - zn_secondary: Vec< as Engine>::Scalar>, -} - -impl CompressedSNARK -where - E1: CurveCycleEquipped, - S1: BatchedRelaxedR1CSSNARKTrait, - S2: RelaxedR1CSSNARKTrait>, -{ - pub fn initialize_pk( - pp: &PublicParams, - primary_vk_digest: E1::Scalar, - secondary_vk_digest: as Engine>::Scalar, - ) -> Result, SuperNovaError> { - // TODO: Should we actually clone here? 
- let pk_primary = S1::initialize_pk(pp.ck_primary.clone(), primary_vk_digest)?; - let pk_secondary = S2::initialize_pk(pp.ck_secondary.clone(), secondary_vk_digest)?; - - return Ok(ProverKey { - pk_primary, - pk_secondary, - }); - } - - /// Creates prover and verifier keys for `CompressedSNARK` - pub fn setup( - pp: &PublicParams, - ) -> Result<(ProverKey, VerifierKey), SuperNovaError> { - let (pk_primary, vk_primary) = S1::setup(pp.ck_primary.clone(), pp.primary_r1cs_shapes())?; - - let (pk_secondary, vk_secondary) = S2::setup( - pp.ck_secondary.clone(), - &pp.circuit_shape_secondary.r1cs_shape, - )?; - - let prover_key = ProverKey { - pk_primary, - pk_secondary, - }; - let verifier_key = VerifierKey { - vk_primary, - vk_secondary, - }; - - Ok((prover_key, verifier_key)) - } - - /// Create a new `CompressedSNARK` - pub fn prove( - pp: &PublicParams, - pk: &ProverKey, - recursive_snark: &RecursiveSNARK, - ) -> Result { - // fold the secondary circuit's instance - let res_secondary = NIFS::prove( - &*pp.ck_secondary, - &pp.ro_consts_secondary, - &scalar_as_base::(pp.digest()), - &pp.circuit_shape_secondary.r1cs_shape, - &recursive_snark.r_U_secondary, - &recursive_snark.r_W_secondary, - &recursive_snark.l_u_secondary, - &recursive_snark.l_w_secondary, - ); - - let (nifs_secondary, (f_U_secondary, f_W_secondary), _) = res_secondary?; - - // Prepare the list of primary Relaxed R1CS instances (a default instance is - // provided for uninitialized circuits) - let r_U_primary = recursive_snark - .r_U_primary - .iter() - .enumerate() - .map(|(idx, r_U)| { - r_U.clone().unwrap_or_else(|| { - RelaxedR1CSInstance::default(&*pp.ck_primary, &pp[idx].r1cs_shape) - }) - }) - .collect::>(); - - // Prepare the list of primary relaxed R1CS witnesses (a default witness is - // provided for uninitialized circuits) - let r_W_primary: Vec> = recursive_snark - .r_W_primary - .iter() - .enumerate() - .map(|(idx, r_W)| { - r_W.clone() - .unwrap_or_else(|| 
RelaxedR1CSWitness::default(&pp[idx].r1cs_shape)) - }) - .collect::>(); - - // Generate a primary SNARK proof for the list of primary circuits - let r_W_snark_primary = S1::prove( - &pp.ck_primary, - &pk.pk_primary, - pp.primary_r1cs_shapes(), - &r_U_primary, - &r_W_primary, - )?; - - // Generate a secondary SNARK proof for the secondary circuit - let f_W_snark_secondary = S2::prove( - &pp.ck_secondary, - &pk.pk_secondary, - &pp.circuit_shape_secondary.r1cs_shape, - &f_U_secondary, - &f_W_secondary, - )?; - - let compressed_snark = Self { - r_U_primary, - r_W_snark_primary, - - r_U_secondary: recursive_snark.r_U_secondary.clone(), - l_u_secondary: recursive_snark.l_u_secondary.clone(), - nifs_secondary, - f_W_snark_secondary, - - num_steps: recursive_snark.i, - program_counter: recursive_snark.program_counter, - - zn_primary: recursive_snark.zi_primary.clone(), - zn_secondary: recursive_snark.zi_secondary.clone(), - }; - - Ok(compressed_snark) - } - - /// Verify the correctness of the `CompressedSNARK` - pub fn verify( - &self, - pp: &PublicParams, - vk: &VerifierKey, - z0_primary: &[E1::Scalar], - z0_secondary: &[ as Engine>::Scalar], - ) -> Result<(Vec, Vec< as Engine>::Scalar>), SuperNovaError> { - let last_circuit_idx = field_as_usize(self.program_counter); - - let num_field_primary_ro = 3 // params_next, i_new, program_counter_new - + 2 * pp[last_circuit_idx].F_arity // zo, z1 - + (7 + 2 * pp.augmented_circuit_params_primary.get_n_limbs()); // # 1 * (7 + [X0, X1]*#num_limb) - - // secondary circuit - // NOTE: This count ensure the number of witnesses sent by the prover must equal - // the number of NIVC circuits - let num_field_secondary_ro = 2 // params_next, i_new - + 2 * pp.circuit_shape_secondary.F_arity // zo, z1 - + pp.circuit_shapes.len() * (7 + 2 * pp.augmented_circuit_params_primary.get_n_limbs()); // #num_augment - - // Compute the primary and secondary hashes given the digest, program counter, - // instances, and witnesses provided by the prover - 
let (hash_primary, hash_secondary) = { - let mut hasher = - as Engine>::RO::new(pp.ro_consts_secondary.clone(), num_field_primary_ro); - - hasher.absorb(pp.digest()); - hasher.absorb(E1::Scalar::from(self.num_steps as u64)); - hasher.absorb(self.program_counter); - - for e in z0_primary { - hasher.absorb(*e); - } - - for e in &self.zn_primary { - hasher.absorb(*e); - } - - self.r_U_secondary.absorb_in_ro(&mut hasher); - - let mut hasher2 = - ::RO::new(pp.ro_consts_primary.clone(), num_field_secondary_ro); - - hasher2.absorb(scalar_as_base::(pp.digest())); - hasher2.absorb( as Engine>::Scalar::from(self.num_steps as u64)); - - for e in z0_secondary { - hasher2.absorb(*e); - } - - for e in &self.zn_secondary { - hasher2.absorb(*e); - } - - self.r_U_primary.iter().for_each(|U| { - U.absorb_in_ro(&mut hasher2); - }); - - ( - hasher.squeeze(NUM_HASH_BITS), - hasher2.squeeze(NUM_HASH_BITS), - ) - }; - - // Compare the computed hashes with the public IO of the last invocation of - // `prove_step` - if hash_primary != self.l_u_secondary.X[0] { - return Err(NovaError::ProofVerifyError.into()); - } - - if hash_secondary != scalar_as_base::>(self.l_u_secondary.X[1]) { - return Err(NovaError::ProofVerifyError.into()); - } - - // Verify the primary SNARK - let res_primary = self - .r_W_snark_primary - .verify(&vk.vk_primary, &self.r_U_primary); - - // Fold the secondary circuit's instance - let f_U_secondary = self.nifs_secondary.verify( - &pp.ro_consts_secondary, - &scalar_as_base::(pp.digest()), - &self.r_U_secondary, - &self.l_u_secondary, - )?; - - // Verify the secondary SNARK - let res_secondary = self - .f_W_snark_secondary - .verify(&vk.vk_secondary, &f_U_secondary); - - res_primary?; - - res_secondary?; - - Ok((self.zn_primary.clone(), self.zn_secondary.clone())) - } -} - -fn field_as_usize(x: F) -> usize { - u32::from_le_bytes(x.to_repr().as_ref()[0..4].try_into().unwrap()) as usize -} - -#[cfg(test)] -mod test { - use std::marker::PhantomData; - - use 
bellpepper_core::{num::AllocatedNum, ConstraintSystem, SynthesisError}; - use ff::Field; - - use super::*; - use crate::{ - provider::{ipa_pc, Bn256EngineIPA}, - spartan::{batched, batched_ppsnark, snark::RelaxedR1CSSNARK}, - supernova::{circuit::TrivialCircuit, NonUniformCircuit, StepCircuit}, - }; - - type EE = ipa_pc::EvaluationEngine; - type S1 = batched::BatchedRelaxedR1CSSNARK>; - type S1PP = batched_ppsnark::BatchedRelaxedR1CSSNARK>; - type S2 = RelaxedR1CSSNARK>; - - #[derive(Clone)] - struct SquareCircuit { - _p: PhantomData, - } - - impl StepCircuit for SquareCircuit { - fn arity(&self) -> usize { - 1 - } - - fn circuit_index(&self) -> usize { - 0 - } - - fn synthesize>( - &self, - cs: &mut CS, - _pc: Option<&AllocatedNum>, - z: &[AllocatedNum], - ) -> Result< - ( - Option>, - Vec>, - ), - SynthesisError, - > { - let z_i = &z[0]; - - let z_next = z_i.square(cs.namespace(|| "z_i^2"))?; - - let next_pc = - AllocatedNum::alloc(cs.namespace(|| "next_pc"), || Ok(E::Scalar::from(1u64)))?; - - cs.enforce( - || "next_pc = 1", - |lc| lc + CS::one(), - |lc| lc + next_pc.get_variable(), - |lc| lc + CS::one(), - ); - - Ok((Some(next_pc), vec![z_next])) - } - } - - #[derive(Clone)] - struct CubeCircuit { - _p: PhantomData, - } - - impl StepCircuit for CubeCircuit { - fn arity(&self) -> usize { - 1 - } - - fn circuit_index(&self) -> usize { - 1 - } - - fn synthesize>( - &self, - cs: &mut CS, - _pc: Option<&AllocatedNum>, - z: &[AllocatedNum], - ) -> Result< - ( - Option>, - Vec>, - ), - SynthesisError, - > { - let z_i = &z[0]; - - let z_sq = z_i.square(cs.namespace(|| "z_i^2"))?; - let z_cu = z_sq.mul(cs.namespace(|| "z_i^3"), z_i)?; - - let next_pc = - AllocatedNum::alloc(cs.namespace(|| "next_pc"), || Ok(E::Scalar::from(0u64)))?; - - cs.enforce( - || "next_pc = 0", - |lc| lc + CS::one(), - |lc| lc + next_pc.get_variable(), - |lc| lc, - ); - - Ok((Some(next_pc), vec![z_cu])) - } - } - - #[derive(Clone)] - enum TestCircuit { - Square(SquareCircuit), - 
Cube(CubeCircuit), - } - - impl TestCircuit { - fn new(num_steps: usize) -> Vec { - let mut circuits = Vec::new(); - - for idx in 0..num_steps { - if idx % 2 == 0 { - circuits.push(Self::Square(SquareCircuit { _p: PhantomData })) - } else { - circuits.push(Self::Cube(CubeCircuit { _p: PhantomData })) - } - } - - circuits - } - } - - impl StepCircuit for TestCircuit { - fn arity(&self) -> usize { - 1 - } - - fn circuit_index(&self) -> usize { - match self { - Self::Square(c) => c.circuit_index(), - Self::Cube(c) => c.circuit_index(), - } - } - - fn synthesize>( - &self, - cs: &mut CS, - pc: Option<&AllocatedNum>, - z: &[AllocatedNum], - ) -> Result< - ( - Option>, - Vec>, - ), - SynthesisError, - > { - match self { - Self::Square(c) => c.synthesize(cs, pc, z), - Self::Cube(c) => c.synthesize(cs, pc, z), - } - } - } - - impl NonUniformCircuit for TestCircuit { - type C1 = Self; - type C2 = TrivialCircuit< as Engine>::Scalar>; - - fn num_circuits(&self) -> usize { - 2 - } - - fn primary_circuit(&self, circuit_index: usize) -> Self { - match circuit_index { - 0 => Self::Square(SquareCircuit { _p: PhantomData }), - 1 => Self::Cube(CubeCircuit { _p: PhantomData }), - _ => panic!("Invalid circuit index"), - } - } - - fn secondary_circuit(&self) -> Self::C2 { - Default::default() - } - } - - #[derive(Clone)] - struct BigPowerCircuit { - _p: PhantomData, - } - - impl StepCircuit for BigPowerCircuit { - fn arity(&self) -> usize { - 1 - } - - fn circuit_index(&self) -> usize { - 1 - } - - fn synthesize>( - &self, - cs: &mut CS, - _pc: Option<&AllocatedNum>, - z: &[AllocatedNum], - ) -> Result< - ( - Option>, - Vec>, - ), - SynthesisError, - > { - let mut x = z[0].clone(); - let mut y = x.clone(); - for i in 0..10_000 { - y = x.square(cs.namespace(|| format!("x_sq_{i}")))?; - x = y.clone(); - } - - let next_pc = - AllocatedNum::alloc(cs.namespace(|| "next_pc"), || Ok(E::Scalar::from(0u64)))?; - - cs.enforce( - || "next_pc = 0", - |lc| lc + CS::one(), - |lc| lc + 
next_pc.get_variable(), - |lc| lc, - ); - - Ok((Some(next_pc), vec![y])) - } - } - - #[derive(Clone)] - enum BigTestCircuit { - Square(SquareCircuit), - BigPower(BigPowerCircuit), - } - - impl BigTestCircuit { - fn new(num_steps: usize) -> Vec { - let mut circuits = Vec::new(); - - for idx in 0..num_steps { - if idx % 2 == 0 { - circuits.push(Self::Square(SquareCircuit { _p: PhantomData })) - } else { - circuits.push(Self::BigPower(BigPowerCircuit { _p: PhantomData })) - } - } - - circuits - } - } - - impl StepCircuit for BigTestCircuit { - fn arity(&self) -> usize { - 1 - } - - fn circuit_index(&self) -> usize { - match self { - Self::Square(c) => c.circuit_index(), - Self::BigPower(c) => c.circuit_index(), - } - } - - fn synthesize>( - &self, - cs: &mut CS, - pc: Option<&AllocatedNum>, - z: &[AllocatedNum], - ) -> Result< - ( - Option>, - Vec>, - ), - SynthesisError, - > { - match self { - Self::Square(c) => c.synthesize(cs, pc, z), - Self::BigPower(c) => c.synthesize(cs, pc, z), - } - } - } - - impl NonUniformCircuit for BigTestCircuit { - type C1 = Self; - type C2 = TrivialCircuit< as Engine>::Scalar>; - - fn num_circuits(&self) -> usize { - 2 - } - - fn primary_circuit(&self, circuit_index: usize) -> Self { - match circuit_index { - 0 => Self::Square(SquareCircuit { _p: PhantomData }), - 1 => Self::BigPower(BigPowerCircuit { _p: PhantomData }), - _ => panic!("Invalid circuit index"), - } - } - - fn secondary_circuit(&self) -> Self::C2 { - Default::default() - } - } - - fn test_compression_with(num_steps: usize, circuits_factory: F) - where - E1: CurveCycleEquipped, - S1: BatchedRelaxedR1CSSNARKTrait, - S2: RelaxedR1CSSNARKTrait>, - C: NonUniformCircuit as Engine>::Scalar>> - + StepCircuit, - F: Fn(usize) -> Vec, - { - let secondary_circuit = TrivialCircuit::default(); - let test_circuits = circuits_factory(num_steps); - - let pp = PublicParams::setup(&test_circuits[0], &*S1::ck_floor(), &*S2::ck_floor()); - - let z0_primary = vec![E1::Scalar::from(17u64)]; - 
let z0_secondary = vec![ as Engine>::Scalar::ZERO]; - - let mut recursive_snark = RecursiveSNARK::new( - &pp, - &test_circuits[0], - &test_circuits[0], - &secondary_circuit, - &z0_primary, - &z0_secondary, - ) - .unwrap(); - - for circuit in test_circuits.iter().take(num_steps) { - recursive_snark - .prove_step(&pp, circuit, &secondary_circuit) - .unwrap(); - - recursive_snark - .verify(&pp, &z0_primary, &z0_secondary) - .unwrap(); - } - - let (prover_key, verifier_key) = CompressedSNARK::<_, S1, S2>::setup(&pp).unwrap(); - - let compressed_snark = CompressedSNARK::prove(&pp, &prover_key, &recursive_snark).unwrap(); - - compressed_snark - .verify(&pp, &verifier_key, &z0_primary, &z0_secondary) - .unwrap(); - } - - #[test] - fn test_nivc_trivial_with_compression() { - const NUM_STEPS: usize = 6; - test_compression_with::, S2<_>, _, _>(NUM_STEPS, TestCircuit::new); - test_compression_with::, S2<_>, _, _>(NUM_STEPS, TestCircuit::new); - } - - #[test] - fn test_compression_with_circuit_size_difference() { - const NUM_STEPS: usize = 4; - test_compression_with::, S2<_>, _, _>( - NUM_STEPS, - BigTestCircuit::new, - ); - test_compression_with::, S2<_>, _, _>(NUM_STEPS, BigTestCircuit::new); - } -} diff --git a/src/supernova/test.rs b/src/supernova/test.rs deleted file mode 100644 index 722fda0..0000000 --- a/src/supernova/test.rs +++ /dev/null @@ -1,876 +0,0 @@ -use core::marker::PhantomData; -use std::fmt::Write; - -use bellpepper_core::{num::AllocatedNum, ConstraintSystem, SynthesisError}; -use expect_test::{expect, Expect}; -use ff::{Field, PrimeField}; -use tap::TapOptional; - -use super::{utils::get_selector_vec_from_index, *}; -use crate::{ - bellpepper::test_shape_cs::TestShapeCS, - gadgets::{alloc_one, alloc_zero}, - provider::{poseidon::PoseidonConstantsCircuit, Bn256EngineIPA, GrumpkinEngine}, - supernova::circuit::{StepCircuit, TrivialCircuit}, - traits::snark::default_ck_hint, -}; - -#[derive(Clone, Debug, Default)] -struct CubicCircuit { - _p: PhantomData, - 
circuit_index: usize, - rom_size: usize, -} - -impl CubicCircuit { - fn new(circuit_index: usize, rom_size: usize) -> Self { - Self { - circuit_index, - rom_size, - _p: PhantomData, - } - } -} - -fn next_rom_index_and_pc>( - cs: &mut CS, - rom_index: &AllocatedNum, - allocated_rom: &[AllocatedNum], - pc: &AllocatedNum, -) -> Result<(AllocatedNum, AllocatedNum), SynthesisError> { - // Compute a selector for the current rom_index in allocated_rom - let current_rom_selector = get_selector_vec_from_index( - cs.namespace(|| "rom selector"), - rom_index, - allocated_rom.len(), - )?; - - // Enforce that allocated_rom[rom_index] = pc - for (rom, bit) in allocated_rom.iter().zip_eq(current_rom_selector.iter()) { - // if bit = 1, then rom = pc - // bit * (rom - pc) = 0 - cs.enforce( - || "enforce bit = 1 => rom = pc", - |lc| lc + &bit.lc(CS::one(), F::ONE), - |lc| lc + rom.get_variable() - pc.get_variable(), - |lc| lc, - ); - } - - // Get the index of the current rom, or the index of the invalid rom if no match - let current_rom_index = current_rom_selector - .iter() - .position(|bit| bit.get_value().is_some_and(|v| v)) - .unwrap_or_default(); - let next_rom_index = current_rom_index + 1; - - let rom_index_next = AllocatedNum::alloc_infallible(cs.namespace(|| "next rom index"), || { - F::from(next_rom_index as u64) - }); - cs.enforce( - || " rom_index + 1 - next_rom_index_num = 0", - |lc| lc, - |lc| lc, - |lc| lc + rom_index.get_variable() + CS::one() - rom_index_next.get_variable(), - ); - - // Allocate the next pc without checking. - // The next iteration will check whether the next pc is valid. 
- let pc_next = AllocatedNum::alloc_infallible(cs.namespace(|| "next pc"), || { - allocated_rom - .get(next_rom_index) - .and_then(|v| v.get_value()) - .unwrap_or(-F::ONE) - }); - - Ok((rom_index_next, pc_next)) -} - -impl StepCircuit for CubicCircuit -where - F: PrimeField, -{ - fn arity(&self) -> usize { - 2 + self.rom_size // value + rom_pc + rom[].len() - } - - fn circuit_index(&self) -> usize { - self.circuit_index - } - - fn synthesize>( - &self, - cs: &mut CS, - pc: Option<&AllocatedNum>, - z: &[AllocatedNum], - ) -> Result<(Option>, Vec>), SynthesisError> { - let rom_index = &z[1]; - let allocated_rom = &z[2..]; - - let (rom_index_next, pc_next) = next_rom_index_and_pc( - &mut cs.namespace(|| "next and rom_index and pc"), - rom_index, - allocated_rom, - pc.ok_or(SynthesisError::AssignmentMissing)?, - )?; - - // Consider a cubic equation: `x^3 + x + 5 = y`, where `x` and `y` are - // respectively the input and output. - let x = &z[0]; - let x_sq = x.square(cs.namespace(|| "x_sq"))?; - let x_cu = x_sq.mul(cs.namespace(|| "x_cu"), x)?; - let y = AllocatedNum::alloc(cs.namespace(|| "y"), || { - Ok(x_cu.get_value().unwrap() + x.get_value().unwrap() + F::from(5u64)) - })?; - - cs.enforce( - || "y = x^3 + x + 5", - |lc| { - lc + x_cu.get_variable() - + x.get_variable() - + CS::one() - + CS::one() - + CS::one() - + CS::one() - + CS::one() - }, - |lc| lc + CS::one(), - |lc| lc + y.get_variable(), - ); - - let mut z_next = vec![y]; - z_next.push(rom_index_next); - z_next.extend(z[2..].iter().cloned()); - Ok((Some(pc_next), z_next)) - } -} - -#[derive(Clone, Debug, Default)] -struct SquareCircuit { - _p: PhantomData, - circuit_index: usize, - rom_size: usize, -} - -impl SquareCircuit { - fn new(circuit_index: usize, rom_size: usize) -> Self { - Self { - circuit_index, - rom_size, - _p: PhantomData, - } - } -} - -impl StepCircuit for SquareCircuit -where - F: PrimeField, -{ - fn arity(&self) -> usize { - 2 + self.rom_size // value + rom_pc + rom[].len() - } - - fn 
circuit_index(&self) -> usize { - self.circuit_index - } - - fn synthesize>( - &self, - cs: &mut CS, - pc: Option<&AllocatedNum>, - z: &[AllocatedNum], - ) -> Result<(Option>, Vec>), SynthesisError> { - let rom_index = &z[1]; - let allocated_rom = &z[2..]; - - let (rom_index_next, pc_next) = next_rom_index_and_pc( - &mut cs.namespace(|| "next and rom_index and pc"), - rom_index, - allocated_rom, - pc.ok_or(SynthesisError::AssignmentMissing)?, - )?; - - // Consider an equation: `x^2 + x + 5 = y`, where `x` and `y` are respectively - // the input and output. - let x = &z[0]; - let x_sq = x.square(cs.namespace(|| "x_sq"))?; - let y = AllocatedNum::alloc(cs.namespace(|| "y"), || { - Ok(x_sq.get_value().unwrap() + x.get_value().unwrap() + F::from(5u64)) - })?; - - cs.enforce( - || "y = x^2 + x + 5", - |lc| { - lc + x_sq.get_variable() - + x.get_variable() - + CS::one() - + CS::one() - + CS::one() - + CS::one() - + CS::one() - }, - |lc| lc + CS::one(), - |lc| lc + y.get_variable(), - ); - - let mut z_next = vec![y]; - z_next.push(rom_index_next); - z_next.extend(z[2..].iter().cloned()); - Ok((Some(pc_next), z_next)) - } -} - -fn print_constraints_name_on_error_index< - E1, - C1: StepCircuit, - C2: StepCircuit< as Engine>::Scalar>, ->( - err: &SuperNovaError, - pp: &PublicParams, - c_primary: &C1, - c_secondary: &C2, - num_augmented_circuits: usize, -) where - E1: CurveCycleEquipped, -{ - match err { - SuperNovaError::UnSatIndex(msg, index) if *msg == "r_primary" => { - let circuit_primary: SuperNovaAugmentedCircuit<'_, Dual, C1> = - SuperNovaAugmentedCircuit::new( - &pp.augmented_circuit_params_primary, - None, - c_primary, - pp.ro_consts_circuit_primary.clone(), - num_augmented_circuits, - ); - let mut cs: TestShapeCS = TestShapeCS::new(); - let _ = circuit_primary.synthesize(&mut cs); - cs.constraints - .get(*index) - .tap_some(|constraint| debug!("{msg} failed at constraint {}", constraint.3)); - } - SuperNovaError::UnSatIndex(msg, index) - if *msg == "r_secondary" || 
*msg == "l_secondary" => - { - let circuit_secondary: SuperNovaAugmentedCircuit<'_, E1, C2> = - SuperNovaAugmentedCircuit::new( - &pp.augmented_circuit_params_secondary, - None, - c_secondary, - pp.ro_consts_circuit_secondary.clone(), - num_augmented_circuits, - ); - let mut cs: TestShapeCS> = TestShapeCS::new(); - let _ = circuit_secondary.synthesize(&mut cs); - cs.constraints - .get(*index) - .tap_some(|constraint| debug!("{msg} failed at constraint {}", constraint.3)); - } - _ => (), - } -} - -const OPCODE_0: usize = 0; -const OPCODE_1: usize = 1; - -struct TestROM { - rom: Vec, - _p: PhantomData, -} - -#[derive(Debug, Clone)] -enum TestROMCircuit { - Cubic(CubicCircuit), - Square(SquareCircuit), -} - -impl StepCircuit for TestROMCircuit { - fn arity(&self) -> usize { - match self { - Self::Cubic(x) => x.arity(), - Self::Square(x) => x.arity(), - } - } - - fn circuit_index(&self) -> usize { - match self { - Self::Cubic(x) => x.circuit_index(), - Self::Square(x) => x.circuit_index(), - } - } - - fn synthesize>( - &self, - cs: &mut CS, - pc: Option<&AllocatedNum>, - z: &[AllocatedNum], - ) -> Result<(Option>, Vec>), SynthesisError> { - match self { - Self::Cubic(x) => x.synthesize(cs, pc, z), - Self::Square(x) => x.synthesize(cs, pc, z), - } - } -} - -impl NonUniformCircuit for TestROM -where - E1: CurveCycleEquipped, -{ - type C1 = TestROMCircuit; - type C2 = TrivialCircuit< as Engine>::Scalar>; - - fn num_circuits(&self) -> usize { - 2 - } - - fn primary_circuit(&self, circuit_index: usize) -> Self::C1 { - match circuit_index { - 0 => TestROMCircuit::Cubic(CubicCircuit::new(circuit_index, self.rom.len())), - 1 => TestROMCircuit::Square(SquareCircuit::new(circuit_index, self.rom.len())), - _ => panic!("unsupported primary circuit index"), - } - } - - fn secondary_circuit(&self) -> Self::C2 { - Default::default() - } - - fn initial_circuit_index(&self) -> usize { - self.rom[0] - } -} - -impl TestROM { - fn new(rom: Vec) -> Self { - Self { - rom, - _p: 
Default::default(), - } - } -} - -fn test_trivial_nivc_with() -where - E1: CurveCycleEquipped, -{ - // Here demo a simple RAM machine - // - with 2 argumented circuit - // - each argumented circuit contains primary and secondary circuit - // - a memory commitment via a public IO `rom` (like a program) to constraint - // the sequence execution - - // This test also ready to add more argumented circuit and ROM can be arbitrary - // length - - // ROM is for constraints the sequence of execution order for opcode - - // TODO: replace with memory commitment along with suggestion from Supernova 4.4 - // optimisations - - // This is mostly done with the existing Nova code. With additions of U_i[] and - // program_counter checks in the augmented circuit. - - let rom = vec![ - OPCODE_1, OPCODE_1, OPCODE_0, OPCODE_0, OPCODE_1, OPCODE_1, OPCODE_0, OPCODE_0, OPCODE_1, - OPCODE_1, - ]; // Rom can be arbitrary length. - - let test_rom = TestROM::::new(rom); - - let pp = PublicParams::setup(&test_rom, &*default_ck_hint(), &*default_ck_hint()); - - // extend z0_primary/secondary with rom content - let mut z0_primary = vec![::Scalar::ONE]; - z0_primary.push(::Scalar::ZERO); // rom_index = 0 - z0_primary.extend( - test_rom - .rom - .iter() - .map(|opcode| ::Scalar::from(*opcode as u64)), - ); - let z0_secondary = vec![ as Engine>::Scalar::ONE]; - - let mut recursive_snark_option: Option> = None; - - for &op_code in test_rom.rom.iter() { - let circuit_primary = test_rom.primary_circuit(op_code); - let circuit_secondary = test_rom.secondary_circuit(); - - let mut recursive_snark = recursive_snark_option.unwrap_or_else(|| { - RecursiveSNARK::new( - &pp, - &test_rom, - &circuit_primary, - &circuit_secondary, - &z0_primary, - &z0_secondary, - ) - .unwrap() - }); - - recursive_snark - .prove_step(&pp, &circuit_primary, &circuit_secondary) - .unwrap(); - recursive_snark - .verify(&pp, &z0_primary, &z0_secondary) - .map_err(|err| { - print_constraints_name_on_error_index( - &err, - &pp, - 
&circuit_primary, - &circuit_secondary, - test_rom.num_circuits(), - ) - }) - .unwrap(); - - recursive_snark_option = Some(recursive_snark) - } - - assert!(recursive_snark_option.is_some()); - - // Now you can handle the Result using if let - let RecursiveSNARK { - zi_primary, - zi_secondary, - program_counter, - .. - } = &recursive_snark_option.unwrap(); - - println!("zi_primary: {:?}", zi_primary); - println!("zi_secondary: {:?}", zi_secondary); - println!("final program_counter: {:?}", program_counter); - - // The final program counter should be -1 - assert_eq!(*program_counter, -::Scalar::ONE); -} - -#[test] -#[tracing_test::traced_test] -fn test_trivial_nivc() { - // Experimenting with selecting the running claims for nifs - test_trivial_nivc_with::(); -} - -// In the following we use 1 to refer to the primary, and 2 to refer to the -// secondary circuit -fn test_recursive_circuit_with( - primary_params: &SuperNovaAugmentedCircuitParams, - secondary_params: &SuperNovaAugmentedCircuitParams, - ro_consts1: ROConstantsCircuit>, - ro_consts2: ROConstantsCircuit, - num_constraints_primary: &Expect, - num_constraints_secondary: &Expect, -) where - E1: CurveCycleEquipped, -{ - // Initialize the shape and ck for the primary - let step_circuit1 = TrivialCircuit::default(); - let arity1 = step_circuit1.arity(); - let circuit1: SuperNovaAugmentedCircuit< - '_, - Dual, - TrivialCircuit< as Engine>::Base>, - > = SuperNovaAugmentedCircuit::new(primary_params, None, &step_circuit1, ro_consts1.clone(), 2); - let mut cs: ShapeCS = ShapeCS::new(); - if let Err(e) = circuit1.synthesize(&mut cs) { - panic!("{}", e) - } - let (shape1, ck1) = cs.r1cs_shape_and_key(&*default_ck_hint()); - num_constraints_primary.assert_eq(&cs.num_constraints().to_string()); - - // Initialize the shape and ck for the secondary - let step_circuit2 = TrivialCircuit::default(); - let arity2 = step_circuit2.arity(); - let circuit2: SuperNovaAugmentedCircuit<'_, E1, TrivialCircuit<::Base>> = - 
SuperNovaAugmentedCircuit::new( - secondary_params, - None, - &step_circuit2, - ro_consts2.clone(), - 2, - ); - let mut cs: ShapeCS> = ShapeCS::new(); - if let Err(e) = circuit2.synthesize(&mut cs) { - panic!("{}", e) - } - let (shape2, ck2) = cs.r1cs_shape_and_key(&*default_ck_hint()); - num_constraints_secondary.assert_eq(&cs.num_constraints().to_string()); - - // Execute the base case for the primary - let zero1 = < as Engine>::Base as Field>::ZERO; - let z0 = vec![zero1; arity1]; - let mut cs1 = SatisfyingAssignment::::new(); - let inputs1: SuperNovaAugmentedCircuitInputs<'_, Dual> = - SuperNovaAugmentedCircuitInputs::new( - scalar_as_base::(zero1), // pass zero for testing - zero1, - &z0, - None, - None, - None, - None, - Some(zero1), - zero1, - ); - let step_circuit = TrivialCircuit::default(); - let circuit1: SuperNovaAugmentedCircuit< - '_, - Dual, - TrivialCircuit< as Engine>::Base>, - > = SuperNovaAugmentedCircuit::new(primary_params, Some(inputs1), &step_circuit, ro_consts1, 2); - if let Err(e) = circuit1.synthesize(&mut cs1) { - panic!("{}", e) - } - let (inst1, witness1) = cs1.r1cs_instance_and_witness(&shape1, &ck1).unwrap(); - // Make sure that this is satisfiable - shape1.is_sat(&ck1, &inst1, &witness1).unwrap(); - - // Execute the base case for the secondary - let zero2 = <::Base as Field>::ZERO; - let z0 = vec![zero2; arity2]; - let mut cs2 = SatisfyingAssignment::>::new(); - let inputs2: SuperNovaAugmentedCircuitInputs<'_, E1> = SuperNovaAugmentedCircuitInputs::new( - scalar_as_base::>(zero2), // pass zero for testing - zero2, - &z0, - None, - None, - Some(&inst1), - None, - None, - zero2, - ); - let step_circuit = TrivialCircuit::default(); - let circuit2: SuperNovaAugmentedCircuit<'_, E1, TrivialCircuit<::Base>> = - SuperNovaAugmentedCircuit::new( - secondary_params, - Some(inputs2), - &step_circuit, - ro_consts2, - 2, - ); - if let Err(e) = circuit2.synthesize(&mut cs2) { - panic!("{}", e) - } - let (inst2, witness2) = 
cs2.r1cs_instance_and_witness(&shape2, &ck2).unwrap(); - // Make sure that it is satisfiable - shape2.is_sat(&ck2, &inst2, &witness2).unwrap(); -} - -#[test] -fn test_recursive_circuit() { - let params1 = SuperNovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, true); - let params2 = SuperNovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, false); - let ro_consts1: ROConstantsCircuit = PoseidonConstantsCircuit::default(); - let ro_consts2: ROConstantsCircuit = PoseidonConstantsCircuit::default(); - - test_recursive_circuit_with::( - ¶ms1, - ¶ms2, - ro_consts1, - ro_consts2, - &expect!["9836"], - &expect!["12017"], - ); -} - -fn test_pp_digest_with(non_uniform_circuit: &NC, expected: &Expect) -where - E1: CurveCycleEquipped, - NC: NonUniformCircuit, -{ - // TODO: add back in https://github.com/argumentcomputer/arecibo/issues/53 - // // this tests public parameters with a size specifically intended for a - // spark-compressed SNARK let pp_hint1 = - // Some(SPrime::::commitment_key_floor()); let pp_hint2 = - // Some(SPrime::::commitment_key_floor()); - let pp = PublicParams::::setup( - non_uniform_circuit, - &*default_ck_hint(), - &*default_ck_hint(), - ); - - let digest_str = pp - .digest() - .to_repr() - .as_ref() - .iter() - .fold(String::new(), |mut output, b| { - let _ = write!(output, "{b:02x}"); - output - }); - expected.assert_eq(&digest_str); -} - -#[test] -fn test_supernova_pp_digest() { - let rom = vec![ - OPCODE_1, OPCODE_1, OPCODE_0, OPCODE_0, OPCODE_1, OPCODE_1, OPCODE_0, OPCODE_0, OPCODE_1, - OPCODE_1, - ]; // Rom can be arbitrary length. - let test_rom_grumpkin = TestROM::::new(rom); - - test_pp_digest_with::( - &test_rom_grumpkin, - &expect!["30418e576c11dd698054a6cc69d1b1e43ddf0f562abfb50b777147afad741a01"], - ); -} - -// y is a non-deterministic hint representing the cube root of the input at a -// step. 
-#[derive(Clone, Debug)] -struct CubeRootCheckingCircuit { - y: Option, -} - -impl StepCircuit for CubeRootCheckingCircuit -where - F: PrimeField, -{ - fn arity(&self) -> usize { - 1 - } - - fn circuit_index(&self) -> usize { - 0 - } - - fn synthesize>( - &self, - cs: &mut CS, - _pc: Option<&AllocatedNum>, - z: &[AllocatedNum], - ) -> Result<(Option>, Vec>), SynthesisError> { - let x = &z[0]; - - // we allocate a variable and set it to the provided non-deterministic hint. - let y = AllocatedNum::alloc(cs.namespace(|| "y"), || { - self.y.ok_or(SynthesisError::AssignmentMissing) - })?; - - // We now check if y = x^{1/3} by checking if y^3 = x - let y_sq = y.square(cs.namespace(|| "y_sq"))?; - let y_cube = y_sq.mul(cs.namespace(|| "y_cube"), &y)?; - - cs.enforce( - || "y^3 = x", - |lc| lc + y_cube.get_variable(), - |lc| lc + CS::one(), - |lc| lc + x.get_variable(), - ); - - let next_pc = alloc_one(&mut cs.namespace(|| "next_pc")); - - Ok((Some(next_pc), vec![y])) - } -} - -// y is a non-deterministic hint representing the fifth root of the input at a -// step. -#[derive(Clone, Debug)] -struct FifthRootCheckingCircuit { - y: Option, -} - -impl StepCircuit for FifthRootCheckingCircuit -where - F: PrimeField, -{ - fn arity(&self) -> usize { - 1 - } - - fn circuit_index(&self) -> usize { - 1 - } - - fn synthesize>( - &self, - cs: &mut CS, - _pc: Option<&AllocatedNum>, - z: &[AllocatedNum], - ) -> Result<(Option>, Vec>), SynthesisError> { - let x = &z[0]; - - // we allocate a variable and set it to the provided non-deterministic hint. 
- let y = AllocatedNum::alloc(cs.namespace(|| "y"), || { - self.y.ok_or(SynthesisError::AssignmentMissing) - })?; - - // We now check if y = x^{1/5} by checking if y^5 = x - let y_sq = y.square(cs.namespace(|| "y_sq"))?; - let y_quad = y_sq.square(cs.namespace(|| "y_quad"))?; - let y_pow_5 = y_quad.mul(cs.namespace(|| "y_fifth"), &y)?; - - cs.enforce( - || "y^5 = x", - |lc| lc + y_pow_5.get_variable(), - |lc| lc + CS::one(), - |lc| lc + x.get_variable(), - ); - - let next_pc = alloc_zero(&mut cs.namespace(|| "next_pc")); - - Ok((Some(next_pc), vec![y])) - } -} - -#[derive(Clone, Debug)] -enum RootCheckingCircuit { - Cube(CubeRootCheckingCircuit), - Fifth(FifthRootCheckingCircuit), -} - -impl RootCheckingCircuit { - fn new(num_steps: usize) -> (Vec, Vec) { - let mut powers = Vec::new(); - let rng = &mut rand::rngs::OsRng; - let mut seed = F::random(rng); - - for i in 0..num_steps + 1 { - let seed_sq = seed.clone().square(); - // Cube-root and fifth-root circuits alternate. We compute the hints backward, - // so the calculations appear to be associated with the 'wrong' - // circuit. The final circuit is discarded, and only the final seed is used (as - // z_0). 
- powers.push(if i % 2 == num_steps % 2 { - seed *= seed_sq; - Self::Fifth(FifthRootCheckingCircuit { y: Some(seed) }) - } else { - seed *= seed_sq.clone().square(); - Self::Cube(CubeRootCheckingCircuit { y: Some(seed) }) - }) - } - - // reverse the powers to get roots - let roots = powers.into_iter().rev().collect::>(); - (vec![roots[0].get_y().unwrap()], roots[1..].to_vec()) - } - - fn get_y(&self) -> Option { - match self { - Self::Fifth(x) => x.y, - Self::Cube(x) => x.y, - } - } -} - -impl StepCircuit for RootCheckingCircuit -where - F: PrimeField, -{ - fn arity(&self) -> usize { - 1 - } - - fn circuit_index(&self) -> usize { - match self { - Self::Cube(x) => x.circuit_index(), - Self::Fifth(x) => x.circuit_index(), - } - } - - fn synthesize>( - &self, - cs: &mut CS, - pc: Option<&AllocatedNum>, - z: &[AllocatedNum], - ) -> Result<(Option>, Vec>), SynthesisError> { - match self { - Self::Cube(c) => c.synthesize(cs, pc, z), - Self::Fifth(c) => c.synthesize(cs, pc, z), - } - } -} - -impl NonUniformCircuit for RootCheckingCircuit -where - E1: CurveCycleEquipped, -{ - type C1 = Self; - type C2 = TrivialCircuit< as Engine>::Scalar>; - - fn num_circuits(&self) -> usize { - 2 - } - - fn primary_circuit(&self, circuit_index: usize) -> Self { - match circuit_index { - 0 => Self::Cube(CubeRootCheckingCircuit { y: None }), - 1 => Self::Fifth(FifthRootCheckingCircuit { y: None }), - _ => unreachable!(), - } - } - - fn secondary_circuit(&self) -> Self::C2 { - TrivialCircuit::::default() - } -} - -fn test_nivc_nondet_with() -where - E1: CurveCycleEquipped, -{ - let circuit_secondary = TrivialCircuit::default(); - - let num_steps = 3; - - // produce non-deterministic hint - let (z0_primary, roots) = RootCheckingCircuit::new(num_steps); - assert_eq!(num_steps, roots.len()); - let z0_secondary = vec![ as Engine>::Scalar::ZERO]; - - // produce public parameters - let pp = PublicParams::::setup(&roots[0], &*default_ck_hint(), &*default_ck_hint()); - // produce a recursive SNARK - 
- let circuit_primary = &roots[0]; - - let mut recursive_snark = RecursiveSNARK::::new( - &pp, - circuit_primary, - circuit_primary, - &circuit_secondary, - &z0_primary, - &z0_secondary, - ) - .map_err(|err| { - print_constraints_name_on_error_index(&err, &pp, circuit_primary, &circuit_secondary, 2) - }) - .unwrap(); - - for circuit_primary in roots.iter().take(num_steps) { - let res = recursive_snark.prove_step(&pp, circuit_primary, &circuit_secondary); - assert!(res - .map_err(|err| { - print_constraints_name_on_error_index( - &err, - &pp, - circuit_primary, - &circuit_secondary, - 2, - ) - }) - .is_ok()); - - // verify the recursive SNARK - recursive_snark - .verify(&pp, &z0_primary, &z0_secondary) - .map_err(|err| { - print_constraints_name_on_error_index( - &err, - &pp, - circuit_primary, - &circuit_secondary, - 2, - ) - }) - .unwrap(); - } -} - -#[test] -fn test_nivc_nondet() { - test_nivc_nondet_with::(); -} diff --git a/src/traits/commitment.rs b/src/traits/commitment.rs deleted file mode 100644 index 87487fb..0000000 --- a/src/traits/commitment.rs +++ /dev/null @@ -1,89 +0,0 @@ -//! This module defines a collection of traits that define the behavior of a -//! commitment engine We require the commitment engine to provide a commitment -//! to vectors with a single group element -use core::{ - fmt::Debug, - ops::{Add, Mul, MulAssign}, -}; - -use serde::{Deserialize, Serialize}; - -use crate::{ - errors::NovaError, - traits::{AbsorbInROTrait, Engine, TranscriptReprTrait}, -}; - -/// A helper trait for types implementing scalar multiplication. 
-pub trait ScalarMul: Mul + MulAssign {} - -impl ScalarMul for T where T: Mul + MulAssign -{} - -/// This trait defines the behavior of the commitment -pub trait CommitmentTrait: - Clone - + Copy - + Debug - + Default - + PartialEq - + Eq - + Send - + Sync - + TranscriptReprTrait - + Serialize - + for<'de> Deserialize<'de> - + AbsorbInROTrait - + Add - + ScalarMul -{ - /// Holds the type of the compressed commitment - type CompressedCommitment: Clone - + Debug - + PartialEq - + Eq - + Send - + Sync - + TranscriptReprTrait - + Serialize - + for<'de> Deserialize<'de>; - - /// Compresses self into a compressed commitment - fn compress(&self) -> Self::CompressedCommitment; - - /// Returns the coordinate representation of the commitment - fn to_coordinates(&self) -> (E::Base, E::Base, bool); - - /// Decompresses a compressed commitment into a commitment - fn decompress(c: &Self::CompressedCommitment) -> Result; -} - -/// A trait that helps determine the length of a structure. -/// Note this does not impose any memory representation constraints on the -/// structure. -pub trait Len { - /// Returns the length of the structure. - fn length(&self) -> usize; -} - -/// A trait that ties different pieces of the commitment generation together -pub trait CommitmentEngineTrait: Clone + Send + Sync { - /// Holds the type of the commitment key - /// The key should quantify its length in terms of group generators. 
- type CommitmentKey: Len - + Clone - + PartialEq - + Debug - + Send - + Sync - + Serialize - + for<'de> Deserialize<'de>; - - /// Holds the type of the commitment - type Commitment: CommitmentTrait; - - /// Samples a new commitment key of a specified size - fn setup(label: &'static [u8], n: usize) -> Self::CommitmentKey; - - /// Commits to the provided vector using the provided generators - fn commit(ck: &Self::CommitmentKey, v: &[E::Scalar]) -> Self::Commitment; -} diff --git a/src/traits/evaluation.rs b/src/traits/evaluation.rs deleted file mode 100644 index e7d7537..0000000 --- a/src/traits/evaluation.rs +++ /dev/null @@ -1,60 +0,0 @@ -//! This module defines a collection of traits that define the behavior of a -//! polynomial evaluation engine A vector of size N is treated as a multilinear -//! polynomial in \log{N} variables, and a commitment provided by the commitment -//! engine is treated as a multilinear polynomial commitment -use std::sync::Arc; - -use serde::{Deserialize, Serialize}; - -use crate::{ - errors::NovaError, - traits::{commitment::CommitmentEngineTrait, Engine}, -}; - -/// A trait that ties different pieces of the commitment evaluation together -pub trait EvaluationEngineTrait: Clone + Send + Sync { - /// A type that holds the prover key - type ProverKey: Send + Sync + Serialize + for<'de> Deserialize<'de>; - - /// A type that holds the verifier key - type VerifierKey: Send - + Sync - // required for easy Digest computation purposes, could be relaxed to - // [`crate::digest::Digestible`] - + Serialize - + for<'de> Deserialize<'de>; - - /// A type that holds the evaluation argument - type EvaluationArgument: Clone + Send + Sync + Serialize + for<'de> Deserialize<'de>; - - /// A method to perform any additional setup needed to produce proofs of - /// evaluations - /// - /// **Note:** This method should be cheap and should not copy most of the - /// commitment key. Look at `CommitmentEngineTrait::setup` for generating - /// SRS data. 
- fn setup( - ck: Arc<<::CE as CommitmentEngineTrait>::CommitmentKey>, - ) -> (Self::ProverKey, Self::VerifierKey); - - /// A method to prove the evaluation of a multilinear polynomial - fn prove( - ck: &<::CE as CommitmentEngineTrait>::CommitmentKey, - pk: &Self::ProverKey, - transcript: &mut E::TE, - comm: &<::CE as CommitmentEngineTrait>::Commitment, - poly: &[E::Scalar], - point: &[E::Scalar], - eval: &E::Scalar, - ) -> Result; - - /// A method to verify the purported evaluation of a multilinear polynomials - fn verify( - vk: &Self::VerifierKey, - transcript: &mut E::TE, - comm: &<::CE as CommitmentEngineTrait>::Commitment, - point: &[E::Scalar], - eval: &E::Scalar, - arg: &Self::EvaluationArgument, - ) -> Result<(), NovaError>; -} diff --git a/src/traits/mod.rs b/src/traits/mod.rs deleted file mode 100644 index 1ad5602..0000000 --- a/src/traits/mod.rs +++ /dev/null @@ -1,192 +0,0 @@ -//! This module defines various traits required by the users of the library to -//! implement. -use core::fmt::Debug; - -use bellpepper_core::{boolean::AllocatedBit, num::AllocatedNum, ConstraintSystem, SynthesisError}; -use ff::{PrimeField, PrimeFieldBits}; -use num_bigint::BigInt; -use serde::{Deserialize, Serialize}; - -use crate::errors::NovaError; - -pub mod commitment; - -use commitment::CommitmentEngineTrait; - -/// Represents an element of a group -/// This is currently tailored for an elliptic curve group -pub trait Group: Clone + Copy + Debug + Send + Sync + Sized + Eq + PartialEq { - /// A type representing an element of the base field of the group - type Base: PrimeFieldBits + Serialize + for<'de> Deserialize<'de>; - - /// A type representing an element of the scalar field of the group - type Scalar: PrimeFieldBits - + PrimeFieldExt - + Send - + Sync - + Serialize - + for<'de> Deserialize<'de>; - - /// Returns A, B, the order of the group, the size of the base field as big - /// integers - fn group_params() -> (Self::Base, Self::Base, BigInt, BigInt); -} - -/// A 
collection of engines that are required by the library -pub trait Engine: Clone + Copy + Debug + Send + Sync + Sized + Eq + PartialEq { - /// A type representing an element of the base field of the group - type Base: PrimeFieldBits - + TranscriptReprTrait - + Serialize - + for<'de> Deserialize<'de>; - - /// A type representing an element of the scalar field of the group - type Scalar: PrimeFieldBits - + PrimeFieldExt - + Send - + Sync - + TranscriptReprTrait - + Serialize - + for<'de> Deserialize<'de>; - - /// A type that represents an element of the group - type GE: Group + Serialize + for<'de> Deserialize<'de>; - - /// A type that represents a circuit-friendly sponge that consumes elements - /// from the base field and squeezes out elements of the scalar field - type RO: ROTrait; - - /// An alternate implementation of `Self::RO` in the circuit model - type ROCircuit: ROCircuitTrait; - - /// A type that provides a generic Fiat-Shamir transcript to be used when - /// externalizing proofs - type TE: TranscriptEngineTrait; - - /// A type that defines a commitment engine over scalars in the group - type CE: CommitmentEngineTrait; -} - -/// This is a convenience trait to pair engines which fields are in a curve -/// cycle relationship -pub trait CurveCycleEquipped: Engine { - /// The secondary `Engine` of `Self` - type Secondary: Engine::Scalar, Scalar = ::Base>; -} - -/// Convenience projection to the secondary `Engine` of a `CurveCycleEquipped` -pub type Dual = ::Secondary; - -/// A helper trait to absorb different objects in RO -pub trait AbsorbInROTrait { - /// Absorbs the value in the provided RO - fn absorb_in_ro(&self, ro: &mut E::RO); -} - -/// A helper trait that defines the behavior of a hash function that we use as -/// an RO -pub trait ROTrait { - /// The circuit alter ego of this trait impl - this constrains it to use the - /// same constants - type CircuitRO: ROCircuitTrait; - - /// A type representing constants/parameters associated with the hash - /// 
function - type Constants: Debug - + Default - + Clone - + PartialEq - + Send - + Sync - + Serialize - + for<'de> Deserialize<'de>; - - /// Initializes the hash function - fn new(constants: Self::Constants, num_absorbs: usize) -> Self; - - /// Adds a scalar to the internal state - fn absorb(&mut self, e: Base); - - /// Returns a challenge of `num_bits` by hashing the internal state - fn squeeze(&mut self, num_bits: usize) -> Scalar; -} - -/// A helper trait that defines the behavior of a hash function that we use as -/// an RO in the circuit model -pub trait ROCircuitTrait { - /// the vanilla alter ego of this trait - this constrains it to use the same - /// constants - type NativeRO: ROTrait; - - /// A type representing constants/parameters associated with the hash - /// function on this Base field - type Constants: Debug - + Default - + Clone - + PartialEq - + Send - + Sync - + Serialize - + for<'de> Deserialize<'de>; - - /// Initializes the hash function - fn new(constants: Self::Constants, num_absorbs: usize) -> Self; - - /// Adds a scalar to the internal state - fn absorb(&mut self, e: &AllocatedNum); - - /// Returns a challenge of `num_bits` by hashing the internal state - fn squeeze>( - &mut self, - cs: CS, - num_bits: usize, - ) -> Result, SynthesisError>; -} - -/// An alias for constants associated with `E::RO` -pub type ROConstants = - <::RO as ROTrait<::Base, ::Scalar>>::Constants; - -/// An alias for constants associated with `E::ROCircuit` -pub type ROConstantsCircuit = - <::ROCircuit as ROCircuitTrait<::Base>>::Constants; - -/// This trait allows types to implement how they want to be added to -/// `TranscriptEngine` -pub trait TranscriptReprTrait: Send + Sync { - /// returns a byte representation of self to be added to the transcript - fn to_transcript_bytes(&self) -> Vec; -} - -/// This trait defines the behavior of a transcript engine compatible with -/// Spartan -pub trait TranscriptEngineTrait: Send + Sync { - /// initializes the transcript - fn 
new(label: &'static [u8]) -> Self; - - /// returns a scalar element of the group as a challenge - fn squeeze(&mut self, label: &'static [u8]) -> Result; - - /// absorbs any type that implements `TranscriptReprTrait` under a label - fn absorb>(&mut self, label: &'static [u8], o: &T); - - /// adds a domain separator - fn dom_sep(&mut self, bytes: &'static [u8]); -} - -/// Defines additional methods on `PrimeField` objects -pub trait PrimeFieldExt: PrimeField { - /// Returns a scalar representing the bytes - fn from_uniform(bytes: &[u8]) -> Self; -} - -impl> TranscriptReprTrait for &[T] { - fn to_transcript_bytes(&self) -> Vec { - self.iter() - .flat_map(|t| t.to_transcript_bytes()) - .collect::>() - } -} - -pub mod evaluation; -pub mod snark; diff --git a/src/traits/snark.rs b/src/traits/snark.rs deleted file mode 100644 index 8e6f93a..0000000 --- a/src/traits/snark.rs +++ /dev/null @@ -1,137 +0,0 @@ -//! This module defines a collection of traits that define the behavior of a -//! `zkSNARK` for `RelaxedR1CS` -use std::sync::Arc; - -use serde::{Deserialize, Serialize}; - -use crate::{ - errors::NovaError, - r1cs::{R1CSShape, RelaxedR1CSInstance, RelaxedR1CSWitness}, - traits::Engine, - CommitmentKey, -}; - -// NOTES: This function seems heavily reliant on dynamic allocation all to -// return 0 in the end... - -/// Public parameter creation takes a size hint. This size hint carries the -/// particular requirements of the final compressing SNARK the user expected to -/// use with these public parameters, and the below is a sensible default, which -/// is to not require any more bases then the usual (maximum of the number of -/// variables and constraints of the involved R1CS circuit). 
-pub fn default_ck_hint() -> Box Fn(&'a R1CSShape) -> usize> { - // The default is to not put an additional floor on the size of the commitment - // key - Box::new(|_shape: &R1CSShape| 0) -} - -// NOTES: I'm not sure having a trait here is really necessary unless you're -// wanting to have a much larger abstraction. I'd consider just gutting this and -// forming one SNARK that we use. - -/// A trait that defines the behavior of a `zkSNARK` -pub trait RelaxedR1CSSNARKTrait: - Send + Sync + Serialize + for<'de> Deserialize<'de> -{ - /// A type that represents the prover's key - type ProverKey: Send + Sync; - - /// A type that represents the verifier's key - type VerifierKey: Send + Sync + Serialize; - - /// This associated function (not a method) provides a hint that offers - /// a minimum sizing cue for the commitment key used by this SNARK - /// implementation. The commitment key passed in setup should then - /// be at least as large as this hint. - fn ck_floor() -> Box Fn(&'a R1CSShape) -> usize> { - // The default is to not put an additional floor on the size of the commitment - // key - default_ck_hint() - } - - /// Initialize a ProvingKey directly from a CommitmentKey and a - /// already known vk_digest. - fn initialize_pk( - ck: Arc>, - vk_digest: E::Scalar, - ) -> Result; - - /// Produces the keys for the prover and the verifier - fn setup( - ck: Arc>, - S: &R1CSShape, - ) -> Result<(Self::ProverKey, Self::VerifierKey), NovaError>; - - /// Produces a new SNARK for a relaxed R1CS - fn prove( - ck: &CommitmentKey, - pk: &Self::ProverKey, - S: &R1CSShape, - U: &RelaxedR1CSInstance, - W: &RelaxedR1CSWitness, - ) -> Result; - - /// Verifies a SNARK for a relaxed R1CS - fn verify(&self, vk: &Self::VerifierKey, U: &RelaxedR1CSInstance) -> Result<(), NovaError>; -} - -/// A trait that defines the behavior of a `zkSNARK` to prove knowledge of -/// satisfying witness to batches of relaxed R1CS instances. 
-pub trait BatchedRelaxedR1CSSNARKTrait: - Send + Sync + Serialize + for<'de> Deserialize<'de> -{ - /// A type that represents the prover's key - type ProverKey: Send + Sync + Serialize + for<'de> Deserialize<'de>; - - /// A type that represents the verifier's key - type VerifierKey: Send + Sync + DigestHelperTrait + Serialize + for<'de> Deserialize<'de>; - - // NOTES: If we don't need something more general here, this is just an odd - // thing to have defined generically since it just calls the weird function - // above. - - /// This associated function (not a method) provides a hint that offers - /// a minimum sizing cue for the commitment key used by this SNARK - /// implementation. The commitment key passed in setup should then - /// be at least as large as this hint. - fn ck_floor() -> Box Fn(&'a R1CSShape) -> usize> { - default_ck_hint() - } - - /// Initialize a ProvingKey directly from a CommitmentKey and a - /// already known vk_digest. - fn initialize_pk( - ck: Arc>, - vk_digest: E::Scalar, - ) -> Result; - - /// Produces the keys for the prover and the verifier - /// - /// **Note:** This method should be cheap and should not copy most of the - /// commitment key. Look at `CommitmentEngineTrait::setup` for generating - /// SRS data. - fn setup( - ck: Arc>, // NOTES: Why `Arc` this? 
- S: Vec<&R1CSShape>, /* NOTES: Why not a &[R1CSShape] here?, would get the same - * thing across as an iter i think */ - ) -> Result<(Self::ProverKey, Self::VerifierKey), NovaError>; - - /// Produces a new SNARK for a batch of relaxed R1CS - fn prove( - ck: &CommitmentKey, - pk: &Self::ProverKey, - S: Vec<&R1CSShape>, - U: &[RelaxedR1CSInstance], - W: &[RelaxedR1CSWitness], - ) -> Result; - - /// Verifies a SNARK for a batch of relaxed R1CS - fn verify(&self, vk: &Self::VerifierKey, U: &[RelaxedR1CSInstance]) - -> Result<(), NovaError>; -} - -/// A helper trait that defines the behavior of a verifier key of `zkSNARK` -pub trait DigestHelperTrait { - /// Returns the digest of the verifier's key - fn digest(&self) -> E::Scalar; -} diff --git a/taplo.toml b/taplo.toml new file mode 100644 index 0000000..f58bb47 --- /dev/null +++ b/taplo.toml @@ -0,0 +1,20 @@ +# .toml file formatting settings for `taplo` +# https://taplo.tamasfe.dev/configuration/formatter-options.html + +[formatting] +# align entries vertically +align_entries=true +# allow up to 1 consecutive empty line (default: 2) +allowed_blank_lines=1 +# collapse arrays into one line if they fit +array_auto_collapse=true +# default: 80 +column_width=100 +# remove whitespace around '=' +compact_entries=true +# alphabetically sort entries not separated by line breaks +reorder_keys=false +# align entries vertically (default: true) +# align_comments =false +# expand arrays into multiple lines (default: true) +# array_auto_expand =false From 032827bfbad720e2f1a68884724f6f8f9f413742 Mon Sep 17 00:00:00 2001 From: Colin Roberts Date: Fri, 28 Feb 2025 06:44:20 -0700 Subject: [PATCH 02/51] fix: some old tests --- prover/src/digest.rs | 151 +++++++++++++++++++------------------------ prover/src/lib.rs | 44 ++++++------- 2 files changed, 89 insertions(+), 106 deletions(-) diff --git a/prover/src/digest.rs b/prover/src/digest.rs index 6be08e3..4063ede 100644 --- a/prover/src/digest.rs +++ b/prover/src/digest.rs @@ -67,87 
+67,70 @@ impl<'a, F: PrimeField, T: Digestible> DigestComputer<'a, F, T> { } } -// #[cfg(test)] -// mod tests { -// use ff::Field; -// use once_cell::sync::OnceCell; -// use serde::{Deserialize, Serialize}; - -// use super::{DigestComputer, SimpleDigestible}; -// use crate::traits::Engine; - -// type E = PallasEngine; - -// #[derive(Serialize, Deserialize)] -// struct S { -// i: usize, -// #[serde(skip, default = "OnceCell::new")] -// digest: OnceCell, -// } - -// impl SimpleDigestible for S {} - -// impl S { -// fn new(i: usize) -> Self { -// Self { -// i, -// digest: OnceCell::new(), -// } -// } - -// fn digest(&self) -> E::Scalar { -// self.digest -// .get_or_try_init(|| DigestComputer::new(self).digest()) -// .cloned() -// .unwrap() -// } -// } - -// #[test] -// fn test_digest_field_not_ingested_in_computation() { -// let s1 = S::::new(42); - -// // let's set up a struct with a weird digest field to make sure the -// digest // computation does not depend of it -// let oc = OnceCell::new(); -// oc.set(::Scalar::ONE).unwrap(); - -// let s2: S = S { i: 42, digest: oc }; - -// assert_eq!( -// DigestComputer::<::Scalar, _>::new(&s1) -// .digest() -// .unwrap(), -// DigestComputer::<::Scalar, _>::new(&s2) -// .digest() -// .unwrap() -// ); - -// // note: because of the semantics of `OnceCell::get_or_try_init`, the -// above // equality will not result in `s1.digest() == s2.digest` -// assert_ne!( -// s2.digest(), -// DigestComputer::<::Scalar, _>::new(&s2) -// .digest() -// .unwrap() -// ); -// } - -// #[test] -// fn test_digest_impervious_to_serialization() { -// let good_s = S::::new(42); - -// // let's set up a struct with a weird digest field to confuse -// deserializers let oc = OnceCell::new(); -// oc.set(::Scalar::ONE).unwrap(); - -// let bad_s: S = S { i: 42, digest: oc }; -// // this justifies the adjective "bad" -// assert_ne!(good_s.digest(), bad_s.digest()); - -// let naughty_bytes = bincode::serialize(&bad_s).unwrap(); - -// let retrieved_s: S = -// 
bincode::deserialize(&naughty_bytes).unwrap(); assert_eq!(good_s. -// digest(), retrieved_s.digest()) } -// } +#[cfg(test)] +mod tests { + use ff::Field; + use once_cell::sync::OnceCell; + use serde::{Deserialize, Serialize}; + + use super::{DigestComputer, SimpleDigestible}; + use crate::{provider::GrumpkinEngine, traits::Engine}; + + type E = GrumpkinEngine; + + #[derive(Serialize, Deserialize)] + struct S { + i: usize, + #[serde(skip, default = "OnceCell::new")] + digest: OnceCell, + } + + impl SimpleDigestible for S {} + + impl S { + fn new(i: usize) -> Self { Self { i, digest: OnceCell::new() } } + + fn digest(&self) -> E::Scalar { + self.digest.get_or_try_init(|| DigestComputer::new(self).digest()).cloned().unwrap() + } + } + + #[test] + fn test_digest_field_not_ingested_in_computation() { + let s1 = S::::new(42); + + // let's set up a struct with a weird digest field to make sure the digest + // computation does not depend of it + let oc = OnceCell::new(); + oc.set(::Scalar::ONE).unwrap(); + + let s2: S = S { i: 42, digest: oc }; + + assert_eq!( + DigestComputer::<::Scalar, _>::new(&s1).digest().unwrap(), + DigestComputer::<::Scalar, _>::new(&s2).digest().unwrap() + ); + + // note: because of the semantics of `OnceCell::get_or_try_init`, the above + // equality will not result in `s1.digest() == s2.digest` + assert_ne!(s2.digest(), DigestComputer::<::Scalar, _>::new(&s2).digest().unwrap()); + } + + #[test] + fn test_digest_impervious_to_serialization() { + let good_s = S::::new(42); + + // let's set up a struct with a weird digest field to confuse deserializers + let oc = OnceCell::new(); + oc.set(::Scalar::ONE).unwrap(); + + let bad_s: S = S { i: 42, digest: oc }; + // this justifies the adjective "bad" + assert_ne!(good_s.digest(), bad_s.digest()); + + let naughty_bytes = bincode::serialize(&bad_s).unwrap(); + + let retrieved_s: S = bincode::deserialize(&naughty_bytes).unwrap(); + assert_eq!(good_s.digest(), retrieved_s.digest()) + } +} diff --git 
a/prover/src/lib.rs b/prover/src/lib.rs index d0ed78b..2b6c593 100644 --- a/prover/src/lib.rs +++ b/prover/src/lib.rs @@ -125,28 +125,28 @@ where E1: CurveCycleEquipped /// circuit. /// /// # Example - /// - /// ```rust - /// # use client_side_prover::spartan::ppsnark::RelaxedR1CSSNARK; - /// # use client_side_prover::provider::ipa_pc::EvaluationEngine; - /// # use client_side_prover::provider::{PallasEngine, VestaEngine}; - /// # use client_side_prover::traits::{circuit::TrivialCircuit, Engine, snark::RelaxedR1CSSNARKTrait}; - /// use client_side_prover::PublicParams; - /// - /// type E1 = PallasEngine; - /// type E2 = VestaEngine; - /// type EE = EvaluationEngine; - /// type SPrime = RelaxedR1CSSNARK>; - /// - /// let circuit1 = TrivialCircuit::<::Scalar>::default(); - /// let circuit2 = TrivialCircuit::<::Scalar>::default(); - /// // Only relevant for a SNARK using computation commitmnets, pass &(|_| 0) - /// // or &*nova_snark::traits::snark::default_ck_hint() otherwise. - /// let ck_hint1 = &*SPrime::::ck_floor(); - /// let ck_hint2 = &*SPrime::::ck_floor(); - /// - /// let pp = PublicParams::setup(&circuit1, &circuit2, ck_hint1, ck_hint2).unwrap(); - /// ``` + // TODO: THIS TEST DOES NOT WORK RIGHT NOW + // / ```rust + // / # use client_side_prover::spartan::ppsnark::RelaxedR1CSSNARK; + // / # use client_side_prover::provider::ipa_pc::EvaluationEngine; + // / # use client_side_prover::provider::{PallasEngine, VestaEngine}; + // / # use client_side_prover::traits::{circuit::TrivialCircuit, Engine, + // snark::RelaxedR1CSSNARKTrait}; / use client_side_prover::PublicParams; + // / + // / type E1 = PallasEngine; + // / type E2 = VestaEngine; + // / type EE = EvaluationEngine; + // / type SPrime = RelaxedR1CSSNARK>; + // / + // / let circuit1 = TrivialCircuit::<::Scalar>::default(); + // / let circuit2 = TrivialCircuit::<::Scalar>::default(); + // / // Only relevant for a SNARK using computation commitmnets, pass &(|_| 0) + // / // or 
&*nova_snark::traits::snark::default_ck_hint() otherwise. + // / let ck_hint1 = &*SPrime::::ck_floor(); + // / let ck_hint2 = &*SPrime::::ck_floor(); + // / + // / let pp = PublicParams::setup(&circuit1, &circuit2, ck_hint1, ck_hint2).unwrap(); + // / ``` pub fn setup, C2: StepCircuit< as Engine>::Scalar>>( c_primary: &C1, c_secondary: &C2, From 6a32cb92f8d6a3aaf908edc63ed7ed4acb61c3ac Mon Sep 17 00:00:00 2001 From: Colin Roberts Date: Fri, 28 Feb 2025 10:16:15 -0700 Subject: [PATCH 03/51] cleanup --- Cargo.lock | 58 ++++++++++++++++++++++----------------------- README.md | 21 ++++++++++++---- frontend/Cargo.toml | 2 +- 3 files changed, 46 insertions(+), 35 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4d454ac..9745808 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -930,6 +930,35 @@ dependencies = [ "tracing-texray", ] +[[package]] +name = "client-side-prover-frontend" +version = "0.1.0" +dependencies = [ + "acvm", + "ark-bn254 0.5.0", + "bellpepper-core", + "bincode", + "byteorder", + "circom_witnesscalc", + "client-side-prover", + "ff 0.13.0", + "halo2curves", + "hex", + "itertools 0.13.0", + "js-sys", + "num-bigint 0.4.6", + "serde", + "serde-wasm-bindgen", + "serde_json", + "tempdir", + "thiserror", + "tokio", + "tracing", + "tracing-test", + "wasm-bindgen", + "wasm-bindgen-futures", +] + [[package]] name = "code_producers" version = "2.1.9" @@ -1432,35 +1461,6 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" -[[package]] -name = "frontend" -version = "0.1.0" -dependencies = [ - "acvm", - "ark-bn254 0.5.0", - "bellpepper-core", - "bincode", - "byteorder", - "circom_witnesscalc", - "client-side-prover", - "ff 0.13.0", - "halo2curves", - "hex", - "itertools 0.13.0", - "js-sys", - "num-bigint 0.4.6", - "serde", - "serde-wasm-bindgen", - "serde_json", - "tempdir", - "thiserror", - "tokio", - "tracing", - "tracing-test", - "wasm-bindgen", - 
"wasm-bindgen-futures", -] - [[package]] name = "fuchsia-cprng" version = "0.1.1" diff --git a/README.md b/README.md index 6042152..fbfbe92 100644 --- a/README.md +++ b/README.md @@ -1,9 +1,20 @@ -# Nova: High-speed recursive arguments from folding schemes +# Client Side Prover > [!NOTE] -> This repository is a fork of the original hosted at [https://github.com/microsoft/nova](https://github.com/microsoft/nova) and also forked from [https://github.com/argumentcomputer/arecibo](https://github.com/argumentcomputer/arecibo) currently, but will likely be so different in the future that those are just motivation. +> This repository is a fork of the original hosted at [https://github.com/microsoft/nova](https://github.com/microsoft/nova) and also forked from [https://github.com/argumentcomputer/arecibo](https://github.com/argumentcomputer/arecibo). -## Notes +## Project Structure +The repository contains several key components: +- `client-side-prover-frontend`: Frontend adapters for both Noir and Circom +- `client-side-prover`: Backend implementation of the client side prover -### deny -I removed deny.toml, but having this could be good to use actually, \ No newline at end of file +## Features +- Supernova NIVC folding scheme implementation +- Support for both Noir and Circom circuit frameworks +- Client-side proving capabilities through WebAssembly +- Recursive proof generation and verification + +## Usage +This repository and its crates are **not** production ready. Do not use them in production. No audits have been done and none are planned. + +With that said, work has been done to make the implementation here work with an offline setup phase. Therefore, this can be used run proofs on an edge device which can later be verified by a remote server. 
\ No newline at end of file diff --git a/frontend/Cargo.toml b/frontend/Cargo.toml index 46fcb44..bc6a0bc 100644 --- a/frontend/Cargo.toml +++ b/frontend/Cargo.toml @@ -1,5 +1,5 @@ [package] -name ="frontend" +name ="client-side-prover-frontend" version="0.1.0" edition="2021" From 974e04d9a6525dc9216bca55096ec275677adb1c Mon Sep 17 00:00:00 2001 From: Colin Roberts Date: Fri, 28 Feb 2025 13:17:37 -0700 Subject: [PATCH 04/51] lint + workflows --- .github/workflows/check.yaml | 42 ++++++++++++++++++++++++++ Cargo.toml | 5 +++ frontend/Cargo.toml | 1 - frontend/src/lib.rs | 3 +- frontend/src/noir/tests.rs | 1 - frontend/src/program/mod.rs | 10 +----- frontend/src/setup.rs | 6 ++-- frontend/src/tests/mod.rs | 2 +- prover/Cargo.toml | 5 +-- prover/src/bellpepper/test_shape_cs.rs | 5 +-- prover/src/circuit.rs | 2 +- prover/src/cyclefold/circuit.rs | 1 + prover/src/cyclefold/gadgets.rs | 1 + prover/src/cyclefold/nifs.rs | 2 ++ prover/src/cyclefold/nova_circuit.rs | 3 ++ prover/src/fast_serde.rs | 8 ++--- prover/src/gadgets/ecc.rs | 2 +- prover/src/gadgets/nonnative/bignat.rs | 2 +- prover/src/lib.rs | 5 ++- prover/src/provider/keccak.rs | 4 +-- prover/src/provider/kzg_commitment.rs | 2 +- prover/src/provider/pedersen.rs | 4 +-- prover/src/provider/util/fb_msm.rs | 4 +-- prover/src/r1cs/sparse.rs | 2 +- prover/src/spartan/batched.rs | 29 +++++++++--------- prover/src/spartan/batched_ppsnark.rs | 4 +-- prover/src/spartan/macros.rs | 1 - prover/src/spartan/mod.rs | 1 + prover/src/spartan/polys/univariate.rs | 2 +- prover/src/spartan/snark.rs | 4 +-- prover/src/spartan/sumcheck/mod.rs | 2 ++ prover/src/supernova/Readme.md | 5 +-- prover/src/supernova/circuit.rs | 6 ++++ prover/src/supernova/mod.rs | 3 +- prover/src/supernova/snark.rs | 4 ++- prover/src/traits/snark.rs | 3 ++ 36 files changed, 123 insertions(+), 63 deletions(-) create mode 100644 .github/workflows/check.yaml diff --git a/.github/workflows/check.yaml b/.github/workflows/check.yaml new file mode 100644 index 
0000000..55bb363 --- /dev/null +++ b/.github/workflows/check.yaml @@ -0,0 +1,42 @@ +name: rust + +on: + pull_request: + types: [opened, synchronize, reopened] + push: + branches: [main] + +jobs: + test: + name: test + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Install Rust + uses: dtolnay/rust-toolchain@master + with: + toolchain: nightly + components: [rustfmt, clippy] + + - name: Install cargo-binstall + uses: taiki-e/install-action@cargo-binstall + + - name: Install tools + run: | + cargo binstall --no-confirm cargo-udeps taplo-cli + + - name: Run tests + run: cargo test --all + + - name: Check formatting + run: cargo fmt --all -- --check + + - name: Run clippy + run: cargo clippy --all + + - name: Check unused dependencies + run: cargo udeps + + - name: Check TOML formatting + run: taplo fmt --check \ No newline at end of file diff --git a/Cargo.toml b/Cargo.toml index cff1fa3..9e90af8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -59,3 +59,8 @@ tracing-texray ="0.2.0" tracing-subscriber={ version="0.3.17", features=["env-filter"] } handlebars ="5.1.0" serde_json ="1.0.1" + +[profile.release] +lto =true +codegen-units=1 +panic ="abort" \ No newline at end of file diff --git a/frontend/Cargo.toml b/frontend/Cargo.toml index bc6a0bc..3286edb 100644 --- a/frontend/Cargo.toml +++ b/frontend/Cargo.toml @@ -41,4 +41,3 @@ tokio = { version = "1.43", features = ["full"] } [features] verify-steps=[] -timing =[] diff --git a/frontend/src/lib.rs b/frontend/src/lib.rs index 09f93ad..b40d742 100644 --- a/frontend/src/lib.rs +++ b/frontend/src/lib.rs @@ -1,4 +1,5 @@ -#![warn(missing_docs, clippy::missing_docs_in_private_items)] +// TODO: Add back missing docs +// #![warn(missing_docs, clippy::missing_docs_in_private_items)] //! # Proofs Crate //! 
diff --git a/frontend/src/noir/tests.rs b/frontend/src/noir/tests.rs index b6d8600..cae9cef 100644 --- a/frontend/src/noir/tests.rs +++ b/frontend/src/noir/tests.rs @@ -97,7 +97,6 @@ pub fn run(memory: &NoirMemory) -> Result, ProofError> { let mut recursive_snark_option = None; - #[cfg(feature = "timing")] let time = std::time::Instant::now(); for (idx, &op_code) in memory.rom.iter().enumerate() { info!("Step {} of ROM", idx); diff --git a/frontend/src/program/mod.rs b/frontend/src/program/mod.rs index a818922..07eba9d 100644 --- a/frontend/src/program/mod.rs +++ b/frontend/src/program/mod.rs @@ -21,7 +21,7 @@ use client_side_prover::{ }; use data::{Expanded, InitializedSetup}; use proof::FoldingProof; -#[cfg(feature = "timing")] use tracing::trace; +use tracing::trace; use utils::into_input_json; use super::*; @@ -135,7 +135,6 @@ impl StepCircuit> for RomCircuit { /// Setup function pub fn setup(setup_data: &UninitializedSetup) -> PublicParams { // Optionally time the setup stage for the program - #[cfg(feature = "timing")] let time = std::time::Instant::now(); // TODO: I don't think we want to have to call `initialize_circuit_list` more @@ -146,7 +145,6 @@ pub fn setup(setup_data: &UninitializedSetup) -> PublicParams { let memory = Memory { circuits, rom: vec![0; setup_data.max_rom_length] }; // Note, `rom` here is not used in setup, only `circuits` let public_params = PublicParams::setup(&memory, &*default_ck_hint(), &*default_ck_hint()); - #[cfg(feature = "timing")] trace!("`PublicParams::setup()` elapsed: {:?}", time.elapsed()); public_params @@ -197,7 +195,6 @@ pub async fn run( let mut memory = Memory { rom: resized_rom.clone(), circuits }; - #[cfg(feature = "timing")] let time = std::time::Instant::now(); for (idx, &op_code) in resized_rom.iter().enumerate().take_while(|(_, &op_code)| op_code != u64::MAX) @@ -280,7 +277,6 @@ pub async fn run( } // Note, this unwrap cannot fail let recursive_snark = recursive_snark_option.unwrap(); - #[cfg(feature = 
"timing")] trace!("Recursive loop of `program::run()` elapsed: {:?}", time.elapsed()); Ok(recursive_snark?) @@ -364,14 +360,11 @@ pub fn compress_proof( public_params: &PublicParams, ) -> Result { debug!("Setting up `CompressedSNARK`"); - #[cfg(feature = "timing")] let time = std::time::Instant::now(); let (pk, _vk) = CompressedSNARK::::setup(public_params)?; debug!("Done setting up `CompressedSNARK`"); - #[cfg(feature = "timing")] trace!("`CompressedSNARK::setup` elapsed: {:?}", time.elapsed()); - #[cfg(feature = "timing")] let time = std::time::Instant::now(); let proof = FoldingProof { @@ -380,7 +373,6 @@ pub fn compress_proof( }; debug!("`CompressedSNARK::prove completed!"); - #[cfg(feature = "timing")] trace!("`CompressedSNARK::prove` elapsed: {:?}", time.elapsed()); Ok(proof) diff --git a/frontend/src/setup.rs b/frontend/src/setup.rs index 020bf35..5d5759a 100644 --- a/frontend/src/setup.rs +++ b/frontend/src/setup.rs @@ -47,7 +47,7 @@ pub struct ProvingParams { impl FastSerde for ProvingParams { /// Initialize ProvingParams from an efficiently serializable data format. 
- fn from_bytes(bytes: &Vec) -> Result { + fn from_bytes(bytes: &[u8]) -> Result { let mut cursor = Cursor::new(bytes); Self::validate_header(&mut cursor, SerdeByteTypes::ProverParams, 3)?; @@ -77,8 +77,8 @@ impl FastSerde for ProvingParams { out.push(3); // num_sections Self::write_section_bytes(&mut out, 1, &self.aux_params.to_bytes()); - Self::write_section_bytes(&mut out, 2, &self.vk_digest_primary.to_bytes().to_vec()); - Self::write_section_bytes(&mut out, 3, &self.vk_digest_secondary.to_bytes().to_vec()); + Self::write_section_bytes(&mut out, 2, &self.vk_digest_primary.to_bytes()); + Self::write_section_bytes(&mut out, 3, &self.vk_digest_secondary.to_bytes()); out } diff --git a/frontend/src/tests/mod.rs b/frontend/src/tests/mod.rs index 57f8c09..8a798a6 100644 --- a/frontend/src/tests/mod.rs +++ b/frontend/src/tests/mod.rs @@ -5,7 +5,7 @@ use client_side_prover::supernova::RecursiveSNARK; use super::*; use crate::program::{ - data::{CircuitData, NotExpanded, ProofParams, SetupParams, UninitializedSetup}, + data::{CircuitData, NotExpanded, ProofParams, SetupParams}, initialize_setup_data, }; diff --git a/prover/Cargo.toml b/prover/Cargo.toml index b4bf4a7..485d137 100644 --- a/prover/Cargo.toml +++ b/prover/Cargo.toml @@ -78,7 +78,4 @@ default=["grumpkin-msm/portable"] # portable = ["grumpkin-msm/portable"] # cuda = ["grumpkin-msm/cuda"] -[profile.release] -lto =true -codegen-units=1 -panic ="abort" + diff --git a/prover/src/bellpepper/test_shape_cs.rs b/prover/src/bellpepper/test_shape_cs.rs index 01e5098..b420e8c 100644 --- a/prover/src/bellpepper/test_shape_cs.rs +++ b/prover/src/bellpepper/test_shape_cs.rs @@ -53,6 +53,7 @@ pub struct TestShapeCS { named_objects: HashMap, current_namespace: Vec, /// All constraints added to the `TestShapeCS`. 
+ #[allow(clippy::type_complexity)] pub constraints: Vec<( LinearCombination, LinearCombination, @@ -73,9 +74,9 @@ fn proc_lc( // Remove terms that have a zero coefficient to normalize let mut to_remove = vec![]; - for (var, coeff) in map.iter() { + for (var, coeff) in &map { if coeff.is_zero().into() { - to_remove.push(*var) + to_remove.push(*var); } } diff --git a/prover/src/circuit.rs b/prover/src/circuit.rs index 8976cc6..298aab0 100644 --- a/prover/src/circuit.rs +++ b/prover/src/circuit.rs @@ -232,7 +232,7 @@ impl<'a, E: Engine, SC: StepCircuit> NovaAugmentedCircuit<'a, E, SC> { } } -impl<'a, E: Engine, SC: StepCircuit> NovaAugmentedCircuit<'a, E, SC> { +impl> NovaAugmentedCircuit<'_, E, SC> { /// synthesize circuit giving constraint system pub fn synthesize::Base>>( self, diff --git a/prover/src/cyclefold/circuit.rs b/prover/src/cyclefold/circuit.rs index 9d4e7d3..b3f126c 100644 --- a/prover/src/cyclefold/circuit.rs +++ b/prover/src/cyclefold/circuit.rs @@ -41,6 +41,7 @@ impl CycleFoldCircuit { Self { commit_1, commit_2, scalar, poseidon_constants } } + #[allow(clippy::type_complexity)] fn alloc_witness::Base>>( &self, mut cs: CS, diff --git a/prover/src/cyclefold/gadgets.rs b/prover/src/cyclefold/gadgets.rs index 58dafbc..9e4a494 100644 --- a/prover/src/cyclefold/gadgets.rs +++ b/prover/src/cyclefold/gadgets.rs @@ -444,6 +444,7 @@ pub mod emulated { /// provided to the primary circuit as non-deterministic advice, /// this folding simply sets those values as the new witness and /// error vector commitments. + #[allow(clippy::too_many_arguments, clippy::type_complexity)] pub fn fold_with_r1cs::Base>>( &self, mut cs: CS, diff --git a/prover/src/cyclefold/nifs.rs b/prover/src/cyclefold/nifs.rs index 95e008c..fc199bc 100644 --- a/prover/src/cyclefold/nifs.rs +++ b/prover/src/cyclefold/nifs.rs @@ -34,6 +34,7 @@ where /// instance-witness pair (U2, W2) and folds them into a new relaxed /// R1CS instance-witness pair (U, W) and a commitment to the cross term /// T. 
It also provides the challenge r used to fold the instances. + #[allow(clippy::too_many_arguments, clippy::type_complexity)] pub fn prove( ck: &CommitmentKey, ro_consts: &ROConstants, @@ -87,6 +88,7 @@ impl CycleFoldNIFS { /// Folds an R1CS instance/witness pair (U2, W2) into a relaxed R1CS /// instance/witness (U1, W1) returning the new folded accumulator and a /// commitment to the cross-term. + #[allow(clippy::too_many_arguments, clippy::type_complexity)] pub fn prove( ck: &CommitmentKey, ro_consts: &ROConstants, diff --git a/prover/src/cyclefold/nova_circuit.rs b/prover/src/cyclefold/nova_circuit.rs index 5ca44ce..cf5e6c5 100644 --- a/prover/src/cyclefold/nova_circuit.rs +++ b/prover/src/cyclefold/nova_circuit.rs @@ -57,6 +57,7 @@ where E1: Engine::Scalar>, E2: Engine::Scalar>, { + #[allow(clippy::too_many_arguments, clippy::type_complexity)] pub fn new( pp_digest: E1::Scalar, i: E1::Base, @@ -97,6 +98,7 @@ where Self { params, ro_consts, inputs, step_circuit } } + #[allow(clippy::type_complexity)] fn alloc_witness::Base>>( &self, mut cs: CS, @@ -212,6 +214,7 @@ where Ok((U_c_default, U_p_default)) } + #[allow(clippy::too_many_arguments, clippy::type_complexity)] pub fn synthesize_non_base_case::Base>>( &self, mut cs: CS, diff --git a/prover/src/fast_serde.rs b/prover/src/fast_serde.rs index 4c6facc..5279c20 100644 --- a/prover/src/fast_serde.rs +++ b/prover/src/fast_serde.rs @@ -45,10 +45,10 @@ pub enum SerdeByteError { /// A trait for fast conversions to bytes pub trait FastSerde: Sized { fn to_bytes(&self) -> Vec; - fn from_bytes(bytes: &Vec) -> Result; + fn from_bytes(bytes: &[u8]) -> Result; fn validate_header( - cursor: &mut Cursor<&Vec>, + cursor: &mut Cursor<&[u8]>, expected_type: SerdeByteTypes, expected_sections: u8, ) -> Result<(), SerdeByteError> { @@ -74,7 +74,7 @@ pub trait FastSerde: Sized { } fn read_section_bytes( - cursor: &mut Cursor<&Vec>, + cursor: &mut Cursor<&[u8]>, expected_type: u8, ) -> Result, SerdeByteError> { let mut section_type = 
[0u8; 1]; @@ -92,7 +92,7 @@ pub trait FastSerde: Sized { Ok(section_data) } - fn write_section_bytes(out: &mut Vec, section_type: u8, data: &Vec) { + fn write_section_bytes(out: &mut Vec, section_type: u8, data: &[u8]) { out.push(section_type); out.extend_from_slice(&(data.len() as u32).to_le_bytes()); out.extend_from_slice(data); diff --git a/prover/src/gadgets/ecc.rs b/prover/src/gadgets/ecc.rs index ee6d182..e617658 100644 --- a/prover/src/gadgets/ecc.rs +++ b/prover/src/gadgets/ecc.rs @@ -37,7 +37,7 @@ impl AllocatedPoint { let y = AllocatedNum::alloc(cs.namespace(|| "y"), || Ok(coords.map_or(G::Base::ZERO, |c| c.1)))?; let is_infinity = AllocatedNum::alloc(cs.namespace(|| "is_infinity"), || { - Ok(if coords.map_or(true, |c| c.2) { G::Base::ONE } else { G::Base::ZERO }) + Ok(if coords.is_none_or(|c| c.2) { G::Base::ONE } else { G::Base::ZERO }) })?; cs.enforce( || "is_infinity is bit", diff --git a/prover/src/gadgets/nonnative/bignat.rs b/prover/src/gadgets/nonnative/bignat.rs index 2561777..19f03d3 100644 --- a/prover/src/gadgets/nonnative/bignat.rs +++ b/prover/src/gadgets/nonnative/bignat.rs @@ -220,7 +220,7 @@ impl BigNat { ) -> Result { let bignat = Self::alloc_from_nat( cs.namespace(|| "bignat"), - || Ok({ n.value.as_ref().map(|n| f_to_nat(n)).ok_or(SynthesisError::AssignmentMissing)? 
}), + || n.value.as_ref().map(|n| f_to_nat(n)).ok_or(SynthesisError::AssignmentMissing), limb_width, n_limbs, )?; diff --git a/prover/src/lib.rs b/prover/src/lib.rs index 2b6c593..24ad85f 100644 --- a/prover/src/lib.rs +++ b/prover/src/lib.rs @@ -1,4 +1,4 @@ -#![allow(non_snake_case)] +#![allow(non_snake_case, clippy::type_complexity, clippy::too_many_arguments)] // private modules pub mod bellpepper; @@ -537,6 +537,7 @@ where E1: CurveCycleEquipped } /// Verify the correctness of the `RecursiveSNARK` + #[allow(clippy::type_complexity)] pub fn verify( &self, pp: &PublicParams, @@ -707,6 +708,7 @@ where S2: RelaxedR1CSSNARKTrait>, { /// Creates prover and verifier keys for `CompressedSNARK` + #[allow(clippy::type_complexity)] pub fn setup( pp: &PublicParams, ) -> Result<(ProverKey, VerifierKey), NovaError> { @@ -784,6 +786,7 @@ where }) } + #[allow(clippy::type_complexity)] /// Verify the correctness of the `CompressedSNARK` pub fn verify( &self, diff --git a/prover/src/provider/keccak.rs b/prover/src/provider/keccak.rs index 76ea95c..6a27b01 100644 --- a/prover/src/provider/keccak.rs +++ b/prover/src/provider/keccak.rs @@ -65,7 +65,7 @@ impl TranscriptEngineTrait for Keccak256Transcript { let output = compute_updated_state(self.transcript.clone(), &input); // update state - self.round = { self.round.checked_add(1).ok_or(NovaError::InternalTranscriptError)? 
}; + self.round = self.round.checked_add(1).ok_or(NovaError::InternalTranscriptError)?; self.state.copy_from_slice(&output); self.transcript = Keccak256::new(); @@ -75,7 +75,7 @@ impl TranscriptEngineTrait for Keccak256Transcript { fn absorb>(&mut self, label: &'static [u8], o: &T) { self.transcript.update(label); - self.transcript.update(&o.to_transcript_bytes()); + self.transcript.update(o.to_transcript_bytes()); } fn dom_sep(&mut self, bytes: &'static [u8]) { diff --git a/prover/src/provider/kzg_commitment.rs b/prover/src/provider/kzg_commitment.rs index 30940e0..a2232cc 100644 --- a/prover/src/provider/kzg_commitment.rs +++ b/prover/src/provider/kzg_commitment.rs @@ -163,7 +163,7 @@ where out } - fn from_bytes(bytes: &Vec) -> Result { + fn from_bytes(bytes: &[u8]) -> Result { let mut cursor = Cursor::new(bytes); Self::validate_header(&mut cursor, SerdeByteTypes::UniversalKZGParam, 2)?; diff --git a/prover/src/provider/pedersen.rs b/prover/src/provider/pedersen.rs index 47e0b09..4fb139f 100644 --- a/prover/src/provider/pedersen.rs +++ b/prover/src/provider/pedersen.rs @@ -73,7 +73,7 @@ where out } - fn from_bytes(bytes: &Vec) -> Result { + fn from_bytes(bytes: &[u8]) -> Result { let mut cursor = Cursor::new(bytes); // Validate header @@ -179,7 +179,7 @@ where fn mul_assign(&mut self, scalar: E::Scalar) { *self = Self { comm: self.comm * scalar }; } } -impl<'a, 'b, E> Mul<&'b E::Scalar> for &'a Commitment +impl<'b, E> Mul<&'b E::Scalar> for &Commitment where E: Engine, E::GE: DlogGroup, diff --git a/prover/src/provider/util/fb_msm.rs b/prover/src/provider/util/fb_msm.rs index 9513c94..cf5b9be 100644 --- a/prover/src/provider/util/fb_msm.rs +++ b/prover/src/provider/util/fb_msm.rs @@ -41,7 +41,7 @@ where { let in_window = 1 << window; // Number of outer iterations needed to cover the entire scalar - let outerc = (scalar_size + window - 1) / window; + let outerc = scalar_size.div_ceil(window); // Number of multiples of the window's "outer point" needed for each 
window // (fewer for the last window) @@ -119,7 +119,7 @@ where T: PrimeCurve, T::Scalar: PrimeFieldBits, { - let outerc = (scalar_size + window - 1) / window; + let outerc = scalar_size.div_ceil(window); assert!(outerc <= table.len()); v.par_iter().map(|e| windowed_mul::(outerc, window, table, e)).collect::>() diff --git a/prover/src/r1cs/sparse.rs b/prover/src/r1cs/sparse.rs index c0c3e2d..e52e537 100644 --- a/prover/src/r1cs/sparse.rs +++ b/prover/src/r1cs/sparse.rs @@ -229,7 +229,7 @@ pub struct Iter<'a, F: PrimeField> { nnz: usize, } -impl<'a, F: PrimeField> Iterator for Iter<'a, F> { +impl Iterator for Iter<'_, F> { type Item = (usize, usize, F); fn next(&mut self) -> Option { diff --git a/prover/src/spartan/batched.rs b/prover/src/spartan/batched.rs index ab63abd..90d25ac 100644 --- a/prover/src/spartan/batched.rs +++ b/prover/src/spartan/batched.rs @@ -309,21 +309,20 @@ impl> BatchedRelaxedR1CSSNARKTrait .collect::>(); // Create evaluation instances for W(r_y[1..]) and E(r_x) - let (w_vec, u_vec) = - { - let mut w_vec = Vec::with_capacity(2 * num_instances); - let mut u_vec = Vec::with_capacity(2 * num_instances); - w_vec.extend(polys_W.into_iter().map(|poly| PolyEvalWitness { p: poly })); - u_vec.extend(zip_with!(iter, (evals_W, U, r_y), |eval, u, r_y| { - PolyEvalInstance { c: u.comm_W, x: r_y[1..].to_vec(), e: *eval } - })); - - w_vec.extend(polys_E.into_iter().map(|poly| PolyEvalWitness { p: poly })); - u_vec.extend(zip_with!((evals_E.iter(), U.iter(), r_x), |eval_E, u, r_x| { - PolyEvalInstance { c: u.comm_E, x: r_x, e: *eval_E } - })); - (w_vec, u_vec) - }; + let (w_vec, u_vec) = { + let mut w_vec = Vec::with_capacity(2 * num_instances); + let mut u_vec = Vec::with_capacity(2 * num_instances); + w_vec.extend(polys_W.into_iter().map(|poly| PolyEvalWitness { p: poly })); + u_vec.extend(zip_with!(iter, (evals_W, U, r_y), |eval, u, r_y| { + PolyEvalInstance { c: u.comm_W, x: r_y[1..].to_vec(), e: *eval } + })); + + 
w_vec.extend(polys_E.into_iter().map(|poly| PolyEvalWitness { p: poly })); + u_vec.extend(zip_with!((evals_E.iter(), U.iter(), r_x), |eval_E, u, r_x| { + PolyEvalInstance { c: u.comm_E, x: r_x, e: *eval_E } + })); + (w_vec, u_vec) + }; let (batched_u, batched_w, sc_proof_batch, claims_batch_left) = batch_eval_reduce(u_vec, &w_vec, &mut transcript)?; diff --git a/prover/src/spartan/batched_ppsnark.rs b/prover/src/spartan/batched_ppsnark.rs index 1fe8d3b..7e963a4 100644 --- a/prover/src/spartan/batched_ppsnark.rs +++ b/prover/src/spartan/batched_ppsnark.rs @@ -138,8 +138,8 @@ impl> BatchedRelaxedR1CSSNARKTrait } fn initialize_pk( - ck: Arc>, - vk_digest: E::Scalar, + _ck: Arc>, + _vk_digest: E::Scalar, ) -> Result { todo!("unimplemented for batched_ppsnark"); } diff --git a/prover/src/spartan/macros.rs b/prover/src/spartan/macros.rs index 0f2d943..c0e3cf8 100644 --- a/prover/src/spartan/macros.rs +++ b/prover/src/spartan/macros.rs @@ -14,7 +14,6 @@ /// /// println!("{:?}", res); // Output: [6, 9, 12] /// ``` - #[macro_export] macro_rules! 
zip_with { // no iterator projection specified: the macro assumes the arguments *are* iterators diff --git a/prover/src/spartan/mod.rs b/prover/src/spartan/mod.rs index 84aeeca..6514e4d 100644 --- a/prover/src/spartan/mod.rs +++ b/prover/src/spartan/mod.rs @@ -149,6 +149,7 @@ impl PolyEvalInstance { /// Binds "row" variables of (A, B, C) matrices viewed as 2d multilinear /// polynomials +#[allow(clippy::type_complexity)] fn compute_eval_table_sparse( S: &R1CSShape, rx: &[E::Scalar], diff --git a/prover/src/spartan/polys/univariate.rs b/prover/src/spartan/polys/univariate.rs index 7e4d687..a4b1666 100644 --- a/prover/src/spartan/polys/univariate.rs +++ b/prover/src/spartan/polys/univariate.rs @@ -108,7 +108,7 @@ impl UniPoly { } fn truncate_leading_zeros(&mut self) { - while self.coeffs.last().map_or(false, |c| c == &Scalar::ZERO) { + while self.coeffs.last() == Some(&Scalar::ZERO) { self.coeffs.pop(); } } diff --git a/prover/src/spartan/snark.rs b/prover/src/spartan/snark.rs index 79acbb2..e903cbd 100644 --- a/prover/src/spartan/snark.rs +++ b/prover/src/spartan/snark.rs @@ -95,8 +95,8 @@ impl> RelaxedR1CSSNARKTrait for Relax type VerifierKey = VerifierKey; fn initialize_pk( - ck: Arc>, - vk_digest: ::Scalar, + _ck: Arc>, + _vk_digest: ::Scalar, ) -> Result { todo!("not implemented for nova snarks"); } diff --git a/prover/src/spartan/sumcheck/mod.rs b/prover/src/spartan/sumcheck/mod.rs index 0cd7756..49d4bc0 100644 --- a/prover/src/spartan/sumcheck/mod.rs +++ b/prover/src/spartan/sumcheck/mod.rs @@ -364,6 +364,7 @@ impl SumcheckProof { ) } + #[allow(clippy::too_many_arguments, clippy::type_complexity)] pub fn prove_cubic_with_additive_term( claim: &E::Scalar, num_rounds: usize, @@ -414,6 +415,7 @@ impl SumcheckProof { Ok((Self { compressed_polys: polys }, r, vec![poly_A[0], poly_B[0], poly_C[0], poly_D[0]])) } + #[allow(clippy::too_many_arguments, clippy::type_complexity)] pub fn prove_cubic_with_additive_term_batch( claims: &[E::Scalar], num_rounds: &[usize], diff 
--git a/prover/src/supernova/Readme.md b/prover/src/supernova/Readme.md index 82c69f1..8d78ae5 100644 --- a/prover/src/supernova/Readme.md +++ b/prover/src/supernova/Readme.md @@ -8,9 +8,10 @@ We aim to provide a mathematical description of the protocol, as it is implement Before delving into the specifics of the implementation, it's crucial to define and clarify some key terms and concepts used throughout this document: - **Recursive SNARK**: A Recursive SNARK is a type of succinct non-interactive argument of knowledge for a circuit $F$ which can be composed with itself as $z\_{i+1} \gets F(z_i)$. -Each iteration proves the verification of a proof for $z_i$ and the correctness of $z\_{i+1}$, ensuring the proving of each step remains constant. + Each iteration proves the verification of a proof for $z_i$ and the correctness of $z\_{i+1}$, ensuring the proving of each step remains constant. - **Augmentation Circuit**: In the context of the SuperNova protocol, an augmentation circuit refers to a circuit $F'$ composing $F$ with a circuit which partially verifies the validity of the previous output $z_i$ before running $F(z_i)$. - **NIFS Folding Verifier**: A non-interactive folding scheme is a protocol for efficiently updating a proof $\pi_i$ about an iterated function $z\_{i+1} \gets F(z_i)$ into a new proof $\pi\_{i+1}$, through a process referred to as "folding". + By splitting the proof into an instance/witness pair $(u,w) = \pi$, the folding verifier describes an algorithm for verifying that the $u$ component was properly updated. ## SuperNova vs. Nova @@ -44,7 +45,7 @@ While the original Nova implementation allows computation to be done on both cur The prover needs to store data about the previous function iteration. It is defined by the `supernova::RecursiveSNARK` struct. It contains: - $i$: the number of iterations performed. -Note that the `new` constructor actually performs the first iteration, and the first call to `prove_step` simply sets the counter to 1. 
+ Note that the `new` constructor actually performs the first iteration, and the first call to `prove_step` simply sets the counter to 1. - Primary curve: - $(\mathsf{pc}_i, z_0, z_i)$: current program counter and inputs for the primary circuit - $U[\ ],W[\ ]$: List of relaxed instance/witness pairs for all the circuits on the primary curve. diff --git a/prover/src/supernova/circuit.rs b/prover/src/supernova/circuit.rs index ff93acb..1846cff 100644 --- a/prover/src/supernova/circuit.rs +++ b/prover/src/supernova/circuit.rs @@ -66,6 +66,7 @@ pub trait StepCircuit: Send + Sync + Clone { /// Synthesize the circuit for a computation step and return variable /// that corresponds to the output of the step `pc_{i+1}` and `z_{i+1}` + #[allow(clippy::type_complexity)] fn synthesize>( &self, cs: &mut CS, @@ -85,6 +86,7 @@ pub trait EnforcingStepCircuit: Send + Sync + Clone + StepCircuit /// Delegate synthesis to `StepCircuit::synthesize`, and additionally, /// enforce the constraint that program counter `pc`, if supplied, is /// equal to the circuit's assigned index. + #[allow(clippy::type_complexity)] fn enforcing_synthesize>( &self, cs: &mut CS, @@ -173,6 +175,7 @@ pub struct SuperNovaAugmentedCircuitInputs<'a, E: Engine> { impl<'a, E: Engine> SuperNovaAugmentedCircuitInputs<'a, E> { /// Create new inputs/witness for the verification circuit + #[allow(clippy::too_many_arguments)] pub fn new( pp_digest: E::Scalar, i: E::Base, @@ -214,6 +217,7 @@ impl<'a, E: Engine, SC: EnforcingStepCircuit> SuperNovaAugmentedCircuit /// Allocate all witnesses from the augmented function's non-deterministic /// inputs. Optional entries are allocated as their default values. 
+ #[allow(clippy::type_complexity)] fn alloc_witness::Base>>( &self, mut cs: CS, @@ -358,6 +362,7 @@ impl<'a, E: Engine, SC: EnforcingStepCircuit> SuperNovaAugmentedCircuit /// Synthesizes non base case and returns the new relaxed `R1CSInstance` /// And a boolean indicating if all checks pass + #[allow(clippy::too_many_arguments)] fn synthesize_non_base_case::Base>>( &self, mut cs: CS, @@ -443,6 +448,7 @@ impl<'a, E: Engine, SC: EnforcingStepCircuit> SuperNovaAugmentedCircuit Ok((U_next, check_pass)) } + #[allow(clippy::type_complexity)] pub fn synthesize::Base>>( self, cs: &mut CS, diff --git a/prover/src/supernova/mod.rs b/prover/src/supernova/mod.rs index 1812a20..a7b20a5 100644 --- a/prover/src/supernova/mod.rs +++ b/prover/src/supernova/mod.rs @@ -171,7 +171,7 @@ where out } - fn from_bytes(bytes: &Vec) -> Result { + fn from_bytes(bytes: &[u8]) -> Result { let mut cursor = Cursor::new(bytes); // Validate header @@ -936,6 +936,7 @@ where E1: CurveCycleEquipped } /// verify recursive snark + #[allow(clippy::type_complexity)] pub fn verify( &self, pp: &PublicParams, diff --git a/prover/src/supernova/snark.rs b/prover/src/supernova/snark.rs index cb2645b..363b26d 100644 --- a/prover/src/supernova/snark.rs +++ b/prover/src/supernova/snark.rs @@ -76,10 +76,11 @@ where let pk_primary = S1::initialize_pk(pp.ck_primary.clone(), primary_vk_digest)?; let pk_secondary = S2::initialize_pk(pp.ck_secondary.clone(), secondary_vk_digest)?; - return Ok(ProverKey { pk_primary, pk_secondary }); + Ok(ProverKey { pk_primary, pk_secondary }) } /// Creates prover and verifier keys for `CompressedSNARK` + #[allow(clippy::type_complexity)] pub fn setup( pp: &PublicParams, ) -> Result<(ProverKey, VerifierKey), SuperNovaError> { @@ -176,6 +177,7 @@ where } /// Verify the correctness of the `CompressedSNARK` + #[allow(clippy::type_complexity)] pub fn verify( &self, pp: &PublicParams, diff --git a/prover/src/traits/snark.rs b/prover/src/traits/snark.rs index c048348..7af5a43 100644 --- 
a/prover/src/traits/snark.rs +++ b/prover/src/traits/snark.rs @@ -19,6 +19,7 @@ use crate::{ /// use with these public parameters, and the below is a sensible default, which /// is to not require any more bases then the usual (maximum of the number of /// variables and constraints of the involved R1CS circuit). +#[allow(clippy::type_complexity)] pub fn default_ck_hint() -> Box Fn(&'a R1CSShape) -> usize> { // The default is to not put an additional floor on the size of the commitment // key @@ -42,6 +43,7 @@ pub trait RelaxedR1CSSNARKTrait: /// a minimum sizing cue for the commitment key used by this SNARK /// implementation. The commitment key passed in setup should then /// be at least as large as this hint. + #[allow(clippy::type_complexity)] fn ck_floor() -> Box Fn(&'a R1CSShape) -> usize> { // The default is to not put an additional floor on the size of the commitment // key @@ -92,6 +94,7 @@ pub trait BatchedRelaxedR1CSSNARKTrait: /// a minimum sizing cue for the commitment key used by this SNARK /// implementation. The commitment key passed in setup should then /// be at least as large as this hint. 
+ #[allow(clippy::type_complexity)] fn ck_floor() -> Box Fn(&'a R1CSShape) -> usize> { default_ck_hint() } /// Initialize a ProvingKey directly from a CommitmentKey and a From 80fdc6f7584e584d07e9722d284711e7df2180c9 Mon Sep 17 00:00:00 2001 From: Colin Roberts Date: Fri, 28 Feb 2025 13:19:11 -0700 Subject: [PATCH 05/51] Update check.yaml --- .github/workflows/check.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/check.yaml b/.github/workflows/check.yaml index 55bb363..b4d56f5 100644 --- a/.github/workflows/check.yaml +++ b/.github/workflows/check.yaml @@ -17,7 +17,7 @@ jobs: uses: dtolnay/rust-toolchain@master with: toolchain: nightly - components: [rustfmt, clippy] + components: rustfmt, clippy - name: Install cargo-binstall uses: taiki-e/install-action@cargo-binstall From 31d7fbdd4c217e74bd20fc3c060cf5b4fb09c40a Mon Sep 17 00:00:00 2001 From: Colin Roberts Date: Fri, 28 Feb 2025 13:20:25 -0700 Subject: [PATCH 06/51] Update check.yaml --- .github/workflows/check.yaml | 56 ++++++++++++++++++++++++++---------- 1 file changed, 41 insertions(+), 15 deletions(-) diff --git a/.github/workflows/check.yaml b/.github/workflows/check.yaml index b4d56f5..23b022a 100644 --- a/.github/workflows/check.yaml +++ b/.github/workflows/check.yaml @@ -8,35 +8,61 @@ on: jobs: test: - name: test + name: Tests runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - name: Install Rust uses: dtolnay/rust-toolchain@master with: toolchain: nightly - components: rustfmt, clippy - - - name: Install cargo-binstall - uses: taiki-e/install-action@cargo-binstall - - - name: Install tools - run: | - cargo binstall --no-confirm cargo-udeps taplo-cli - - name: Run tests run: cargo test --all + format: + name: Format + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Install Rust + uses: dtolnay/rust-toolchain@master + with: + toolchain: nightly + components: rustfmt - name: Check formatting run: cargo fmt --all -- --check + - 
name: Install taplo + uses: taiki-e/install-action@cargo-binstall + - name: Install tools + run: cargo binstall --no-confirm taplo-cli + - name: Check TOML formatting + run: taplo fmt --check + lint: + name: Lint + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Install Rust + uses: dtolnay/rust-toolchain@master + with: + toolchain: nightly + components: clippy - name: Run clippy run: cargo clippy --all + deps: + name: Dependencies + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Install Rust + uses: dtolnay/rust-toolchain@master + with: + toolchain: nightly + - name: Install cargo-binstall + uses: taiki-e/install-action@cargo-binstall + - name: Install tools + run: cargo binstall --no-confirm cargo-udeps - name: Check unused dependencies - run: cargo udeps - - - name: Check TOML formatting - run: taplo fmt --check \ No newline at end of file + run: cargo udeps \ No newline at end of file From 565a2eb76ceeacb82d340f020f43136cbc13c07e Mon Sep 17 00:00:00 2001 From: Colin Roberts Date: Fri, 28 Feb 2025 13:21:41 -0700 Subject: [PATCH 07/51] cleanup --- .github/workflows/check.yaml | 2 +- Cargo.toml | 4 ++-- frontend/Cargo.toml | 20 ++++++++++---------- prover/Cargo.toml | 2 -- 4 files changed, 13 insertions(+), 15 deletions(-) diff --git a/.github/workflows/check.yaml b/.github/workflows/check.yaml index 23b022a..1621d78 100644 --- a/.github/workflows/check.yaml +++ b/.github/workflows/check.yaml @@ -1,4 +1,4 @@ -name: rust +name: Check on: pull_request: diff --git a/Cargo.toml b/Cargo.toml index 9e90af8..97ece87 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -33,7 +33,7 @@ rand ="0.8.5" ref-cast ="1.0.20" # allocation-less conversion in multilinear polys # lightens impl macros for pasta static_assertions="1.1.0" rayon-scan ="0.1.0" -hex ="0.4.3" +hex ="0.4.3" # grumpkin-msm has been patched to support MSMs for the pasta curve cycle # see: https://github.com/argumentcomputer/grumpkin-msm/pull/3 @@ -63,4 +63,4 @@ serde_json 
="1.0.1" [profile.release] lto =true codegen-units=1 -panic ="abort" \ No newline at end of file +panic ="abort" diff --git a/frontend/Cargo.toml b/frontend/Cargo.toml index 3286edb..a641383 100644 --- a/frontend/Cargo.toml +++ b/frontend/Cargo.toml @@ -6,19 +6,19 @@ edition="2021" [dependencies] client-side-prover={ path="../prover" } -serde ={ workspace=true } -serde_json ={ workspace=true } -thiserror ={ workspace=true } -tracing ={ workspace=true } -hex ={ workspace=true } -ff ={ workspace=true } -bellpepper-core ={ workspace=true } -halo2curves ={ workspace=true } +serde ={ workspace=true } +serde_json ={ workspace=true } +thiserror ={ workspace=true } +tracing ={ workspace=true } +hex ={ workspace=true } +ff ={ workspace=true } +bellpepper-core={ workspace=true } +halo2curves ={ workspace=true } byteorder ={ workspace=true } num-bigint={ workspace=true } itertools ={ workspace=true } -bincode={ workspace=true } +bincode ={ workspace=true } # noir acvm ={ git="https://github.com/noir-lang/noir", rev="v1.0.0-beta.2" } @@ -37,7 +37,7 @@ circom_witnesscalc={ git="https://github.com/pluto/circom-witnesscalc" } # Fork [dev-dependencies] tracing-test="0.2.5" tempdir ="0.3.7" -tokio = { version = "1.43", features = ["full"] } +tokio ={ version="1.43", features=["full"] } [features] verify-steps=[] diff --git a/prover/Cargo.toml b/prover/Cargo.toml index 485d137..3fc21c7 100644 --- a/prover/Cargo.toml +++ b/prover/Cargo.toml @@ -77,5 +77,3 @@ default=["grumpkin-msm/portable"] # Compiles in portable mode, w/o ISA extensions => binary can be executed on all systems. 
# portable = ["grumpkin-msm/portable"] # cuda = ["grumpkin-msm/cuda"] - - From 7434f11b39b2c895c7bced366024243ae625aff7 Mon Sep 17 00:00:00 2001 From: Colin Roberts Date: Fri, 28 Feb 2025 15:12:02 -0700 Subject: [PATCH 08/51] feat: noir circuits --- .github/workflows/check.yaml | 2 +- examples/add_external/Nargo.toml | 6 + examples/add_external/src/main.nr | 8 + .../add_external/target/add_external.json | 1 + examples/square_zeroth/Nargo.toml | 6 + examples/square_zeroth/src/main.nr | 4 + examples/swap_memory/Nargo.toml | 6 + examples/swap_memory/src/main.nr | 4 + examples/swap_memory/target/swap_memory.json | 1 + .../noir_circuit_data/add_external.json | 1 - .../noir_circuit_data/square_zeroth.json | 1 - .../noir_circuit_data/swap_memory.json | 1 - frontend/mock/fold.json | 1 - frontend/mock/mock.json | 1 - frontend/src/circom/mod.rs | 172 ----------- frontend/src/circom/r1cs.rs | 277 ------------------ frontend/src/circom/wasm_witness.rs | 36 --- frontend/src/circom/witness.rs | 261 ----------------- frontend/src/noir/tests.rs | 6 - frontend/src/program/mod.rs | 7 +- frontend/src/program/utils.rs | 182 ------------ .../examples/circuit_data/add_external.bin | Bin .../examples/circuit_data/add_external.circom | 0 .../examples/circuit_data/add_external.r1cs | Bin .../examples/circuit_data/square_zeroth.bin | Bin .../circuit_data/square_zeroth.circom | 0 .../examples/circuit_data/square_zeroth.r1cs | Bin .../examples/circuit_data/swap_memory.bin | Bin .../examples/circuit_data/swap_memory.circom | 0 .../examples/circuit_data/swap_memory.r1cs | Bin frontend/{src => }/tests/inputs.rs | 0 frontend/{src => }/tests/mod.rs | 0 frontend/{src => }/tests/witnesscalc.rs | 0 33 files changed, 38 insertions(+), 946 deletions(-) create mode 100644 examples/add_external/Nargo.toml create mode 100644 examples/add_external/src/main.nr create mode 100644 examples/add_external/target/add_external.json create mode 100644 examples/square_zeroth/Nargo.toml create mode 100644 
examples/square_zeroth/src/main.nr create mode 100644 examples/swap_memory/Nargo.toml create mode 100644 examples/swap_memory/src/main.nr create mode 100644 examples/swap_memory/target/swap_memory.json delete mode 100644 frontend/examples/noir_circuit_data/add_external.json delete mode 100644 frontend/examples/noir_circuit_data/square_zeroth.json delete mode 100644 frontend/examples/noir_circuit_data/swap_memory.json delete mode 100644 frontend/mock/fold.json delete mode 100644 frontend/mock/mock.json delete mode 100644 frontend/src/circom/mod.rs delete mode 100644 frontend/src/circom/r1cs.rs delete mode 100644 frontend/src/circom/wasm_witness.rs delete mode 100644 frontend/src/circom/witness.rs delete mode 100644 frontend/src/program/utils.rs rename frontend/{src => }/tests/examples/circuit_data/add_external.bin (100%) rename frontend/{src => }/tests/examples/circuit_data/add_external.circom (100%) rename frontend/{src => }/tests/examples/circuit_data/add_external.r1cs (100%) rename frontend/{src => }/tests/examples/circuit_data/square_zeroth.bin (100%) rename frontend/{src => }/tests/examples/circuit_data/square_zeroth.circom (100%) rename frontend/{src => }/tests/examples/circuit_data/square_zeroth.r1cs (100%) rename frontend/{src => }/tests/examples/circuit_data/swap_memory.bin (100%) rename frontend/{src => }/tests/examples/circuit_data/swap_memory.circom (100%) rename frontend/{src => }/tests/examples/circuit_data/swap_memory.r1cs (100%) rename frontend/{src => }/tests/inputs.rs (100%) rename frontend/{src => }/tests/mod.rs (100%) rename frontend/{src => }/tests/witnesscalc.rs (100%) diff --git a/.github/workflows/check.yaml b/.github/workflows/check.yaml index 1621d78..3c35b71 100644 --- a/.github/workflows/check.yaml +++ b/.github/workflows/check.yaml @@ -1,4 +1,4 @@ -name: Check +name: Checkta on: pull_request: diff --git a/examples/add_external/Nargo.toml b/examples/add_external/Nargo.toml new file mode 100644 index 0000000..0b0a015 --- /dev/null +++ 
b/examples/add_external/Nargo.toml @@ -0,0 +1,6 @@ +[package] +authors = ["Colin Roberts"] +compiler_version = ">=0.36.0" +name = "add_external" +type = "bin" +version = "0.1.0" diff --git a/examples/add_external/src/main.nr b/examples/add_external/src/main.nr new file mode 100644 index 0000000..1562a66 --- /dev/null +++ b/examples/add_external/src/main.nr @@ -0,0 +1,8 @@ +/// Add two external values to two registers that are folded across circuits. +pub fn main( + external: [Field; 2], + registers: [Field; 2], + next_pc: Field, +) -> pub ([Field; 2], Field) { + ([external[0] + registers[0], external[1] + registers[1]], next_pc) +} diff --git a/examples/add_external/target/add_external.json b/examples/add_external/target/add_external.json new file mode 100644 index 0000000..d3c7a02 --- /dev/null +++ b/examples/add_external/target/add_external.json @@ -0,0 +1 @@ +{"noir_version":"1.0.0-beta.2+1a2a08cbcb68646ff1aaef383cfc1798933c1355","hash":10862577844004020612,"abi":{"parameters":[{"name":"external","type":{"kind":"array","length":2,"type":{"kind":"field"}},"visibility":"private"},{"name":"registers","type":{"kind":"array","length":2,"type":{"kind":"field"}},"visibility":"private"},{"name":"next_pc","type":{"kind":"field"},"visibility":"private"}],"return_type":{"abi_type":{"kind":"tuple","fields":[{"kind":"array","length":2,"type":{"kind":"field"}},{"kind":"field"}]},"visibility":"public"},"error_types":{"6485997221020871071":{"error_kind":"string","string":"call to assert_max_bit_size"},"17843811134343075018":{"error_kind":"string","string":"Stack too 
deep"}}},"bytecode":"H4sIAAAAAAAA/+1bXXLbNhBeiXJi2XF+bDmJf6I6sWtP00wLECRBTqcznmmSQ/SNEslz6CZ97gF6ox4kpkNEK5oEI2PXljLeGQ8lrfRh91tgdwmYPfgq8urvY/V6cPXnXf314KaYzy6rqxJREOTaz6WSqfCTSRyKIJxEsYxlGIeZHyuVx0Gsk0miRSIDlcsiTFQhvsoAYQk3UdjeTU6DS/B+DVe4idqkw5JDQn+5OBw24Ao3UUM6rGsbAcW5SajsxpjEnEjCBSbwAttCtpJPEMJAXhPQg+/LZsJNSMnGE2+7uj4pbeYge4uBjC1CMrgmWWljn9FvVxt3mDkUbiLLibnDEJunQJu1TDtT4s4qXGounjBx8YyJi2cWLlxt5poXv8C95pRph31sc+A9rHYeGFR+U+dSwnhLSg4Hla8U3WHXnMKYHPmbihPcYD0HS3co3ERSFkWcELHRS9osu8bZqeEbXNfE8CvQJgYjgyVjtkwj4+rzC1jNBEMZCzwvS39nsCjLxqeLc8r47CIsqdTV2si0LLJChTrxJzJSUVQEhY7iICvCIM10LoNU+UmuRSHjPNehmuqoSLJpVOCkLTOlgiyZTGXoR+lExJlKRRFo5Ys0UzrLVBxFqVJZFBdxEvt+WqhYhFonIvJV4nPFZ7chPtTbQ5Tx2WPiYY+Ahzbb8+JaSOfpiImHEQEPd7le95l42K94sDVLq9zQNJjLslf9EtawWcJGczRLL4G+WfoAd9Ms3dZOjiT3ihCLslmijAWel6W/M1gUj3gevQa6poSyweHi9DWw7ZopswtLFaNqDeVDQizKHd0DoC0iJkYHDjHqyke3jVHHsMGQEIsyRoeE3OEYHTrEqMunW8ZoWuhpkSRZmMSxzkSk40ki0yLLolznqU6XiVEnFmWMjgi5wzE6gnnTuu7HhsfV9Q00HBsKN5Ftx2eUZ7+uWIRHkJIj8NTFj5tDV6zxisejXDBjoL/x+Al4GrMSd1bhUnPxhomLEyYuToDvOJNrXvxGyMUt/O48zuSaA78z+y3c5Logl35TH2cSxltScvijHGceA21uMfIWGHfoCIviQkLERlPv0I1r+AbXNTEIoE0MRqiPM8eEPr+D1UwwlLHA87L0dwaLQn2cSRmfU6A7zsRJ23W3jys+p9B9bANLxqtDWP8DmqsonMEaFgVsNEdROAP6oiBhNYpCh/iUSednWM2iQBkLPC9Lf2ewKNS7Iuewmomci9NzmCfydX+e64LT4AugP9e6ILQRLAETbkL+lIYR6sVLidUn5G9jTfjrEWJ5hPw9YuKvT8wfYWKTj4E24ZtGZgP536s48JAez6vyuyX3pS27SP+4uv7z7/i/o//7fyPV9TObbboXFt2eRbdv0b2y6A4surFFd2LRvbPoziy6c4vuvUX3waILLLrIovvDovuzRWfmx2F1NXE2a8g0FZfVe+EmcohwqfFjEeghLAqx/WqIMBnwfYM/4MEXZo1/nC3iQ21c8yQTbsTMb4wO7w58qnTls+xPYf76OcIr5TMa12APa3jEcyJijpnca7Hf+FbKXzP4Jrcc98ZN+HbF8WgOvTD2oxrP2MadBp353TayezCj50zH890Ur8LfQONjGSA9/r6pAVvIfnN1qdeFTmWh0iIN0ywLpuluDR8QhyVPpjY85E2rKM41Lip+GNd4Y95sWu+2vNmVGz8h7F5N97lhXE6fr+ZEyFyL/D1orz93kTdNr+g12FHPJx60xxwaPutBe+6tj+E14Bm/PbgZgzbbmniEhs+abPM6bBs02FbH7Vv8aBsH21SvTf0Ge9a9bp1W7++7bpn7nYe6ZZWHfh8e+v2a3Hu/fzyHhvoeUFvN2ah91+yvlPcIoxa877l/8Fp+h/cT6hgOnNzgqMmOXoMd61433lbv77tuGJ7MdR/o+RKobox48L/lRSz76PWo5ifOfZdENhg8vC9bl35NZ767WbOvR2+frNviNYxlxOzz4lx
i+PwC+7s5zGlSAAA=","debug_symbols":"7ZzbbuIwEED/Jc95sD3jG7+yWlXcWiEhQFxWWiH+fRNUJ2ygoSYGu5O8NTCZOieO7WPsHLPZfHL4eFus3te7bPTrmC3X0/F+sV4VR8eMnT/abcar8mi3H2/32UgwlWfz1az4i6tTnr0vlvNspMQpvwq1YPAz1iLTVTC3N4JBMfkZDIpDHWxuBQsULliguQz+nWf8h5Zb/NByQ7rlBu0yA1jZKDemW27kVblR2PZgYcBlFgb1nWCOWF8fbxCRfSQiOLhgAao9WGoXK629vLwCnhrgPQ5PD/Aeh2d6CU/aCp4Rj8OzA7zH4fGEx4jPoweau2Cwujn6THj4GQtJwiPbWEgSHjQ/D4lkTiOERN4ezKVxmbli0OSHA79O/Ho51Pfip8E9v1xL7NBH9lIM4qDupUbEQd1L6fBDXeHj2nbQO95LRYmCWpDxmXrwaQy8aB5CkFGfKPTIWJIPvSidlyBjX+mjxgH1qzovMk6XPmoqTmexmgi29yaC22cUBBX3CoiEiiN5IQnWc1DRnij0gIrJxKFHxWT86AXqHYGKycShl7KcWOaCJZMhp3tCWTTgQK8DvZQV4mn0oogxpKwQxFCnrCaJoA7VeaWsPMRQp+xHXqh9pnta1y9jytLzNCShOn9MWXrSp5ey9KRPj4z0eNELtAwXcaDXgR4Z6fGh17o8FcnISTgkZCQiHBIyg/2n/eDY+vsQ9nIEH46f7OVwP8oUiuylG8RB3UuRiIO6l9YRZbZK4oD6Vaip+Ey4lTiSis8ERELFZ6IsEJFU1CcOPSriE4WeoqI9UZbXKComE4feXTmxl/TKM8D7DPQ7oziYbBfL5eLjrfn+suMXW0I5Z7rqGzmz5r/LPJ3fIfbluYxXzzGDO3dJM1dXNBeNHvj2Fspw6W339OBqgkbbSH97q5xnejc61upq7TTvnt64B0RfTV3e3mwVLj10Tm+Eq+8Gm7f29qYaz/Su/hupmull9/Ta3VpzDUcFTG8b9f50fo9ey3Mv6sfefK9Kc6ybCnWn7bzTsBRHf8bbxXiynJdNVPnlYTV1LVZxuP+7cd+4Nm2zXU/ns8N2XrZuFw1bWVjBciHr9qr8CFgOssZwjjI5QPHPiwL8Aw==","file_map":{"18":{"source":"use crate::field::field_less_than;\nuse crate::runtime::is_unconstrained;\n\n// The low and high decomposition of the field modulus\nglobal PLO: Field = 53438638232309528389504892708671455233;\nglobal PHI: Field = 64323764613183177041862057485226039389;\n\npub(crate) global TWO_POW_128: Field = 0x100000000000000000000000000000000;\nglobal TWO_POW_64: Field = 0x10000000000000000;\n\n// Decomposes a single field into two 16 byte fields.\nfn compute_decomposition(mut x: Field) -> (Field, Field) {\n // Here's we're taking advantage of truncating 64 bit limbs from the input field\n // and then subtracting them from the input such the field division is equivalent to integer division.\n let low_lower_64 = (x as u64) as Field;\n x = (x - low_lower_64) / TWO_POW_64;\n let low_upper_64 = (x as u64) as Field;\n\n let 
high = (x - low_upper_64) / TWO_POW_64;\n let low = low_upper_64 * TWO_POW_64 + low_lower_64;\n\n (low, high)\n}\n\npub(crate) unconstrained fn decompose_hint(x: Field) -> (Field, Field) {\n compute_decomposition(x)\n}\n\nunconstrained fn lte_hint(x: Field, y: Field) -> bool {\n if x == y {\n true\n } else {\n field_less_than(x, y)\n }\n}\n\n// Assert that (alo > blo && ahi >= bhi) || (alo <= blo && ahi > bhi)\nfn assert_gt_limbs(a: (Field, Field), b: (Field, Field)) {\n let (alo, ahi) = a;\n let (blo, bhi) = b;\n /// Safety: borrow is enforced to be boolean due to its type.\n /// if borrow is 0, it asserts that (alo > blo && ahi >= bhi)\n /// if borrow is 1, it asserts that (alo <= blo && ahi > bhi)\n unsafe {\n let borrow = lte_hint(alo, blo);\n\n let rlo = alo - blo - 1 + (borrow as Field) * TWO_POW_128;\n let rhi = ahi - bhi - (borrow as Field);\n\n rlo.assert_max_bit_size::<128>();\n rhi.assert_max_bit_size::<128>();\n }\n}\n\n/// Decompose a single field into two 16 byte fields.\npub fn decompose(x: Field) -> (Field, Field) {\n if is_unconstrained() {\n compute_decomposition(x)\n } else {\n /// Safety: decomposition is properly checked below\n unsafe {\n // Take hints of the decomposition\n let (xlo, xhi) = decompose_hint(x);\n\n // Range check the limbs\n xlo.assert_max_bit_size::<128>();\n xhi.assert_max_bit_size::<128>();\n\n // Check that the decomposition is correct\n assert_eq(x, xlo + TWO_POW_128 * xhi);\n\n // Assert that the decomposition of P is greater than the decomposition of x\n assert_gt_limbs((PLO, PHI), (xlo, xhi));\n (xlo, xhi)\n }\n }\n}\n\npub fn assert_gt(a: Field, b: Field) {\n if is_unconstrained() {\n assert(\n /// Safety: already unconstrained\n unsafe { field_less_than(b, a) },\n );\n } else {\n // Decompose a and b\n let a_limbs = decompose(a);\n let b_limbs = decompose(b);\n\n // Assert that a_limbs is greater than b_limbs\n assert_gt_limbs(a_limbs, b_limbs)\n }\n}\n\npub fn assert_lt(a: Field, b: Field) {\n assert_gt(b, 
a);\n}\n\npub fn gt(a: Field, b: Field) -> bool {\n if is_unconstrained() {\n /// Safety: unsafe in unconstrained\n unsafe {\n field_less_than(b, a)\n }\n } else if a == b {\n false\n } else {\n /// Safety: Take a hint of the comparison and verify it\n unsafe {\n if field_less_than(a, b) {\n assert_gt(b, a);\n false\n } else {\n assert_gt(a, b);\n true\n }\n }\n }\n}\n\npub fn lt(a: Field, b: Field) -> bool {\n gt(b, a)\n}\n\nmod tests {\n // TODO: Allow imports from \"super\"\n use crate::field::bn254::{assert_gt, decompose, gt, lte_hint, PHI, PLO, TWO_POW_128};\n\n #[test]\n fn check_decompose() {\n assert_eq(decompose(TWO_POW_128), (0, 1));\n assert_eq(decompose(TWO_POW_128 + 0x1234567890), (0x1234567890, 1));\n assert_eq(decompose(0x1234567890), (0x1234567890, 0));\n }\n\n #[test]\n unconstrained fn check_decompose_unconstrained() {\n assert_eq(decompose(TWO_POW_128), (0, 1));\n assert_eq(decompose(TWO_POW_128 + 0x1234567890), (0x1234567890, 1));\n assert_eq(decompose(0x1234567890), (0x1234567890, 0));\n }\n\n #[test]\n unconstrained fn check_lte_hint() {\n assert(lte_hint(0, 1));\n assert(lte_hint(0, 0x100));\n assert(lte_hint(0x100, TWO_POW_128 - 1));\n assert(!lte_hint(0 - 1, 0));\n\n assert(lte_hint(0, 0));\n assert(lte_hint(0x100, 0x100));\n assert(lte_hint(0 - 1, 0 - 1));\n }\n\n #[test]\n fn check_assert_gt() {\n assert_gt(1, 0);\n assert_gt(0x100, 0);\n assert_gt((0 - 1), (0 - 2));\n assert_gt(TWO_POW_128, 0);\n assert_gt(0 - 1, 0);\n }\n\n #[test]\n unconstrained fn check_assert_gt_unconstrained() {\n assert_gt(1, 0);\n assert_gt(0x100, 0);\n assert_gt((0 - 1), (0 - 2));\n assert_gt(TWO_POW_128, 0);\n assert_gt(0 - 1, 0);\n }\n\n #[test]\n fn check_gt() {\n assert(gt(1, 0));\n assert(gt(0x100, 0));\n assert(gt((0 - 1), (0 - 2)));\n assert(gt(TWO_POW_128, 0));\n assert(!gt(0, 0));\n assert(!gt(0, 0x100));\n assert(gt(0 - 1, 0 - 2));\n assert(!gt(0 - 2, 0 - 1));\n }\n\n #[test]\n unconstrained fn check_gt_unconstrained() {\n assert(gt(1, 0));\n 
assert(gt(0x100, 0));\n assert(gt((0 - 1), (0 - 2)));\n assert(gt(TWO_POW_128, 0));\n assert(!gt(0, 0));\n assert(!gt(0, 0x100));\n assert(gt(0 - 1, 0 - 2));\n assert(!gt(0 - 2, 0 - 1));\n }\n\n #[test]\n fn check_plo_phi() {\n assert_eq(PLO + PHI * TWO_POW_128, 0);\n let p_bytes = crate::field::modulus_le_bytes();\n let mut p_low: Field = 0;\n let mut p_high: Field = 0;\n\n let mut offset = 1;\n for i in 0..16 {\n p_low += (p_bytes[i] as Field) * offset;\n p_high += (p_bytes[i + 16] as Field) * offset;\n offset *= 256;\n }\n assert_eq(p_low, PLO);\n assert_eq(p_high, PHI);\n }\n}\n","path":"std/field/bn254.nr"},"19":{"source":"pub mod bn254;\nuse crate::{runtime::is_unconstrained, static_assert};\nuse bn254::lt as bn254_lt;\n\nimpl Field {\n /// Asserts that `self` can be represented in `bit_size` bits.\n ///\n /// # Failures\n /// Causes a constraint failure for `Field` values exceeding `2^{bit_size}`.\n // docs:start:assert_max_bit_size\n pub fn assert_max_bit_size(self) {\n // docs:end:assert_max_bit_size\n static_assert(\n BIT_SIZE < modulus_num_bits() as u32,\n \"BIT_SIZE must be less than modulus_num_bits\",\n );\n self.__assert_max_bit_size(BIT_SIZE);\n }\n\n #[builtin(apply_range_constraint)]\n fn __assert_max_bit_size(self, bit_size: u32) {}\n\n /// Decomposes `self` into its little endian bit decomposition as a `[u1; N]` array.\n /// This slice will be zero padded should not all bits be necessary to represent `self`.\n ///\n /// # Failures\n /// Causes a constraint failure for `Field` values exceeding `2^N` as the resulting slice will not\n /// be able to represent the original `Field`.\n ///\n /// # Safety\n /// Values of `N` equal to or greater than the number of bits necessary to represent the `Field` modulus\n /// (e.g. 254 for the BN254 field) allow for multiple bit decompositions. 
This is due to how the `Field` will\n /// wrap around due to overflow when verifying the decomposition.\n #[builtin(to_le_bits)]\n fn _to_le_bits(self: Self) -> [u1; N] {}\n\n /// Decomposes `self` into its big endian bit decomposition as a `[u1; N]` array.\n /// This array will be zero padded should not all bits be necessary to represent `self`.\n ///\n /// # Failures\n /// Causes a constraint failure for `Field` values exceeding `2^N` as the resulting slice will not\n /// be able to represent the original `Field`.\n ///\n /// # Safety\n /// Values of `N` equal to or greater than the number of bits necessary to represent the `Field` modulus\n /// (e.g. 254 for the BN254 field) allow for multiple bit decompositions. This is due to how the `Field` will\n /// wrap around due to overflow when verifying the decomposition.\n #[builtin(to_be_bits)]\n fn _to_be_bits(self: Self) -> [u1; N] {}\n\n /// Decomposes `self` into its little endian bit decomposition as a `[u1; N]` array.\n /// This slice will be zero padded should not all bits be necessary to represent `self`.\n ///\n /// # Failures\n /// Causes a constraint failure for `Field` values exceeding `2^N` as the resulting slice will not\n /// be able to represent the original `Field`.\n ///\n /// # Safety\n /// The bit decomposition returned is canonical and is guaranteed to not overflow the modulus.\n // docs:start:to_le_bits\n pub fn to_le_bits(self: Self) -> [u1; N] {\n // docs:end:to_le_bits\n let bits = self._to_le_bits();\n\n if !is_unconstrained() {\n // Ensure that the byte decomposition does not overflow the modulus\n let p = modulus_le_bits();\n assert(bits.len() <= p.len());\n let mut ok = bits.len() != p.len();\n for i in 0..N {\n if !ok {\n if (bits[N - 1 - i] != p[N - 1 - i]) {\n assert(p[N - 1 - i] == 1);\n ok = true;\n }\n }\n }\n assert(ok);\n }\n bits\n }\n\n /// Decomposes `self` into its big endian bit decomposition as a `[u1; N]` array.\n /// This array will be zero padded should not all bits be 
necessary to represent `self`.\n ///\n /// # Failures\n /// Causes a constraint failure for `Field` values exceeding `2^N` as the resulting slice will not\n /// be able to represent the original `Field`.\n ///\n /// # Safety\n /// The bit decomposition returned is canonical and is guaranteed to not overflow the modulus.\n // docs:start:to_be_bits\n pub fn to_be_bits(self: Self) -> [u1; N] {\n // docs:end:to_be_bits\n let bits = self._to_be_bits();\n\n if !is_unconstrained() {\n // Ensure that the decomposition does not overflow the modulus\n let p = modulus_be_bits();\n assert(bits.len() <= p.len());\n let mut ok = bits.len() != p.len();\n for i in 0..N {\n if !ok {\n if (bits[i] != p[i]) {\n assert(p[i] == 1);\n ok = true;\n }\n }\n }\n assert(ok);\n }\n bits\n }\n\n /// Decomposes `self` into its little endian byte decomposition as a `[u8;N]` array\n /// This array will be zero padded should not all bytes be necessary to represent `self`.\n ///\n /// # Failures\n /// The length N of the array must be big enough to contain all the bytes of the 'self',\n /// and no more than the number of bytes required to represent the field modulus\n ///\n /// # Safety\n /// The result is ensured to be the canonical decomposition of the field element\n // docs:start:to_le_bytes\n pub fn to_le_bytes(self: Self) -> [u8; N] {\n // docs:end:to_le_bytes\n static_assert(\n N <= modulus_le_bytes().len(),\n \"N must be less than or equal to modulus_le_bytes().len()\",\n );\n // Compute the byte decomposition\n let bytes = self.to_le_radix(256);\n\n if !is_unconstrained() {\n // Ensure that the byte decomposition does not overflow the modulus\n let p = modulus_le_bytes();\n assert(bytes.len() <= p.len());\n let mut ok = bytes.len() != p.len();\n for i in 0..N {\n if !ok {\n if (bytes[N - 1 - i] != p[N - 1 - i]) {\n assert(bytes[N - 1 - i] < p[N - 1 - i]);\n ok = true;\n }\n }\n }\n assert(ok);\n }\n bytes\n }\n\n /// Decomposes `self` into its big endian byte decomposition as a `[u8;N]` 
array of length required to represent the field modulus\n /// This array will be zero padded should not all bytes be necessary to represent `self`.\n ///\n /// # Failures\n /// The length N of the array must be big enough to contain all the bytes of the 'self',\n /// and no more than the number of bytes required to represent the field modulus\n ///\n /// # Safety\n /// The result is ensured to be the canonical decomposition of the field element\n // docs:start:to_be_bytes\n pub fn to_be_bytes(self: Self) -> [u8; N] {\n // docs:end:to_be_bytes\n static_assert(\n N <= modulus_le_bytes().len(),\n \"N must be less than or equal to modulus_le_bytes().len()\",\n );\n // Compute the byte decomposition\n let bytes = self.to_be_radix(256);\n\n if !is_unconstrained() {\n // Ensure that the byte decomposition does not overflow the modulus\n let p = modulus_be_bytes();\n assert(bytes.len() <= p.len());\n let mut ok = bytes.len() != p.len();\n for i in 0..N {\n if !ok {\n if (bytes[i] != p[i]) {\n assert(bytes[i] < p[i]);\n ok = true;\n }\n }\n }\n assert(ok);\n }\n bytes\n }\n\n // docs:start:to_le_radix\n pub fn to_le_radix(self: Self, radix: u32) -> [u8; N] {\n // Brillig does not need an immediate radix\n if !crate::runtime::is_unconstrained() {\n static_assert(1 < radix, \"radix must be greater than 1\");\n static_assert(radix <= 256, \"radix must be less than or equal to 256\");\n static_assert(radix & (radix - 1) == 0, \"radix must be a power of 2\");\n }\n self.__to_le_radix(radix)\n }\n // docs:end:to_le_radix\n\n // docs:start:to_be_radix\n pub fn to_be_radix(self: Self, radix: u32) -> [u8; N] {\n // Brillig does not need an immediate radix\n if !crate::runtime::is_unconstrained() {\n crate::assert_constant(radix);\n }\n self.__to_be_radix(radix)\n }\n // docs:end:to_be_radix\n\n // `_radix` must be less than 256\n #[builtin(to_le_radix)]\n fn __to_le_radix(self, radix: u32) -> [u8; N] {}\n\n // `_radix` must be less than 256\n #[builtin(to_be_radix)]\n fn 
__to_be_radix(self, radix: u32) -> [u8; N] {}\n\n // Returns self to the power of the given exponent value.\n // Caution: we assume the exponent fits into 32 bits\n // using a bigger bit size impacts negatively the performance and should be done only if the exponent does not fit in 32 bits\n pub fn pow_32(self, exponent: Field) -> Field {\n let mut r: Field = 1;\n let b: [u1; 32] = exponent.to_le_bits();\n\n for i in 1..33 {\n r *= r;\n r = (b[32 - i] as Field) * (r * self) + (1 - b[32 - i] as Field) * r;\n }\n r\n }\n\n // Parity of (prime) Field element, i.e. sgn0(x mod p) = 0 if x `elem` {0, ..., p-1} is even, otherwise sgn0(x mod p) = 1.\n pub fn sgn0(self) -> u1 {\n self as u1\n }\n\n pub fn lt(self, another: Field) -> bool {\n if crate::compat::is_bn254() {\n bn254_lt(self, another)\n } else {\n lt_fallback(self, another)\n }\n }\n\n /// Convert a little endian byte array to a field element.\n /// If the provided byte array overflows the field modulus then the Field will silently wrap around.\n pub fn from_le_bytes(bytes: [u8; N]) -> Field {\n static_assert(\n N <= modulus_le_bytes().len(),\n \"N must be less than or equal to modulus_le_bytes().len()\",\n );\n let mut v = 1;\n let mut result = 0;\n\n for i in 0..N {\n result += (bytes[i] as Field) * v;\n v = v * 256;\n }\n result\n }\n\n /// Convert a big endian byte array to a field element.\n /// If the provided byte array overflows the field modulus then the Field will silently wrap around.\n pub fn from_be_bytes(bytes: [u8; N]) -> Field {\n let mut v = 1;\n let mut result = 0;\n\n for i in 0..N {\n result += (bytes[N - 1 - i] as Field) * v;\n v = v * 256;\n }\n result\n }\n}\n\n#[builtin(modulus_num_bits)]\npub comptime fn modulus_num_bits() -> u64 {}\n\n#[builtin(modulus_be_bits)]\npub comptime fn modulus_be_bits() -> [u1] {}\n\n#[builtin(modulus_le_bits)]\npub comptime fn modulus_le_bits() -> [u1] {}\n\n#[builtin(modulus_be_bytes)]\npub comptime fn modulus_be_bytes() -> [u8] 
{}\n\n#[builtin(modulus_le_bytes)]\npub comptime fn modulus_le_bytes() -> [u8] {}\n\n/// An unconstrained only built in to efficiently compare fields.\n#[builtin(field_less_than)]\nunconstrained fn __field_less_than(x: Field, y: Field) -> bool {}\n\npub(crate) unconstrained fn field_less_than(x: Field, y: Field) -> bool {\n __field_less_than(x, y)\n}\n\n// Convert a 32 byte array to a field element by modding\npub fn bytes32_to_field(bytes32: [u8; 32]) -> Field {\n // Convert it to a field element\n let mut v = 1;\n let mut high = 0 as Field;\n let mut low = 0 as Field;\n\n for i in 0..16 {\n high = high + (bytes32[15 - i] as Field) * v;\n low = low + (bytes32[16 + 15 - i] as Field) * v;\n v = v * 256;\n }\n // Abuse that a % p + b % p = (a + b) % p and that low < p\n low + high * v\n}\n\nfn lt_fallback(x: Field, y: Field) -> bool {\n if is_unconstrained() {\n /// Safety: unconstrained context\n unsafe {\n field_less_than(x, y)\n }\n } else {\n let x_bytes: [u8; 32] = x.to_le_bytes();\n let y_bytes: [u8; 32] = y.to_le_bytes();\n let mut x_is_lt = false;\n let mut done = false;\n for i in 0..32 {\n if (!done) {\n let x_byte = x_bytes[32 - 1 - i] as u8;\n let y_byte = y_bytes[32 - 1 - i] as u8;\n let bytes_match = x_byte == y_byte;\n if !bytes_match {\n x_is_lt = x_byte < y_byte;\n done = true;\n }\n }\n }\n x_is_lt\n }\n}\n\nmod tests {\n use crate::{panic::panic, runtime};\n use super::field_less_than;\n\n #[test]\n // docs:start:to_be_bits_example\n fn test_to_be_bits() {\n let field = 2;\n let bits: [u1; 8] = field.to_be_bits();\n assert_eq(bits, [0, 0, 0, 0, 0, 0, 1, 0]);\n }\n // docs:end:to_be_bits_example\n\n #[test]\n // docs:start:to_le_bits_example\n fn test_to_le_bits() {\n let field = 2;\n let bits: [u1; 8] = field.to_le_bits();\n assert_eq(bits, [0, 1, 0, 0, 0, 0, 0, 0]);\n }\n // docs:end:to_le_bits_example\n\n #[test]\n // docs:start:to_be_bytes_example\n fn test_to_be_bytes() {\n let field = 2;\n let bytes: [u8; 8] = field.to_be_bytes();\n 
assert_eq(bytes, [0, 0, 0, 0, 0, 0, 0, 2]);\n assert_eq(Field::from_be_bytes::<8>(bytes), field);\n }\n // docs:end:to_be_bytes_example\n\n #[test]\n // docs:start:to_le_bytes_example\n fn test_to_le_bytes() {\n let field = 2;\n let bytes: [u8; 8] = field.to_le_bytes();\n assert_eq(bytes, [2, 0, 0, 0, 0, 0, 0, 0]);\n assert_eq(Field::from_le_bytes::<8>(bytes), field);\n }\n // docs:end:to_le_bytes_example\n\n #[test]\n // docs:start:to_be_radix_example\n fn test_to_be_radix() {\n // 259, in base 256, big endian, is [1, 3].\n // i.e. 3 * 256^0 + 1 * 256^1\n let field = 259;\n\n // The radix (in this example, 256) must be a power of 2.\n // The length of the returned byte array can be specified to be\n // >= the amount of space needed.\n let bytes: [u8; 8] = field.to_be_radix(256);\n assert_eq(bytes, [0, 0, 0, 0, 0, 0, 1, 3]);\n assert_eq(Field::from_be_bytes::<8>(bytes), field);\n }\n // docs:end:to_be_radix_example\n\n #[test]\n // docs:start:to_le_radix_example\n fn test_to_le_radix() {\n // 259, in base 256, little endian, is [3, 1].\n // i.e. 
3 * 256^0 + 1 * 256^1\n let field = 259;\n\n // The radix (in this example, 256) must be a power of 2.\n // The length of the returned byte array can be specified to be\n // >= the amount of space needed.\n let bytes: [u8; 8] = field.to_le_radix(256);\n assert_eq(bytes, [3, 1, 0, 0, 0, 0, 0, 0]);\n assert_eq(Field::from_le_bytes::<8>(bytes), field);\n }\n // docs:end:to_le_radix_example\n\n #[test(should_fail_with = \"radix must be greater than 1\")]\n fn test_to_le_radix_1() {\n // this test should only fail in constrained mode\n if !runtime::is_unconstrained() {\n let field = 2;\n let _: [u8; 8] = field.to_le_radix(1);\n } else {\n panic(f\"radix must be greater than 1\");\n }\n }\n\n // TODO: Update this test to account for the Brillig restriction that the radix must be greater than 2\n //#[test]\n //fn test_to_le_radix_brillig_1() {\n // // this test should only fail in constrained mode\n // if runtime::is_unconstrained() {\n // let field = 1;\n // let out: [u8; 8] = field.to_le_radix(1);\n // crate::println(out);\n // let expected = [0; 8];\n // assert(out == expected, \"unexpected result\");\n // }\n //}\n\n #[test(should_fail_with = \"radix must be a power of 2\")]\n fn test_to_le_radix_3() {\n // this test should only fail in constrained mode\n if !runtime::is_unconstrained() {\n let field = 2;\n let _: [u8; 8] = field.to_le_radix(3);\n } else {\n panic(f\"radix must be a power of 2\");\n }\n }\n\n #[test]\n fn test_to_le_radix_brillig_3() {\n // this test should only fail in constrained mode\n if runtime::is_unconstrained() {\n let field = 1;\n let out: [u8; 8] = field.to_le_radix(3);\n let mut expected = [0; 8];\n expected[0] = 1;\n assert(out == expected, \"unexpected result\");\n }\n }\n\n #[test(should_fail_with = \"radix must be less than or equal to 256\")]\n fn test_to_le_radix_512() {\n // this test should only fail in constrained mode\n if !runtime::is_unconstrained() {\n let field = 2;\n let _: [u8; 8] = field.to_le_radix(512);\n } else {\n 
panic(f\"radix must be less than or equal to 256\")\n }\n }\n\n // TODO: Update this test to account for the Brillig restriction that the radix must be less than 512\n //#[test]\n //fn test_to_le_radix_brillig_512() {\n // // this test should only fail in constrained mode\n // if runtime::is_unconstrained() {\n // let field = 1;\n // let out: [u8; 8] = field.to_le_radix(512);\n // let mut expected = [0; 8];\n // expected[0] = 1;\n // assert(out == expected, \"unexpected result\");\n // }\n //}\n\n #[test]\n unconstrained fn test_field_less_than() {\n assert(field_less_than(0, 1));\n assert(field_less_than(0, 0x100));\n assert(field_less_than(0x100, 0 - 1));\n assert(!field_less_than(0 - 1, 0));\n }\n}\n","path":"std/field/mod.nr"},"62":{"source":"/// Add two external values to two registers that are folded across circuits.\npub fn main(\n external: [Field; 2],\n registers: [Field; 2],\n next_pc: Field,\n) -> pub ([Field; 2], Field) {\n assert(next_pc.lt(3));\n ([external[0] + registers[0], external[1] + registers[1]], next_pc)\n}\n","path":"/Users/autoparallel/Code/client-side-prover/examples/add_external/src/main.nr"}},"names":["main"],"brillig_names":["field_less_than","decompose_hint","lte_hint","directive_invert"]} \ No newline at end of file diff --git a/examples/square_zeroth/Nargo.toml b/examples/square_zeroth/Nargo.toml new file mode 100644 index 0000000..04e65ad --- /dev/null +++ b/examples/square_zeroth/Nargo.toml @@ -0,0 +1,6 @@ +[package] +authors = ["Colin Roberts"] +compiler_version = ">=0.36.0" +name = "square_zeroth" +type = "bin" +version = "0.1.0" diff --git a/examples/square_zeroth/src/main.nr b/examples/square_zeroth/src/main.nr new file mode 100644 index 0000000..8934167 --- /dev/null +++ b/examples/square_zeroth/src/main.nr @@ -0,0 +1,4 @@ +/// Square only the first register. 
+pub fn main(registers: [Field; 2], next_pc: Field) -> pub ([Field; 2], Field) { + ([registers[0] * registers[0], registers[1]], next_pc) +} diff --git a/examples/swap_memory/Nargo.toml b/examples/swap_memory/Nargo.toml new file mode 100644 index 0000000..d37da1c --- /dev/null +++ b/examples/swap_memory/Nargo.toml @@ -0,0 +1,6 @@ +[package] +authors = ["Colin Roberts"] +compiler_version = ">=0.36.0" +name = "swap_memory" +type = "bin" +version = "0.1.0" diff --git a/examples/swap_memory/src/main.nr b/examples/swap_memory/src/main.nr new file mode 100644 index 0000000..d262ea7 --- /dev/null +++ b/examples/swap_memory/src/main.nr @@ -0,0 +1,4 @@ +/// Swap the two registers. +pub fn main(registers: [Field; 2], next_pc: Field) -> pub ([Field; 2], Field) { + ([registers[1], registers[0]], next_pc) +} diff --git a/examples/swap_memory/target/swap_memory.json b/examples/swap_memory/target/swap_memory.json new file mode 100644 index 0000000..ed95743 --- /dev/null +++ b/examples/swap_memory/target/swap_memory.json @@ -0,0 +1 @@ +{"noir_version":"1.0.0-beta.2+1a2a08cbcb68646ff1aaef383cfc1798933c1355","hash":6520664474973846018,"abi":{"parameters":[{"name":"registers","type":{"kind":"array","length":2,"type":{"kind":"field"}},"visibility":"private"},{"name":"next_pc","type":{"kind":"field"},"visibility":"private"}],"return_type":{"abi_type":{"kind":"tuple","fields":[{"kind":"array","length":2,"type":{"kind":"field"}},{"kind":"field"}]},"visibility":"public"},"error_types":{}},"bytecode":"H4sIAAAAAAAA/9WSQQ6EMAhFW2vvAwUs7OYq01jvfwQ1dtHEpXXhSwgLyE8ewbuLeFRwd6bWf60TLMw1p4qEf0hWVIClLIqKorImJarKmq1YBkOmipsYbXDhuyx4BoZxWfCmsxvoPH/EeRroHAc6n/83t6z+3313g34W2n50d3Yk6PqGPAMAAA==","debug_symbols":"XYxLCoAwDAXvkrUn8Coi0k9aAqEpsRWk9O5+cCFdzhveNPBoa9woBdlhXhqwOFNI0k2tT2CVmCluw3wYJWMZPww1uZ8tZ8bhn1Uc+qr4lF7X134B","file_map":{},"names":["main"],"brillig_names":[]} \ No newline at end of file diff --git a/frontend/examples/noir_circuit_data/add_external.json 
b/frontend/examples/noir_circuit_data/add_external.json deleted file mode 100644 index 9dc5d4c..0000000 --- a/frontend/examples/noir_circuit_data/add_external.json +++ /dev/null @@ -1 +0,0 @@ -{"noir_version":"1.0.0-beta.1+03b58fa2dfcc8acc8cf5198b1b23b55676fbdb02","hash":6167922121693004072,"abi":{"parameters":[{"name":"external","type":{"kind":"array","length":2,"type":{"kind":"field"}},"visibility":"private"},{"name":"step_in","type":{"kind":"array","length":2,"type":{"kind":"field"}},"visibility":"public"}],"return_type":{"abi_type":{"kind":"array","length":2,"type":{"kind":"field"}},"visibility":"public"},"error_types":{}},"bytecode":"H4sIAAAAAAAA/7WSQQoDMQhFJ5nMfTTqRHe9SkMz9z9CW8ZCaJd1Hoi4eeDnp+Vke01efll933wT7Myj1YGEd6jWVYCl74qKovKoSjSUtVm3BoZMAw8xOuAk0pUnF/wHljgXXJlfCnStgfltgfm9fyzumjuZpjt7tp+7eIe/eQIHkT9F4AIAAA==","debug_symbols":"XYxLCoAwDAXvkrUn8Coi0k9aAqEpsRWk9O5+cCFdzhveNPBoa9woBdlhXhqwOFNI0k2tT2CVmCluw3wYJWMZPww1uZ8tZ8bhn1Uc+qr4lF7X134B","file_map":{},"names":["main"],"brillig_names":[]} \ No newline at end of file diff --git a/frontend/examples/noir_circuit_data/square_zeroth.json b/frontend/examples/noir_circuit_data/square_zeroth.json deleted file mode 100644 index 70f42dc..0000000 --- a/frontend/examples/noir_circuit_data/square_zeroth.json +++ /dev/null @@ -1 +0,0 @@ 
-{"noir_version":"1.0.0-beta.1+03b58fa2dfcc8acc8cf5198b1b23b55676fbdb02","hash":2978522905452580593,"abi":{"parameters":[{"name":"step_in","type":{"kind":"array","length":2,"type":{"kind":"field"}},"visibility":"public"}],"return_type":{"abi_type":{"kind":"array","length":2,"type":{"kind":"field"}},"visibility":"public"},"error_types":{}},"bytecode":"H4sIAAAAAAAA/62SSw6FMAhF+3kLggIWZm8rNtb9L0GNxm8c2ZMQEgY3HIJ3K3Gu4A781v9bJ+iYa04VCXtIVlSApXSKiqIyJCWqypqtWAZDpoqjGI2w8pYL38DQLgtO+vstWvn7hs6xofOy189duf9BOM2jezIBi8Z/+kQCAAA=","debug_symbols":"XYxLCoAwDAXvkrUn8Coi0k9aAqEpsRWk9O5+cCFdzhveNPBoa9woBdlhXhqwOFNI0k2tT2CVmCluw3wYJWMZPww1uZ8tZ8bhn1Uc+qr4lF7X134B","file_map":{},"names":["main"],"brillig_names":[]} \ No newline at end of file diff --git a/frontend/examples/noir_circuit_data/swap_memory.json b/frontend/examples/noir_circuit_data/swap_memory.json deleted file mode 100644 index 41bfe20..0000000 --- a/frontend/examples/noir_circuit_data/swap_memory.json +++ /dev/null @@ -1 +0,0 @@ -{"noir_version":"1.0.0-beta.1+03b58fa2dfcc8acc8cf5198b1b23b55676fbdb02","hash":16112970499548868061,"abi":{"parameters":[{"name":"step_in","type":{"kind":"array","length":2,"type":{"kind":"field"}},"visibility":"public"}],"return_type":{"abi_type":{"kind":"array","length":2,"type":{"kind":"field"}},"visibility":"public"},"error_types":{}},"bytecode":"H4sIAAAAAAAA/7WSQQqAMAwE2+qDkiaxyc2vWKz/f4KKCkXxZB0IITksLLveHXTbBPfk+o3nJhiYS4oFCSeIllWAJQ+KiqIyRyUqyposWwJDpoKLGC1w4Cst+AaGdlrwp2fX0HPX0POeRf+S946v7nB25M4K4O7a9UACAAA=","debug_symbols":"XYxLCoAwDAXvkrUn8Coi0k9aAqEpsRWk9O5+cCFdzhveNPBoa9woBdlhXhqwOFNI0k2tT2CVmCluw3wYJWMZPww1uZ8tZ8bhn1Uc+qr4lF7X134B","file_map":{},"names":["main"],"brillig_names":[]} \ No newline at end of file diff --git a/frontend/mock/fold.json b/frontend/mock/fold.json deleted file mode 100644 index 919d152..0000000 --- a/frontend/mock/fold.json +++ /dev/null @@ -1 +0,0 @@ 
-{"noir_version":"1.0.0-beta.1+03b58fa2dfcc8acc8cf5198b1b23b55676fbdb02","hash":12961822853839078970,"abi":{"parameters":[{"name":"x0","type":{"kind":"field"},"visibility":"private"},{"name":"w","type":{"kind":"array","length":2,"type":{"kind":"field"}},"visibility":"public"}],"return_type":{"abi_type":{"kind":"array","length":2,"type":{"kind":"field"}},"visibility":"public"},"error_types":{}},"bytecode":"H4sIAAAAAAAA/7WSWwoDMQhFJ48FaYwT/etWGmr2v4S2MxMa+lt7IEhADlw1bCf59eL2IVz1dlWCvVZrxZDwDkW7MFTuu6AgCz+KEJlUadq1gWIlw8FKA06mMzp74+KC38D0h7zeM3znTc7e4OCycWCe+8h+riNj/rrtOc/1Nuc/Lf0rT0Upl3gwAwAA","debug_symbols":"XYxLCoAwDAXvkrUn8Coi0k9aAqEpsRWk9O5+cCFdzhveNPBoa9woBdlhXhqwOFNI0k2tT2CVmCluw3wYJWMZPww1uZ8tZ8bhn1Uc+qr4lF7X134B","file_map":{},"names":["main"],"brillig_names":[]} \ No newline at end of file diff --git a/frontend/mock/mock.json b/frontend/mock/mock.json deleted file mode 100644 index d1a9128..0000000 --- a/frontend/mock/mock.json +++ /dev/null @@ -1 +0,0 @@ -{"noir_version":"1.0.0-beta.1+03b58fa2dfcc8acc8cf5198b1b23b55676fbdb02","hash":2759475469106495053,"abi":{"parameters":[{"name":"x0","type":{"kind":"field"},"visibility":"public"},{"name":"w0","type":{"kind":"field"},"visibility":"private"},{"name":"w1","type":{"kind":"field"},"visibility":"private"}],"return_type":null,"error_types":{}},"bytecode":"H4sIAAAAAAAA/62QQQoAIAgEVXpQ9YL+/6qKWhTBkw6IexqWZXrIOSYFef3fcww4q71S55q3WzN7oLPfJmIDlIN1nEwBAAA=","debug_symbols":"TYxLCsMwDAXvonUWTVuy8FVKCf7IQWBsI9uFYnL3KCGB7DRv0HRwaNoyU/SpgPp0CMnqSikKdXgcU8k67lSq5gpqfI8DYHRyTc91AE8BQU2v9StgmEKgZb5nZP5pJm0CnuhbtDdb//ky13/mZNE1xr10OMlv","file_map":{"63":{"source":"use http::parse;\n\n// pub fn main(data: str<1024>) {\n// parse(data);\n// }\n\npub fn main(x0: pub Field, w0: Field, w1: Field) {\n assert(x0 * w0 + w1 + 2 == 0);\n}","path":"/Users/autoparallel/Code/noir-web-prover-circuits/bin/src/main.nr"}},"names":["main"],"brillig_names":[]} \ No newline at end of file diff --git a/frontend/src/circom/mod.rs 
b/frontend/src/circom/mod.rs deleted file mode 100644 index b243310..0000000 --- a/frontend/src/circom/mod.rs +++ /dev/null @@ -1,172 +0,0 @@ -//! # Circom Module -//! -//! The `circom` module provides utilities for working with Circom circuits within the `proofs` -//! crate. It includes functionalities for handling R1CS (Rank-1 Constraint System) representations -//! of circuits, managing circuit inputs, and generating witnesses for the circuits. -//! -//! ## Modules -//! -//! - `r1cs`: Contains the implementation and utilities for working with R1CS representations of -//! Circom circuits. -//! - `wasm_witness`: Provides functionalities for generating witnesses using WebAssembly (only -//! available for `wasm32` target). -//! - `witness`: Contains utilities for generating witnesses for Circom circuits. -//! -//! ## Structs -//! -//! - `CircomInput`: Represents the input structure for Circom circuits, including step inputs and -//! additional parameters. -//! - `CircuitJson`: Represents the JSON structure of a Circom circuit, including constraints, -//! number of inputs, outputs, and variables. -//! - `CircomCircuit`: Represents a Circom circuit, including its R1CS representation and optional -//! witness data. 
- -use std::{ - collections::{BTreeMap, HashMap}, - env::current_dir, - fs, - io::{BufReader, Cursor, Read, Seek, SeekFrom}, - path::PathBuf, - process::Command, - sync::Arc, -}; - -use bellpepper_core::{num::AllocatedNum, ConstraintSystem, LinearCombination, SynthesisError}; -use byteorder::{LittleEndian, ReadBytesExt}; -use ff::PrimeField; -use r1cs::R1CS; -use serde::{Deserialize, Serialize}; - -use super::*; - -pub mod r1cs; -#[cfg(target_arch = "wasm32")] pub mod wasm_witness; -pub mod witness; - -/// Circom input -#[derive(Debug, Serialize, Deserialize)] -pub struct CircomInput { - /// Step inputs - pub step_in: Vec, - /// Extra parameters - #[serde(flatten)] - pub extra: HashMap, -} - -/// Circuit JSON -#[derive(Serialize, Deserialize)] -pub struct CircuitJson { - /// Constraints - pub constraints: Vec>>, - /// Number of inputs - #[serde(rename = "nPubInputs")] - pub num_inputs: usize, - /// Number of outputs - #[serde(rename = "nOutputs")] - pub num_outputs: usize, - /// Number of variables - #[serde(rename = "nVars")] - pub num_variables: usize, -} - -/// Circom circuit -#[derive(Clone)] -pub struct CircomCircuit { - /// R1CS - pub r1cs: Arc, - /// Witness - pub witness: Option>>, -} - -// NOTE (Colin): This is added so we can cache only the active circuits we are using. -#[allow(clippy::derivable_impls)] -impl Default for CircomCircuit { - fn default() -> Self { Self { r1cs: Arc::new(R1CS::default()), witness: None } } -} - -impl CircomCircuit { - /// Return the arity of the circuit ie the number of public inputs - pub fn arity(&self) -> usize { self.r1cs.num_public_inputs } - - /// Vanilla synthesize - /// - /// This function synthesizes the circuit using the provided constraint system. - /// - /// # Arguments - /// - /// * `cs`: The constraint system to use for synthesis. - /// * `z`: The witness values to use for synthesis. 
- pub fn vanilla_synthesize>>( - &self, - cs: &mut CS, - z: &[AllocatedNum>], - ) -> Result>>, SynthesisError> { - let witness = &self.witness; - - let mut vars: Vec>> = vec![]; - let mut z_out: Vec>> = vec![]; - let pub_output_count = self.r1cs.num_public_outputs; - - for i in 1..self.r1cs.num_inputs { - // Public inputs do not exist, so we alloc, and later enforce equality from z values - let f: F = { - match witness { - None => F::::ONE, - Some(w) => w[i], - } - }; - let v = AllocatedNum::alloc(cs.namespace(|| format!("public_{}", i)), || Ok(f))?; - - vars.push(v.clone()); - if i <= pub_output_count { - // public output - z_out.push(v); - } - } - for i in 0..self.r1cs.num_aux { - // Private witness trace - let f: F = { - match witness { - None => F::::ONE, - Some(w) => w[i + self.r1cs.num_inputs], - } - }; - - let v = AllocatedNum::alloc(cs.namespace(|| format!("aux_{}", i)), || Ok(f))?; - vars.push(v); - } - - let make_lc = |lc_data: Vec<(usize, F)>| { - let res = lc_data.iter().fold( - LinearCombination::>::zero(), - |lc: LinearCombination>, (index, coeff)| { - lc + if *index > 0 { - (*coeff, vars[*index - 1].get_variable()) - } else { - (*coeff, CS::one()) - } - }, - ); - res - }; - for (i, constraint) in self.r1cs.constraints.iter().enumerate() { - cs.enforce( - || format!("constraint {}", i), - |_| make_lc(constraint.0.clone()), - |_| make_lc(constraint.1.clone()), - |_| make_lc(constraint.2.clone()), - ); - } - - for i in (pub_output_count + 1)..self.r1cs.num_inputs { - cs.enforce( - || format!("pub input enforce {}", i), - |lc| lc + z[i - 1 - pub_output_count].get_variable(), - |lc| lc + CS::one(), - |lc| lc + vars[i - 1].get_variable(), - ); - } - - Ok(z_out) - } -} diff --git a/frontend/src/circom/r1cs.rs b/frontend/src/circom/r1cs.rs deleted file mode 100644 index 031dbb8..0000000 --- a/frontend/src/circom/r1cs.rs +++ /dev/null @@ -1,277 +0,0 @@ -//! # R1CS Module -//! -//! 
The `r1cs` module provides functionalities for handling Rank-1 Constraint System (R1CS) -//! representations of Circom circuits. It includes utilities for reading, parsing, and -//! managing R1CS data, which is essential for circuit synthesis and proof generation. -//! -//! ## Structs -//! -//! - `R1CS`: Represents the R1CS structure, including the number of inputs, outputs, variables, and -//! constraints. -//! - `Header`: Represents the header of an R1CS file, including field size and various counts. -//! -//! ## Type Definitions -//! -//! - `Constraint`: A type alias for a tuple representing a constraint in the R1CS, consisting of -//! vectors of pairs of indices and field elements. -//! -//! ## Functions -//! -//! - `read_field`: Reads a field element from a reader. -//! - `read_header`: Reads the header of an R1CS file from a reader. -//! - `read_constraint_vec`: Reads a vector of constraints from a reader. -//! - `read_constraints`: Reads all constraints from a reader based on the R1CS header. -use fs::OpenOptions; - -use super::*; -// This was borrowed from `nova-scotia`. Big thank you for this middleware! -// some codes borrowed from https://github.com/poma/zkutil/blob/master/src/r1cs_reader.rs - -/// Constraint type -pub type Constraint = (Vec<(usize, F)>, Vec<(usize, F)>, Vec<(usize, F)>); - -/// R1CS type -#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] -pub struct R1CS { - /// Number of private inputs - pub num_private_inputs: usize, - /// Number of public inputs - pub num_public_inputs: usize, - /// Number of public outputs - pub num_public_outputs: usize, - /// Number of inputs - pub num_inputs: usize, - /// Number of auxiliary variables - pub num_aux: usize, - /// Number of variables - pub num_variables: usize, - /// Constraints - pub constraints: Vec, -} - -// NOTE (Colin): This is added so we can cache only the active circuits we are using. 
-#[allow(clippy::derivable_impls)] -impl Default for R1CS { - fn default() -> Self { - Self { - num_private_inputs: 0, - num_public_inputs: 0, - num_public_outputs: 0, - num_inputs: 0, - num_aux: 0, - num_variables: 0, - constraints: vec![], - } - } -} - -/// R1CSFile's header -#[derive(Debug, Default)] -pub struct Header { - /// Field size - pub field_size: u32, - /// Prime size - pub prime_size: Vec, - /// Number of wires - pub n_wires: u32, - /// Number of public outputs - pub n_pub_out: u32, - /// Number of public inputs - pub n_pub_in: u32, - /// Number of private inputs - pub n_prv_in: u32, - /// Number of labels - pub n_labels: u64, - /// Number of constraints - pub n_constraints: u32, -} - -impl TryFrom<&R1CSType> for R1CS { - type Error = ProofError; - - fn try_from(value: &R1CSType) -> Result { - match value { - R1CSType::File(path) => R1CS::try_from(path), - R1CSType::Raw(bytes) => R1CS::try_from(&bytes[..]), - } - } -} - -impl TryFrom<&[u8]> for R1CS { - type Error = ProofError; - - fn try_from(value: &[u8]) -> Result { - let cursor = BufReader::new(Cursor::new(value)); - from_reader(cursor) - } -} - -impl TryFrom<&PathBuf> for R1CS { - type Error = ProofError; - - fn try_from(filename: &PathBuf) -> Result { - let reader = BufReader::new(OpenOptions::new().read(true).open(filename)?); - from_reader(reader) - } -} - -/// Reads an R1CS from a reader -fn from_reader(mut reader: R) -> Result { - let mut magic = [0u8; 4]; - reader.read_exact(&mut magic)?; - assert_eq!(magic, [0x72, 0x31, 0x63, 0x73]); - - let version = reader.read_u32::()?; - assert_eq!(version, 1); - - let num_sections = reader.read_u32::()?; - - // section type -> file offset - let mut section_offsets = HashMap::::new(); - let mut section_sizes = HashMap::::new(); - - // get file offset of each section - for _ in 0..num_sections { - let section_type = reader.read_u32::()?; - let section_size = reader.read_u64::()?; - let offset = reader.stream_position()?; - 
section_offsets.insert(section_type, offset); - section_sizes.insert(section_type, section_size); - reader.seek(SeekFrom::Current(section_size as i64))?; - } - - let header_type = 1; - let constraint_type = 2; - let wire2label_type = 3; - - reader - .seek(SeekFrom::Start(*section_offsets.get(&header_type).ok_or(ProofError::MissingSection)?))?; - let header_size = section_sizes.get(&header_type).ok_or(ProofError::MissingSection)?; - let header = read_header(&mut reader, *header_size)?; - assert_eq!(header.field_size, 32); - - reader.seek(SeekFrom::Start( - *section_offsets.get(&constraint_type).ok_or(ProofError::MissingSection)?, - ))?; - let constraints = read_constraints(&mut reader, &header)?; - - reader.seek(SeekFrom::Start( - *section_offsets.get(&wire2label_type).ok_or(ProofError::MissingSection)?, - ))?; - - let num_public_inputs = header.n_pub_in as usize; - let num_private_inputs = header.n_prv_in as usize; - let num_public_outputs = header.n_pub_out as usize; - let num_variables = header.n_wires as usize; - let num_inputs = (1 + header.n_pub_in + header.n_pub_out) as usize; // TODO: This seems... odd... - let num_aux = num_variables - num_inputs; - Ok(R1CS { - num_private_inputs, - num_public_inputs, - num_public_outputs, - num_inputs, - num_aux, - num_variables, - constraints, - }) -} - -/// Reads a field from a reader -fn read_field(mut reader: R) -> Result, ProofError> { - let mut repr = F::::ZERO.to_repr(); - for digit in repr.as_mut().iter_mut() { - *digit = reader.read_u8()?; - } - let fr = F::::from_repr(repr); - if fr.is_some().into() { - Ok(fr.unwrap()) - } else { - Err(ProofError::Other("Failed to convert representation to field element".to_string())) - } -} - -/// Reads a header from a reader -/// -/// # Arguments -/// -/// * `reader`: The reader to read the header from. -/// * `size`: The size of the header. -/// -/// # Returns -/// -/// The header. 
-fn read_header(mut reader: R, size: u64) -> Result { - let field_size = reader.read_u32::()?; - let mut prime_size = vec![0u8; field_size as usize]; - reader.read_exact(&mut prime_size)?; - assert_eq!(size, 32 + field_size as u64); - - Ok(Header { - field_size, - prime_size, - n_wires: reader.read_u32::()?, - n_pub_out: reader.read_u32::()?, - n_pub_in: reader.read_u32::()?, - n_prv_in: reader.read_u32::()?, - n_labels: reader.read_u64::()?, - n_constraints: reader.read_u32::()?, - }) -} - -/// Reads a constraint vector from a reader -/// -/// # Arguments -/// -/// * `reader`: The reader to read the constraint vector from. -/// -/// # Returns -/// -/// The constraint vector. -fn read_constraint_vec(mut reader: R) -> Result)>, ProofError> { - let n_vec = reader.read_u32::()? as usize; - let mut vec = Vec::with_capacity(n_vec); - for _ in 0..n_vec { - vec.push((reader.read_u32::()? as usize, read_field::<&mut R>(&mut reader)?)); - } - Ok(vec) -} - -/// Reads constraints from a reader -/// -/// # Arguments -/// -/// * `reader`: The reader to read the constraints from. -/// * `header`: The header of the R1CS. -/// -/// # Returns -/// -/// The constraints. -fn read_constraints( - mut reader: R, - header: &Header, -) -> Result, ProofError> { - // todo check section size - let mut vec = Vec::with_capacity(header.n_constraints as usize); - for _ in 0..header.n_constraints { - let a = read_constraint_vec(&mut reader)?; - let b = read_constraint_vec(&mut reader)?; - let c = read_constraint_vec(&mut reader)?; - vec.push((a, b, c)); - } - Ok(vec) -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - #[tracing_test::traced_test] - fn test_r1cs_from_bin() { - let r1cs = R1CS::try_from(crate::tests::inputs::ADD_EXTERNAL_R1CS).unwrap(); - assert_eq!(r1cs.num_inputs, 5); // TODO: What is the 5th input?? 
- assert_eq!(r1cs.num_private_inputs, 2); - assert_eq!(r1cs.num_public_inputs, 2); - assert_eq!(r1cs.num_public_outputs, 2); - } -} diff --git a/frontend/src/circom/wasm_witness.rs b/frontend/src/circom/wasm_witness.rs deleted file mode 100644 index 34042d4..0000000 --- a/frontend/src/circom/wasm_witness.rs +++ /dev/null @@ -1,36 +0,0 @@ -use tracing::{debug, info}; -use wasm_bindgen::prelude::*; - -#[wasm_bindgen(getter_with_clone)] -#[derive(Debug)] -pub struct WitnessOutput { - pub data: js_sys::Uint8Array, -} - -#[wasm_bindgen] -impl WitnessOutput { - #[wasm_bindgen(constructor)] - pub fn new(wit: js_sys::Uint8Array) -> WitnessOutput { Self { data: wit } } -} - -#[wasm_bindgen] -extern "C" { - #[wasm_bindgen(js_namespace = witness, js_name = createWitness)] - async fn create_witness_js(input: &JsValue, opcode: u64) -> JsValue; -} - -#[wasm_bindgen] -pub async fn create_witness(input: JsValue, opcode: u64) -> Result { - // Convert the Rust WitnessInput to a JsValue - let js_witnesses_output = create_witness_js(&input, opcode).await; - - // Call JavaScript function and await the Promise - info!("result: {:?}", js_witnesses_output); - let js_obj = js_sys::Object::from(js_witnesses_output); - let data_value = js_sys::Reflect::get(&js_obj, &JsValue::from_str("data"))?; - let array = js_sys::Array::from(&data_value); - let data = js_sys::Uint8Array::new(&array); - - debug!("data: {:?}", data); - Ok(WitnessOutput { data }) -} diff --git a/frontend/src/circom/witness.rs b/frontend/src/circom/witness.rs deleted file mode 100644 index 8d68452..0000000 --- a/frontend/src/circom/witness.rs +++ /dev/null @@ -1,261 +0,0 @@ -//! # Witness Module -//! -//! The `witness` module provides functionalities for generating and loading witnesses from various -//! sources. It includes functions for generating witnesses from browser types, WASM files, and -//! witnesscalc files. -//! -//! ## Functions -//! -//! 
- `generate_witness_from_browser_type`: Generates a witness from a browser type. -//! - `generate_witness_from_generator_type`: Generates a witness from a generator type. -//! - `generate_witness_from_graph`: Generates a witness from a graph. -//! - `generate_witness_from_witnesscalc_file`: Generates a witness from a witnesscalc file. -//! - `load_witness_from_bin_reader`: Loads a witness from a bin reader. -//! - `read_field`: Reads a field from a reader. - -use fs::OpenOptions; - -use super::*; - -/// Generates a witness from a browser type -/// -/// # Arguments -/// -/// * `circom_input` - A `CircomInput` struct. -/// * `opcode` - A `u64` representing the opcode. -/// -/// # Returns -/// -/// A `Result` containing a vector of field elements. -#[allow(unused_variables)] -pub async fn generate_witness_from_browser_type( - circom_input: CircomInput, - opcode: u64, -) -> Result>, ProofError> { - #[cfg(target_arch = "wasm32")] - { - let js_witness_input = serde_wasm_bindgen::to_value(&circom_input).map_err(ProofError::from)?; - - let js_witness = - crate::circom::wasm_witness::create_witness(js_witness_input, opcode).await.unwrap(); - - let js_computed_witnesses: Vec = js_witness.data.to_vec(); - let witnesses = - load_witness_from_bin_reader(BufReader::new(Cursor::new(js_computed_witnesses)))?; - - return Ok(witnesses); - } - #[cfg(not(target_arch = "wasm32"))] - Err(ProofError::Other(String::from( - "Browser type witness generation cannot be generated in process", - ))) -} - -/// Generates a witness from a generator type -/// -/// # Arguments -/// -/// * `input_json` - A string slice that holds the input JSON. -/// * `witness_generator_type` - A `WitnessGeneratorType` enum. -/// -/// # Returns -/// -/// A `Result` containing a vector of field elements. 
-pub fn generate_witness_from_generator_type( - input_json: &str, - witness_generator_type: &WitnessGeneratorType, -) -> Result>, ProofError> { - match witness_generator_type { - WitnessGeneratorType::Browser => { - panic!("browser type witness generation cannot be generated in process") - }, - WitnessGeneratorType::Wasm { path, wtns_path } => - generate_witness_from_wasm_file(input_json, &PathBuf::from(path), &PathBuf::from(wtns_path)), - WitnessGeneratorType::Path(path) => generate_witness_from_witnesscalc_file(input_json, path), - WitnessGeneratorType::Raw(graph_data) => generate_witness_from_graph(input_json, graph_data), - } -} - -/// Generates a witness from a graph -/// -/// # Arguments -/// -/// * `input_json` - A string slice that holds the input JSON. -/// * `graph_data` - A reference to the graph data. -/// -/// # Returns -/// -/// A `Result` containing a vector of field elements. -pub fn generate_witness_from_graph( - input_json: &str, - graph_data: &[u8], -) -> Result::Scalar>, ProofError> { - #[cfg(not(target_arch = "wasm32"))] - { - let witness = circom_witnesscalc::calc_witness(input_json, graph_data)?; - let result = witness - .iter() - .map(|elem| { - as PrimeField>::from_str_vartime(elem.to_string().as_str()) - .ok_or_else(|| ProofError::Other("Failed to parse field element".to_string())) - }) - .collect::>, ProofError>>()?; - Ok(result) - } - #[cfg(target_arch = "wasm32")] - todo!("circom_witnesscalc not supported in wasm"); -} - -/// Generates a witness from a witnesscalc file -/// -/// # Arguments -/// -/// * `witness_input_json` - A string slice that holds the witness input JSON. -/// * `graph_path` - A reference to the path of the witnesscalc file. -/// -/// # Returns -/// -/// A `Result` containing a vector of field elements. 
-pub fn generate_witness_from_witnesscalc_file( - witness_input_json: &str, - graph_path: &PathBuf, -) -> Result>, ProofError> { - #[cfg(not(target_arch = "wasm32"))] - { - let mut file = std::fs::File::open(graph_path)?; - let mut graph_data = Vec::new(); - file.read_to_end(&mut graph_data)?; - - let witness = circom_witnesscalc::calc_witness(witness_input_json, &graph_data)?; - let result = witness - .iter() - .map(|elem| { - as PrimeField>::from_str_vartime(elem.to_string().as_str()) - .ok_or_else(|| ProofError::Other("Failed to parse field element".to_string())) - }) - .collect::>, ProofError>>()?; - Ok(result) - } - #[cfg(target_arch = "wasm32")] - todo!("circom_witnesscalc not supported in wasm"); -} - -#[warn(missing_docs, clippy::missing_docs_in_private_items)] -/// Generates a witness from a WASM file. -/// -/// # Arguments -/// -/// * `input_json` - A string slice that holds the input JSON. -/// * `wasm_path` - A reference to the path of the WASM file. -/// * `wtns_path` - A reference to the path of the witness file. -/// -/// # Returns -/// -/// A vector of field elements. -pub fn generate_witness_from_wasm_file( - input_json: &str, - wasm_path: &PathBuf, - wtns_path: &PathBuf, -) -> Result>, ProofError> { - let root = current_dir()?; - let witness_generator_input = root.join("circom_input.json"); - fs::write(&witness_generator_input, input_json)?; - - let witness_js = wasm_path - .parent() - .ok_or_else(|| ProofError::Other("Invalid wasm path".to_string()))? - .join("generate_witness.js"); - - let output = Command::new("node") - .arg(witness_js) - .arg(wasm_path) - .arg(&witness_generator_input) - .arg(wtns_path) - .output() - .expect("failed to execute process"); - if !output.stdout.is_empty() || !output.stderr.is_empty() { - debug!( - "{}", - std::str::from_utf8(&output.stdout).map_err(|e| ProofError::Other(e.to_string()))? - ); - error!( - "{}", - std::str::from_utf8(&output.stderr).map_err(|e| ProofError::Other(e.to_string()))? 
- ); - } - fs::remove_file(witness_generator_input)?; - let reader = OpenOptions::new().read(true).open(wtns_path).expect("unable to open."); - let witness = load_witness_from_bin_reader(BufReader::new(reader)); - fs::remove_file(wtns_path)?; - witness -} - -/// Loads a witness from a bin reader -/// -/// # Arguments -/// -/// * `reader` - A reference to the reader. -/// -/// # Returns -/// -/// A `Result` containing a vector of field elements. -pub fn load_witness_from_bin_reader(mut reader: R) -> Result>, ProofError> { - let mut wtns_header = [0u8; 4]; - reader.read_exact(&mut wtns_header)?; - assert_eq!(wtns_header, [119, 116, 110, 115]); - - let version = reader.read_u32::()?; - assert!(version <= 2); - - let num_sections = reader.read_u32::()?; - assert_eq!(num_sections, 2); - - // read the first section - let sec_type = reader.read_u32::()?; - assert_eq!(sec_type, 1); - - let sec_size = reader.read_u64::()?; - assert_eq!(sec_size, 4 + 32 + 4); - - let field_size = reader.read_u32::()?; - assert_eq!(field_size, 32); - - let mut prime = vec![0u8; field_size as usize]; - reader.read_exact(&mut prime)?; - - let witness_len = reader.read_u32::()?; - - let sec_type = reader.read_u32::()?; - assert_eq!(sec_type, 2); - - let sec_size = reader.read_u64::()?; - assert_eq!(sec_size, (witness_len * field_size) as u64); - - let mut result = Vec::with_capacity(witness_len as usize); - for _ in 0..witness_len { - result.push(read_field(&mut reader)?); - } - Ok(result) -} - -/// Reads a field from a reader -/// -/// # Arguments -/// -/// * `reader` - A reference to the reader. -/// -/// # Returns -/// -/// A `Result` containing a field element. 
-pub(crate) fn read_field(mut reader: R) -> Result, ProofError> { - let mut repr = F::::ZERO.to_repr(); - for digit in repr.as_mut().iter_mut() { - *digit = reader.read_u8()?; - } - let fr = F::::from_repr(repr); - if fr.is_some().into() { - Ok(fr.unwrap()) - } else { - Err(ProofError::Other("Failed to convert representation to field element".to_string())) - } -} diff --git a/frontend/src/noir/tests.rs b/frontend/src/noir/tests.rs index cae9cef..e575543 100644 --- a/frontend/src/noir/tests.rs +++ b/frontend/src/noir/tests.rs @@ -1,8 +1,3 @@ -// TODO: This module is so I can see if we can actually fold these noir circuits properly. I'm going -// to write code here to make it work that doesn't effect the circom build at all. I found bringing -// those together in some generic way is quite painful and truth be told would likely be easier to -// just completely rebuild. - use std::path::Path; use client_side_prover::{ @@ -13,7 +8,6 @@ use tracing::trace; use tracing_test::traced_test; use super::*; -use crate::program::utils; const ADD_EXTERNAL: &[u8] = include_bytes!("../../examples/noir_circuit_data/add_external.json"); const SQUARE_ZEROTH: &[u8] = include_bytes!("../../examples/noir_circuit_data/square_zeroth.json"); diff --git a/frontend/src/program/mod.rs b/frontend/src/program/mod.rs index 07eba9d..58c1ff2 100644 --- a/frontend/src/program/mod.rs +++ b/frontend/src/program/mod.rs @@ -22,19 +22,14 @@ use client_side_prover::{ use data::{Expanded, InitializedSetup}; use proof::FoldingProof; use tracing::trace; -use utils::into_input_json; use super::*; use crate::{ circom::witness::generate_witness_from_browser_type, - program::{ - data::{ProofParams, SetupParams}, - utils::into_circom_input, - }, + program::data::{ProofParams, SetupParams}, }; pub mod data; -pub mod utils; // TODO: Consider moving contents of mod.rs files to a separate files. mod.rs // files should only be used to adjust the visibility of exported items. 
diff --git a/frontend/src/program/utils.rs b/frontend/src/program/utils.rs deleted file mode 100644 index a295d40..0000000 --- a/frontend/src/program/utils.rs +++ /dev/null @@ -1,182 +0,0 @@ -//! # Utils Module -//! -//! The `utils` module contains utility functions used throughout the proof system. -//! -//! ## Functions -//! -//! - `next_rom_index_and_pc`: Computes the next ROM index and program counter. -use bellpepper_core::{ - boolean::{AllocatedBit, Boolean}, - LinearCombination, -}; -use itertools::Itertools; -use num_bigint::BigInt; - -use super::*; -use crate::circom::CircomInput; - -/// Computes the next ROM index and program counter. -#[allow(clippy::type_complexity)] -pub fn next_rom_index_and_pc>>( - cs: &mut CS, - rom_index: &AllocatedNum>, - allocated_rom: &[AllocatedNum>], - pc: &AllocatedNum>, -) -> Result<(AllocatedNum>, AllocatedNum>), SynthesisError> { - // Compute a selector for the current rom_index in allocated_rom - let current_rom_selector = - get_selector_vec_from_index(cs.namespace(|| "rom selector"), rom_index, allocated_rom.len())?; - - // Enforce that allocated_rom[rom_index] = pc - for (rom, bit) in allocated_rom.iter().zip_eq(current_rom_selector.iter()) { - // if bit = 1, then rom = pc - // bit * (rom - pc) = 0 - cs.enforce( - || "enforce bit = 1 => rom = pc", - |lc| lc + &bit.lc(CS::one(), F::::ONE), - |lc| lc + rom.get_variable() - pc.get_variable(), - |lc| lc, - ); - } - - // Get the index of the current rom, or the index of the invalid rom if no match - let current_rom_index = current_rom_selector - .iter() - .position(|bit| bit.get_value().is_some_and(|v| v)) - .unwrap_or_default(); - let next_rom_index = current_rom_index + 1; - - let rom_index_next = AllocatedNum::alloc_infallible(cs.namespace(|| "next rom index"), || { - F::::from(next_rom_index as u64) - }); - cs.enforce( - || " rom_index + 1 - next_rom_index_num = 0", - |lc| lc, - |lc| lc, - |lc| lc + rom_index.get_variable() + CS::one() - rom_index_next.get_variable(), - 
); - - // Allocate the next pc without checking. - // The next iteration will check whether the next pc is valid. - let pc_next = AllocatedNum::alloc(cs.namespace(|| "next pc"), || { - let next_value = allocated_rom - .get(next_rom_index) - .and_then(|v| v.get_value()) - .and_then(|value| if value == F::::from(u64::MAX) { None } else { Some(value) }); - - let current_value = allocated_rom - .get(current_rom_index) - .and_then(|v| v.get_value()) - .ok_or(SynthesisError::AssignmentMissing)?; - - Ok(next_value.unwrap_or(current_value)) - })?; - - Ok((rom_index_next, pc_next)) -} - -/// Computes the selector vector from the given index. -pub fn get_selector_vec_from_index>>( - mut cs: CS, - target_index: &AllocatedNum>, - num_indices: usize, -) -> Result, SynthesisError> { - // TODO (Colin): This breaks currently with the hacky way of handling circuit in pp - // assert_ne!(num_indices, 0); - - // Compute the selector vector non-deterministically - let selector = (0..num_indices) - .map(|idx| { - // b <- idx == target_index - Ok(Boolean::Is(AllocatedBit::alloc( - cs.namespace(|| format!("allocate s_{:?}", idx)), - target_index.get_value().map(|v| v == F::::from(idx as u64)), - )?)) - }) - .collect::, SynthesisError>>()?; - - // Enforce ∑ selector[i] = 1 - { - let selected_sum = selector - .iter() - .fold(LinearCombination::zero(), |lc, bit| lc + &bit.lc(CS::one(), F::::ONE)); - cs.enforce( - || "exactly-one-selection", - |_| selected_sum, - |lc| lc + CS::one(), - |lc| lc + CS::one(), - ); - } - - // Enforce `target_index - ∑ i * selector[i] = 0`` - { - let selected_value = - selector.iter().enumerate().fold(LinearCombination::zero(), |lc, (i, bit)| { - lc + &bit.lc(CS::one(), F::::from(i as u64)) - }); - cs.enforce( - || "target_index - ∑ i * selector[i] = 0", - |lc| lc, - |lc| lc, - |lc| lc + target_index.get_variable() - &selected_value, - ); - } - - Ok(selector) -} - -/// Converts the given public and private inputs into a `CircomInput` struct. 
-pub fn into_circom_input( - public_input: &[F], - private_input: &HashMap, -) -> CircomInput { - let decimal_stringified_input: Vec = public_input - .iter() - .map(|x| BigInt::from_bytes_le(num_bigint::Sign::Plus, &x.to_bytes()).to_str_radix(10)) - .collect(); - - CircomInput { step_in: decimal_stringified_input, extra: private_input.clone() } -} - -/// Converts the given public and private inputs into a JSON string. -pub fn into_input_json( - public_input: &[F], - private_input: &HashMap, -) -> Result { - let decimal_stringified_input: Vec = public_input - .iter() - .map(|x| BigInt::from_bytes_le(num_bigint::Sign::Plus, &x.to_bytes()).to_str_radix(10)) - .collect(); - - let input = CircomInput { step_in: decimal_stringified_input, extra: private_input.clone() }; - Ok(serde_json::to_string(&input)?) -} - -/// Remaps the given input JSON string into a vector of tuples containing the key and value. -pub fn remap_inputs(input_json: &str) -> Result)>, ProofError> { - let circom_input: CircomInput = serde_json::from_str(input_json)?; - let mut remapped = vec![]; - - let step_in_values: Result, _> = circom_input - .step_in - .into_iter() - .map(|s| BigInt::from_str(&s).map_err(ProofError::from)) - .collect(); - remapped.push(("step_in".to_string(), step_in_values?)); - - for (k, v) in circom_input.extra { - let val = v - .as_array() - .ok_or_else(|| ProofError::Other(format!("Expected array for key {}", k)))? 
- .iter() - .map(|x| { - x.as_str() - .ok_or_else(|| ProofError::Other(format!("Expected string for key {}", k))) - .and_then(|s| BigInt::from_str(s).map_err(ProofError::from)) - }) - .collect::, ProofError>>()?; - remapped.push((k, val)); - } - - Ok(remapped) -} diff --git a/frontend/src/tests/examples/circuit_data/add_external.bin b/frontend/tests/examples/circuit_data/add_external.bin similarity index 100% rename from frontend/src/tests/examples/circuit_data/add_external.bin rename to frontend/tests/examples/circuit_data/add_external.bin diff --git a/frontend/src/tests/examples/circuit_data/add_external.circom b/frontend/tests/examples/circuit_data/add_external.circom similarity index 100% rename from frontend/src/tests/examples/circuit_data/add_external.circom rename to frontend/tests/examples/circuit_data/add_external.circom diff --git a/frontend/src/tests/examples/circuit_data/add_external.r1cs b/frontend/tests/examples/circuit_data/add_external.r1cs similarity index 100% rename from frontend/src/tests/examples/circuit_data/add_external.r1cs rename to frontend/tests/examples/circuit_data/add_external.r1cs diff --git a/frontend/src/tests/examples/circuit_data/square_zeroth.bin b/frontend/tests/examples/circuit_data/square_zeroth.bin similarity index 100% rename from frontend/src/tests/examples/circuit_data/square_zeroth.bin rename to frontend/tests/examples/circuit_data/square_zeroth.bin diff --git a/frontend/src/tests/examples/circuit_data/square_zeroth.circom b/frontend/tests/examples/circuit_data/square_zeroth.circom similarity index 100% rename from frontend/src/tests/examples/circuit_data/square_zeroth.circom rename to frontend/tests/examples/circuit_data/square_zeroth.circom diff --git a/frontend/src/tests/examples/circuit_data/square_zeroth.r1cs b/frontend/tests/examples/circuit_data/square_zeroth.r1cs similarity index 100% rename from frontend/src/tests/examples/circuit_data/square_zeroth.r1cs rename to 
frontend/tests/examples/circuit_data/square_zeroth.r1cs diff --git a/frontend/src/tests/examples/circuit_data/swap_memory.bin b/frontend/tests/examples/circuit_data/swap_memory.bin similarity index 100% rename from frontend/src/tests/examples/circuit_data/swap_memory.bin rename to frontend/tests/examples/circuit_data/swap_memory.bin diff --git a/frontend/src/tests/examples/circuit_data/swap_memory.circom b/frontend/tests/examples/circuit_data/swap_memory.circom similarity index 100% rename from frontend/src/tests/examples/circuit_data/swap_memory.circom rename to frontend/tests/examples/circuit_data/swap_memory.circom diff --git a/frontend/src/tests/examples/circuit_data/swap_memory.r1cs b/frontend/tests/examples/circuit_data/swap_memory.r1cs similarity index 100% rename from frontend/src/tests/examples/circuit_data/swap_memory.r1cs rename to frontend/tests/examples/circuit_data/swap_memory.r1cs diff --git a/frontend/src/tests/inputs.rs b/frontend/tests/inputs.rs similarity index 100% rename from frontend/src/tests/inputs.rs rename to frontend/tests/inputs.rs diff --git a/frontend/src/tests/mod.rs b/frontend/tests/mod.rs similarity index 100% rename from frontend/src/tests/mod.rs rename to frontend/tests/mod.rs diff --git a/frontend/src/tests/witnesscalc.rs b/frontend/tests/witnesscalc.rs similarity index 100% rename from frontend/src/tests/witnesscalc.rs rename to frontend/tests/witnesscalc.rs From a441dd015621d4a0a00abd7351c00065e091c636 Mon Sep 17 00:00:00 2001 From: Colin Roberts Date: Fri, 28 Feb 2025 15:53:25 -0700 Subject: [PATCH 09/51] recompiles again --- Cargo.lock | 1236 +---------------- examples/add_external/src/main.nr | 4 +- examples/square_zeroth/src/main.nr | 4 +- examples/swap_memory/src/main.nr | 4 +- frontend/Cargo.toml | 3 - frontend/src/{errors.rs => error.rs} | 31 +- frontend/src/lib.rs | 10 +- frontend/src/{noir/mod.rs => noir.rs} | 58 +- frontend/src/noir/tests.rs | 239 ---- frontend/src/program/data.rs | 1211 ++++++++-------- 
frontend/src/program/mod.rs | 601 +++----- frontend/src/setup.rs | 57 +- .../examples/circuit_data/add_external.bin | Bin 114 -> 0 bytes .../examples/circuit_data/add_external.circom | 13 - .../examples/circuit_data/add_external.r1cs | Bin 152 -> 0 bytes .../examples/circuit_data/square_zeroth.bin | Bin 75 -> 0 bytes .../circuit_data/square_zeroth.circom | 12 - .../examples/circuit_data/square_zeroth.r1cs | Bin 356 -> 0 bytes .../examples/circuit_data/swap_memory.bin | Bin 68 -> 0 bytes .../examples/circuit_data/swap_memory.circom | 12 - .../examples/circuit_data/swap_memory.r1cs | Bin 320 -> 0 bytes frontend/tests/inputs.rs | 8 - frontend/tests/mod.rs | 94 +- frontend/tests/witnesscalc.rs | 152 -- 24 files changed, 1012 insertions(+), 2737 deletions(-) rename frontend/src/{errors.rs => error.rs} (72%) rename frontend/src/{noir/mod.rs => noir.rs} (80%) delete mode 100644 frontend/src/noir/tests.rs delete mode 100644 frontend/tests/examples/circuit_data/add_external.bin delete mode 100644 frontend/tests/examples/circuit_data/add_external.circom delete mode 100644 frontend/tests/examples/circuit_data/add_external.r1cs delete mode 100644 frontend/tests/examples/circuit_data/square_zeroth.bin delete mode 100644 frontend/tests/examples/circuit_data/square_zeroth.circom delete mode 100644 frontend/tests/examples/circuit_data/square_zeroth.r1cs delete mode 100644 frontend/tests/examples/circuit_data/swap_memory.bin delete mode 100644 frontend/tests/examples/circuit_data/swap_memory.circom delete mode 100644 frontend/tests/examples/circuit_data/swap_memory.r1cs delete mode 100644 frontend/tests/inputs.rs delete mode 100644 frontend/tests/witnesscalc.rs diff --git a/Cargo.lock b/Cargo.lock index 9745808..154bc40 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -24,8 +24,8 @@ name = "acir_field" version = "1.0.0-beta.2" source = "git+https://github.com/noir-lang/noir?rev=v1.0.0-beta.2#1a2a08cbcb68646ff1aaef383cfc1798933c1355" dependencies = [ - "ark-bn254 0.5.0", - "ark-ff 
0.5.0", + "ark-bn254", + "ark-ff", "cfg-if", "hex", "num-bigint 0.4.6", @@ -41,7 +41,7 @@ dependencies = [ "acvm_blackbox_solver", "brillig_vm", "fxhash", - "indexmap 1.9.3", + "indexmap", "serde", "thiserror", "tracing", @@ -117,31 +117,12 @@ version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" -[[package]] -name = "alloy-rlp" -version = "0.3.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6c1d995bff8d011f7cd6c81820d51825e6e06d6db73914c1630ecf544d83d6" -dependencies = [ - "arrayvec", - "bytes", -] - [[package]] name = "anes" version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" -[[package]] -name = "ansi_term" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" -dependencies = [ - "winapi", -] - [[package]] name = "anstyle" version = "1.0.8" @@ -154,43 +135,15 @@ version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6" -[[package]] -name = "ark-bn254" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a22f4561524cd949590d78d7d4c5df8f592430d221f7f3c9497bbafd8972120f" -dependencies = [ - "ark-ec 0.4.2", - "ark-ff 0.4.2", - "ark-std 0.4.0", -] - [[package]] name = "ark-bn254" version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d69eab57e8d2663efa5c63135b2af4f396d66424f88954c21104125ab6b3e6bc" dependencies = [ - "ark-ec 0.5.0", - "ark-ff 0.5.0", - "ark-std 0.5.0", -] - -[[package]] -name = "ark-ec" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"defd9a439d56ac24968cca0571f598a61bc8c55f71d50a89cda591cb750670ba" -dependencies = [ - "ark-ff 0.4.2", - "ark-poly 0.4.2", - "ark-serialize 0.4.2", - "ark-std 0.4.0", - "derivative", - "hashbrown 0.13.2", - "itertools 0.10.5", - "num-traits", - "zeroize", + "ark-ec", + "ark-ff", + "ark-std", ] [[package]] @@ -200,10 +153,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43d68f2d516162846c1238e755a7c4d131b892b70cc70c471a8e3ca3ed818fce" dependencies = [ "ahash", - "ark-ff 0.5.0", - "ark-poly 0.5.0", - "ark-serialize 0.5.0", - "ark-std 0.5.0", + "ark-ff", + "ark-poly", + "ark-serialize", + "ark-std", "educe", "fnv", "hashbrown 0.15.2", @@ -214,56 +167,18 @@ dependencies = [ "zeroize", ] -[[package]] -name = "ark-ff" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b3235cc41ee7a12aaaf2c575a2ad7b46713a8a50bda2fc3b003a04845c05dd6" -dependencies = [ - "ark-ff-asm 0.3.0", - "ark-ff-macros 0.3.0", - "ark-serialize 0.3.0", - "ark-std 0.3.0", - "derivative", - "num-bigint 0.4.6", - "num-traits", - "paste", - "rustc_version 0.3.3", - "zeroize", -] - -[[package]] -name = "ark-ff" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec847af850f44ad29048935519032c33da8aa03340876d351dfab5660d2966ba" -dependencies = [ - "ark-ff-asm 0.4.2", - "ark-ff-macros 0.4.2", - "ark-serialize 0.4.2", - "ark-std 0.4.0", - "derivative", - "digest 0.10.7", - "itertools 0.10.5", - "num-bigint 0.4.6", - "num-traits", - "paste", - "rustc_version 0.4.1", - "zeroize", -] - [[package]] name = "ark-ff" version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a177aba0ed1e0fbb62aa9f6d0502e9b46dad8c2eab04c14258a1212d2557ea70" dependencies = [ - "ark-ff-asm 0.5.0", - "ark-ff-macros 0.5.0", - "ark-serialize 0.5.0", - "ark-std 0.5.0", + "ark-ff-asm", + "ark-ff-macros", + "ark-serialize", + "ark-std", "arrayvec", - "digest 0.10.7", + "digest", 
"educe", "itertools 0.13.0", "num-bigint 0.4.6", @@ -272,26 +187,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "ark-ff-asm" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db02d390bf6643fb404d3d22d31aee1c4bc4459600aef9113833d17e786c6e44" -dependencies = [ - "quote", - "syn 1.0.109", -] - -[[package]] -name = "ark-ff-asm" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ed4aa4fe255d0bc6d79373f7e31d2ea147bcf486cba1be5ba7ea85abdb92348" -dependencies = [ - "quote", - "syn 1.0.109", -] - [[package]] name = "ark-ff-asm" version = "0.5.0" @@ -302,31 +197,6 @@ dependencies = [ "syn 2.0.98", ] -[[package]] -name = "ark-ff-macros" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db2fd794a08ccb318058009eefdf15bcaaaaf6f8161eb3345f907222bac38b20" -dependencies = [ - "num-bigint 0.4.6", - "num-traits", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "ark-ff-macros" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7abe79b0e4288889c4574159ab790824d0033b9fdcb2a112a3182fac2e514565" -dependencies = [ - "num-bigint 0.4.6", - "num-traits", - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "ark-ff-macros" version = "0.5.0" @@ -340,19 +210,6 @@ dependencies = [ "syn 2.0.98", ] -[[package]] -name = "ark-poly" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d320bfc44ee185d899ccbadfa8bc31aab923ce1558716e1997a1e74057fe86bf" -dependencies = [ - "ark-ff 0.4.2", - "ark-serialize 0.4.2", - "ark-std 0.4.0", - "derivative", - "hashbrown 0.13.2", -] - [[package]] name = "ark-poly" version = "0.5.0" @@ -360,60 +217,27 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "579305839da207f02b89cd1679e50e67b4331e2f9294a57693e5051b7703fe27" dependencies = [ "ahash", - "ark-ff 0.5.0", - "ark-serialize 
0.5.0", - "ark-std 0.5.0", + "ark-ff", + "ark-serialize", + "ark-std", "educe", "fnv", "hashbrown 0.15.2", ] -[[package]] -name = "ark-serialize" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d6c2b318ee6e10f8c2853e73a83adc0ccb88995aa978d8a3408d492ab2ee671" -dependencies = [ - "ark-std 0.3.0", - "digest 0.9.0", -] - -[[package]] -name = "ark-serialize" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adb7b85a02b83d2f22f89bd5cac66c9c89474240cb6207cb1efc16d098e822a5" -dependencies = [ - "ark-serialize-derive 0.4.2", - "ark-std 0.4.0", - "digest 0.10.7", - "num-bigint 0.4.6", -] - [[package]] name = "ark-serialize" version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f4d068aaf107ebcd7dfb52bc748f8030e0fc930ac8e360146ca54c1203088f7" dependencies = [ - "ark-serialize-derive 0.5.0", - "ark-std 0.5.0", + "ark-serialize-derive", + "ark-std", "arrayvec", - "digest 0.10.7", + "digest", "num-bigint 0.4.6", ] -[[package]] -name = "ark-serialize-derive" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae3281bc6d0fd7e549af32b52511e1302185bd688fd3359fa36423346ff682ea" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "ark-serialize-derive" version = "0.5.0" @@ -425,26 +249,6 @@ dependencies = [ "syn 2.0.98", ] -[[package]] -name = "ark-std" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1df2c09229cbc5a028b1d70e00fdb2acee28b1055dfb5ca73eea49c5a25c4e7c" -dependencies = [ - "num-traits", - "rand 0.8.5", -] - -[[package]] -name = "ark-std" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94893f1e0c6eeab764ade8dc4c0db24caf4fe7cbbaafc0eba0a9030f447b5185" -dependencies = [ - "num-traits", - "rand 0.8.5", -] - [[package]] name = "ark-std" version = "0.5.0" @@ 
-467,35 +271,6 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" -[[package]] -name = "ascii-canvas" -version = "3.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8824ecca2e851cec16968d54a01dd372ef8f95b244fb84b84e70128be347c3c6" -dependencies = [ - "term", -] - -[[package]] -name = "auto_impl" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e12882f59de5360c748c4cbf569a042d5fb0eb515f7bea9c1f470b47f6ffbd73" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.98", -] - -[[package]] -name = "autocfg" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dde43e75fd43e8a1bf86103336bc699aa8d17ad1be60c76c0bdfd4828e19b78" -dependencies = [ - "autocfg 1.4.0", -] - [[package]] name = "autocfg" version = "1.4.0" @@ -568,26 +343,6 @@ dependencies = [ "serde", ] -[[package]] -name = "bindgen" -version = "0.70.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f49d8fed880d473ea71efb9bf597651e77201bdd4893efe54c9e5d65ae04ce6f" -dependencies = [ - "bitflags", - "cexpr", - "clang-sys", - "itertools 0.13.0", - "log", - "prettyplease", - "proc-macro2", - "quote", - "regex", - "rustc-hash", - "shlex", - "syn 2.0.98", -] - [[package]] name = "bit-set" version = "0.5.3" @@ -627,7 +382,7 @@ version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" dependencies = [ - "digest 0.10.7", + "digest", ] [[package]] @@ -762,15 +517,6 @@ dependencies = [ "shlex", ] -[[package]] -name = "cexpr" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" -dependencies = [ - "nom", -] - [[package]] name = "cfg-if" version 
= "1.0.0" @@ -804,56 +550,6 @@ dependencies = [ "half", ] -[[package]] -name = "circom_algebra" -version = "2.1.4" -source = "git+https://github.com/olomix/circom.git?branch=master#22de6cb3200ed673c53201d9007ecdf21013783a" -dependencies = [ - "constant_tracking", - "num-bigint-dig", - "num-traits", -] - -[[package]] -name = "circom_witnesscalc" -version = "0.2.0" -source = "git+https://github.com/pluto/circom-witnesscalc#722a3936999a8dd894f53cd498b019b65c90ac10" -dependencies = [ - "ark-bn254 0.4.0", - "ark-ff 0.4.2", - "ark-serialize 0.4.2", - "bindgen", - "byteorder", - "code_producers", - "compiler", - "constraint_generation", - "hex", - "lazy_static", - "libc", - "num-bigint 0.4.6", - "parser", - "program_structure", - "prost", - "prost-build", - "rand 0.8.5", - "ruint", - "serde", - "serde_json", - "type_analysis", - "wtns-file", -] - -[[package]] -name = "clang-sys" -version = "1.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4" -dependencies = [ - "glob", - "libc", - "libloading", -] - [[package]] name = "clap" version = "4.5.19" @@ -891,16 +587,16 @@ dependencies = [ "byteorder", "cfg-if", "criterion", - "digest 0.10.7", + "digest", "expect-test", "ff 0.13.0", "flate2", "generic-array 1.1.0", - "getrandom 0.2.15", + "getrandom", "group 0.13.0", "grumpkin-msm", "halo2curves", - "handlebars 5.1.2", + "handlebars", "hex", "itertools 0.13.0", "neptune", @@ -911,7 +607,7 @@ dependencies = [ "pairing", "proptest", "rand 0.8.5", - "rand_chacha 0.3.1", + "rand_chacha", "rand_core 0.6.4", "rayon", "rayon-scan", @@ -935,11 +631,10 @@ name = "client-side-prover-frontend" version = "0.1.0" dependencies = [ "acvm", - "ark-bn254 0.5.0", + "ark-bn254", "bellpepper-core", "bincode", "byteorder", - "circom_witnesscalc", "client-side-prover", "ff 0.13.0", "halo2curves", @@ -959,48 +654,6 @@ dependencies = [ "wasm-bindgen-futures", ] -[[package]] -name = "code_producers" 
-version = "2.1.9" -source = "git+https://github.com/olomix/circom.git?branch=master#22de6cb3200ed673c53201d9007ecdf21013783a" -dependencies = [ - "handlebars 4.5.0", - "lz_fnv", - "num-bigint-dig", - "serde_json", -] - -[[package]] -name = "codespan" -version = "0.9.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ebaf6bb6a863ad6aa3a18729e9710c53d75df03306714d9cc1f7357a00cd789" -dependencies = [ - "codespan-reporting", -] - -[[package]] -name = "codespan-reporting" -version = "0.9.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e0762455306b1ed42bc651ef6a2197aabda5e1d4a43c34d5eab5c1a3634e81d" -dependencies = [ - "termcolor", - "unicode-width", -] - -[[package]] -name = "compiler" -version = "2.1.9" -source = "git+https://github.com/olomix/circom.git?branch=master#22de6cb3200ed673c53201d9007ecdf21013783a" -dependencies = [ - "code_producers", - "constant_tracking", - "num-bigint-dig", - "num-traits", - "program_structure", -] - [[package]] name = "const-oid" version = "0.9.6" @@ -1013,49 +666,6 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7c74b8349d32d297c9134b8c88677813a227df8f779daa29bfc29c183fe3dca6" -[[package]] -name = "constant_tracking" -version = "2.0.0" -source = "git+https://github.com/olomix/circom.git?branch=master#22de6cb3200ed673c53201d9007ecdf21013783a" - -[[package]] -name = "constraint_generation" -version = "2.1.9" -source = "git+https://github.com/olomix/circom.git?branch=master#22de6cb3200ed673c53201d9007ecdf21013783a" -dependencies = [ - "ansi_term", - "circom_algebra", - "compiler", - "constraint_list", - "constraint_writers", - "dag", - "num-bigint-dig", - "num-traits", - "program_structure", -] - -[[package]] -name = "constraint_list" -version = "2.1.8" -source = "git+https://github.com/olomix/circom.git?branch=master#22de6cb3200ed673c53201d9007ecdf21013783a" -dependencies = [ - "circom_algebra", - "constraint_writers", - 
"json", - "num_cpus", - "program_structure", - "threadpool", -] - -[[package]] -name = "constraint_writers" -version = "2.1.8" -source = "git+https://github.com/olomix/circom.git?branch=master#22de6cb3200ed673c53201d9007ecdf21013783a" -dependencies = [ - "circom_algebra", - "json", -] - [[package]] name = "cpufeatures" version = "0.2.14" @@ -1163,18 +773,6 @@ dependencies = [ "typenum", ] -[[package]] -name = "dag" -version = "2.1.8" -source = "git+https://github.com/olomix/circom.git?branch=master#22de6cb3200ed673c53201d9007ecdf21013783a" -dependencies = [ - "circom_algebra", - "constraint_list", - "constraint_writers", - "json", - "program_structure", -] - [[package]] name = "der" version = "0.6.1" @@ -1185,32 +783,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "derivative" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "diff" -version = "0.1.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8" - -[[package]] -name = "digest" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" -dependencies = [ - "generic-array 0.14.7", -] - [[package]] name = "digest" version = "0.10.7" @@ -1222,27 +794,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "dirs-next" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b98cf8ebf19c3d1b223e151f99a4f9f0690dca41414773390fc824184ac833e1" -dependencies = [ - "cfg-if", - "dirs-sys-next", -] - -[[package]] -name = "dirs-sys-next" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4ebda144c4fe02d1f7ea1a7d9641b6fc6b580adcfa024ae48797ecdeb6825b4d" -dependencies = [ - "libc", - "redox_users", - "winapi", -] - [[package]] name = "dissimilar" version = "1.0.9" @@ -1288,7 +839,7 @@ dependencies = [ "base16ct", "crypto-bigint", "der", - "digest 0.10.7", + "digest", "ff 0.12.1", "generic-array 0.14.7", "group 0.12.1", @@ -1299,15 +850,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "ena" -version = "0.14.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d248bdd43ce613d87415282f69b9bb99d947d290b10962dd6c56233312c2ad5" -dependencies = [ - "log", -] - [[package]] name = "enum-ordinalize" version = "4.3.0" @@ -1328,12 +870,6 @@ dependencies = [ "syn 2.0.98", ] -[[package]] -name = "equivalent" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" - [[package]] name = "errno" version = "0.3.9" @@ -1360,28 +896,6 @@ version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6" -[[package]] -name = "fastrlp" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "139834ddba373bbdd213dffe02c8d110508dcf1726c2be27e8d1f7d7e1856418" -dependencies = [ - "arrayvec", - "auto_impl", - "bytes", -] - -[[package]] -name = "fastrlp" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce8dba4714ef14b8274c371879b175aa55b16b30f269663f19d576f380018dc4" -dependencies = [ - "arrayvec", - "auto_impl", - "bytes", -] - [[package]] name = "ff" version = "0.12.1" @@ -1421,30 +935,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "fixed-hash" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "835c052cb0c08c1acf6ffd71c022172e18723949c8282f2b9f27efbc51e64534" -dependencies = [ - "byteorder", - 
"rand 0.8.5", - "rustc-hex", - "static_assertions", -] - -[[package]] -name = "fixedbitset" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" - -[[package]] -name = "fixedbitset" -version = "0.5.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d674e81391d1e1ab681a28d99df07927c6d4aa5b027d7da16ba32d1d21ecd99" - [[package]] name = "flate2" version = "1.0.34" @@ -1501,17 +991,6 @@ dependencies = [ "typenum", ] -[[package]] -name = "getrandom" -version = "0.1.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" -dependencies = [ - "cfg-if", - "libc", - "wasi 0.9.0+wasi-snapshot-preview1", -] - [[package]] name = "getrandom" version = "0.2.15" @@ -1521,7 +1000,7 @@ dependencies = [ "cfg-if", "js-sys", "libc", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi", "wasm-bindgen", ] @@ -1568,11 +1047,11 @@ source = "git+https://github.com/argumentcomputer/grumpkin-msm?branch=dev#414da3 dependencies = [ "blst", "cc", - "getrandom 0.2.15", + "getrandom", "halo2curves", "pasta_curves", "rand 0.8.5", - "rand_chacha 0.3.1", + "rand_chacha", "rayon", "semolina", "sppark", @@ -1614,20 +1093,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "handlebars" -version = "4.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "faa67bab9ff362228eb3d00bd024a4965d8231bbb7921167f0cfa66c6626b225" -dependencies = [ - "log", - "pest", - "pest_derive", - "serde", - "serde_json", - "thiserror", -] - [[package]] name = "handlebars" version = "5.1.2" @@ -1648,15 +1113,6 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" -[[package]] -name = "hashbrown" -version = "0.13.2" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" -dependencies = [ - "ahash", -] - [[package]] name = "hashbrown" version = "0.15.2" @@ -1672,12 +1128,6 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" -[[package]] -name = "heck" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" - [[package]] name = "hermit-abi" version = "0.3.9" @@ -1705,7 +1155,7 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" dependencies = [ - "digest 0.10.7", + "digest", ] [[package]] @@ -1717,46 +1167,16 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "impl-codec" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" -dependencies = [ - "parity-scale-codec", -] - -[[package]] -name = "impl-trait-for-tuples" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.98", -] - [[package]] name = "indexmap" version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ - "autocfg 1.4.0", + "autocfg", "hashbrown 0.12.3", ] -[[package]] -name = "indexmap" -version = "2.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c9c992b02b5b4c94ea26e32fe5bccb7aa7d9f390ab5c1221ff895bc7ea8b652" -dependencies = [ - "equivalent", - "hashbrown 0.15.2", -] - 
[[package]] name = "is-terminal" version = "0.4.13" @@ -1810,12 +1230,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "json" -version = "0.12.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "078e285eafdfb6c4b434e0d31e8cfcb5115b651496faca5749b88fafd4f23bfd" - [[package]] name = "k256" version = "0.11.6" @@ -1837,37 +1251,6 @@ dependencies = [ "cpufeatures", ] -[[package]] -name = "lalrpop" -version = "0.19.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a1cbf952127589f2851ab2046af368fd20645491bb4b376f04b7f94d7a9837b" -dependencies = [ - "ascii-canvas", - "bit-set", - "diff", - "ena", - "is-terminal", - "itertools 0.10.5", - "lalrpop-util", - "petgraph 0.6.5", - "regex", - "regex-syntax 0.6.29", - "string_cache", - "term", - "tiny-keccak", - "unicode-xid", -] - -[[package]] -name = "lalrpop-util" -version = "0.19.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3c48237b9604c5a4702de6b824e02006c3214327564636aef27c1028a8fa0ed" -dependencies = [ - "regex", -] - [[package]] name = "lazy_static" version = "1.5.0" @@ -1889,32 +1272,12 @@ version = "0.2.170" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "875b3680cb2f8f71bdcf9a30f38d48282f5d3c95cbf9b3fa57269bb5d5c06828" -[[package]] -name = "libloading" -version = "0.8.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" -dependencies = [ - "cfg-if", - "windows-targets", -] - [[package]] name = "libm" version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" -[[package]] -name = "libredox" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" -dependencies = [ - "bitflags", - 
"libc", -] - [[package]] name = "linux-raw-sys" version = "0.4.14" @@ -1927,7 +1290,7 @@ version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" dependencies = [ - "autocfg 1.4.0", + "autocfg", "scopeguard", ] @@ -1937,12 +1300,6 @@ version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" -[[package]] -name = "lz_fnv" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bbb1b0dbe51f0976eaa466f4e0bdc11856fe8008aee26f30ccec8de15b28e38" - [[package]] name = "matchers" version = "0.1.0" @@ -1958,12 +1315,6 @@ version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" -[[package]] -name = "minimal-lexical" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" - [[package]] name = "miniz_oxide" version = "0.8.0" @@ -1980,16 +1331,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" dependencies = [ "libc", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi", "windows-sys 0.52.0", ] -[[package]] -name = "multimap" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "defc4c55412d89136f966bbb339008b474350e5e6e78d2714439c386b3137a03" - [[package]] name = "neptune" version = "13.0.0" @@ -2007,22 +1352,6 @@ dependencies = [ "trait-set", ] -[[package]] -name = "new_debug_unreachable" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "650eef8c711430f1a879fdd01d4745a7deea475becfb90269c06775983bbf086" - -[[package]] -name = "nom" -version 
= "7.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" -dependencies = [ - "memchr", - "minimal-lexical", -] - [[package]] name = "nu-ansi-term" version = "0.46.0" @@ -2039,7 +1368,7 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f6f7833f2cbf2360a6cfd58cd41a53aa7a90bd4c202f5b1c7dd2ed73c57b2c3" dependencies = [ - "autocfg 1.4.0", + "autocfg", "num-integer", "num-traits", ] @@ -2056,24 +1385,6 @@ dependencies = [ "serde", ] -[[package]] -name = "num-bigint-dig" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d51546d704f52ef14b3c962b5776e53d5b862e5790e40a350d366c209bd7f7a" -dependencies = [ - "autocfg 0.1.8", - "byteorder", - "lazy_static", - "libm", - "num-integer", - "num-iter", - "num-traits", - "rand 0.7.3", - "serde", - "smallvec", -] - [[package]] name = "num-integer" version = "0.1.46" @@ -2083,24 +1394,13 @@ dependencies = [ "num-traits", ] -[[package]] -name = "num-iter" -version = "0.1.45" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf" -dependencies = [ - "autocfg 1.4.0", - "num-integer", - "num-traits", -] - [[package]] name = "num-traits" version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" dependencies = [ - "autocfg 1.4.0", + "autocfg", "libm", ] @@ -2157,34 +1457,8 @@ name = "pairing" version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81fec4625e73cf41ef4bb6846cafa6d44736525f442ba45e407c4a000a13996f" -dependencies = [ - "group 0.13.0", -] - -[[package]] -name = "parity-scale-codec" -version = "3.6.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"306800abfa29c7f16596b5970a588435e3d5b3149683d00c12b699cc19f895ee" -dependencies = [ - "arrayvec", - "bitvec", - "byte-slice-cast", - "impl-trait-for-tuples", - "parity-scale-codec-derive", - "serde", -] - -[[package]] -name = "parity-scale-codec-derive" -version = "3.6.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d830939c76d294956402033aee57a6da7b438f2294eb94864c37b0569053a42c" -dependencies = [ - "proc-macro-crate", - "proc-macro2", - "quote", - "syn 1.0.109", +dependencies = [ + "group 0.13.0", ] [[package]] @@ -2210,22 +1484,6 @@ dependencies = [ "windows-targets", ] -[[package]] -name = "parser" -version = "2.1.9" -source = "git+https://github.com/olomix/circom.git?branch=master#22de6cb3200ed673c53201d9007ecdf21013783a" -dependencies = [ - "lalrpop", - "lalrpop-util", - "num-bigint-dig", - "num-traits", - "program_structure", - "regex", - "rustc-hex", - "serde", - "serde_derive", -] - [[package]] name = "pasta_curves" version = "0.5.1" @@ -2294,35 +1552,6 @@ dependencies = [ "sha2", ] -[[package]] -name = "petgraph" -version = "0.6.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" -dependencies = [ - "fixedbitset 0.4.2", - "indexmap 2.7.1", -] - -[[package]] -name = "petgraph" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3672b37090dbd86368a4145bc067582552b29c27377cad4e0a306c97f9bd7772" -dependencies = [ - "fixedbitset 0.5.7", - "indexmap 2.7.1", -] - -[[package]] -name = "phf_shared" -version = "0.11.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67eabc2ef2a60eb7faa00097bd1ffdb5bd28e62bf39990626a582201b7a754e5" -dependencies = [ - "siphasher", -] - [[package]] name = "pin-project-lite" version = "0.2.14" @@ -2376,42 +1605,6 @@ dependencies = [ "zerocopy", ] -[[package]] -name = "precomputed-hash" -version = "0.1.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c" - -[[package]] -name = "prettyplease" -version = "0.2.29" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6924ced06e1f7dfe3fa48d57b9f74f55d8915f5036121bef647ef4b204895fac" -dependencies = [ - "proc-macro2", - "syn 2.0.98", -] - -[[package]] -name = "primitive-types" -version = "0.12.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b34d9fd68ae0b74a41b21c03c2f62847aa0ffea044eee893b4c140b37e244e2" -dependencies = [ - "fixed-hash", - "impl-codec", - "uint", -] - -[[package]] -name = "proc-macro-crate" -version = "3.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ecf48c7ca261d60b74ab1a7b20da18bede46776b2e55535cb958eb595c5fa7b" -dependencies = [ - "toml_edit", -] - [[package]] name = "proc-macro2" version = "1.0.93" @@ -2421,21 +1614,6 @@ dependencies = [ "unicode-ident", ] -[[package]] -name = "program_structure" -version = "2.1.9" -source = "git+https://github.com/olomix/circom.git?branch=master#22de6cb3200ed673c53201d9007ecdf21013783a" -dependencies = [ - "codespan", - "codespan-reporting", - "num-bigint-dig", - "num-traits", - "regex", - "rustc-hex", - "serde", - "serde_derive", -] - [[package]] name = "proptest" version = "1.5.0" @@ -2448,7 +1626,7 @@ dependencies = [ "lazy_static", "num-traits", "rand 0.8.5", - "rand_chacha 0.3.1", + "rand_chacha", "rand_xorshift", "regex-syntax 0.8.5", "rusty-fork", @@ -2456,58 +1634,6 @@ dependencies = [ "unarray", ] -[[package]] -name = "prost" -version = "0.13.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2796faa41db3ec313a31f7624d9286acf277b52de526150b7e69f3debf891ee5" -dependencies = [ - "bytes", - "prost-derive", -] - -[[package]] -name = "prost-build" -version = "0.13.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"be769465445e8c1474e9c5dac2018218498557af32d9ed057325ec9a41ae81bf" -dependencies = [ - "heck 0.5.0", - "itertools 0.13.0", - "log", - "multimap", - "once_cell", - "petgraph 0.7.1", - "prettyplease", - "prost", - "prost-types", - "regex", - "syn 2.0.98", - "tempfile", -] - -[[package]] -name = "prost-derive" -version = "0.13.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d" -dependencies = [ - "anyhow", - "itertools 0.13.0", - "proc-macro2", - "quote", - "syn 2.0.98", -] - -[[package]] -name = "prost-types" -version = "0.13.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52c2c1bf36ddb1a1c396b3601a3cec27c2462e45f07c386894ec3ccf5332bd16" -dependencies = [ - "prost", -] - [[package]] name = "quick-error" version = "1.2.3" @@ -2542,19 +1668,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "rand" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" -dependencies = [ - "getrandom 0.1.16", - "libc", - "rand_chacha 0.2.2", - "rand_core 0.5.1", - "rand_hc", -] - [[package]] name = "rand" version = "0.8.5" @@ -2562,20 +1675,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", - "rand_chacha 0.3.1", + "rand_chacha", "rand_core 0.6.4", ] -[[package]] -name = "rand_chacha" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" -dependencies = [ - "ppv-lite86", - "rand_core 0.5.1", -] - [[package]] name = "rand_chacha" version = "0.3.1" @@ -2601,31 +1704,13 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc" -[[package]] -name = "rand_core" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" -dependencies = [ - "getrandom 0.1.16", -] - [[package]] name = "rand_core" version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.15", -] - -[[package]] -name = "rand_hc" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" -dependencies = [ - "rand_core 0.5.1", + "getrandom", ] [[package]] @@ -2684,17 +1769,6 @@ dependencies = [ "bitflags", ] -[[package]] -name = "redox_users" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" -dependencies = [ - "getrandom 0.2.15", - "libredox", - "thiserror", -] - [[package]] name = "ref-cast" version = "1.0.23" @@ -2779,84 +1853,12 @@ dependencies = [ "zeroize", ] -[[package]] -name = "rlp" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb919243f34364b6bd2fc10ef797edbfa75f33c252e7998527479c6d6b47e1ec" -dependencies = [ - "bytes", - "rustc-hex", -] - -[[package]] -name = "ruint" -version = "1.12.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5ef8fb1dd8de3870cb8400d51b4c2023854bbafd5431a3ac7e7317243e22d2f" -dependencies = [ - "alloy-rlp", - "ark-ff 0.3.0", - "ark-ff 0.4.2", - "bytes", - "fastrlp 0.3.1", - "fastrlp 0.4.0", - "num-bigint 0.4.6", - "num-integer", - "num-traits", - "parity-scale-codec", - "primitive-types", - "proptest", - "rand 0.8.5", - "rlp", - "ruint-macro", - "serde", - "valuable", - "zeroize", -] - 
-[[package]] -name = "ruint-macro" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48fd7bd8a6377e15ad9d42a8ec25371b94ddc67abe7c8b9127bec79bebaaae18" - [[package]] name = "rustc-demangle" version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" -[[package]] -name = "rustc-hash" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" - -[[package]] -name = "rustc-hex" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" - -[[package]] -name = "rustc_version" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0dfe2087c51c460008730de8b57e6a320782fbfb312e1f4d520e6c6fae155ee" -dependencies = [ - "semver 0.11.0", -] - -[[package]] -name = "rustc_version" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" -dependencies = [ - "semver 1.0.25", -] - [[package]] name = "rustix" version = "0.38.37" @@ -2933,30 +1935,6 @@ dependencies = [ "glob", ] -[[package]] -name = "semver" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6" -dependencies = [ - "semver-parser", -] - -[[package]] -name = "semver" -version = "1.0.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f79dfe2d285b0488816f30e700a7438c5a73d816b5b7d3ac72fbc48b0d185e03" - -[[package]] -name = "semver-parser" -version = "0.10.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9900206b54a3527fdc7b8a938bffd94a568bac4f4aa8113b209df75a09c0dec2" -dependencies = [ - "pest", -] - [[package]] name = "serde" version = "1.0.210" @@ -3026,7 +2004,7 @@ checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.7", + "digest", ] [[package]] @@ -3035,7 +2013,7 @@ version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" dependencies = [ - "digest 0.10.7", + "digest", "keccak", ] @@ -3069,16 +2047,10 @@ version = "1.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" dependencies = [ - "digest 0.10.7", + "digest", "rand_core 0.6.4", ] -[[package]] -name = "siphasher" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d" - [[package]] name = "smallvec" version = "1.13.2" @@ -3127,18 +2099,6 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" -[[package]] -name = "string_cache" -version = "0.8.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "938d512196766101d333398efde81bc1f37b00cb42c2f8350e5df639f040bbbe" -dependencies = [ - "new_debug_unreachable", - "parking_lot", - "phf_shared", - "precomputed-hash", -] - [[package]] name = "strum" version = "0.24.1" @@ -3151,7 +2111,7 @@ version = "0.24.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" dependencies = [ - "heck 0.4.1", + "heck", "proc-macro2", "quote", "rustversion", @@ -3215,17 +2175,6 @@ dependencies = [ "windows-sys 0.59.0", ] -[[package]] -name = "term" -version = "0.7.0" -source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c59df8ac95d96ff9bede18eb7300b0fda5e5d8d90960e76f8e14ae765eedbf1f" -dependencies = [ - "dirs-next", - "rustversion", - "winapi", -] - [[package]] name = "term_size" version = "0.3.2" @@ -3236,15 +2185,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "termcolor" -version = "1.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" -dependencies = [ - "winapi-util", -] - [[package]] name = "thiserror" version = "1.0.64" @@ -3284,15 +2224,6 @@ dependencies = [ "num_cpus", ] -[[package]] -name = "tiny-keccak" -version = "2.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" -dependencies = [ - "crunchy", -] - [[package]] name = "tinytemplate" version = "1.2.1" @@ -3332,23 +2263,6 @@ dependencies = [ "syn 2.0.98", ] -[[package]] -name = "toml_datetime" -version = "0.6.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" - -[[package]] -name = "toml_edit" -version = "0.22.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17b4795ff5edd201c7cd6dca065ae59972ce77d1b80fa0a84d94950ece7d1474" -dependencies = [ - "indexmap 2.7.1", - "toml_datetime", - "winnow", -] - [[package]] name = "tracing" version = "0.1.40" @@ -3455,16 +2369,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "type_analysis" -version = "2.1.9" -source = "git+https://github.com/olomix/circom.git?branch=master#22de6cb3200ed673c53201d9007ecdf21013783a" -dependencies = [ - "num-bigint-dig", - "num-traits", - "program_structure", -] - [[package]] name = "typenum" version = "1.17.0" @@ -3477,18 +2381,6 @@ version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971" -[[package]] -name = "uint" -version = "0.9.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76f64bba2c53b04fcab63c01a7d7427eadc821e3bc48c34dc9ba29c501164b52" -dependencies = [ - "byteorder", - "crunchy", - "hex", - "static_assertions", -] - [[package]] name = "unarray" version = "0.1.4" @@ -3501,18 +2393,6 @@ version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" -[[package]] -name = "unicode-width" -version = "0.1.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" - -[[package]] -name = "unicode-xid" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" - [[package]] name = "valuable" version = "0.1.0" @@ -3544,12 +2424,6 @@ dependencies = [ "winapi-util", ] -[[package]] -name = "wasi" -version = "0.9.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" - [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" @@ -3758,24 +2632,6 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" -[[package]] -name = "winnow" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e7f4ea97f6f78012141bcdb6a216b2609f0979ada50b20ca5b52dde2eac2bb1" -dependencies = [ - "memchr", -] - -[[package]] -name = "wtns-file" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d3b856452298f68a5879e3901918bac5d753ca9fa4be8a983a37a3d25dabf0a" -dependencies = [ - 
"byteorder", -] - [[package]] name = "wyz" version = "0.5.1" diff --git a/examples/add_external/src/main.nr b/examples/add_external/src/main.nr index 1562a66..f3b1e05 100644 --- a/examples/add_external/src/main.nr +++ b/examples/add_external/src/main.nr @@ -3,6 +3,6 @@ pub fn main( external: [Field; 2], registers: [Field; 2], next_pc: Field, -) -> pub ([Field; 2], Field) { - ([external[0] + registers[0], external[1] + registers[1]], next_pc) +) -> pub (Field, [Field; 2]) { + (next_pc, [external[0] + registers[0], external[1] + registers[1]]) } diff --git a/examples/square_zeroth/src/main.nr b/examples/square_zeroth/src/main.nr index 8934167..41171cf 100644 --- a/examples/square_zeroth/src/main.nr +++ b/examples/square_zeroth/src/main.nr @@ -1,4 +1,4 @@ /// Square only the first register. -pub fn main(registers: [Field; 2], next_pc: Field) -> pub ([Field; 2], Field) { - ([registers[0] * registers[0], registers[1]], next_pc) +pub fn main(registers: [Field; 2], next_pc: Field) -> pub (Field, [Field; 2]) { + (next_pc, [registers[0] * registers[0], registers[1]]) } diff --git a/examples/swap_memory/src/main.nr b/examples/swap_memory/src/main.nr index d262ea7..592af5c 100644 --- a/examples/swap_memory/src/main.nr +++ b/examples/swap_memory/src/main.nr @@ -1,4 +1,4 @@ /// Swap the two registers. -pub fn main(registers: [Field; 2], next_pc: Field) -> pub ([Field; 2], Field) { - ([registers[1], registers[0]], next_pc) +pub fn main(registers: [Field; 2], next_pc: Field) -> pub (Field, [Field; 2]) { + (next_pc, [registers[1], registers[0]]) } diff --git a/frontend/Cargo.toml b/frontend/Cargo.toml index a641383..a9c152c 100644 --- a/frontend/Cargo.toml +++ b/frontend/Cargo.toml @@ -31,9 +31,6 @@ wasm-bindgen ="0.2.87" js-sys ="0.3.64" wasm-bindgen-futures="0.4.37" -[target.'cfg(not(target_arch = "wasm32"))'.dependencies] -circom_witnesscalc={ git="https://github.com/pluto/circom-witnesscalc" } # Fork is needed apparently?? 
- [dev-dependencies] tracing-test="0.2.5" tempdir ="0.3.7" diff --git a/frontend/src/errors.rs b/frontend/src/error.rs similarity index 72% rename from frontend/src/errors.rs rename to frontend/src/error.rs index 1f69d24..b4b8c6c 100644 --- a/frontend/src/errors.rs +++ b/frontend/src/error.rs @@ -18,30 +18,6 @@ //! - `Bincode`: Represents a Bincode serialization or deserialization error. use thiserror::Error; -#[cfg(not(target_arch = "wasm32"))] -#[derive(Debug, Error)] -/// Wrapper for circom_witnesscalc::Error since it doesn't implement display -pub enum WitnessCalcError { - /// The error is a circom_witnesscalc::Error - Circom(circom_witnesscalc::Error), -} - -#[cfg(not(target_arch = "wasm32"))] -impl std::fmt::Display for WitnessCalcError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{:?}", self) } -} - -#[cfg(not(target_arch = "wasm32"))] -impl From for ProofError { - fn from(err: circom_witnesscalc::Error) -> ProofError { - ProofError::WitnessCalc(WitnessCalcError::Circom(err)) - } -} - -impl From> for ProofError { - fn from(err: Box) -> ProofError { ProofError::Bincode(*err) } -} - /// Represents the various error conditions that can occur within the `proofs` /// crate. 
#[derive(Debug, Error)] @@ -70,18 +46,13 @@ pub enum ProofError { #[error(transparent)] Parse(#[from] num_bigint::ParseBigIntError), - /// The error is a WitnessCalcError - #[cfg(not(target_arch = "wasm32"))] - #[error(transparent)] - WitnessCalc(#[from] WitnessCalcError), - /// The error is a missing header section #[error("Missing header section")] MissingSection, /// The error is a bincode::ErrorKind #[error(transparent)] - Bincode(#[from] bincode::ErrorKind), + Bincode(#[from] Box), /// The error is a client_side_prover::supernova::error::SuperNovaError #[error(transparent)] diff --git a/frontend/src/lib.rs b/frontend/src/lib.rs index b40d742..19b0723 100644 --- a/frontend/src/lib.rs +++ b/frontend/src/lib.rs @@ -35,7 +35,6 @@ use std::{collections::HashMap, path::PathBuf, str::FromStr}; -use circom::CircomCircuit; use client_side_prover::{ provider::GrumpkinEngine, spartan::batched::BatchedRelaxedR1CSSNARK, @@ -47,18 +46,13 @@ use serde::{Deserialize, Serialize}; use serde_json::Value; use tracing::{debug, error, info}; -use crate::{ - errors::ProofError, - program::data::{InstanceParams, Online, R1CSType, UninitializedSetup, WitnessGeneratorType}, -}; +use crate::error::ProofError; -pub mod circom; -pub mod errors; +pub mod error; pub mod noir; pub mod program; pub mod proof; pub mod setup; -#[cfg(test)] pub(crate) mod tests; /// Represents the first elliptic curve engine used in the proof system. 
pub type E1 = client_side_prover::provider::Bn256EngineKZG; diff --git a/frontend/src/noir/mod.rs b/frontend/src/noir.rs similarity index 80% rename from frontend/src/noir/mod.rs rename to frontend/src/noir.rs index 9eefa9d..66f5d22 100644 --- a/frontend/src/noir/mod.rs +++ b/frontend/src/noir.rs @@ -12,12 +12,12 @@ use ark_bn254::Fr; use bellpepper_core::{ num::AllocatedNum, ConstraintSystem, Index, LinearCombination, SynthesisError, Variable, }; +use client_side_prover::supernova::StepCircuit; use ff::PrimeField; use tracing::trace; use super::*; - -#[cfg(test)] mod tests; +use crate::program::SwitchboardWitness; // TODO: If we deserialize more here and get metadata, we could more easily look at witnesses, etc. // Especially if we want to output a constraint to the PC. Using the abi would be handy for @@ -29,7 +29,8 @@ pub struct NoirProgram { deserialize_with = "Program::deserialize_program_base64" )] pub bytecode: Program>, - pub witness: Option>>, + pub witness: Option, + pub index: usize, // TODO: To make this more efficient, we could just store an option of the `&mut CS` inside of // here so we don't actually need to rebuild it always, though the enforcement for the public // inputs is tougher @@ -46,18 +47,28 @@ impl NoirProgram { &self.bytecode.unconstrained_functions } - pub fn set_private_inputs(&mut self, inputs: Vec>) { self.witness = Some(inputs); } + pub fn set_inputs(&mut self, switchboard_witness: SwitchboardWitness) { + self.witness = Some(switchboard_witness); + } +} + +impl StepCircuit> for NoirProgram { + // NOTE: +1 for the PC + fn arity(&self) -> usize { self.arity() + 1 } + + fn circuit_index(&self) -> usize { self.index } // TODO: we now need to shift this to use the `z` values as the sole public inputs, the struct // should only hold witness // TODO: We should check if the constraints for z are actually done properly // tell clippy to shut up #[allow(clippy::too_many_lines)] - pub fn vanilla_synthesize>>( + fn synthesize>>( &self, cs: &mut 
CS, + pc: Option<&AllocatedNum>>, z: &[AllocatedNum>], - ) -> Result>>, SynthesisError> { + ) -> Result<(Option>>, Vec>>), SynthesisError> { dbg!(z); let mut acvm = if self.witness.is_some() { Some(ACVM::new( @@ -105,7 +116,7 @@ impl NoirProgram { // Set up private inputs self.circuit().private_parameters.iter().for_each(|witness| { let f = self.witness.as_ref().map(|inputs| { - let f = convert_to_acir_field(inputs[witness.as_usize()]); + let f = convert_to_acir_field(inputs.witness[witness.as_usize()]); acvm.as_mut().unwrap().overwrite_witness(*witness, f); f }); @@ -193,8 +204,23 @@ impl NoirProgram { z_out.push(allocated_vars.get(ret).unwrap().clone()); } - Ok(dbg!(z_out)) + // TODO: fix the pc + Ok((z_out.last().cloned(), z_out)) } + // TODO: fix the pc + // fn synthesize>>( + // &self, + // cs: &mut CS, + // pc: Option<&AllocatedNum>>, + // z: &[AllocatedNum>], + // ) -> Result<(Option>>, Vec>>), SynthesisError> { + // let rom_index = &z[self.arity()]; // jump to where we pushed pc data into CS + // let allocated_rom = &z[self.arity() + 1..]; // jump to where we pushed rom data into C + // let mut circuit_constraints = self.vanilla_synthesize(cs, z)?; + // circuit_constraints.push(rom_index_next); + // circuit_constraints.extend(z[self.arity() + 1..].iter().cloned()); + // Ok((Some(pc_next), circuit_constraints)) + // } } fn convert_to_halo2_field(f: GenericFieldElement) -> F { @@ -210,3 +236,19 @@ fn convert_to_acir_field(f: F) -> GenericFieldElement { bytes.reverse(); GenericFieldElement::from_be_bytes_reduce(&bytes) } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_conversions() { + let f = F::::from(5); + let acir_f = convert_to_acir_field(f); + assert_eq!(acir_f, GenericFieldElement::from_repr(Fr::from(5))); + + let f = GenericFieldElement::from_repr(Fr::from(3)); + let halo2_f = convert_to_halo2_field(f); + assert_eq!(halo2_f, F::::from(3)); + } +} diff --git a/frontend/src/noir/tests.rs b/frontend/src/noir/tests.rs deleted file mode 
100644 index e575543..0000000 --- a/frontend/src/noir/tests.rs +++ /dev/null @@ -1,239 +0,0 @@ -use std::path::Path; - -use client_side_prover::{ - supernova::{NonUniformCircuit, RecursiveSNARK, StepCircuit}, - traits::snark::default_ck_hint, -}; -use tracing::trace; -use tracing_test::traced_test; - -use super::*; - -const ADD_EXTERNAL: &[u8] = include_bytes!("../../examples/noir_circuit_data/add_external.json"); -const SQUARE_ZEROTH: &[u8] = include_bytes!("../../examples/noir_circuit_data/square_zeroth.json"); -const SWAP_MEMORY: &[u8] = include_bytes!("../../examples/noir_circuit_data/swap_memory.json"); - -#[derive(Debug, Clone)] -pub struct NoirMemory { - // TODO: Using a BTreeSet here would perhaps be preferable, or just some kind of set that checks - // over circuit indices - pub circuits: Vec, - // TODO: I really think the ROM can just be removed and we can clean this up, but leaving it for - // now is a bit easier - pub rom: Vec, - pub public_input: Vec>, -} - -#[derive(Clone, Debug)] -pub struct NoirRomCircuit { - pub circuit: NoirProgram, - // TODO: It would be nice to have the circuit index automatically be used in the memory, but - // perhaps we don't even need memory - pub circuit_index: usize, - // TODO: Not having ROM size here would be nice, but mayabe we don't even need ROM - pub rom_size: usize, -} - -impl NonUniformCircuit for NoirMemory { - type C1 = NoirRomCircuit; - type C2 = TrivialCircuit>; - - fn num_circuits(&self) -> usize { self.circuits.len() } - - fn primary_circuit(&self, circuit_index: usize) -> Self::C1 { - self.circuits[circuit_index].clone() - } - - fn secondary_circuit(&self) -> Self::C2 { TrivialCircuit::default() } - - // Use the initial input to set this - fn initial_circuit_index(&self) -> usize { self.rom[0] as usize } -} - -impl StepCircuit> for NoirRomCircuit { - fn arity(&self) -> usize { self.circuit.arity() + 1 + self.rom_size } - - fn circuit_index(&self) -> usize { self.circuit_index } - - fn synthesize>>( - &self, 
- cs: &mut CS, - pc: Option<&AllocatedNum>>, - z: &[AllocatedNum>], - ) -> Result<(Option>>, Vec>>), SynthesisError> { - let rom_index = &z[self.circuit.arity()]; // jump to where we pushed pc data into CS - let allocated_rom = &z[self.circuit.arity() + 1..]; // jump to where we pushed rom data into C - let (rom_index_next, pc_next) = utils::next_rom_index_and_pc( - &mut cs.namespace(|| "next and rom_index and pc"), - rom_index, - allocated_rom, - pc.ok_or(SynthesisError::AssignmentMissing)?, - )?; - let mut circuit_constraints = self.circuit.vanilla_synthesize(cs, z)?; - circuit_constraints.push(rom_index_next); - circuit_constraints.extend(z[self.circuit.arity() + 1..].iter().cloned()); - Ok((Some(pc_next), circuit_constraints)) - } -} - -pub fn run(memory: &NoirMemory) -> Result, ProofError> { - info!("Starting SuperNova program..."); - - info!("Setting up PublicParams..."); - // TODO: This is stupid to do, but I need to get around the original setting of the witness. - // Having separate setup is the way (we already know this) - let mut memory_clone = memory.clone(); - memory_clone.circuits.iter_mut().for_each(|circ| circ.circuit.witness = None); - let public_params = PublicParams::setup(&memory_clone, &*default_ck_hint(), &*default_ck_hint()); - - let z0_primary = &memory.public_input; - let z0_secondary = &[F::::ZERO]; - - let mut recursive_snark_option = None; - - let time = std::time::Instant::now(); - for (idx, &op_code) in memory.rom.iter().enumerate() { - info!("Step {} of ROM", idx); - debug!("Opcode = {:?}", op_code); - - let circuit_primary = memory.primary_circuit(op_code as usize); - let circuit_secondary = memory.secondary_circuit(); - - let mut recursive_snark = recursive_snark_option.unwrap_or_else(|| { - RecursiveSNARK::new( - &public_params, - memory, - &circuit_primary, - &circuit_secondary, - z0_primary, - z0_secondary, - ) - })?; - - info!("Proving single step..."); - recursive_snark.prove_step(&public_params, &circuit_primary, 
&circuit_secondary)?; - info!("Done proving single step..."); - - // TODO: For some reason this is failing - // info!("Verifying single step..."); - // recursive_snark.verify(&public_params, recursive_snark.z0_primary(), z0_secondary)?; - // info!("Single step verification done"); - - recursive_snark_option = Some(Ok(recursive_snark)); - } - // Note, this unwrap cannot fail - let recursive_snark = recursive_snark_option.unwrap(); - trace!("Recursive loop of `program::run()` elapsed: {:?}", time.elapsed()); - - Ok(recursive_snark?) -} - -// `fold.json` is: -// pub fn main(x0: Field, w: pub [Field;2]) -> pub [Field;2] { -// [x0 * w[0] + w[1] + 1, (x0 + 3) * w[1] + w[0]] -// } -fn noir_fold() -> NoirProgram { - let json_path = Path::new("./mock").join(format!("fold.json")); - let noir_json = std::fs::read(&json_path).unwrap(); - - NoirProgram::new(&noir_json) -} - -#[test] -fn test_conversions() { - let f = F::::from(5); - let acir_f = convert_to_acir_field(f); - assert_eq!(acir_f, GenericFieldElement::from_repr(Fr::from(5))); - - let f = GenericFieldElement::from_repr(Fr::from(3)); - let halo2_f = convert_to_halo2_field(f); - assert_eq!(halo2_f, F::::from(3)); -} - -#[test] -#[traced_test] -fn test_mock_noir_ivc() { - let mut circuit = noir_fold(); - circuit.set_private_inputs(vec![F::::from(3)]); - - let rom_circuit = NoirRomCircuit { circuit, circuit_index: 0, rom_size: 2 }; - - let memory = NoirMemory { - circuits: vec![rom_circuit], - rom: vec![0, 0], - public_input: vec![ - F::::from(1), // Actual input - F::::from(2), // Actual input - F::::from(0), // PC - F::::from(0), // ROM - F::::from(0), // ROM - ], - }; - - let snark = run(&memory).unwrap(); - let zi = snark.zi_primary(); - dbg!(zi); - // First fold: - // step_out[0] == 3 * 1 + 2 + 1 == 6 - // step_out[1] == (3 + 3) * 2 + 1 == 13 - // Second fold: - // step_out[0] == 3 * 6 + 13 + 1 == 32 - // step_out[1] == (3 + 3) * 13 + 6 == 84 - assert_eq!(zi[0], F::::from(32)); - assert_eq!(zi[1], F::::from(84)); - 
assert_eq!(zi[2], F::::from(2)); - assert_eq!(zi[3], F::::from(0)); - assert_eq!(zi[4], F::::from(0)); -} - -#[test] -#[traced_test] -fn test_mock_noir_nivc() { - let mut add_external = NoirProgram::new(ADD_EXTERNAL); - add_external.set_private_inputs(vec![F::::from(5), F::::from(7)]); - let add_external = - NoirRomCircuit { circuit: add_external, circuit_index: 0, rom_size: 3 }; - - // TODO: The issue is the private inputs need to be an empty vector or else this isn't computed at - // all. Be careful, this is insanely touchy and I hate that it is this way. - let mut square_zeroth = NoirProgram::new(SQUARE_ZEROTH); - square_zeroth.set_private_inputs(vec![]); - let square_zeroth = - NoirRomCircuit { circuit: square_zeroth, circuit_index: 1, rom_size: 3 }; - let mut swap_memory = NoirProgram::new(SWAP_MEMORY); - swap_memory.set_private_inputs(vec![]); - let swap_memory = - NoirRomCircuit { circuit: swap_memory, circuit_index: 2, rom_size: 3 }; - - let memory = NoirMemory { - circuits: vec![add_external, square_zeroth, swap_memory], - rom: vec![0, 1, 2], - public_input: vec![ - F::::from(1), // Actual input - F::::from(2), // Actual input - F::::from(0), // PC - F::::from(0), // ROM - F::::from(1), // ROM - F::::from(2), // ROM - ], - }; - - let snark = run(&memory).unwrap(); - let zi = snark.zi_primary(); - dbg!(zi); - // First fold: - // step_out[0] == 1 + 5 == 6 - // step_out[1] == 2 + 7 == 9 - // Second fold: - // step_out[0] == 6 ** 2 == 36 - // step_out[1] == 9 - // Third fold: - // step_out[0] == 9 - // step_out[1] == 36 - assert_eq!(zi[0], F::::from(9)); - assert_eq!(zi[1], F::::from(36)); - assert_eq!(zi[2], F::::from(3)); - assert_eq!(zi[3], F::::from(0)); - assert_eq!(zi[4], F::::from(1)); - assert_eq!(zi[5], F::::from(2)); -} diff --git a/frontend/src/program/data.rs b/frontend/src/program/data.rs index 318a481..5a96c35 100644 --- a/frontend/src/program/data.rs +++ b/frontend/src/program/data.rs @@ -1,636 +1,575 @@ -//! # Data Module -//! -//! 
The `data` module contains data structures and types used in the proof system. -//! -//! ## Structs -//! -//! - `FoldInput`: Represents the fold input for any circuit containing signal names and values. -//! - `R1CSType`: Represents the R1CS file type, which can be either a file path or raw bytes. - -use std::{ - fs::{self, File}, - io::Write, - sync::Arc, -}; - -use client_side_prover::{fast_serde::FastSerde, supernova::get_circuit_shapes}; -use serde_json::json; - -use super::*; -use crate::setup::ProvingParams; - -/// Fold input for any circuit containing signals name and vector of values. Inputs are distributed -/// evenly across folds after the ROM is finalised by the prover. -#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct FoldInput { - /// circuit name and consolidated values - #[serde(flatten)] - pub value: HashMap>, -} - -impl FoldInput { - /// splits the inputs evenly across folds as per instruction frequency - pub fn split(&self, freq: usize) -> Vec> { - let mut res = vec![HashMap::new(); freq]; - - for (key, value) in self.value.clone().into_iter() { - debug!("key: {:?}, freq: {}, value_len: {}", key, freq, value.len()); - assert_eq!(value.len() % freq, 0); - let chunk_size = value.len() / freq; - let chunks: Vec> = value.chunks(chunk_size).map(|chunk| chunk.to_vec()).collect(); - for i in 0..freq { - res[i].insert(key.clone(), json!(chunks[i].clone())); - } - } - - res - } -} - -/// R1CS file type -#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] -pub enum R1CSType { - /// file path to the R1CS file - #[serde(rename = "file")] - File(PathBuf), - /// raw bytes of the R1CS file - #[serde(rename = "raw")] - Raw(Vec), -} - -/// Witness generator type -#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] -pub enum WitnessGeneratorType { - /// Browser witness generator - #[serde(rename = "browser")] - Browser, - /// Wasm witness generator - #[serde(rename = "wasm")] - Wasm { - /// Path to the Wasm binary for witness generation - 
path: String, - /// Path where the witness files are stored - wtns_path: String, - }, - /// Path to the witness generator - Path(PathBuf), - /// Raw bytes of the witness generator - #[serde(skip)] - Raw(Vec), // TODO: Would prefer to not alloc here, but i got lifetime hell lol -} - -/// Uninitialized Circuit Setup data, in this configuration the R1CS objects have not -/// been initialized and require a bulky initialize process. -#[derive(Clone, Debug, Default, Deserialize, Serialize, PartialEq)] -pub struct UninitializedSetup { - /// vector of all circuits' r1cs - pub r1cs_types: Vec, - /// vector of all circuits' witness generator - pub witness_generator_types: Vec, - /// NIVC max ROM length - pub max_rom_length: usize, -} - -/// Initialized Circuit Setup data, in this configuration the R1CS objects have been -/// fully loaded for proving. -#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] -pub struct InitializedSetup { - /// vector of all circuits' r1cs - pub r1cs: Vec>, - /// vector of all circuits' witness generator - pub witness_generator_types: Vec, - /// NIVC max ROM length - pub max_rom_length: usize, -} - -// Note, the below are typestates that prevent misuse of our current API. 
-/// Setup status trait -pub trait SetupStatus { - /// Public parameters type - type PublicParams; - /// Setup data type - type SetupData; -} - -/// Online setup status -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] -pub struct Online; -impl SetupStatus for Online { - type PublicParams = Arc>; - type SetupData = Arc; -} - -/// Offline setup status -#[derive(Serialize, Deserialize, Debug, PartialEq)] -pub struct Offline; -impl SetupStatus for Offline { - type PublicParams = Vec; - type SetupData = UninitializedSetup; -} - -/// Witness status trait -pub trait WitnessStatus { - /// Private input for a circuit containing signals name and vector of values - /// - For [`Expanded`] status, it is a vector of private inputs for each fold of a circuit - /// - For [`NotExpanded`] status, it is a tuple of private input and fold input of a circuit - type PrivateInputs; -} - -/// Expanded witness status -pub struct Expanded; -impl WitnessStatus for Expanded { - /// expanded input for each fold of each circuit in the ROM - type PrivateInputs = Vec>; -} - -/// Not expanded witness status -pub struct NotExpanded; -impl WitnessStatus for NotExpanded { - /// Private input and fold input for each circuit in the ROM - type PrivateInputs = (Vec>, HashMap); -} - -/// Auxiliary circuit data required to execute the ROM -#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] -pub struct CircuitData { - /// circuit instruction opcode in [`S::SetupData`] - pub opcode: u64, -} - -/// ROM data type -pub type RomData = HashMap; -/// ROM type -pub type Rom = Vec; -/// NIVC input type -pub type NivcInput = Vec>; - -/// Represents configuration and circuit data required for initializing the proving system. -#[derive(Serialize, Deserialize, Clone, Debug)] -pub struct SetupParams { - /// Public parameters of the proving system. Maps to the client-side prover parameters. 
- pub public_params: S::PublicParams, - // TODO: Refactor this onto the PublicParams object and share the ProvingParams abstraction - /// Setup-specific verification key digest for the primary elliptic curve. - pub vk_digest_primary: ::Scalar, - /// Setup-specific verification key digest for the secondary elliptic curve. - pub vk_digest_secondary: as Engine>::Scalar, - /// Describes R1CS configurations used in proving setup. - pub setup_data: S::SetupData, - /// A mapping between ROM opcodes and circuit configuration. - pub rom_data: RomData, -} - -impl PartialEq for SetupParams -where S::SetupData: PartialEq -{ - fn eq(&self, other: &Self) -> bool { - // TODO: Supernova types are not supporting PartialEq - // self.public_params == other.public_params && - self.vk_digest_primary == other.vk_digest_primary - && self.vk_digest_secondary == other.vk_digest_secondary - && self.setup_data == other.setup_data - && self.rom_data == other.rom_data - } -} - -/// Defines the logic of the proof program. -pub struct ProofParams { - /// Represents sequence of circuit operations (circuit "bytecode") - pub rom: Rom, -} - -/// Contains inputs and state specific to a single proof generation instance. -#[derive(Debug)] -pub struct InstanceParams { - /// Initial public input for NIVC - pub nivc_input: NivcInput, - /// Private inputs for each fold - pub private_inputs: W::PrivateInputs, -} - -impl InstanceParams { - /// Converts proving instance parameters into an expanded form by distributing fold inputs across - /// their corresponding circuit instances in the ROM. - /// - /// This method performs the following steps: - /// 1. Creates a map of circuit names to their positions in the ROM - /// 2. Collects private inputs from each ROM opcode configuration - /// 3. Distributes fold inputs across matching circuit instances based on their labels - /// 4. 
Combines the distributed inputs with existing private inputs for each ROM position - /// - /// # Arguments - /// - /// * `self` - The program data instance to expand - /// - /// # Returns - /// - /// Returns a `Result` containing either: - /// * `Ok(InstanceParams)` - The expanded instance with distributed inputs - /// * `Err(ProofError)` - If the expansion process fails - /// - /// # Errors - /// - /// This function will return an error if: - /// * A circuit label in the inputs is not found in the ROM - /// * Input distribution fails - /// - /// # Details - /// - /// The expansion process handles fold inputs, which are inputs that need to be distributed across - /// multiple instances of the same circuit in the ROM. For each circuit label in the inputs: - /// 1. Finds all positions of that circuit in the ROM - /// 2. Splits the fold inputs into equal parts - /// 3. Assigns each part to the corresponding circuit instance - /// - /// The resulting expanded form contains individual private inputs for each ROM position, with - /// fold inputs properly distributed according to circuit usage. 
- pub fn into_expanded( - self, - proof_params: &ProofParams, - ) -> Result, ProofError> { - assert_eq!(self.private_inputs.0.len(), proof_params.rom.len()); - - let mut instruction_usage: HashMap> = HashMap::new(); - for (index, circuit) in proof_params.rom.iter().enumerate() { - if let Some(usage) = instruction_usage.get_mut(circuit.as_str()) { - usage.push(index); - } else { - instruction_usage.insert(circuit.clone(), vec![index]); - } - } - let mut private_inputs: Vec> = self.private_inputs.0; - - // add fold input sliced to chunks and add to private input - for (circuit_label, fold_inputs) in self.private_inputs.1.iter() { - let inputs = match instruction_usage.get(circuit_label) { - Some(inputs) => inputs, - None => - Err(ProofError::Other(format!("Circuit label '{}' not found in rom", circuit_label)))?, - }; - let split_inputs = fold_inputs.split(inputs.len()); - for (idx, input) in inputs.iter().zip(split_inputs) { - private_inputs[*idx].extend(input); - } - } - - assert_eq!(private_inputs.len(), proof_params.rom.len()); - - let Self { nivc_input: initial_nivc_input, .. } = self; - Ok(InstanceParams { nivc_input: initial_nivc_input, private_inputs }) - } -} - -impl SetupParams { - /// Converts an offline setup parameters instance back into an online version by decompressing and - /// deserializing the public parameters and reconstructing the circuit shapes. - /// - /// This method performs the following steps: - /// 1. Deserializes raw bytes into an AuxParams object - /// 2. Initializes the circuit list from setup data - /// 3. Generates circuit shapes from the initialized memory - /// 4. Reconstructs full public parameters from circuit shapes and auxiliary parameters - /// 5. 
Constructs a new online program data instance - /// - /// # Arguments - /// - /// * `self` - The offline program data instance to convert - /// - /// # Returns - /// - /// Returns a `Result` containing either: - /// * `Ok(SetupParams)` - The converted online program data - /// * `Err(ProofError)` - If any step in the conversion process fails - /// - /// # Errors - /// - /// This function will return an error if: - /// * Circuit initialization fails - /// * Circuit shape generation fails - /// - /// # Features - /// - /// When compiled with the "timing" feature, this function will output timing information for: - /// * Reading and deserializing auxiliary parameters - /// * Generating circuit shapes - pub fn into_online(self) -> Result, ProofError> { - debug!("init proving params, proving_param_bytes={:?}", self.public_params.len()); - let proving_params = ProvingParams::from_bytes(&self.public_params).unwrap(); - - info!("init setup"); - let initialized_setup = initialize_setup_data(&self.setup_data).unwrap(); - - let circuits = initialize_circuit_list(&initialized_setup); - let memory = Memory { circuits, rom: vec![0; self.setup_data.max_rom_length] }; - - // TODO: This converts the r1cs memory into sparse matrices, which doubles - // the memory usage. Can we re-used these sparse matrices in our constraint - // system? - info!("init circuit shapes"); - let circuit_shapes = get_circuit_shapes(&memory); - - info!("init public params from parts"); - let public_params = - PublicParams::::from_parts_unchecked(circuit_shapes, proving_params.aux_params); - let Self { rom_data, .. 
} = self; - - Ok(SetupParams { - public_params: Arc::new(public_params), - vk_digest_primary: proving_params.vk_digest_primary, - vk_digest_secondary: proving_params.vk_digest_secondary, - setup_data: Arc::new(initialized_setup), - rom_data, - }) - } -} - -impl SetupParams { - /// Converts an online setup parameters instance into an offline version by serializing - /// the public parameters to disk. - /// - /// This method performs the following steps: - /// 1. Extracts auxiliary parameters from the public parameters - /// 2. Serializes the auxiliary parameters to bytes - /// 3. Writes the compressed data to the specified path - /// 4. Constructs a new offline program data instance - /// - /// # Arguments - /// - /// * `self` - The online program data instance to convert - /// * `path` - The file path where compressed public parameters will be saved - /// - /// # Returns - /// - /// Returns a `Result` containing either: - /// * `Ok(SetupParams)` - The converted offline program data - /// * `Err(ProofError)` - If any step in the conversion process fails - /// - /// # Errors - /// - /// This function will return an error if: - /// * Bytes serialization fails - /// * File system operations fail (creating directories or writing file) - pub fn into_offline(self, path: PathBuf) -> Result, ProofError> { - let exclusive = Arc::try_unwrap(self.public_params).unwrap(); - let (_, aux_params) = exclusive.into_parts(); - let vk_digest_primary = self.vk_digest_primary; - let vk_digest_secondary = self.vk_digest_secondary; - let proving_param_bytes = - ProvingParams { aux_params, vk_digest_primary, vk_digest_secondary }.to_bytes(); - - if let Some(parent) = path.parent() { - fs::create_dir_all(parent)?; - } - - let bytes_path = path.with_extension("bytes"); - debug!("bytes_path={:?}", bytes_path); - File::create(&bytes_path)?.write_all(&proving_param_bytes)?; - - let Self { rom_data, .. 
} = self; - Ok(SetupParams { - public_params: proving_param_bytes, - vk_digest_primary, - vk_digest_secondary, - // TODO: This approach is odd, refactor with #375 - setup_data: Default::default(), - rom_data, - }) - } - - /// Extends and prepares the public inputs for the zero-knowledge proof circuits. - /// - /// This function performs two main operations: - /// 1. Expands the ROM (Read-Only Memory) to the maximum length specified in `setup_data` - /// 2. Constructs the primary public input vector `z0_primary` by combining: - /// - The initial NIVC (Non-Interactive Verifiable Computation) input - /// - An initial ROM index of zero - /// - The expanded ROM opcodes converted to field elements - /// - /// # Arguments - /// - /// * `rom` - A reference to the ROM (sequence of circuit operations) containing circuit - /// configurations. - /// * `initial_nivc_input` - The initial public input required for NIVC. - /// - /// # Returns - /// - /// Returns a tuple containing: - /// - `Vec>`: The extended primary public input vector (z0_primary) - /// - `Vec`: The expanded ROM containing opcodes - /// - /// # Errors - /// - /// Returns a `ProofError` if: - /// - Any opcode configuration specified in the ROM is not found in `rom_data` - pub fn extend_public_inputs( - &self, - rom: &Rom, - initial_nivc_input: &NivcInput, - ) -> Result<(Vec>, Vec), ProofError> { - // TODO: This is currently enabled for _either_ Expanded or NotExpanded - let mut rom = rom - .iter() - .map(|opcode_config| { - self - .rom_data - .get(opcode_config) - .ok_or_else(|| { - ProofError::Other(format!("Opcode config '{}' not found in rom_data", opcode_config)) - }) - .map(|config| config.opcode) - }) - .collect::, ProofError>>()?; - - rom.resize(self.setup_data.max_rom_length, u64::MAX); - - let mut z0_primary: Vec> = initial_nivc_input.clone(); - z0_primary.push(F::::ZERO); // rom_index = 0 - z0_primary.extend(rom.iter().map(|opcode| ::Scalar::from(*opcode))); - debug!("z0_primary={:?}", z0_primary); - 
Ok((z0_primary, rom.clone())) - } -} - -impl SetupParams { - /// Generates NIVC proof from [`InstanceParams`] - /// - run NIVC recursive proving - /// - run CompressedSNARK to compress proof - /// - serialize proof - pub async fn generate_proof( - &self, - proof_params: &ProofParams, - instance_params: &InstanceParams, - ) -> Result, String>, ProofError> { - debug!("starting recursive proving"); - let program_output = program::run(self, proof_params, instance_params).await?; - - debug!("starting proof compression"); - let compressed_snark_proof = program::compress_proof_no_setup( - &program_output, - &self.public_params, - self.vk_digest_primary, - self.vk_digest_secondary, - )?; - compressed_snark_proof.serialize() - } -} - -#[cfg(test)] -mod tests { - use super::*; - - const JSON: &str = r#" -{ - "input": [ - [ - {},{},{} - ], - { - "CIRCUIT_1": { - "external": [5,7], - "plaintext": [1,2,3,4] - }, - "CIRCUIT_2": { - "ciphertext": [1, 2, 3, 4], - "external": [2, 4] - }, - "CIRCUIT_3": { - "key": [2, 3], - "value": [4, 5] - } - } - ] -}"#; - - #[derive(Debug, Deserialize)] - struct MockInputs { - input: (Vec>, HashMap), - } - - // Helper function to create test program data - fn create_test_program_data() -> (SetupParams, ProofParams, InstanceParams) { - // Load add.r1cs from examples - let add_r1cs = crate::tests::inputs::ADD_EXTERNAL_R1CS.to_vec(); - let r1cs = R1CSType::Raw(add_r1cs.to_vec()); - // Create ROM data with proper circuit data - let mut rom_data = HashMap::new(); - rom_data.insert("add".to_string(), CircuitData { opcode: 1u64 }); - rom_data.insert("mul".to_string(), CircuitData { opcode: 2u64 }); - - // Rest of the function remains same - let rom: Vec = vec!["add".to_string(), "mul".to_string()]; - - let setup_data = UninitializedSetup { - max_rom_length: 4, - r1cs_types: vec![r1cs], - witness_generator_types: vec![WitnessGeneratorType::Raw(vec![])], - }; - let initialized_setup = initialize_setup_data(&setup_data).unwrap(); - - let public_params = 
program::setup(&setup_data); - let (prover_key, _vk) = CompressedSNARK::::setup(&public_params).unwrap(); - - let setup_params = SetupParams { - public_params: Arc::new(public_params), - setup_data: Arc::new(initialized_setup), - vk_digest_primary: prover_key.pk_primary.vk_digest, - vk_digest_secondary: prover_key.pk_secondary.vk_digest, - rom_data, - }; - let proof_params = ProofParams { rom }; - let instance_params = InstanceParams { - nivc_input: vec![F::::ONE], - private_inputs: vec![HashMap::new(), HashMap::new()], - }; - - (setup_params, proof_params, instance_params) - } - - #[test] - fn test_extend_public_inputs() { - // Setup test data - let (setup_params, proof_params, instance_params) = create_test_program_data(); - - // Test successful case - let result = setup_params.extend_public_inputs(&proof_params.rom, &instance_params.nivc_input); - assert!(result.is_ok()); - - let (z0_primary, expanded_rom) = result.unwrap(); - - // Verify z0_primary structure - assert_eq!( - z0_primary.len(), - instance_params.nivc_input.len() + 1 + setup_params.setup_data.max_rom_length - ); - assert_eq!(z0_primary[instance_params.nivc_input.len()], F::::ZERO); // Check ROM index is 0 - - // Verify ROM expansion - assert_eq!(expanded_rom.len(), setup_params.setup_data.max_rom_length); - assert_eq!(expanded_rom[0], 1u64); // First opcode - assert_eq!(expanded_rom[1], 2u64); // Second opcode - assert_eq!(expanded_rom[2], u64::MAX); // Padding - } - - #[test] - fn test_extend_public_inputs_missing_opcode() { - let (setup_params, mut proof_params, instance_params) = create_test_program_data(); - - // Add an opcode config that doesn't exist in rom_data - proof_params.rom.push("nonexistent".to_string()); - - let result = setup_params.extend_public_inputs(&proof_params.rom, &instance_params.nivc_input); - assert!(result.is_err()); - assert!(matches!( - result.unwrap_err(), - ProofError::Other(e) if e.contains("not found in rom_data") - )); - } - - #[test] - 
#[tracing_test::traced_test] - fn test_deserialize_inputs() { - let mock_inputs: MockInputs = serde_json::from_str(JSON).unwrap(); - dbg!(&mock_inputs.input); - assert!(mock_inputs.input.1.contains_key("CIRCUIT_1")); - assert!(mock_inputs.input.1.contains_key("CIRCUIT_2")); - assert!(mock_inputs.input.1.contains_key("CIRCUIT_3")); - } - - #[test] - #[tracing_test::traced_test] - fn test_expand_private_inputs() { - let mock_inputs: MockInputs = serde_json::from_str(JSON).unwrap(); - let proof_params = ProofParams { - rom: vec![String::from("CIRCUIT_1"), String::from("CIRCUIT_2"), String::from("CIRCUIT_3")], - }; - let instance_params = - InstanceParams:: { nivc_input: vec![], private_inputs: mock_inputs.input } - .into_expanded(&proof_params) - .unwrap(); - dbg!(&instance_params.private_inputs); - assert!(!instance_params.private_inputs[0].is_empty()); - assert!(!instance_params.private_inputs[1].is_empty()); - assert!(!instance_params.private_inputs[2].is_empty()); - } - - #[test] - fn test_online_to_offline_serialization_round_trip() { - let temp_dir = tempdir::TempDir::new("setup").unwrap(); - let offline_path = temp_dir.path().join("offline"); - - let (setup_params_online, ..) = create_test_program_data(); - let setup_params_offline = setup_params_online.into_offline(offline_path).unwrap(); - - // Matches itself - assert_eq!(setup_params_offline, setup_params_offline); - - // Verify round-trip serialization for `Offline` - let serialized_offline = serde_json::to_string(&setup_params_offline).unwrap(); - let deserialized_offline: SetupParams = - serde_json::from_str(&serialized_offline).unwrap(); - assert_eq!(setup_params_offline, deserialized_offline); - - // Can be "onlined" - let result = deserialized_offline.into_online(); - assert!(result.is_ok()); - } -} +// //! # Data Module +// //! +// //! The `data` module contains data structures and types used in the proof system. +// //! +// //! ## Structs +// //! +// //! 
- `FoldInput`: Represents the fold input for any circuit containing signal names and values. +// //! - `R1CSType`: Represents the R1CS file type, which can be either a file path or raw bytes. + +// use std::{ +// fs::{self, File}, +// io::Write, +// sync::Arc, +// }; + +// use client_side_prover::{fast_serde::FastSerde, supernova::get_circuit_shapes}; +// use serde_json::json; + +// use super::*; +// use crate::setup::ProvingParams; + +// /// Fold input for any circuit containing signals name and vector of values. Inputs are +// distributed /// evenly across folds after the ROM is finalised by the prover. +// #[derive(Clone, Debug, Serialize, Deserialize)] +// pub struct FoldInput { +// /// circuit name and consolidated values +// #[serde(flatten)] +// pub value: HashMap>, +// } + +// impl FoldInput { +// /// splits the inputs evenly across folds as per instruction frequency +// pub fn split(&self, freq: usize) -> Vec> { +// let mut res = vec![HashMap::new(); freq]; + +// for (key, value) in self.value.clone().into_iter() { +// debug!("key: {:?}, freq: {}, value_len: {}", key, freq, value.len()); +// assert_eq!(value.len() % freq, 0); +// let chunk_size = value.len() / freq; +// let chunks: Vec> = value.chunks(chunk_size).map(|chunk| +// chunk.to_vec()).collect(); for i in 0..freq { +// res[i].insert(key.clone(), json!(chunks[i].clone())); +// } +// } + +// res +// } +// } + +// // Note, the below are typestates that prevent misuse of our current API. 
+// /// Setup status trait +// pub trait SetupStatus { +// /// Public parameters type +// type PublicParams; +// /// Setup data type +// type SetupData; +// } + +// /// Online setup status +// #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +// pub struct Online; +// impl SetupStatus for Online { +// type PublicParams = Arc>; +// type SetupData = Arc; +// } + +// /// Offline setup status +// #[derive(Serialize, Deserialize, Debug, PartialEq)] +// pub struct Offline; +// impl SetupStatus for Offline { +// type PublicParams = Vec; +// type SetupData = UninitializedSetup; +// } + +// /// Witness status trait +// pub trait WitnessStatus { +// /// Private input for a circuit containing signals name and vector of values +// /// - For [`Expanded`] status, it is a vector of private inputs for each fold of a circuit +// /// - For [`NotExpanded`] status, it is a tuple of private input and fold input of a circuit +// type PrivateInputs; +// } + +// /// Expanded witness status +// pub struct Expanded; +// impl WitnessStatus for Expanded { +// /// expanded input for each fold of each circuit in the ROM +// type PrivateInputs = Vec>; +// } + +// /// Not expanded witness status +// pub struct NotExpanded; +// impl WitnessStatus for NotExpanded { +// /// Private input and fold input for each circuit in the ROM +// type PrivateInputs = (Vec>, HashMap); +// } + +// /// Auxiliary circuit data required to execute the ROM +// #[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] +// pub struct CircuitData { +// /// circuit instruction opcode in [`S::SetupData`] +// pub opcode: u64, +// } + +// /// ROM data type +// pub type RomData = HashMap; +// /// ROM type +// pub type Rom = Vec; +// /// NIVC input type +// pub type NivcInput = Vec>; + +// /// Represents configuration and circuit data required for initializing the proving system. +// #[derive(Serialize, Deserialize, Clone, Debug)] +// pub struct SetupParams { +// /// Public parameters of the proving system. 
Maps to the client-side prover parameters. +// pub public_params: S::PublicParams, +// // TODO: Refactor this onto the PublicParams object and share the ProvingParams abstraction +// /// Setup-specific verification key digest for the primary elliptic curve. +// pub vk_digest_primary: ::Scalar, +// /// Setup-specific verification key digest for the secondary elliptic curve. +// pub vk_digest_secondary: as Engine>::Scalar, +// /// Describes R1CS configurations used in proving setup. +// pub setup_data: S::SetupData, +// /// A mapping between ROM opcodes and circuit configuration. +// pub rom_data: RomData, +// } + +// impl PartialEq for SetupParams +// where S::SetupData: PartialEq +// { +// fn eq(&self, other: &Self) -> bool { +// // TODO: Supernova types are not supporting PartialEq +// // self.public_params == other.public_params && +// self.vk_digest_primary == other.vk_digest_primary +// && self.vk_digest_secondary == other.vk_digest_secondary +// && self.setup_data == other.setup_data +// && self.rom_data == other.rom_data +// } +// } + +// /// Contains inputs and state specific to a single proof generation instance. +// #[derive(Debug)] +// pub struct InstanceParams { +// /// Initial public input for NIVC +// pub nivc_input: NivcInput, +// /// Private inputs for each fold +// pub private_inputs: W::PrivateInputs, +// } + +// impl InstanceParams { +// /// Converts proving instance parameters into an expanded form by distributing fold inputs +// across /// their corresponding circuit instances in the ROM. +// /// +// /// This method performs the following steps: +// /// 1. Creates a map of circuit names to their positions in the ROM +// /// 2. Collects private inputs from each ROM opcode configuration +// /// 3. Distributes fold inputs across matching circuit instances based on their labels +// /// 4. 
Combines the distributed inputs with existing private inputs for each ROM position +// /// +// /// # Arguments +// /// +// /// * `self` - The program data instance to expand +// /// +// /// # Returns +// /// +// /// Returns a `Result` containing either: +// /// * `Ok(InstanceParams)` - The expanded instance with distributed inputs +// /// * `Err(ProofError)` - If the expansion process fails +// /// +// /// # Errors +// /// +// /// This function will return an error if: +// /// * A circuit label in the inputs is not found in the ROM +// /// * Input distribution fails +// /// +// /// # Details +// /// +// /// The expansion process handles fold inputs, which are inputs that need to be distributed +// across /// multiple instances of the same circuit in the ROM. For each circuit label in the +// inputs: /// 1. Finds all positions of that circuit in the ROM +// /// 2. Splits the fold inputs into equal parts +// /// 3. Assigns each part to the corresponding circuit instance +// /// +// /// The resulting expanded form contains individual private inputs for each ROM position, with +// /// fold inputs properly distributed according to circuit usage. 
+// pub fn into_expanded( +// self, +// proof_params: &ProofParams, +// ) -> Result, ProofError> { +// assert_eq!(self.private_inputs.0.len(), proof_params.rom.len()); + +// let mut instruction_usage: HashMap> = HashMap::new(); +// for (index, circuit) in proof_params.rom.iter().enumerate() { +// if let Some(usage) = instruction_usage.get_mut(circuit.as_str()) { +// usage.push(index); +// } else { +// instruction_usage.insert(circuit.clone(), vec![index]); +// } +// } +// let mut private_inputs: Vec> = self.private_inputs.0; + +// // add fold input sliced to chunks and add to private input +// for (circuit_label, fold_inputs) in self.private_inputs.1.iter() { +// let inputs = match instruction_usage.get(circuit_label) { +// Some(inputs) => inputs, +// None => +// Err(ProofError::Other(format!("Circuit label '{}' not found in rom", circuit_label)))?, +// }; +// let split_inputs = fold_inputs.split(inputs.len()); +// for (idx, input) in inputs.iter().zip(split_inputs) { +// private_inputs[*idx].extend(input); +// } +// } + +// assert_eq!(private_inputs.len(), proof_params.rom.len()); + +// let Self { nivc_input: initial_nivc_input, .. } = self; +// Ok(InstanceParams { nivc_input: initial_nivc_input, private_inputs }) +// } +// } + +// impl SetupParams { +// /// Converts an offline setup parameters instance back into an online version by decompressing +// and /// deserializing the public parameters and reconstructing the circuit shapes. +// /// +// /// This method performs the following steps: +// /// 1. Deserializes raw bytes into an AuxParams object +// /// 2. Initializes the circuit list from setup data +// /// 3. Generates circuit shapes from the initialized memory +// /// 4. Reconstructs full public parameters from circuit shapes and auxiliary parameters +// /// 5. 
Constructs a new online program data instance +// /// +// /// # Arguments +// /// +// /// * `self` - The offline program data instance to convert +// /// +// /// # Returns +// /// +// /// Returns a `Result` containing either: +// /// * `Ok(SetupParams)` - The converted online program data +// /// * `Err(ProofError)` - If any step in the conversion process fails +// /// +// /// # Errors +// /// +// /// This function will return an error if: +// /// * Circuit initialization fails +// /// * Circuit shape generation fails +// /// +// /// # Features +// /// +// /// When compiled with the "timing" feature, this function will output timing information for: +// /// * Reading and deserializing auxiliary parameters +// /// * Generating circuit shapes +// pub fn into_online(self) -> Result, ProofError> { +// debug!("init proving params, proving_param_bytes={:?}", self.public_params.len()); +// let proving_params = ProvingParams::from_bytes(&self.public_params).unwrap(); + +// info!("init setup"); +// let initialized_setup = initialize_setup_data(&self.setup_data).unwrap(); + +// let circuits = initialize_circuit_list(&initialized_setup); +// let memory = Switchboard { circuits }; + +// // TODO: This converts the r1cs memory into sparse matrices, which doubles +// // the memory usage. Can we re-used these sparse matrices in our constraint +// // system? +// info!("init circuit shapes"); +// let circuit_shapes = get_circuit_shapes(&memory); + +// info!("init public params from parts"); +// let public_params = +// PublicParams::::from_parts_unchecked(circuit_shapes, proving_params.aux_params); +// let Self { rom_data, .. 
} = self; + +// Ok(SetupParams { +// public_params: Arc::new(public_params), +// vk_digest_primary: proving_params.vk_digest_primary, +// vk_digest_secondary: proving_params.vk_digest_secondary, +// setup_data: Arc::new(initialized_setup), +// rom_data, +// }) +// } +// } + +// impl SetupParams { +// /// Converts an online setup parameters instance into an offline version by serializing +// /// the public parameters to disk. +// /// +// /// This method performs the following steps: +// /// 1. Extracts auxiliary parameters from the public parameters +// /// 2. Serializes the auxiliary parameters to bytes +// /// 3. Writes the compressed data to the specified path +// /// 4. Constructs a new offline program data instance +// /// +// /// # Arguments +// /// +// /// * `self` - The online program data instance to convert +// /// * `path` - The file path where compressed public parameters will be saved +// /// +// /// # Returns +// /// +// /// Returns a `Result` containing either: +// /// * `Ok(SetupParams)` - The converted offline program data +// /// * `Err(ProofError)` - If any step in the conversion process fails +// /// +// /// # Errors +// /// +// /// This function will return an error if: +// /// * Bytes serialization fails +// /// * File system operations fail (creating directories or writing file) +// pub fn into_offline(self, path: PathBuf) -> Result, ProofError> { +// let exclusive = Arc::try_unwrap(self.public_params).unwrap(); +// let (_, aux_params) = exclusive.into_parts(); +// let vk_digest_primary = self.vk_digest_primary; +// let vk_digest_secondary = self.vk_digest_secondary; +// let proving_param_bytes = +// ProvingParams { aux_params, vk_digest_primary, vk_digest_secondary }.to_bytes(); + +// if let Some(parent) = path.parent() { +// fs::create_dir_all(parent)?; +// } + +// let bytes_path = path.with_extension("bytes"); +// debug!("bytes_path={:?}", bytes_path); +// File::create(&bytes_path)?.write_all(&proving_param_bytes)?; + +// let Self { 
rom_data, .. } = self; +// Ok(SetupParams { +// public_params: proving_param_bytes, +// vk_digest_primary, +// vk_digest_secondary, +// // TODO: This approach is odd, refactor with #375 +// setup_data: Default::default(), +// rom_data, +// }) +// } + +// /// Extends and prepares the public inputs for the zero-knowledge proof circuits. +// /// +// /// This function performs two main operations: +// /// 1. Expands the ROM (Read-Only Memory) to the maximum length specified in `setup_data` +// /// 2. Constructs the primary public input vector `z0_primary` by combining: +// /// - The initial NIVC (Non-Interactive Verifiable Computation) input +// /// - An initial ROM index of zero +// /// - The expanded ROM opcodes converted to field elements +// /// +// /// # Arguments +// /// +// /// * `rom` - A reference to the ROM (sequence of circuit operations) containing circuit +// /// configurations. +// /// * `initial_nivc_input` - The initial public input required for NIVC. +// /// +// /// # Returns +// /// +// /// Returns a tuple containing: +// /// - `Vec>`: The extended primary public input vector (z0_primary) +// /// - `Vec`: The expanded ROM containing opcodes +// /// +// /// # Errors +// /// +// /// Returns a `ProofError` if: +// /// - Any opcode configuration specified in the ROM is not found in `rom_data` +// pub fn extend_public_inputs( +// &self, +// rom: &Rom, +// initial_nivc_input: &NivcInput, +// ) -> Result<(Vec>, Vec), ProofError> { +// // TODO: This is currently enabled for _either_ Expanded or NotExpanded +// let mut rom = rom +// .iter() +// .map(|opcode_config| { +// self +// .rom_data +// .get(opcode_config) +// .ok_or_else(|| { +// ProofError::Other(format!("Opcode config '{}' not found in rom_data", opcode_config)) +// }) +// .map(|config| config.opcode) +// }) +// .collect::, ProofError>>()?; + +// rom.resize(self.setup_data.max_rom_length, u64::MAX); + +// let mut z0_primary: Vec> = initial_nivc_input.clone(); +// z0_primary.push(F::::ZERO); // 
rom_index = 0 +// z0_primary.extend(rom.iter().map(|opcode| ::Scalar::from(*opcode))); +// debug!("z0_primary={:?}", z0_primary); +// Ok((z0_primary, rom.clone())) +// } +// } + +// impl SetupParams { +// /// Generates NIVC proof from [`InstanceParams`] +// /// - run NIVC recursive proving +// /// - run CompressedSNARK to compress proof +// /// - serialize proof +// pub async fn generate_proof( +// &self, +// proof_params: &ProofParams, +// instance_params: &InstanceParams, +// ) -> Result, String>, ProofError> { +// debug!("starting recursive proving"); +// let program_output = program::run(self, proof_params, instance_params).await?; + +// debug!("starting proof compression"); +// let compressed_snark_proof = program::compress_proof_no_setup( +// &program_output, +// &self.public_params, +// self.vk_digest_primary, +// self.vk_digest_secondary, +// )?; +// compressed_snark_proof.serialize() +// } +// } + +// #[cfg(test)] +// mod tests { +// use super::*; + +// const JSON: &str = r#" +// { +// "input": [ +// [ +// {},{},{} +// ], +// { +// "CIRCUIT_1": { +// "external": [5,7], +// "plaintext": [1,2,3,4] +// }, +// "CIRCUIT_2": { +// "ciphertext": [1, 2, 3, 4], +// "external": [2, 4] +// }, +// "CIRCUIT_3": { +// "key": [2, 3], +// "value": [4, 5] +// } +// } +// ] +// }"#; + +// #[derive(Debug, Deserialize)] +// struct MockInputs { +// input: (Vec>, HashMap), +// } + +// // Helper function to create test program data +// fn create_test_program_data() -> (SetupParams, ProofParams, InstanceParams) { +// // Load add.r1cs from examples +// let add_r1cs = crate::tests::inputs::ADD_EXTERNAL_R1CS.to_vec(); +// let r1cs = R1CSType::Raw(add_r1cs.to_vec()); +// // Create ROM data with proper circuit data +// let mut rom_data = HashMap::new(); +// rom_data.insert("add".to_string(), CircuitData { opcode: 1u64 }); +// rom_data.insert("mul".to_string(), CircuitData { opcode: 2u64 }); + +// // Rest of the function remains same +// let rom: Vec = vec!["add".to_string(), 
"mul".to_string()]; + +// let setup_data = UninitializedSetup { +// max_rom_length: 4, +// r1cs_types: vec![r1cs], +// witness_generator_types: vec![WitnessGeneratorType::Raw(vec![])], +// }; +// let initialized_setup = initialize_setup_data(&setup_data).unwrap(); + +// let public_params = program::setup(&setup_data); +// let (prover_key, _vk) = CompressedSNARK::::setup(&public_params).unwrap(); + +// let setup_params = SetupParams { +// public_params: Arc::new(public_params), +// setup_data: Arc::new(initialized_setup), +// vk_digest_primary: prover_key.pk_primary.vk_digest, +// vk_digest_secondary: prover_key.pk_secondary.vk_digest, +// rom_data, +// }; +// let proof_params = ProofParams { rom }; +// let instance_params = InstanceParams { +// nivc_input: vec![F::::ONE], +// private_inputs: vec![HashMap::new(), HashMap::new()], +// }; + +// (setup_params, proof_params, instance_params) +// } + +// #[test] +// fn test_extend_public_inputs() { +// // Setup test data +// let (setup_params, proof_params, instance_params) = create_test_program_data(); + +// // Test successful case +// let result = setup_params.extend_public_inputs(&proof_params.rom, +// &instance_params.nivc_input); assert!(result.is_ok()); + +// let (z0_primary, expanded_rom) = result.unwrap(); + +// // Verify z0_primary structure +// assert_eq!( +// z0_primary.len(), +// instance_params.nivc_input.len() + 1 + setup_params.setup_data.max_rom_length +// ); +// assert_eq!(z0_primary[instance_params.nivc_input.len()], F::::ZERO); // Check ROM index +// is 0 + +// // Verify ROM expansion +// assert_eq!(expanded_rom.len(), setup_params.setup_data.max_rom_length); +// assert_eq!(expanded_rom[0], 1u64); // First opcode +// assert_eq!(expanded_rom[1], 2u64); // Second opcode +// assert_eq!(expanded_rom[2], u64::MAX); // Padding +// } + +// #[test] +// fn test_extend_public_inputs_missing_opcode() { +// let (setup_params, mut proof_params, instance_params) = create_test_program_data(); + +// // Add an opcode 
config that doesn't exist in rom_data +// proof_params.rom.push("nonexistent".to_string()); + +// let result = setup_params.extend_public_inputs(&proof_params.rom, +// &instance_params.nivc_input); assert!(result.is_err()); +// assert!(matches!( +// result.unwrap_err(), +// ProofError::Other(e) if e.contains("not found in rom_data") +// )); +// } + +// #[test] +// #[tracing_test::traced_test] +// fn test_deserialize_inputs() { +// let mock_inputs: MockInputs = serde_json::from_str(JSON).unwrap(); +// dbg!(&mock_inputs.input); +// assert!(mock_inputs.input.1.contains_key("CIRCUIT_1")); +// assert!(mock_inputs.input.1.contains_key("CIRCUIT_2")); +// assert!(mock_inputs.input.1.contains_key("CIRCUIT_3")); +// } + +// #[test] +// #[tracing_test::traced_test] +// fn test_expand_private_inputs() { +// let mock_inputs: MockInputs = serde_json::from_str(JSON).unwrap(); +// let proof_params = ProofParams { +// rom: vec![String::from("CIRCUIT_1"), String::from("CIRCUIT_2"), String::from("CIRCUIT_3")], +// }; +// let instance_params = +// InstanceParams:: { nivc_input: vec![], private_inputs: mock_inputs.input } +// .into_expanded(&proof_params) +// .unwrap(); +// dbg!(&instance_params.private_inputs); +// assert!(!instance_params.private_inputs[0].is_empty()); +// assert!(!instance_params.private_inputs[1].is_empty()); +// assert!(!instance_params.private_inputs[2].is_empty()); +// } + +// #[test] +// fn test_online_to_offline_serialization_round_trip() { +// let temp_dir = tempdir::TempDir::new("setup").unwrap(); +// let offline_path = temp_dir.path().join("offline"); + +// let (setup_params_online, ..) 
= create_test_program_data(); +// let setup_params_offline = setup_params_online.into_offline(offline_path).unwrap(); + +// // Matches itself +// assert_eq!(setup_params_offline, setup_params_offline); + +// // Verify round-trip serialization for `Offline` +// let serialized_offline = serde_json::to_string(&setup_params_offline).unwrap(); +// let deserialized_offline: SetupParams = +// serde_json::from_str(&serialized_offline).unwrap(); +// assert_eq!(setup_params_offline, deserialized_offline); + +// // Can be "onlined" +// let result = deserialized_offline.into_online(); +// assert!(result.is_ok()); +// } +// } diff --git a/frontend/src/program/mod.rs b/frontend/src/program/mod.rs index 58c1ff2..39462ae 100644 --- a/frontend/src/program/mod.rs +++ b/frontend/src/program/mod.rs @@ -1,33 +1,12 @@ -//! # Program Module -//! -//! The `program` module contains the core logic for setting up and running the -//! proof system. It provides functionality for initializing the setup, -//! generating proofs, and verifying proofs. -//! -//! ## Submodules -//! -//! - `data`: Contains data structures and types used in the proof system. -//! - `http`: Provides utilities for handling HTTP-related operations in the proof system. -//! - `manifest`: Contains the manifest structure and related utilities. -//! - `utils`: Provides utility functions used throughout the module. 
- -use std::sync::Arc; - -use bellpepper_core::{num::AllocatedNum, ConstraintSystem, SynthesisError}; -use circom::{r1cs::R1CS, witness::generate_witness_from_generator_type}; use client_side_prover::{ - supernova::{NonUniformCircuit, RecursiveSNARK, StepCircuit}, + supernova::{NonUniformCircuit, RecursiveSNARK}, traits::{snark::default_ck_hint, Dual}, }; -use data::{Expanded, InitializedSetup}; use proof::FoldingProof; use tracing::trace; use super::*; -use crate::{ - circom::witness::generate_witness_from_browser_type, - program::data::{ProofParams, SetupParams}, -}; +use crate::noir::NoirProgram; pub mod data; @@ -37,54 +16,22 @@ pub mod data; /// Compressed proof type pub type CompressedProof = FoldingProof, F>; -/// Represents the memory used in the proof system. -/// -/// This struct contains the circuits and ROM data required for the proof -/// system. -pub struct Memory { - /// A vector of ROM circuits used in the proof system. - pub circuits: Vec, - /// A vector of ROM data, represented as 64-bit unsigned integers. - pub rom: Vec, -} - -/// Represents a ROM circuit used in the proof system. -/// -/// This struct encapsulates the necessary components and metadata for a ROM -/// circuit, including the circuit itself, its index, size, and various inputs. -#[derive(Clone)] -pub struct RomCircuit { - /// The Circom circuit associated with this ROM circuit. - pub circuit: CircomCircuit, - /// The index of this circuit within the ROM. - pub circuit_index: usize, - /// The size of the ROM. - pub rom_size: usize, - /// Optional NIVC I/O values for the circuit. - pub nivc_io: Option>>, - /// Optional private inputs for the circuit, mapped by their labels. - pub private_input: Option>, - /// The type of witness generator used for this circuit. 
- pub witness_generator_type: WitnessGeneratorType, +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct SwitchboardWitness { + pub witness: Vec>, + pub pc: usize, } -// NOTE (Colin): This is added so we can cache only the active circuits we are -// using. -impl Default for RomCircuit { - fn default() -> Self { - Self { - circuit: CircomCircuit::default(), - circuit_index: usize::MAX - 1, - rom_size: 0, - nivc_io: None, - private_input: None, - witness_generator_type: WitnessGeneratorType::Raw(vec![]), - } - } +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Switchboard { + pub circuits: Vec, + pub public_input: Vec>, + pub initial_circuit_index: usize, + pub witnesses: Vec, } -impl NonUniformCircuit for Memory { - type C1 = RomCircuit; +impl NonUniformCircuit for Switchboard { + type C1 = NoirProgram; type C2 = TrivialCircuit>; fn num_circuits(&self) -> usize { self.circuits.len() } @@ -93,180 +40,74 @@ impl NonUniformCircuit for Memory { self.circuits[circuit_index].clone() } - fn secondary_circuit(&self) -> Self::C2 { Default::default() } + fn secondary_circuit(&self) -> Self::C2 { TrivialCircuit::default() } - fn initial_circuit_index(&self) -> usize { self.rom[0] as usize } -} - -impl StepCircuit> for RomCircuit { - fn arity(&self) -> usize { self.circuit.arity() + 1 + self.rom_size } - - fn circuit_index(&self) -> usize { self.circuit_index } - - fn synthesize>>( - &self, - cs: &mut CS, - pc: Option<&AllocatedNum>>, - z: &[AllocatedNum>], - ) -> Result<(Option>>, Vec>>), SynthesisError> { - let rom_index = &z[self.circuit.arity()]; // jump to where we pushed pc data into CS - let allocated_rom = &z[self.circuit.arity() + 1..]; // jump to where we pushed rom data into C - let (rom_index_next, pc_next) = utils::next_rom_index_and_pc( - &mut cs.namespace(|| "next and rom_index and pc"), - rom_index, - allocated_rom, - pc.ok_or(SynthesisError::AssignmentMissing)?, - )?; - let mut circuit_constraints = self.circuit.vanilla_synthesize(cs, z)?; - 
circuit_constraints.push(rom_index_next); - circuit_constraints.extend(z[self.circuit.arity() + 1..].iter().cloned()); - Ok((Some(pc_next), circuit_constraints)) - } + fn initial_circuit_index(&self) -> usize { self.initial_circuit_index } } // TODO: This is like a one-time use setup that overlaps some with // `ProgramData::into_online()`. Worth checking out how to make this simpler, // clearer, more efficient. -/// Setup function -pub fn setup(setup_data: &UninitializedSetup) -> PublicParams { - // Optionally time the setup stage for the program - let time = std::time::Instant::now(); +// Setup function +// pub fn setup(setup_data: &UninitializedSetup) -> PublicParams { +// // Optionally time the setup stage for the program +// let time = std::time::Instant::now(); - // TODO: I don't think we want to have to call `initialize_circuit_list` more - // than once on setup ever and it seems like it may get used more - // frequently. - let initilized_setup = initialize_setup_data(setup_data).unwrap(); - let circuits = initialize_circuit_list(&initilized_setup); // TODO, change the type signature of trait to use arbitrary error types. - let memory = Memory { circuits, rom: vec![0; setup_data.max_rom_length] }; // Note, `rom` here is not used in setup, only `circuits` - let public_params = PublicParams::setup(&memory, &*default_ck_hint(), &*default_ck_hint()); +// // TODO: I don't think we want to have to call `initialize_circuit_list` more +// // than once on setup ever and it seems like it may get used more +// // frequently. +// let initilized_setup = initialize_setup_data(setup_data).unwrap(); +// let circuits = initialize_circuit_list(&initilized_setup); // TODO, change the type signature +// of trait to use arbitrary error types. 
let memory = Switchboard { circuits }; +// let public_params = PublicParams::setup(&memory, &*default_ck_hint(), &*default_ck_hint()); - trace!("`PublicParams::setup()` elapsed: {:?}", time.elapsed()); +// trace!("`PublicParams::setup()` elapsed: {:?}", time.elapsed()); - public_params -} +// public_params +// } -/// Executes the SuperNova program with the provided setup, proof, and instance -/// parameters. -/// -/// This function initializes the public inputs, resizes the ROM, and -/// iteratively processes each step of the ROM to generate a recursive SNARK -/// proof. It handles the private inputs, witness generation, and circuit -/// execution for each opcode in the ROM. -/// -/// # Arguments -/// -/// * `setup_params` - The setup parameters containing the setup data and public parameters. -/// * `proof_params` - The proof parameters containing the ROM and other proof-related data. -/// * `instance_params` - The instance parameters containing the NIVC input and private inputs. -/// -/// # Returns -/// -/// A `Result` containing the generated `RecursiveSNARK` on success, or a -/// `ProofError` on failure. -/// -/// # Errors -/// -/// This function can return a `ProofError` if there are issues with the NIVC -/// input, private inputs, or witness generation. -pub async fn run( - setup_params: &SetupParams, - proof_params: &ProofParams, - instance_params: &InstanceParams, -) -> Result, ProofError> { +pub fn run(switchboard: &Switchboard) -> Result, ProofError> { info!("Starting SuperNova program..."); - // Resize the rom to be the `max_rom_length` committed to in the `S::SetupData` - let (z0_primary, resized_rom) = - setup_params.extend_public_inputs(&proof_params.rom, &instance_params.nivc_input)?; - let z0_secondary = vec![F::::ZERO]; - - let mut recursive_snark_option = None; - let mut next_public_input = z0_primary.clone(); + info!("Setting up PublicParams..."); + // TODO: This is stupid to do, but I need to get around the original setting of the witness. 
+ // Having separate setup is the way (we already know this) + let mut memory_clone = switchboard.clone(); + memory_clone.circuits.iter_mut().for_each(|circ| circ.witness = None); + let public_params = PublicParams::setup(&memory_clone, &*default_ck_hint(), &*default_ck_hint()); - // TODO (Colin): We are basically creating a `R1CS` for each circuit here, then - // also creating `R1CSWithArity` for the circuits in the `PublicParams`. - // Surely we don't need both? - let circuits = initialize_circuit_list(&setup_params.setup_data); // TODO: AwK? + let z0_primary = &switchboard.public_input; + let z0_secondary = &[F::::ZERO]; - let mut memory = Memory { rom: resized_rom.clone(), circuits }; + let mut recursive_snark_option = None; let time = std::time::Instant::now(); - for (idx, &op_code) in - resized_rom.iter().enumerate().take_while(|(_, &op_code)| op_code != u64::MAX) - { - info!("Step {} of ROM", idx); - debug!("Opcode = {:?}", op_code); - memory.circuits[op_code as usize].private_input = - Some(instance_params.private_inputs[idx].clone()); - // trace!("private input: {:?}", memory.circuits[op_code as - // usize].private_input); - memory.circuits[op_code as usize].nivc_io = Some(next_public_input); - - let wit_type = memory.circuits[op_code as usize].witness_generator_type.clone(); - let public_params = &setup_params.public_params; - - memory.circuits[op_code as usize].circuit.witness = - if wit_type == WitnessGeneratorType::Browser { - // When running in browser, the witness is passed as input. 
- // Some(witnesses[idx].clone()) - let arity = memory.circuits[op_code as usize].circuit.arity(); - let nivc_io = - &memory.circuits[op_code as usize].nivc_io.as_ref().ok_or_else(|| { - ProofError::Other(format!("nivc_io not found for op_code {}", op_code)) - })?[..arity]; - - let private_input = - memory.circuits[op_code as usize].private_input.as_ref().ok_or_else(|| { - ProofError::Other(format!("private_input not found for op_code {}", op_code)) - })?; - - let circom_input = into_circom_input(nivc_io, private_input); - let witness = generate_witness_from_browser_type(circom_input, op_code).await?; - Some(witness) - } else { - let arity = memory.circuits[op_code as usize].circuit.arity(); - let nivc_io = - &memory.circuits[op_code as usize].nivc_io.as_ref().ok_or_else(|| { - ProofError::Other(format!("nivc_io not found for op_code {}", op_code)) - })?[..arity]; - - let private_input = - memory.circuits[op_code as usize].private_input.as_ref().ok_or_else(|| { - ProofError::Other(format!("private_input not found for op_code {}", op_code)) - })?; - let in_json = into_input_json(nivc_io, private_input)?; - let witness = generate_witness_from_generator_type(&in_json, &wit_type)?; - Some(witness) - }; - - let circuit_primary = memory.primary_circuit(op_code as usize); - let circuit_secondary = memory.secondary_circuit(); + for (idx, switchboard_witness) in switchboard.witnesses.iter().enumerate() { + info!("Step {} of {} witnesses", idx, switchboard.witnesses.len()); + debug!("Program counter = {:?}", switchboard_witness.pc); + + let circuit_primary = switchboard.primary_circuit(switchboard_witness.pc); + let circuit_secondary = switchboard.secondary_circuit(); let mut recursive_snark = recursive_snark_option.unwrap_or_else(|| { RecursiveSNARK::new( - public_params, - &memory, + &public_params, + switchboard, &circuit_primary, &circuit_secondary, - &z0_primary, - &z0_secondary, + z0_primary, + z0_secondary, ) })?; info!("Proving single step..."); - 
recursive_snark.prove_step(public_params, &circuit_primary, &circuit_secondary)?; + recursive_snark.prove_step(&public_params, &circuit_primary, &circuit_secondary)?; info!("Done proving single step..."); - #[cfg(feature = "verify-steps")] - { - info!("Verifying single step..."); - recursive_snark.verify(public_params, &z0_primary, &z0_secondary)?; - info!("Single step verification done"); - } - - // Update everything now for next step - next_public_input = recursive_snark.zi_primary().clone(); - next_public_input.truncate(circuit_primary.arity()); + // TODO: For some reason this is failing + // info!("Verifying single step..."); + // recursive_snark.verify(&public_params, recursive_snark.z0_primary(), z0_secondary)?; + // info!("Single step verification done"); recursive_snark_option = Some(Ok(recursive_snark)); } @@ -277,169 +118,169 @@ pub async fn run( Ok(recursive_snark?) } -/// Compresses a proof without performing the setup step. -/// -/// This function takes an existing `RecursiveSNARK` and compresses it into a -/// `CompressedProof` using pre-initialized proving keys. This is useful when -/// the setup step has already been performed and the proving keys are -/// available, allowing for more efficient proof generation. -/// -/// # Arguments -/// -/// * `recursive_snark` - A reference to the `RecursiveSNARK` that needs to be compressed. -/// * `public_params` - The public parameters required for the proof system. -/// * `vk_digest_primary` - The primary verification key digest. -/// * `vk_digest_secondary` - The secondary verification key digest. -/// -/// # Returns -/// -/// A `Result` containing the `CompressedProof` if successful, or a `ProofError` -/// if an error occurs. -/// -/// # Errors -/// -/// This function will return a `ProofError` if the compression process fails at -/// any step. 
-pub fn compress_proof_no_setup( - recursive_snark: &RecursiveSNARK, - public_params: &PublicParams, - vk_digest_primary: ::Scalar, - vk_digest_secondary: as Engine>::Scalar, -) -> Result { - let pk = CompressedSNARK::::initialize_pk( - public_params, - vk_digest_primary, - vk_digest_secondary, - ) - .unwrap(); - debug!( - "initialized pk pk_primary.digest={:?}, pk_secondary.digest={:?}", - pk.pk_primary.vk_digest, pk.pk_secondary.vk_digest - ); - - debug!("`CompressedSNARK::prove STARTING PROVING!"); - let proof = FoldingProof { - proof: CompressedSNARK::::prove(public_params, &pk, recursive_snark)?, - verifier_digest: pk.pk_primary.vk_digest, - }; - debug!("`CompressedSNARK::prove completed!"); - - Ok(proof) -} - -/// Compresses a proof by performing the setup step and generating a compressed -/// proof. -/// -/// This function initializes the proving keys by performing the setup step, and -/// then uses these keys to generate a compressed proof from an existing -/// `RecursiveSNARK`. This is useful when the setup step has not been performed -/// yet, and the proving keys need to be initialized before generating the -/// proof. -/// -/// # Arguments -/// -/// * `recursive_snark` - A reference to the `RecursiveSNARK` that needs to be compressed. -/// * `public_params` - The public parameters required for the proof system. -/// -/// # Returns -/// -/// A `Result` containing the `CompressedProof` if successful, or a `ProofError` -/// if an error occurs. -/// -/// # Errors -/// -/// This function will return a `ProofError` if the setup or compression process -/// fails at any step. 
-pub fn compress_proof( - recursive_snark: &RecursiveSNARK, - public_params: &PublicParams, -) -> Result { - debug!("Setting up `CompressedSNARK`"); - let time = std::time::Instant::now(); - let (pk, _vk) = CompressedSNARK::::setup(public_params)?; - debug!("Done setting up `CompressedSNARK`"); - trace!("`CompressedSNARK::setup` elapsed: {:?}", time.elapsed()); - - let time = std::time::Instant::now(); - - let proof = FoldingProof { - proof: CompressedSNARK::::prove(public_params, &pk, recursive_snark)?, - verifier_digest: pk.pk_primary.vk_digest, - }; - debug!("`CompressedSNARK::prove completed!"); - - trace!("`CompressedSNARK::prove` elapsed: {:?}", time.elapsed()); - - Ok(proof) -} - -/// Initializes the setup data for the program. -/// -/// This function takes an `UninitializedSetup` and converts it into an -/// `InitializedSetup` by iterating over the R1CS types and witness generator -/// types, creating `R1CS` instances and collecting them into vectors. It then -/// returns an `InitializedSetup` containing the R1CS and witness generator -/// types, along with the maximum ROM length. -/// -/// # Arguments -/// -/// * `setup_data` - The `UninitializedSetup` to initialize. -/// -/// # Returns -/// -/// A `Result` containing the `InitializedSetup` if successful, or a -/// `ProofError` if an error occurs. -pub fn initialize_setup_data( - setup_data: &UninitializedSetup, -) -> Result { - let (r1cs, witness_generator_types) = setup_data - .r1cs_types - .iter() - .zip(setup_data.witness_generator_types.iter()) - .map(|(r1cs_type, generator)| { - let r1cs = R1CS::try_from(r1cs_type)?; - Ok::<(Arc, data::WitnessGeneratorType), ProofError>(( - Arc::new(r1cs), - generator.clone(), - )) - }) - .collect::, _>>()? - .into_iter() - .unzip(); - - Ok(InitializedSetup { r1cs, witness_generator_types, max_rom_length: setup_data.max_rom_length }) -} - -/// Initializes a list of ROM circuits from the provided setup data. 
-/// -/// This function takes an `InitializedSetup` and creates a vector of -/// `RomCircuit` instances. Each `RomCircuit` is constructed using the R1CS and -/// witness generator types from the setup data, and is assigned a unique -/// circuit index and the maximum ROM length. -/// -/// # Arguments -/// -/// * `setup_data` - The `InitializedSetup` containing the R1CS and witness generator types. -/// -/// # Returns -/// -/// A vector of `RomCircuit` instances initialized with the provided setup data. -pub fn initialize_circuit_list(setup_data: &InitializedSetup) -> Vec { - setup_data - .r1cs - .iter() - .zip(setup_data.witness_generator_types.iter()) - .enumerate() - .map(|(i, (r1cs, generator))| { - let circuit = circom::CircomCircuit { r1cs: r1cs.clone(), witness: None }; - RomCircuit { - circuit, - circuit_index: i, - rom_size: setup_data.max_rom_length, - nivc_io: None, - private_input: None, - witness_generator_type: generator.clone(), - } - }) - .collect::>() -} +// /// Compresses a proof without performing the setup step. +// /// +// /// This function takes an existing `RecursiveSNARK` and compresses it into a +// /// `CompressedProof` using pre-initialized proving keys. This is useful when +// /// the setup step has already been performed and the proving keys are +// /// available, allowing for more efficient proof generation. +// /// +// /// # Arguments +// /// +// /// * `recursive_snark` - A reference to the `RecursiveSNARK` that needs to be compressed. +// /// * `public_params` - The public parameters required for the proof system. +// /// * `vk_digest_primary` - The primary verification key digest. +// /// * `vk_digest_secondary` - The secondary verification key digest. +// /// +// /// # Returns +// /// +// /// A `Result` containing the `CompressedProof` if successful, or a `ProofError` +// /// if an error occurs. +// /// +// /// # Errors +// /// +// /// This function will return a `ProofError` if the compression process fails at +// /// any step. 
+// pub fn compress_proof_no_setup( +// recursive_snark: &RecursiveSNARK, +// public_params: &PublicParams, +// vk_digest_primary: ::Scalar, +// vk_digest_secondary: as Engine>::Scalar, +// ) -> Result { +// let pk = CompressedSNARK::::initialize_pk( +// public_params, +// vk_digest_primary, +// vk_digest_secondary, +// ) +// .unwrap(); +// debug!( +// "initialized pk pk_primary.digest={:?}, pk_secondary.digest={:?}", +// pk.pk_primary.vk_digest, pk.pk_secondary.vk_digest +// ); + +// debug!("`CompressedSNARK::prove STARTING PROVING!"); +// let proof = FoldingProof { +// proof: CompressedSNARK::::prove(public_params, &pk, recursive_snark)?, +// verifier_digest: pk.pk_primary.vk_digest, +// }; +// debug!("`CompressedSNARK::prove completed!"); + +// Ok(proof) +// } + +// /// Compresses a proof by performing the setup step and generating a compressed +// /// proof. +// /// +// /// This function initializes the proving keys by performing the setup step, and +// /// then uses these keys to generate a compressed proof from an existing +// /// `RecursiveSNARK`. This is useful when the setup step has not been performed +// /// yet, and the proving keys need to be initialized before generating the +// /// proof. +// /// +// /// # Arguments +// /// +// /// * `recursive_snark` - A reference to the `RecursiveSNARK` that needs to be compressed. +// /// * `public_params` - The public parameters required for the proof system. +// /// +// /// # Returns +// /// +// /// A `Result` containing the `CompressedProof` if successful, or a `ProofError` +// /// if an error occurs. +// /// +// /// # Errors +// /// +// /// This function will return a `ProofError` if the setup or compression process +// /// fails at any step. 
+// pub fn compress_proof( +// recursive_snark: &RecursiveSNARK, +// public_params: &PublicParams, +// ) -> Result { +// debug!("Setting up `CompressedSNARK`"); +// let time = std::time::Instant::now(); +// let (pk, _vk) = CompressedSNARK::::setup(public_params)?; +// debug!("Done setting up `CompressedSNARK`"); +// trace!("`CompressedSNARK::setup` elapsed: {:?}", time.elapsed()); + +// let time = std::time::Instant::now(); + +// let proof = FoldingProof { +// proof: CompressedSNARK::::prove(public_params, &pk, recursive_snark)?, +// verifier_digest: pk.pk_primary.vk_digest, +// }; +// debug!("`CompressedSNARK::prove completed!"); + +// trace!("`CompressedSNARK::prove` elapsed: {:?}", time.elapsed()); + +// Ok(proof) +// } + +// /// Initializes the setup data for the program. +// /// +// /// This function takes an `UninitializedSetup` and converts it into an +// /// `InitializedSetup` by iterating over the R1CS types and witness generator +// /// types, creating `R1CS` instances and collecting them into vectors. It then +// /// returns an `InitializedSetup` containing the R1CS and witness generator +// /// types, along with the maximum ROM length. +// /// +// /// # Arguments +// /// +// /// * `setup_data` - The `UninitializedSetup` to initialize. +// /// +// /// # Returns +// /// +// /// A `Result` containing the `InitializedSetup` if successful, or a +// /// `ProofError` if an error occurs. +// pub fn initialize_setup_data( +// setup_data: &UninitializedSetup, +// ) -> Result { +// let (r1cs, witness_generator_types) = setup_data +// .r1cs_types +// .iter() +// .zip(setup_data.witness_generator_types.iter()) +// .map(|(r1cs_type, generator)| { +// let r1cs = R1CS::try_from(r1cs_type)?; +// Ok::<(Arc, data::WitnessGeneratorType), ProofError>(( +// Arc::new(r1cs), +// generator.clone(), +// )) +// }) +// .collect::, _>>()? 
+// .into_iter() +// .unzip(); + +// Ok(InitializedSetup { r1cs, witness_generator_types, max_rom_length: setup_data.max_rom_length +// }) } + +// /// Initializes a list of ROM circuits from the provided setup data. +// /// +// /// This function takes an `InitializedSetup` and creates a vector of +// /// `RomCircuit` instances. Each `RomCircuit` is constructed using the R1CS and +// /// witness generator types from the setup data, and is assigned a unique +// /// circuit index and the maximum ROM length. +// /// +// /// # Arguments +// /// +// /// * `setup_data` - The `InitializedSetup` containing the R1CS and witness generator types. +// /// +// /// # Returns +// /// +// /// A vector of `RomCircuit` instances initialized with the provided setup data. +// pub fn initialize_circuit_list(setup_data: &InitializedSetup) -> Vec { +// setup_data +// .r1cs +// .iter() +// .zip(setup_data.witness_generator_types.iter()) +// .enumerate() +// .map(|(i, (r1cs, generator))| { +// let circuit = circom::CircomCircuit { r1cs: r1cs.clone(), witness: None }; +// RomCircuit { +// circuit, +// circuit_index: i, +// rom_size: setup_data.max_rom_length, +// nivc_io: None, +// private_input: None, +// witness_generator_type: generator.clone(), +// } +// }) +// .collect::>() +// } diff --git a/frontend/src/setup.rs b/frontend/src/setup.rs index 5d5759a..9dee833 100644 --- a/frontend/src/setup.rs +++ b/frontend/src/setup.rs @@ -29,10 +29,7 @@ use client_side_prover::{ traits::{Dual, Engine}, }; -use crate::{ - errors::ProofError, program, program::data::R1CSType, AuxParams, ProverKey, UninitializedSetup, - WitnessGeneratorType, E1, S1, S2, -}; +use crate::{error::ProofError, program, AuxParams, ProverKey, E1, S1, S2}; /// Proving parameters #[derive(Debug)] @@ -102,29 +99,29 @@ impl ProvingParams { } } -/// Create a setup for a given list of R1CS files including the necessary -/// setup for compressed proving. 
-/// -/// # Arguments -/// - `r1cs_files`: A list of r1cs files that are accessible by the program using the setup -/// -/// # Returns -/// * `Result, ProofError>` - Bytes ready to be written to disk -pub fn setup(r1cs_files: &[R1CSType], rom_length: usize) -> Vec { - let setup_data = UninitializedSetup { - r1cs_types: r1cs_files.to_vec(), - witness_generator_types: vec![WitnessGeneratorType::Browser; r1cs_files.len()], - max_rom_length: rom_length, - }; - - let public_params = program::setup(&setup_data); - let (pk, _vk) = CompressedSNARK::::setup(&public_params).unwrap(); - let (_, aux_params) = public_params.into_parts(); - - ProvingParams { - aux_params, - vk_digest_primary: pk.pk_primary.vk_digest, - vk_digest_secondary: pk.pk_secondary.vk_digest, - } - .to_bytes() -} +// /// Create a setup for a given list of R1CS files including the necessary +// /// setup for compressed proving. +// /// +// /// # Arguments +// /// - `r1cs_files`: A list of r1cs files that are accessible by the program using the setup +// /// +// /// # Returns +// /// * `Result, ProofError>` - Bytes ready to be written to disk +// pub fn setup(r1cs_files: &[R1CSType], rom_length: usize) -> Vec { +// let setup_data = UninitializedSetup { +// r1cs_types: r1cs_files.to_vec(), +// witness_generator_types: vec![WitnessGeneratorType::Browser; r1cs_files.len()], +// max_rom_length: rom_length, +// }; + +// let public_params = program::setup(&setup_data); +// let (pk, _vk) = CompressedSNARK::::setup(&public_params).unwrap(); +// let (_, aux_params) = public_params.into_parts(); + +// ProvingParams { +// aux_params, +// vk_digest_primary: pk.pk_primary.vk_digest, +// vk_digest_secondary: pk.pk_secondary.vk_digest, +// } +// .to_bytes() +// } diff --git a/frontend/tests/examples/circuit_data/add_external.bin b/frontend/tests/examples/circuit_data/add_external.bin deleted file mode 100644 index 8e7d5348de58af7ffdd4cd8047581539d19aaf52..0000000000000000000000000000000000000000 GIT binary patch literal 0 
HcmV?d00001 literal 114 zcmXRf$t%`NFG?)P&@(VFWM_Z?CN2gRE+!5}5Wxf@m_Y;!hY}kHlK`UxGlan;!J@^* v%D~FT$Rs4d#gST3l3J9Pm?Ol(!7RWe#LvZET#{N4pP2^~U=(1ohnfulGzty8 diff --git a/frontend/tests/examples/circuit_data/add_external.circom b/frontend/tests/examples/circuit_data/add_external.circom deleted file mode 100644 index 137d8e2..0000000 --- a/frontend/tests/examples/circuit_data/add_external.circom +++ /dev/null @@ -1,13 +0,0 @@ -pragma circom 2.1.9; - -template AddIntoZeroth() { - signal input step_in[2]; - signal input external[2]; - - signal output step_out[2]; - - step_out[0] <== step_in[0] + external[0]; - step_out[1] <== step_in[1] + external[1]; -} - -component main {public [step_in] } = AddIntoZeroth(); \ No newline at end of file diff --git a/frontend/tests/examples/circuit_data/add_external.r1cs b/frontend/tests/examples/circuit_data/add_external.r1cs deleted file mode 100644 index 5c71f869f325dd4475b09be6efa20556d2be4327..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 152 zcmXRiOfF_*U|?VdVkRJl0g#vjl%oKof&34Xzdm%HSg^Cw*J SS`eR^C&a?RD8Qrz)dT>kxCs*g diff --git a/frontend/tests/examples/circuit_data/square_zeroth.circom b/frontend/tests/examples/circuit_data/square_zeroth.circom deleted file mode 100644 index 703a2f5..0000000 --- a/frontend/tests/examples/circuit_data/square_zeroth.circom +++ /dev/null @@ -1,12 +0,0 @@ -pragma circom 2.1.9; - -template SquareZeroth() { - signal input step_in[2]; - - signal output step_out[2]; - - step_out[0] <== step_in[0] * step_in[0]; - step_out[1] <== step_in[1]; -} - -component main { public [step_in] } = SquareZeroth(); \ No newline at end of file diff --git a/frontend/tests/examples/circuit_data/square_zeroth.r1cs b/frontend/tests/examples/circuit_data/square_zeroth.r1cs deleted file mode 100644 index ebfa91bbc5623e36315e8acf62c1c90e2af27611..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 356 
zcmXRiOfF_*U|?VdVkRIy1H>Qz69Y0oO#b@Nd1ArNN{<)D8nF?LjoVxUc4#gzd|2d{ zVgOSIqVWNcJX8%{^I&Fx+ycTtfLA>WPysP^JAh0A0tFxjs>f?4D^LxR{R|*J2!O;u W;iLiLAOV>DQ1^n>fTTfg0|5Zm1UwS} diff --git a/frontend/tests/examples/circuit_data/swap_memory.bin b/frontend/tests/examples/circuit_data/swap_memory.bin deleted file mode 100644 index 3da9b5c8f6a2cc05b843b5702bed3ba787451b27..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 68 zcmXRf$t%`NFG?)P&@(VFWM+T>CN2gRE+!5}5WytD#md0M$jBtb&&6I`l3Ea-nJ2`; L!6?9_3{?#Pc@zk< diff --git a/frontend/tests/examples/circuit_data/swap_memory.circom b/frontend/tests/examples/circuit_data/swap_memory.circom deleted file mode 100644 index 68604d9..0000000 --- a/frontend/tests/examples/circuit_data/swap_memory.circom +++ /dev/null @@ -1,12 +0,0 @@ -pragma circom 2.1.9; - -template SwapMemory() { - signal input step_in[2]; - - signal output step_out[2]; - - step_out[0] <== step_in[1]; - step_out[1] <== step_in[0]; -} - -component main {public [step_in] } = SwapMemory(); \ No newline at end of file diff --git a/frontend/tests/examples/circuit_data/swap_memory.r1cs b/frontend/tests/examples/circuit_data/swap_memory.r1cs deleted file mode 100644 index ba2766164c608b830fa07d38817318f081255344..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 320 zcmXRiOfF_*U|?VdVkRJ70mR4vBnFZPf)A6wK6IW~u(Q(Rg|S9#L}TMN*MJ?G3k)9? 
z`K1`J02M&hffeH;KxTq45a2Zv::from(3)]); -use client_side_prover::supernova::RecursiveSNARK; + let rom_circuit = NoirRomCircuit { circuit, circuit_index: 0, rom_size: 2 }; -use super::*; -use crate::program::{ - data::{CircuitData, NotExpanded, ProofParams, SetupParams}, - initialize_setup_data, -}; + let memory = NoirMemory { + circuits: vec![rom_circuit], + rom: vec![0, 0], + public_input: vec![ + F::::from(1), // Actual input + F::::from(2), // Actual input + F::::from(0), // PC + F::::from(0), // ROM + F::::from(0), // ROM + ], + }; -pub(crate) mod inputs; -mod witnesscalc; + let snark = run(&memory).unwrap(); + let zi = snark.zi_primary(); + dbg!(zi); + // First fold: + // step_out[0] == 3 * 1 + 2 + 1 == 6 + // step_out[1] == (3 + 3) * 2 + 1 == 13 + // Second fold: + // step_out[0] == 3 * 6 + 13 + 1 == 32 + // step_out[1] == (3 + 3) * 13 + 6 == 84 + assert_eq!(zi[0], F::::from(32)); + assert_eq!(zi[1], F::::from(84)); + assert_eq!(zi[2], F::::from(2)); + assert_eq!(zi[3], F::::from(0)); + assert_eq!(zi[4], F::::from(0)); +} + +#[test] +#[traced_test] +fn test_mock_noir_nivc() { + let mut add_external = NoirProgram::new(ADD_EXTERNAL); + add_external.set_private_inputs(vec![F::::from(5), F::::from(7)]); + let add_external = + NoirRomCircuit { circuit: add_external, circuit_index: 0, rom_size: 3 }; + + // TODO: The issue is the private inputs need to be an empty vector or else this isn't computed at + // all. Be careful, this is insanely touchy and I hate that it is this way. 
+ let mut square_zeroth = NoirProgram::new(SQUARE_ZEROTH); + square_zeroth.set_private_inputs(vec![]); + let square_zeroth = + NoirRomCircuit { circuit: square_zeroth, circuit_index: 1, rom_size: 3 }; + let mut swap_memory = NoirProgram::new(SWAP_MEMORY); + swap_memory.set_private_inputs(vec![]); + let swap_memory = + NoirRomCircuit { circuit: swap_memory, circuit_index: 2, rom_size: 3 }; + + let memory = NoirMemory { + circuits: vec![add_external, square_zeroth, swap_memory], + rom: vec![0, 1, 2], + public_input: vec![ + F::::from(1), // Actual input + F::::from(2), // Actual input + F::::from(0), // PC + F::::from(0), // ROM + F::::from(1), // ROM + F::::from(2), // ROM + ], + }; + + let snark = run(&memory).unwrap(); + let zi = snark.zi_primary(); + dbg!(zi); + // First fold: + // step_out[0] == 1 + 5 == 6 + // step_out[1] == 2 + 7 == 9 + // Second fold: + // step_out[0] == 6 ** 2 == 36 + // step_out[1] == 9 + // Third fold: + // step_out[0] == 9 + // step_out[1] == 36 + assert_eq!(zi[0], F::::from(9)); + assert_eq!(zi[1], F::::from(36)); + assert_eq!(zi[2], F::::from(3)); + assert_eq!(zi[3], F::::from(0)); + assert_eq!(zi[4], F::::from(1)); + assert_eq!(zi[5], F::::from(2)); +} diff --git a/frontend/tests/witnesscalc.rs b/frontend/tests/witnesscalc.rs deleted file mode 100644 index a06f627..0000000 --- a/frontend/tests/witnesscalc.rs +++ /dev/null @@ -1,152 +0,0 @@ -use std::{str::FromStr, sync::Arc}; - -use serde_json::json; - -use super::*; -use crate::{ - program::data::{R1CSType, UninitializedSetup, WitnessGeneratorType}, - tests::inputs::{ - ADD_EXTERNAL_GRAPH, ADD_EXTERNAL_R1CS, EXTERNAL_INPUTS, SQUARE_ZEROTH_GRAPH, - SQUARE_ZEROTH_R1CS, SWAP_MEMORY_GRAPH, SWAP_MEMORY_R1CS, - }, -}; - -const MAX_ROM_LENGTH: usize = 10; -const TEST_OFFLINE_PATH: &str = "src/tests/test_run_serialized_verify.bytes"; - -fn get_setup_data() -> UninitializedSetup { - UninitializedSetup { - r1cs_types: vec![ - R1CSType::Raw(ADD_EXTERNAL_R1CS.to_vec()), - 
R1CSType::Raw(SQUARE_ZEROTH_R1CS.to_vec()), - R1CSType::Raw(SWAP_MEMORY_R1CS.to_vec()), - ], - witness_generator_types: vec![ - WitnessGeneratorType::Raw(ADD_EXTERNAL_GRAPH.to_vec()), - WitnessGeneratorType::Raw(SQUARE_ZEROTH_GRAPH.to_vec()), - WitnessGeneratorType::Raw(SWAP_MEMORY_GRAPH.to_vec()), - ], - max_rom_length: MAX_ROM_LENGTH, - } -} - -async fn run_entry( - setup_data: UninitializedSetup, -) -> Result<(SetupParams, RecursiveSNARK), ProofError> { - let mut external_input0: HashMap = HashMap::new(); - external_input0.insert("external".to_string(), json!(EXTERNAL_INPUTS[0])); - let mut external_input1: HashMap = HashMap::new(); - external_input1.insert("external".to_string(), json!(EXTERNAL_INPUTS[1])); - let rom_data = HashMap::from([ - (String::from("ADD_EXTERNAL"), CircuitData { opcode: 0 }), - (String::from("SQUARE_ZEROTH"), CircuitData { opcode: 1 }), - (String::from("SWAP_MEMORY"), CircuitData { opcode: 2 }), - ]); - - let mut private_inputs = vec![]; - - let mut rom = vec![String::from("ADD_EXTERNAL")]; - private_inputs.push(external_input0); - - rom.push(String::from("SQUARE_ZEROTH")); - private_inputs.push(HashMap::new()); - - rom.push(String::from("SWAP_MEMORY")); - private_inputs.push(HashMap::new()); - - rom.push(String::from("ADD_EXTERNAL")); - private_inputs.push(external_input1); - - rom.push(String::from("SQUARE_ZEROTH")); - private_inputs.push(HashMap::new()); - - rom.push(String::from("SWAP_MEMORY")); - private_inputs.push(HashMap::new()); - let public_params = program::setup(&setup_data); - let initialized_setup = initialize_setup_data(&setup_data)?; - - let setup_params = SetupParams:: { - public_params: Arc::new(public_params), - setup_data: Arc::new(initialized_setup), - rom_data, - vk_digest_primary: F::::ZERO, - vk_digest_secondary: F::::ZERO, - }; - let proof_params = ProofParams { rom }; - let instance_params = InstanceParams:: { - nivc_input: vec![F::::from(1), F::::from(2)], - private_inputs: (private_inputs, HashMap::new()), - } 
- .into_expanded(&proof_params)?; - let recursive_snark = program::run(&setup_params, &proof_params, &instance_params).await?; - Ok((setup_params, recursive_snark)) -} - -#[tokio::test] -#[tracing_test::traced_test] -async fn test_run() { - let setup_data = get_setup_data(); - let (_, proof) = run_entry(setup_data).await.unwrap(); - // [1,2] + [5,7] - // --> [6,9] - // --> [36,9] - // --> [9,36] + [13,1] - // --> [22,37] - // --> [484,37] - // [37,484] - let final_mem = [ - F::::from(37), - F::::from(484), - F::::from(6), - F::::from(0), - F::::from(1), - F::::from(2), - F::::from(0), - F::::from(1), - F::::from(2), - F::::from(u64::MAX), - F::::from(u64::MAX), - F::::from(u64::MAX), - F::::from(u64::MAX), - ]; - assert_eq!(&final_mem.to_vec(), proof.zi_primary()); -} - -#[tokio::test] -#[tracing_test::traced_test] -async fn test_run_serialized_verify() { - let setup_data = get_setup_data(); - let (instance_params, recursive_snark) = run_entry(setup_data.clone()).await.unwrap(); - - // Pseudo-offline the `SetupParams` and regenerate it - let mut setup_params = - instance_params.into_offline(PathBuf::from_str(TEST_OFFLINE_PATH).unwrap()).unwrap(); - setup_params.setup_data = setup_data.clone(); - let setup_params = setup_params.into_online().unwrap(); - - // Create the compressed proof with the offlined `PublicParams` - let proof = program::compress_proof(&recursive_snark, &setup_params.public_params).unwrap(); - let serialized_compressed_proof = proof.serialize().unwrap(); - let proof = serialized_compressed_proof.deserialize().unwrap(); - - // Extend the initial state input with the ROM (happens internally inside `program::run`, so - // we do it out here just for the test) - let mut z0_primary = vec![F::::ONE, F::::from(2)]; - z0_primary.push(F::::ZERO); - let mut rom = vec![ - F::::ZERO, - F::::ONE, - F::::from(2), - F::::ZERO, - F::::ONE, - F::::from(2), - ]; - rom.resize(MAX_ROM_LENGTH, F::::from(u64::MAX)); - z0_primary.extend_from_slice(&rom); - - // Check 
that it verifies with offlined `PublicParams` regenerated pkey vkey - let (_pk, vk) = CompressedSNARK::::setup(&setup_params.public_params).unwrap(); - let res = proof.proof.verify(&setup_params.public_params, &vk, &z0_primary, &[F::::ZERO]); - assert!(res.is_ok()); - std::fs::remove_file(PathBuf::from_str(TEST_OFFLINE_PATH).unwrap()).unwrap(); -} From 23226f230fad3d86ea7734d9bd7c4ae6ed57115a Mon Sep 17 00:00:00 2001 From: Colin Roberts Date: Fri, 28 Feb 2025 16:22:24 -0700 Subject: [PATCH 10/51] cleanup --- examples/add_external/src/main.nr | 4 +- .../add_external/target/add_external.json | 2 +- examples/square_zeroth/src/main.nr | 2 +- .../square_zeroth/target/square_zeroth.json | 1 + examples/swap_memory/src/main.nr | 2 +- examples/swap_memory/target/swap_memory.json | 2 +- frontend/src/noir.rs | 60 +++++++++++-- frontend/tests/ivc/mod.rs | 88 +++++++++++++++++++ frontend/tests/lib.rs | 22 +++++ frontend/tests/mod.rs | 87 ------------------ 10 files changed, 171 insertions(+), 99 deletions(-) create mode 100644 examples/square_zeroth/target/square_zeroth.json create mode 100644 frontend/tests/ivc/mod.rs create mode 100644 frontend/tests/lib.rs delete mode 100644 frontend/tests/mod.rs diff --git a/examples/add_external/src/main.nr b/examples/add_external/src/main.nr index f3b1e05..091a944 100644 --- a/examples/add_external/src/main.nr +++ b/examples/add_external/src/main.nr @@ -1,8 +1,8 @@ /// Add two external values to two registers that are folded across circuits. 
pub fn main( external: [Field; 2], - registers: [Field; 2], - next_pc: Field, + registers: pub [Field; 2], + next_pc: pub Field, ) -> pub (Field, [Field; 2]) { (next_pc, [external[0] + registers[0], external[1] + registers[1]]) } diff --git a/examples/add_external/target/add_external.json b/examples/add_external/target/add_external.json index d3c7a02..3d40c5d 100644 --- a/examples/add_external/target/add_external.json +++ b/examples/add_external/target/add_external.json @@ -1 +1 @@ -{"noir_version":"1.0.0-beta.2+1a2a08cbcb68646ff1aaef383cfc1798933c1355","hash":10862577844004020612,"abi":{"parameters":[{"name":"external","type":{"kind":"array","length":2,"type":{"kind":"field"}},"visibility":"private"},{"name":"registers","type":{"kind":"array","length":2,"type":{"kind":"field"}},"visibility":"private"},{"name":"next_pc","type":{"kind":"field"},"visibility":"private"}],"return_type":{"abi_type":{"kind":"tuple","fields":[{"kind":"array","length":2,"type":{"kind":"field"}},{"kind":"field"}]},"visibility":"public"},"error_types":{"6485997221020871071":{"error_kind":"string","string":"call to assert_max_bit_size"},"17843811134343075018":{"error_kind":"string","string":"Stack too 
deep"}}},"bytecode":"H4sIAAAAAAAA/+1bXXLbNhBeiXJi2XF+bDmJf6I6sWtP00wLECRBTqcznmmSQ/SNEslz6CZ97gF6ox4kpkNEK5oEI2PXljLeGQ8lrfRh91tgdwmYPfgq8urvY/V6cPXnXf314KaYzy6rqxJREOTaz6WSqfCTSRyKIJxEsYxlGIeZHyuVx0Gsk0miRSIDlcsiTFQhvsoAYQk3UdjeTU6DS/B+DVe4idqkw5JDQn+5OBw24Ao3UUM6rGsbAcW5SajsxpjEnEjCBSbwAttCtpJPEMJAXhPQg+/LZsJNSMnGE2+7uj4pbeYge4uBjC1CMrgmWWljn9FvVxt3mDkUbiLLibnDEJunQJu1TDtT4s4qXGounjBx8YyJi2cWLlxt5poXv8C95pRph31sc+A9rHYeGFR+U+dSwnhLSg4Hla8U3WHXnMKYHPmbihPcYD0HS3co3ERSFkWcELHRS9osu8bZqeEbXNfE8CvQJgYjgyVjtkwj4+rzC1jNBEMZCzwvS39nsCjLxqeLc8r47CIsqdTV2si0LLJChTrxJzJSUVQEhY7iICvCIM10LoNU+UmuRSHjPNehmuqoSLJpVOCkLTOlgiyZTGXoR+lExJlKRRFo5Ys0UzrLVBxFqVJZFBdxEvt+WqhYhFonIvJV4nPFZ7chPtTbQ5Tx2WPiYY+Ahzbb8+JaSOfpiImHEQEPd7le95l42K94sDVLq9zQNJjLslf9EtawWcJGczRLL4G+WfoAd9Ms3dZOjiT3ihCLslmijAWel6W/M1gUj3gevQa6poSyweHi9DWw7ZopswtLFaNqDeVDQizKHd0DoC0iJkYHDjHqyke3jVHHsMGQEIsyRoeE3OEYHTrEqMunW8ZoWuhpkSRZmMSxzkSk40ki0yLLolznqU6XiVEnFmWMjgi5wzE6gnnTuu7HhsfV9Q00HBsKN5Ftx2eUZ7+uWIRHkJIj8NTFj5tDV6zxisejXDBjoL/x+Al4GrMSd1bhUnPxhomLEyYuToDvOJNrXvxGyMUt/O48zuSaA78z+y3c5Logl35TH2cSxltScvijHGceA21uMfIWGHfoCIviQkLERlPv0I1r+AbXNTEIoE0MRqiPM8eEPr+D1UwwlLHA87L0dwaLQn2cSRmfU6A7zsRJ23W3jys+p9B9bANLxqtDWP8DmqsonMEaFgVsNEdROAP6oiBhNYpCh/iUSednWM2iQBkLPC9Lf2ewKNS7Iuewmomci9NzmCfydX+e64LT4AugP9e6ILQRLAETbkL+lIYR6sVLidUn5G9jTfjrEWJ5hPw9YuKvT8wfYWKTj4E24ZtGZgP536s48JAez6vyuyX3pS27SP+4uv7z7/i/o//7fyPV9TObbboXFt2eRbdv0b2y6A4surFFd2LRvbPoziy6c4vuvUX3waILLLrIovvDovuzRWfmx2F1NXE2a8g0FZfVe+EmcohwqfFjEeghLAqx/WqIMBnwfYM/4MEXZo1/nC3iQ21c8yQTbsTMb4wO7w58qnTls+xPYf76OcIr5TMa12APa3jEcyJijpnca7Hf+FbKXzP4Jrcc98ZN+HbF8WgOvTD2oxrP2MadBp353TayezCj50zH890Ur8LfQONjGSA9/r6pAVvIfnN1qdeFTmWh0iIN0ywLpuluDR8QhyVPpjY85E2rKM41Lip+GNd4Y95sWu+2vNmVGz8h7F5N97lhXE6fr+ZEyFyL/D1orz93kTdNr+g12FHPJx60xxwaPutBe+6tj+E14Bm/PbgZgzbbmniEhs+abPM6bBs02FbH7Vv8aBsH21SvTf0Ge9a9bp1W7++7bpn7nYe6ZZWHfh8e+v2a3Hu/fzyHhvoeUFvN2ah91+yvlPcIoxa877l/8Fp+h/cT6hgOnNzgqMmOXoMd61433lbv77tuGJ7MdR/o+RKobox48L/lRSz76PWo5ifOfZdENhg8vC9bl35NZ767WbOvR2+frNviNYxlxOzz4lx
i+PwC+7s5zGlSAAA=","debug_symbols":"7ZzbbuIwEED/Jc95sD3jG7+yWlXcWiEhQFxWWiH+fRNUJ2ygoSYGu5O8NTCZOieO7WPsHLPZfHL4eFus3te7bPTrmC3X0/F+sV4VR8eMnT/abcar8mi3H2/32UgwlWfz1az4i6tTnr0vlvNspMQpvwq1YPAz1iLTVTC3N4JBMfkZDIpDHWxuBQsULliguQz+nWf8h5Zb/NByQ7rlBu0yA1jZKDemW27kVblR2PZgYcBlFgb1nWCOWF8fbxCRfSQiOLhgAao9WGoXK629vLwCnhrgPQ5PD/Aeh2d6CU/aCp4Rj8OzA7zH4fGEx4jPoweau2Cwujn6THj4GQtJwiPbWEgSHjQ/D4lkTiOERN4ezKVxmbli0OSHA79O/Ho51Pfip8E9v1xL7NBH9lIM4qDupUbEQd1L6fBDXeHj2nbQO95LRYmCWpDxmXrwaQy8aB5CkFGfKPTIWJIPvSidlyBjX+mjxgH1qzovMk6XPmoqTmexmgi29yaC22cUBBX3CoiEiiN5IQnWc1DRnij0gIrJxKFHxWT86AXqHYGKycShl7KcWOaCJZMhp3tCWTTgQK8DvZQV4mn0oogxpKwQxFCnrCaJoA7VeaWsPMRQp+xHXqh9pnta1y9jytLzNCShOn9MWXrSp5ey9KRPj4z0eNELtAwXcaDXgR4Z6fGh17o8FcnISTgkZCQiHBIyg/2n/eDY+vsQ9nIEH46f7OVwP8oUiuylG8RB3UuRiIO6l9YRZbZK4oD6Vaip+Ey4lTiSis8ERELFZ6IsEJFU1CcOPSriE4WeoqI9UZbXKComE4feXTmxl/TKM8D7DPQ7oziYbBfL5eLjrfn+suMXW0I5Z7rqGzmz5r/LPJ3fIfbluYxXzzGDO3dJM1dXNBeNHvj2Fspw6W339OBqgkbbSH97q5xnejc61upq7TTvnt64B0RfTV3e3mwVLj10Tm+Eq+8Gm7f29qYaz/Su/hupmull9/Ta3VpzDUcFTG8b9f50fo9ey3Mv6sfefK9Kc6ybCnWn7bzTsBRHf8bbxXiynJdNVPnlYTV1LVZxuP+7cd+4Nm2zXU/ns8N2XrZuFw1bWVjBciHr9qr8CFgOssZwjjI5QPHPiwL8Aw==","file_map":{"18":{"source":"use crate::field::field_less_than;\nuse crate::runtime::is_unconstrained;\n\n// The low and high decomposition of the field modulus\nglobal PLO: Field = 53438638232309528389504892708671455233;\nglobal PHI: Field = 64323764613183177041862057485226039389;\n\npub(crate) global TWO_POW_128: Field = 0x100000000000000000000000000000000;\nglobal TWO_POW_64: Field = 0x10000000000000000;\n\n// Decomposes a single field into two 16 byte fields.\nfn compute_decomposition(mut x: Field) -> (Field, Field) {\n // Here's we're taking advantage of truncating 64 bit limbs from the input field\n // and then subtracting them from the input such the field division is equivalent to integer division.\n let low_lower_64 = (x as u64) as Field;\n x = (x - low_lower_64) / TWO_POW_64;\n let low_upper_64 = (x as u64) as Field;\n\n let 
high = (x - low_upper_64) / TWO_POW_64;\n let low = low_upper_64 * TWO_POW_64 + low_lower_64;\n\n (low, high)\n}\n\npub(crate) unconstrained fn decompose_hint(x: Field) -> (Field, Field) {\n compute_decomposition(x)\n}\n\nunconstrained fn lte_hint(x: Field, y: Field) -> bool {\n if x == y {\n true\n } else {\n field_less_than(x, y)\n }\n}\n\n// Assert that (alo > blo && ahi >= bhi) || (alo <= blo && ahi > bhi)\nfn assert_gt_limbs(a: (Field, Field), b: (Field, Field)) {\n let (alo, ahi) = a;\n let (blo, bhi) = b;\n /// Safety: borrow is enforced to be boolean due to its type.\n /// if borrow is 0, it asserts that (alo > blo && ahi >= bhi)\n /// if borrow is 1, it asserts that (alo <= blo && ahi > bhi)\n unsafe {\n let borrow = lte_hint(alo, blo);\n\n let rlo = alo - blo - 1 + (borrow as Field) * TWO_POW_128;\n let rhi = ahi - bhi - (borrow as Field);\n\n rlo.assert_max_bit_size::<128>();\n rhi.assert_max_bit_size::<128>();\n }\n}\n\n/// Decompose a single field into two 16 byte fields.\npub fn decompose(x: Field) -> (Field, Field) {\n if is_unconstrained() {\n compute_decomposition(x)\n } else {\n /// Safety: decomposition is properly checked below\n unsafe {\n // Take hints of the decomposition\n let (xlo, xhi) = decompose_hint(x);\n\n // Range check the limbs\n xlo.assert_max_bit_size::<128>();\n xhi.assert_max_bit_size::<128>();\n\n // Check that the decomposition is correct\n assert_eq(x, xlo + TWO_POW_128 * xhi);\n\n // Assert that the decomposition of P is greater than the decomposition of x\n assert_gt_limbs((PLO, PHI), (xlo, xhi));\n (xlo, xhi)\n }\n }\n}\n\npub fn assert_gt(a: Field, b: Field) {\n if is_unconstrained() {\n assert(\n /// Safety: already unconstrained\n unsafe { field_less_than(b, a) },\n );\n } else {\n // Decompose a and b\n let a_limbs = decompose(a);\n let b_limbs = decompose(b);\n\n // Assert that a_limbs is greater than b_limbs\n assert_gt_limbs(a_limbs, b_limbs)\n }\n}\n\npub fn assert_lt(a: Field, b: Field) {\n assert_gt(b, 
a);\n}\n\npub fn gt(a: Field, b: Field) -> bool {\n if is_unconstrained() {\n /// Safety: unsafe in unconstrained\n unsafe {\n field_less_than(b, a)\n }\n } else if a == b {\n false\n } else {\n /// Safety: Take a hint of the comparison and verify it\n unsafe {\n if field_less_than(a, b) {\n assert_gt(b, a);\n false\n } else {\n assert_gt(a, b);\n true\n }\n }\n }\n}\n\npub fn lt(a: Field, b: Field) -> bool {\n gt(b, a)\n}\n\nmod tests {\n // TODO: Allow imports from \"super\"\n use crate::field::bn254::{assert_gt, decompose, gt, lte_hint, PHI, PLO, TWO_POW_128};\n\n #[test]\n fn check_decompose() {\n assert_eq(decompose(TWO_POW_128), (0, 1));\n assert_eq(decompose(TWO_POW_128 + 0x1234567890), (0x1234567890, 1));\n assert_eq(decompose(0x1234567890), (0x1234567890, 0));\n }\n\n #[test]\n unconstrained fn check_decompose_unconstrained() {\n assert_eq(decompose(TWO_POW_128), (0, 1));\n assert_eq(decompose(TWO_POW_128 + 0x1234567890), (0x1234567890, 1));\n assert_eq(decompose(0x1234567890), (0x1234567890, 0));\n }\n\n #[test]\n unconstrained fn check_lte_hint() {\n assert(lte_hint(0, 1));\n assert(lte_hint(0, 0x100));\n assert(lte_hint(0x100, TWO_POW_128 - 1));\n assert(!lte_hint(0 - 1, 0));\n\n assert(lte_hint(0, 0));\n assert(lte_hint(0x100, 0x100));\n assert(lte_hint(0 - 1, 0 - 1));\n }\n\n #[test]\n fn check_assert_gt() {\n assert_gt(1, 0);\n assert_gt(0x100, 0);\n assert_gt((0 - 1), (0 - 2));\n assert_gt(TWO_POW_128, 0);\n assert_gt(0 - 1, 0);\n }\n\n #[test]\n unconstrained fn check_assert_gt_unconstrained() {\n assert_gt(1, 0);\n assert_gt(0x100, 0);\n assert_gt((0 - 1), (0 - 2));\n assert_gt(TWO_POW_128, 0);\n assert_gt(0 - 1, 0);\n }\n\n #[test]\n fn check_gt() {\n assert(gt(1, 0));\n assert(gt(0x100, 0));\n assert(gt((0 - 1), (0 - 2)));\n assert(gt(TWO_POW_128, 0));\n assert(!gt(0, 0));\n assert(!gt(0, 0x100));\n assert(gt(0 - 1, 0 - 2));\n assert(!gt(0 - 2, 0 - 1));\n }\n\n #[test]\n unconstrained fn check_gt_unconstrained() {\n assert(gt(1, 0));\n 
assert(gt(0x100, 0));\n assert(gt((0 - 1), (0 - 2)));\n assert(gt(TWO_POW_128, 0));\n assert(!gt(0, 0));\n assert(!gt(0, 0x100));\n assert(gt(0 - 1, 0 - 2));\n assert(!gt(0 - 2, 0 - 1));\n }\n\n #[test]\n fn check_plo_phi() {\n assert_eq(PLO + PHI * TWO_POW_128, 0);\n let p_bytes = crate::field::modulus_le_bytes();\n let mut p_low: Field = 0;\n let mut p_high: Field = 0;\n\n let mut offset = 1;\n for i in 0..16 {\n p_low += (p_bytes[i] as Field) * offset;\n p_high += (p_bytes[i + 16] as Field) * offset;\n offset *= 256;\n }\n assert_eq(p_low, PLO);\n assert_eq(p_high, PHI);\n }\n}\n","path":"std/field/bn254.nr"},"19":{"source":"pub mod bn254;\nuse crate::{runtime::is_unconstrained, static_assert};\nuse bn254::lt as bn254_lt;\n\nimpl Field {\n /// Asserts that `self` can be represented in `bit_size` bits.\n ///\n /// # Failures\n /// Causes a constraint failure for `Field` values exceeding `2^{bit_size}`.\n // docs:start:assert_max_bit_size\n pub fn assert_max_bit_size(self) {\n // docs:end:assert_max_bit_size\n static_assert(\n BIT_SIZE < modulus_num_bits() as u32,\n \"BIT_SIZE must be less than modulus_num_bits\",\n );\n self.__assert_max_bit_size(BIT_SIZE);\n }\n\n #[builtin(apply_range_constraint)]\n fn __assert_max_bit_size(self, bit_size: u32) {}\n\n /// Decomposes `self` into its little endian bit decomposition as a `[u1; N]` array.\n /// This slice will be zero padded should not all bits be necessary to represent `self`.\n ///\n /// # Failures\n /// Causes a constraint failure for `Field` values exceeding `2^N` as the resulting slice will not\n /// be able to represent the original `Field`.\n ///\n /// # Safety\n /// Values of `N` equal to or greater than the number of bits necessary to represent the `Field` modulus\n /// (e.g. 254 for the BN254 field) allow for multiple bit decompositions. 
This is due to how the `Field` will\n /// wrap around due to overflow when verifying the decomposition.\n #[builtin(to_le_bits)]\n fn _to_le_bits(self: Self) -> [u1; N] {}\n\n /// Decomposes `self` into its big endian bit decomposition as a `[u1; N]` array.\n /// This array will be zero padded should not all bits be necessary to represent `self`.\n ///\n /// # Failures\n /// Causes a constraint failure for `Field` values exceeding `2^N` as the resulting slice will not\n /// be able to represent the original `Field`.\n ///\n /// # Safety\n /// Values of `N` equal to or greater than the number of bits necessary to represent the `Field` modulus\n /// (e.g. 254 for the BN254 field) allow for multiple bit decompositions. This is due to how the `Field` will\n /// wrap around due to overflow when verifying the decomposition.\n #[builtin(to_be_bits)]\n fn _to_be_bits(self: Self) -> [u1; N] {}\n\n /// Decomposes `self` into its little endian bit decomposition as a `[u1; N]` array.\n /// This slice will be zero padded should not all bits be necessary to represent `self`.\n ///\n /// # Failures\n /// Causes a constraint failure for `Field` values exceeding `2^N` as the resulting slice will not\n /// be able to represent the original `Field`.\n ///\n /// # Safety\n /// The bit decomposition returned is canonical and is guaranteed to not overflow the modulus.\n // docs:start:to_le_bits\n pub fn to_le_bits(self: Self) -> [u1; N] {\n // docs:end:to_le_bits\n let bits = self._to_le_bits();\n\n if !is_unconstrained() {\n // Ensure that the byte decomposition does not overflow the modulus\n let p = modulus_le_bits();\n assert(bits.len() <= p.len());\n let mut ok = bits.len() != p.len();\n for i in 0..N {\n if !ok {\n if (bits[N - 1 - i] != p[N - 1 - i]) {\n assert(p[N - 1 - i] == 1);\n ok = true;\n }\n }\n }\n assert(ok);\n }\n bits\n }\n\n /// Decomposes `self` into its big endian bit decomposition as a `[u1; N]` array.\n /// This array will be zero padded should not all bits be 
necessary to represent `self`.\n ///\n /// # Failures\n /// Causes a constraint failure for `Field` values exceeding `2^N` as the resulting slice will not\n /// be able to represent the original `Field`.\n ///\n /// # Safety\n /// The bit decomposition returned is canonical and is guaranteed to not overflow the modulus.\n // docs:start:to_be_bits\n pub fn to_be_bits(self: Self) -> [u1; N] {\n // docs:end:to_be_bits\n let bits = self._to_be_bits();\n\n if !is_unconstrained() {\n // Ensure that the decomposition does not overflow the modulus\n let p = modulus_be_bits();\n assert(bits.len() <= p.len());\n let mut ok = bits.len() != p.len();\n for i in 0..N {\n if !ok {\n if (bits[i] != p[i]) {\n assert(p[i] == 1);\n ok = true;\n }\n }\n }\n assert(ok);\n }\n bits\n }\n\n /// Decomposes `self` into its little endian byte decomposition as a `[u8;N]` array\n /// This array will be zero padded should not all bytes be necessary to represent `self`.\n ///\n /// # Failures\n /// The length N of the array must be big enough to contain all the bytes of the 'self',\n /// and no more than the number of bytes required to represent the field modulus\n ///\n /// # Safety\n /// The result is ensured to be the canonical decomposition of the field element\n // docs:start:to_le_bytes\n pub fn to_le_bytes(self: Self) -> [u8; N] {\n // docs:end:to_le_bytes\n static_assert(\n N <= modulus_le_bytes().len(),\n \"N must be less than or equal to modulus_le_bytes().len()\",\n );\n // Compute the byte decomposition\n let bytes = self.to_le_radix(256);\n\n if !is_unconstrained() {\n // Ensure that the byte decomposition does not overflow the modulus\n let p = modulus_le_bytes();\n assert(bytes.len() <= p.len());\n let mut ok = bytes.len() != p.len();\n for i in 0..N {\n if !ok {\n if (bytes[N - 1 - i] != p[N - 1 - i]) {\n assert(bytes[N - 1 - i] < p[N - 1 - i]);\n ok = true;\n }\n }\n }\n assert(ok);\n }\n bytes\n }\n\n /// Decomposes `self` into its big endian byte decomposition as a `[u8;N]` 
array of length required to represent the field modulus\n /// This array will be zero padded should not all bytes be necessary to represent `self`.\n ///\n /// # Failures\n /// The length N of the array must be big enough to contain all the bytes of the 'self',\n /// and no more than the number of bytes required to represent the field modulus\n ///\n /// # Safety\n /// The result is ensured to be the canonical decomposition of the field element\n // docs:start:to_be_bytes\n pub fn to_be_bytes(self: Self) -> [u8; N] {\n // docs:end:to_be_bytes\n static_assert(\n N <= modulus_le_bytes().len(),\n \"N must be less than or equal to modulus_le_bytes().len()\",\n );\n // Compute the byte decomposition\n let bytes = self.to_be_radix(256);\n\n if !is_unconstrained() {\n // Ensure that the byte decomposition does not overflow the modulus\n let p = modulus_be_bytes();\n assert(bytes.len() <= p.len());\n let mut ok = bytes.len() != p.len();\n for i in 0..N {\n if !ok {\n if (bytes[i] != p[i]) {\n assert(bytes[i] < p[i]);\n ok = true;\n }\n }\n }\n assert(ok);\n }\n bytes\n }\n\n // docs:start:to_le_radix\n pub fn to_le_radix(self: Self, radix: u32) -> [u8; N] {\n // Brillig does not need an immediate radix\n if !crate::runtime::is_unconstrained() {\n static_assert(1 < radix, \"radix must be greater than 1\");\n static_assert(radix <= 256, \"radix must be less than or equal to 256\");\n static_assert(radix & (radix - 1) == 0, \"radix must be a power of 2\");\n }\n self.__to_le_radix(radix)\n }\n // docs:end:to_le_radix\n\n // docs:start:to_be_radix\n pub fn to_be_radix(self: Self, radix: u32) -> [u8; N] {\n // Brillig does not need an immediate radix\n if !crate::runtime::is_unconstrained() {\n crate::assert_constant(radix);\n }\n self.__to_be_radix(radix)\n }\n // docs:end:to_be_radix\n\n // `_radix` must be less than 256\n #[builtin(to_le_radix)]\n fn __to_le_radix(self, radix: u32) -> [u8; N] {}\n\n // `_radix` must be less than 256\n #[builtin(to_be_radix)]\n fn 
__to_be_radix(self, radix: u32) -> [u8; N] {}\n\n // Returns self to the power of the given exponent value.\n // Caution: we assume the exponent fits into 32 bits\n // using a bigger bit size impacts negatively the performance and should be done only if the exponent does not fit in 32 bits\n pub fn pow_32(self, exponent: Field) -> Field {\n let mut r: Field = 1;\n let b: [u1; 32] = exponent.to_le_bits();\n\n for i in 1..33 {\n r *= r;\n r = (b[32 - i] as Field) * (r * self) + (1 - b[32 - i] as Field) * r;\n }\n r\n }\n\n // Parity of (prime) Field element, i.e. sgn0(x mod p) = 0 if x `elem` {0, ..., p-1} is even, otherwise sgn0(x mod p) = 1.\n pub fn sgn0(self) -> u1 {\n self as u1\n }\n\n pub fn lt(self, another: Field) -> bool {\n if crate::compat::is_bn254() {\n bn254_lt(self, another)\n } else {\n lt_fallback(self, another)\n }\n }\n\n /// Convert a little endian byte array to a field element.\n /// If the provided byte array overflows the field modulus then the Field will silently wrap around.\n pub fn from_le_bytes(bytes: [u8; N]) -> Field {\n static_assert(\n N <= modulus_le_bytes().len(),\n \"N must be less than or equal to modulus_le_bytes().len()\",\n );\n let mut v = 1;\n let mut result = 0;\n\n for i in 0..N {\n result += (bytes[i] as Field) * v;\n v = v * 256;\n }\n result\n }\n\n /// Convert a big endian byte array to a field element.\n /// If the provided byte array overflows the field modulus then the Field will silently wrap around.\n pub fn from_be_bytes(bytes: [u8; N]) -> Field {\n let mut v = 1;\n let mut result = 0;\n\n for i in 0..N {\n result += (bytes[N - 1 - i] as Field) * v;\n v = v * 256;\n }\n result\n }\n}\n\n#[builtin(modulus_num_bits)]\npub comptime fn modulus_num_bits() -> u64 {}\n\n#[builtin(modulus_be_bits)]\npub comptime fn modulus_be_bits() -> [u1] {}\n\n#[builtin(modulus_le_bits)]\npub comptime fn modulus_le_bits() -> [u1] {}\n\n#[builtin(modulus_be_bytes)]\npub comptime fn modulus_be_bytes() -> [u8] 
{}\n\n#[builtin(modulus_le_bytes)]\npub comptime fn modulus_le_bytes() -> [u8] {}\n\n/// An unconstrained only built in to efficiently compare fields.\n#[builtin(field_less_than)]\nunconstrained fn __field_less_than(x: Field, y: Field) -> bool {}\n\npub(crate) unconstrained fn field_less_than(x: Field, y: Field) -> bool {\n __field_less_than(x, y)\n}\n\n// Convert a 32 byte array to a field element by modding\npub fn bytes32_to_field(bytes32: [u8; 32]) -> Field {\n // Convert it to a field element\n let mut v = 1;\n let mut high = 0 as Field;\n let mut low = 0 as Field;\n\n for i in 0..16 {\n high = high + (bytes32[15 - i] as Field) * v;\n low = low + (bytes32[16 + 15 - i] as Field) * v;\n v = v * 256;\n }\n // Abuse that a % p + b % p = (a + b) % p and that low < p\n low + high * v\n}\n\nfn lt_fallback(x: Field, y: Field) -> bool {\n if is_unconstrained() {\n /// Safety: unconstrained context\n unsafe {\n field_less_than(x, y)\n }\n } else {\n let x_bytes: [u8; 32] = x.to_le_bytes();\n let y_bytes: [u8; 32] = y.to_le_bytes();\n let mut x_is_lt = false;\n let mut done = false;\n for i in 0..32 {\n if (!done) {\n let x_byte = x_bytes[32 - 1 - i] as u8;\n let y_byte = y_bytes[32 - 1 - i] as u8;\n let bytes_match = x_byte == y_byte;\n if !bytes_match {\n x_is_lt = x_byte < y_byte;\n done = true;\n }\n }\n }\n x_is_lt\n }\n}\n\nmod tests {\n use crate::{panic::panic, runtime};\n use super::field_less_than;\n\n #[test]\n // docs:start:to_be_bits_example\n fn test_to_be_bits() {\n let field = 2;\n let bits: [u1; 8] = field.to_be_bits();\n assert_eq(bits, [0, 0, 0, 0, 0, 0, 1, 0]);\n }\n // docs:end:to_be_bits_example\n\n #[test]\n // docs:start:to_le_bits_example\n fn test_to_le_bits() {\n let field = 2;\n let bits: [u1; 8] = field.to_le_bits();\n assert_eq(bits, [0, 1, 0, 0, 0, 0, 0, 0]);\n }\n // docs:end:to_le_bits_example\n\n #[test]\n // docs:start:to_be_bytes_example\n fn test_to_be_bytes() {\n let field = 2;\n let bytes: [u8; 8] = field.to_be_bytes();\n 
assert_eq(bytes, [0, 0, 0, 0, 0, 0, 0, 2]);\n assert_eq(Field::from_be_bytes::<8>(bytes), field);\n }\n // docs:end:to_be_bytes_example\n\n #[test]\n // docs:start:to_le_bytes_example\n fn test_to_le_bytes() {\n let field = 2;\n let bytes: [u8; 8] = field.to_le_bytes();\n assert_eq(bytes, [2, 0, 0, 0, 0, 0, 0, 0]);\n assert_eq(Field::from_le_bytes::<8>(bytes), field);\n }\n // docs:end:to_le_bytes_example\n\n #[test]\n // docs:start:to_be_radix_example\n fn test_to_be_radix() {\n // 259, in base 256, big endian, is [1, 3].\n // i.e. 3 * 256^0 + 1 * 256^1\n let field = 259;\n\n // The radix (in this example, 256) must be a power of 2.\n // The length of the returned byte array can be specified to be\n // >= the amount of space needed.\n let bytes: [u8; 8] = field.to_be_radix(256);\n assert_eq(bytes, [0, 0, 0, 0, 0, 0, 1, 3]);\n assert_eq(Field::from_be_bytes::<8>(bytes), field);\n }\n // docs:end:to_be_radix_example\n\n #[test]\n // docs:start:to_le_radix_example\n fn test_to_le_radix() {\n // 259, in base 256, little endian, is [3, 1].\n // i.e. 
3 * 256^0 + 1 * 256^1\n let field = 259;\n\n // The radix (in this example, 256) must be a power of 2.\n // The length of the returned byte array can be specified to be\n // >= the amount of space needed.\n let bytes: [u8; 8] = field.to_le_radix(256);\n assert_eq(bytes, [3, 1, 0, 0, 0, 0, 0, 0]);\n assert_eq(Field::from_le_bytes::<8>(bytes), field);\n }\n // docs:end:to_le_radix_example\n\n #[test(should_fail_with = \"radix must be greater than 1\")]\n fn test_to_le_radix_1() {\n // this test should only fail in constrained mode\n if !runtime::is_unconstrained() {\n let field = 2;\n let _: [u8; 8] = field.to_le_radix(1);\n } else {\n panic(f\"radix must be greater than 1\");\n }\n }\n\n // TODO: Update this test to account for the Brillig restriction that the radix must be greater than 2\n //#[test]\n //fn test_to_le_radix_brillig_1() {\n // // this test should only fail in constrained mode\n // if runtime::is_unconstrained() {\n // let field = 1;\n // let out: [u8; 8] = field.to_le_radix(1);\n // crate::println(out);\n // let expected = [0; 8];\n // assert(out == expected, \"unexpected result\");\n // }\n //}\n\n #[test(should_fail_with = \"radix must be a power of 2\")]\n fn test_to_le_radix_3() {\n // this test should only fail in constrained mode\n if !runtime::is_unconstrained() {\n let field = 2;\n let _: [u8; 8] = field.to_le_radix(3);\n } else {\n panic(f\"radix must be a power of 2\");\n }\n }\n\n #[test]\n fn test_to_le_radix_brillig_3() {\n // this test should only fail in constrained mode\n if runtime::is_unconstrained() {\n let field = 1;\n let out: [u8; 8] = field.to_le_radix(3);\n let mut expected = [0; 8];\n expected[0] = 1;\n assert(out == expected, \"unexpected result\");\n }\n }\n\n #[test(should_fail_with = \"radix must be less than or equal to 256\")]\n fn test_to_le_radix_512() {\n // this test should only fail in constrained mode\n if !runtime::is_unconstrained() {\n let field = 2;\n let _: [u8; 8] = field.to_le_radix(512);\n } else {\n 
panic(f\"radix must be less than or equal to 256\")\n }\n }\n\n // TODO: Update this test to account for the Brillig restriction that the radix must be less than 512\n //#[test]\n //fn test_to_le_radix_brillig_512() {\n // // this test should only fail in constrained mode\n // if runtime::is_unconstrained() {\n // let field = 1;\n // let out: [u8; 8] = field.to_le_radix(512);\n // let mut expected = [0; 8];\n // expected[0] = 1;\n // assert(out == expected, \"unexpected result\");\n // }\n //}\n\n #[test]\n unconstrained fn test_field_less_than() {\n assert(field_less_than(0, 1));\n assert(field_less_than(0, 0x100));\n assert(field_less_than(0x100, 0 - 1));\n assert(!field_less_than(0 - 1, 0));\n }\n}\n","path":"std/field/mod.nr"},"62":{"source":"/// Add two external values to two registers that are folded across circuits.\npub fn main(\n external: [Field; 2],\n registers: [Field; 2],\n next_pc: Field,\n) -> pub ([Field; 2], Field) {\n assert(next_pc.lt(3));\n ([external[0] + registers[0], external[1] + registers[1]], next_pc)\n}\n","path":"/Users/autoparallel/Code/client-side-prover/examples/add_external/src/main.nr"}},"names":["main"],"brillig_names":["field_less_than","decompose_hint","lte_hint","directive_invert"]} \ No newline at end of file 
+{"noir_version":"1.0.0-beta.2+1a2a08cbcb68646ff1aaef383cfc1798933c1355","hash":6155312096090522998,"abi":{"parameters":[{"name":"external","type":{"kind":"array","length":2,"type":{"kind":"field"}},"visibility":"private"},{"name":"registers","type":{"kind":"array","length":2,"type":{"kind":"field"}},"visibility":"public"},{"name":"next_pc","type":{"kind":"field"},"visibility":"public"}],"return_type":{"abi_type":{"kind":"tuple","fields":[{"kind":"field"},{"kind":"array","length":2,"type":{"kind":"field"}}]},"visibility":"public"},"error_types":{}},"bytecode":"H4sIAAAAAAAA/9WTSw7DIAxE+bVcx8YQ7F2vUlRy/yO0KCxQs4w3eRIyYjHSPGRrDuLveHPGzfmak2DLudfUkfANSRoXyKVtjIyFyycxUefMVZpUEMzUcS9COxyEJQuugQ+9LFg7e+XOmllO0d/zJv6sYpZX9BcV/Y2O4W/nBnbx6eY9LG9jB8Y/RnPmCzVzF/7cAwAA","debug_symbols":"XYxLCoAwDAXvkrUn8Coi0k9aAqEpsRWk9O5+cCFdzhveNPBoa9woBdlhXhqwOFNI0k2tT2CVmCluw3wYJWMZPww1uZ8tZ8bhn1Uc+qr4lF7X134B","file_map":{},"names":["main"],"brillig_names":[]} \ No newline at end of file diff --git a/examples/square_zeroth/src/main.nr b/examples/square_zeroth/src/main.nr index 41171cf..cf215bf 100644 --- a/examples/square_zeroth/src/main.nr +++ b/examples/square_zeroth/src/main.nr @@ -1,4 +1,4 @@ /// Square only the first register. 
-pub fn main(registers: [Field; 2], next_pc: Field) -> pub (Field, [Field; 2]) { +pub fn main(registers: pub [Field; 2], next_pc: pub Field) -> pub (Field, [Field; 2]) { (next_pc, [registers[0] * registers[0], registers[1]]) } diff --git a/examples/square_zeroth/target/square_zeroth.json b/examples/square_zeroth/target/square_zeroth.json new file mode 100644 index 0000000..83c2059 --- /dev/null +++ b/examples/square_zeroth/target/square_zeroth.json @@ -0,0 +1 @@ +{"noir_version":"1.0.0-beta.2+1a2a08cbcb68646ff1aaef383cfc1798933c1355","hash":12964286917013245901,"abi":{"parameters":[{"name":"registers","type":{"kind":"array","length":2,"type":{"kind":"field"}},"visibility":"public"},{"name":"next_pc","type":{"kind":"field"},"visibility":"public"}],"return_type":{"abi_type":{"kind":"tuple","fields":[{"kind":"field"},{"kind":"array","length":2,"type":{"kind":"field"}}]},"visibility":"public"},"error_types":{}},"bytecode":"H4sIAAAAAAAA/7WSUQ7DIAiGtdb7gEiFt11lZvb+R9hMXeLWx9IvITxA/vAD3h3ETwR3Zhn5MTLBlnMrqSHhE5JWYchcN0FBFn4lIWqSpWjVAoqZGu6stMPBMmnBNTDYaUHX8MZevzv814Vr4Grs+65be0PP0dBzn2t1v8y/78cuwlTr/dGdeQN8wPcJQAMAAA==","debug_symbols":"XYxLCoAwDAXvkrUn8Coi0k9aAqEpsRWk9O5+cCFdzhveNPBoa9woBdlhXhqwOFNI0k2tT2CVmCluw3wYJWMZPww1uZ8tZ8bhn1Uc+qr4lF7X134B","file_map":{},"names":["main"],"brillig_names":[]} \ No newline at end of file diff --git a/examples/swap_memory/src/main.nr b/examples/swap_memory/src/main.nr index 592af5c..5fca808 100644 --- a/examples/swap_memory/src/main.nr +++ b/examples/swap_memory/src/main.nr @@ -1,4 +1,4 @@ /// Swap the two registers. 
-pub fn main(registers: [Field; 2], next_pc: Field) -> pub (Field, [Field; 2]) { +pub fn main(registers: pub [Field; 2], next_pc: pub Field) -> pub (Field, [Field; 2]) { (next_pc, [registers[1], registers[0]]) } diff --git a/examples/swap_memory/target/swap_memory.json b/examples/swap_memory/target/swap_memory.json index ed95743..215f95c 100644 --- a/examples/swap_memory/target/swap_memory.json +++ b/examples/swap_memory/target/swap_memory.json @@ -1 +1 @@ -{"noir_version":"1.0.0-beta.2+1a2a08cbcb68646ff1aaef383cfc1798933c1355","hash":6520664474973846018,"abi":{"parameters":[{"name":"registers","type":{"kind":"array","length":2,"type":{"kind":"field"}},"visibility":"private"},{"name":"next_pc","type":{"kind":"field"},"visibility":"private"}],"return_type":{"abi_type":{"kind":"tuple","fields":[{"kind":"array","length":2,"type":{"kind":"field"}},{"kind":"field"}]},"visibility":"public"},"error_types":{}},"bytecode":"H4sIAAAAAAAA/9WSQQ6EMAhFW2vvAwUs7OYq01jvfwQ1dtHEpXXhSwgLyE8ewbuLeFRwd6bWf60TLMw1p4qEf0hWVIClLIqKorImJarKmq1YBkOmipsYbXDhuyx4BoZxWfCmsxvoPH/EeRroHAc6n/83t6z+3313g34W2n50d3Yk6PqGPAMAAA==","debug_symbols":"XYxLCoAwDAXvkrUn8Coi0k9aAqEpsRWk9O5+cCFdzhveNPBoa9woBdlhXhqwOFNI0k2tT2CVmCluw3wYJWMZPww1uZ8tZ8bhn1Uc+qr4lF7X134B","file_map":{},"names":["main"],"brillig_names":[]} \ No newline at end of file 
+{"noir_version":"1.0.0-beta.2+1a2a08cbcb68646ff1aaef383cfc1798933c1355","hash":15239032000248702557,"abi":{"parameters":[{"name":"registers","type":{"kind":"array","length":2,"type":{"kind":"field"}},"visibility":"public"},{"name":"next_pc","type":{"kind":"field"},"visibility":"public"}],"return_type":{"abi_type":{"kind":"tuple","fields":[{"kind":"field"},{"kind":"array","length":2,"type":{"kind":"field"}}]},"visibility":"public"},"error_types":{}},"bytecode":"H4sIAAAAAAAA/9WSMQ7DMAgAcRz/BwzEsPUrter8/wltFA+uMtYdchJiACEdEOAkfSLClaXnR8+Mm0gruRHTE7NXUxStm5GRmr6yMTcTK169oJNwo12ddzxZhln4GxTnzcJ/OoeJzutNnGGic5rofNxihW/Gvw99F3GoHf0JrrwBWeHc1TwDAAA=","debug_symbols":"XYxLCoAwDAXvkrUn8Coi0k9aAqEpsRWk9O5+cCFdzhveNPBoa9woBdlhXhqwOFNI0k2tT2CVmCluw3wYJWMZPww1uZ8tZ8bhn1Uc+qr4lF7X134B","file_map":{},"names":["main"],"brillig_names":[]} \ No newline at end of file diff --git a/frontend/src/noir.rs b/frontend/src/noir.rs index 66f5d22..3c9baaf 100644 --- a/frontend/src/noir.rs +++ b/frontend/src/noir.rs @@ -1,3 +1,5 @@ +use std::collections::HashMap; + use acvm::{ acir::{ acir_field::GenericFieldElement, @@ -24,16 +26,62 @@ use crate::program::SwitchboardWitness; // assigning inputs. 
#[derive(Clone, Serialize, Deserialize, Debug)] pub struct NoirProgram { + #[serde(rename = "noir_version")] + pub version: String, + pub hash: u64, + pub abi: NoirAbi, #[serde( serialize_with = "Program::serialize_program_base64", deserialize_with = "Program::deserialize_program_base64" )] - pub bytecode: Program>, - pub witness: Option, - pub index: usize, - // TODO: To make this more efficient, we could just store an option of the `&mut CS` inside of - // here so we don't actually need to rebuild it always, though the enforcement for the public - // inputs is tougher + pub bytecode: Program>, + pub debug_symbols: String, + pub file_map: HashMap, + pub names: Vec, + pub brillig_names: Vec, + #[serde(skip)] + pub witness: Option, + #[serde(skip)] + pub index: usize, +} + +#[derive(Clone, Serialize, Deserialize, Debug)] +pub struct NoirAbi { + pub parameters: Vec, + pub return_type: NoirReturnType, + pub error_types: HashMap, +} + +#[derive(Clone, Serialize, Deserialize, Debug)] +pub struct NoirParameter { + pub name: String, + #[serde(rename = "type")] + pub parameter_type: NoirType, + pub visibility: String, +} + +#[derive(Clone, Serialize, Deserialize, Debug)] +pub struct NoirReturnType { + pub abi_type: NoirType, + pub visibility: String, +} + +#[derive(Clone, Serialize, Deserialize, Debug)] +#[serde(untagged)] +pub enum NoirType { + Simple { + kind: String, + }, + Array { + kind: String, + length: usize, + #[serde(rename = "type")] + element_type: Box, + }, + Tuple { + kind: String, + fields: Vec, + }, } impl NoirProgram { diff --git a/frontend/tests/ivc/mod.rs b/frontend/tests/ivc/mod.rs new file mode 100644 index 0000000..e48e7fc --- /dev/null +++ b/frontend/tests/ivc/mod.rs @@ -0,0 +1,88 @@ +use client_side_prover_frontend::program::{run, Switchboard, SwitchboardWitness}; + +use super::*; + +#[test] +#[traced_test] +fn test_mock_noir_ivc() { + let mut circuit = add_external(); + let witnesses = vec![ + SwitchboardWitness { witness: vec![F::::from(3), 
F::::from(3)], pc: 0 }, + SwitchboardWitness { witness: vec![F::::from(5), F::::from(7)], pc: 0 }, + SwitchboardWitness { witness: vec![F::::from(0), F::::from(2)], pc: 0 }, + ]; + + let memory = Switchboard { + circuits: vec![circuit], + public_input: vec![F::::from(1), F::::from(2)], + initial_circuit_index: 0, + witnesses, + }; + + let snark = run(&memory).unwrap(); + let zi = snark.zi_primary(); + dbg!(zi); + // First fold: + // step_out[0] == 3 * 1 + 2 + 1 == 6 + // step_out[1] == (3 + 3) * 2 + 1 == 13 + // Second fold: + // step_out[0] == 3 * 6 + 13 + 1 == 32 + // step_out[1] == (3 + 3) * 13 + 6 == 84 + // assert_eq!(zi[0], F::::from(32)); + // assert_eq!(zi[1], F::::from(84)); + // assert_eq!(zi[2], F::::from(2)); + // assert_eq!(zi[3], F::::from(0)); + // assert_eq!(zi[4], F::::from(0)); +} + +// #[test] +// #[traced_test] +// fn test_mock_noir_nivc() { +// let mut add_external = NoirProgram::new(ADD_EXTERNAL); +// add_external.set_private_inputs(vec![F::::from(5), F::::from(7)]); +// let add_external = +// NoirRomCircuit { circuit: add_external, circuit_index: 0, rom_size: 3 }; + +// // TODO: The issue is the private inputs need to be an empty vector or else this isn't computed +// at // all. Be careful, this is insanely touchy and I hate that it is this way. 
+// let mut square_zeroth = NoirProgram::new(SQUARE_ZEROTH); +// square_zeroth.set_private_inputs(vec![]); +// let square_zeroth = +// NoirRomCircuit { circuit: square_zeroth, circuit_index: 1, rom_size: 3 }; +// let mut swap_memory = NoirProgram::new(SWAP_MEMORY); +// swap_memory.set_private_inputs(vec![]); +// let swap_memory = +// NoirRomCircuit { circuit: swap_memory, circuit_index: 2, rom_size: 3 }; + +// let memory = NoirMemory { +// circuits: vec![add_external, square_zeroth, swap_memory], +// rom: vec![0, 1, 2], +// public_input: vec![ +// F::::from(1), // Actual input +// F::::from(2), // Actual input +// F::::from(0), // PC +// F::::from(0), // ROM +// F::::from(1), // ROM +// F::::from(2), // ROM +// ], +// }; + +// let snark = run(&memory).unwrap(); +// let zi = snark.zi_primary(); +// dbg!(zi); +// // First fold: +// // step_out[0] == 1 + 5 == 6 +// // step_out[1] == 2 + 7 == 9 +// // Second fold: +// // step_out[0] == 6 ** 2 == 36 +// // step_out[1] == 9 +// // Third fold: +// // step_out[0] == 9 +// // step_out[1] == 36 +// assert_eq!(zi[0], F::::from(9)); +// assert_eq!(zi[1], F::::from(36)); +// assert_eq!(zi[2], F::::from(3)); +// assert_eq!(zi[3], F::::from(0)); +// assert_eq!(zi[4], F::::from(1)); +// assert_eq!(zi[5], F::::from(2)); +// } diff --git a/frontend/tests/lib.rs b/frontend/tests/lib.rs new file mode 100644 index 0000000..6237866 --- /dev/null +++ b/frontend/tests/lib.rs @@ -0,0 +1,22 @@ +use client_side_prover_frontend::{noir::NoirProgram, F, G1}; +use tracing_test::traced_test; + +mod ivc; + +pub fn add_external() -> NoirProgram { + let bytecode = std::fs::read("../examples/add_external/target/add_external.json") + .expect("Failed to read Noir program file"); + NoirProgram::new(&bytecode) +} + +pub fn square_zeroth() -> NoirProgram { + let bytecode = std::fs::read("../examples/square_zeroth/target/square_zeroth.json") + .expect("Failed to read Noir program file"); + NoirProgram::new(&bytecode) +} + +pub fn swap_memory() -> 
NoirProgram { + let bytecode = std::fs::read("../examples/swap_memory/target/swap_memory.json") + .expect("Failed to read Noir program file"); + NoirProgram::new(&bytecode) +} diff --git a/frontend/tests/mod.rs b/frontend/tests/mod.rs deleted file mode 100644 index 33f7cc0..0000000 --- a/frontend/tests/mod.rs +++ /dev/null @@ -1,87 +0,0 @@ -#[test] -#[traced_test] -fn test_mock_noir_ivc() { - let mut circuit = noir_fold(); - circuit.set_private_inputs(vec![F::::from(3)]); - - let rom_circuit = NoirRomCircuit { circuit, circuit_index: 0, rom_size: 2 }; - - let memory = NoirMemory { - circuits: vec![rom_circuit], - rom: vec![0, 0], - public_input: vec![ - F::::from(1), // Actual input - F::::from(2), // Actual input - F::::from(0), // PC - F::::from(0), // ROM - F::::from(0), // ROM - ], - }; - - let snark = run(&memory).unwrap(); - let zi = snark.zi_primary(); - dbg!(zi); - // First fold: - // step_out[0] == 3 * 1 + 2 + 1 == 6 - // step_out[1] == (3 + 3) * 2 + 1 == 13 - // Second fold: - // step_out[0] == 3 * 6 + 13 + 1 == 32 - // step_out[1] == (3 + 3) * 13 + 6 == 84 - assert_eq!(zi[0], F::::from(32)); - assert_eq!(zi[1], F::::from(84)); - assert_eq!(zi[2], F::::from(2)); - assert_eq!(zi[3], F::::from(0)); - assert_eq!(zi[4], F::::from(0)); -} - -#[test] -#[traced_test] -fn test_mock_noir_nivc() { - let mut add_external = NoirProgram::new(ADD_EXTERNAL); - add_external.set_private_inputs(vec![F::::from(5), F::::from(7)]); - let add_external = - NoirRomCircuit { circuit: add_external, circuit_index: 0, rom_size: 3 }; - - // TODO: The issue is the private inputs need to be an empty vector or else this isn't computed at - // all. Be careful, this is insanely touchy and I hate that it is this way. 
- let mut square_zeroth = NoirProgram::new(SQUARE_ZEROTH); - square_zeroth.set_private_inputs(vec![]); - let square_zeroth = - NoirRomCircuit { circuit: square_zeroth, circuit_index: 1, rom_size: 3 }; - let mut swap_memory = NoirProgram::new(SWAP_MEMORY); - swap_memory.set_private_inputs(vec![]); - let swap_memory = - NoirRomCircuit { circuit: swap_memory, circuit_index: 2, rom_size: 3 }; - - let memory = NoirMemory { - circuits: vec![add_external, square_zeroth, swap_memory], - rom: vec![0, 1, 2], - public_input: vec![ - F::::from(1), // Actual input - F::::from(2), // Actual input - F::::from(0), // PC - F::::from(0), // ROM - F::::from(1), // ROM - F::::from(2), // ROM - ], - }; - - let snark = run(&memory).unwrap(); - let zi = snark.zi_primary(); - dbg!(zi); - // First fold: - // step_out[0] == 1 + 5 == 6 - // step_out[1] == 2 + 7 == 9 - // Second fold: - // step_out[0] == 6 ** 2 == 36 - // step_out[1] == 9 - // Third fold: - // step_out[0] == 9 - // step_out[1] == 36 - assert_eq!(zi[0], F::::from(9)); - assert_eq!(zi[1], F::::from(36)); - assert_eq!(zi[2], F::::from(3)); - assert_eq!(zi[3], F::::from(0)); - assert_eq!(zi[4], F::::from(1)); - assert_eq!(zi[5], F::::from(2)); -} From 58b6c979ef0296a81b7f9b1a1028b82ddc9de30c Mon Sep 17 00:00:00 2001 From: Colin Roberts Date: Fri, 28 Feb 2025 16:32:04 -0700 Subject: [PATCH 11/51] small changes --- examples/add_external/src/main.nr | 2 +- .../add_external/target/add_external.json | 2 +- examples/square_zeroth/src/main.nr | 2 +- .../square_zeroth/target/square_zeroth.json | 2 +- examples/swap_memory/src/main.nr | 2 +- examples/swap_memory/target/swap_memory.json | 2 +- frontend/src/noir.rs | 4 +- frontend/tests/ivc/mod.rs | 37 ++++++++++++++++++- 8 files changed, 43 insertions(+), 10 deletions(-) diff --git a/examples/add_external/src/main.nr b/examples/add_external/src/main.nr index 091a944..c12b45f 100644 --- a/examples/add_external/src/main.nr +++ b/examples/add_external/src/main.nr @@ -2,7 +2,7 @@ pub fn main( 
external: [Field; 2], registers: pub [Field; 2], - next_pc: pub Field, + next_pc: Field, ) -> pub (Field, [Field; 2]) { (next_pc, [external[0] + registers[0], external[1] + registers[1]]) } diff --git a/examples/add_external/target/add_external.json b/examples/add_external/target/add_external.json index 3d40c5d..deb6fee 100644 --- a/examples/add_external/target/add_external.json +++ b/examples/add_external/target/add_external.json @@ -1 +1 @@ -{"noir_version":"1.0.0-beta.2+1a2a08cbcb68646ff1aaef383cfc1798933c1355","hash":6155312096090522998,"abi":{"parameters":[{"name":"external","type":{"kind":"array","length":2,"type":{"kind":"field"}},"visibility":"private"},{"name":"registers","type":{"kind":"array","length":2,"type":{"kind":"field"}},"visibility":"public"},{"name":"next_pc","type":{"kind":"field"},"visibility":"public"}],"return_type":{"abi_type":{"kind":"tuple","fields":[{"kind":"field"},{"kind":"array","length":2,"type":{"kind":"field"}}]},"visibility":"public"},"error_types":{}},"bytecode":"H4sIAAAAAAAA/9WTSw7DIAxE+bVcx8YQ7F2vUlRy/yO0KCxQs4w3eRIyYjHSPGRrDuLveHPGzfmak2DLudfUkfANSRoXyKVtjIyFyycxUefMVZpUEMzUcS9COxyEJQuugQ+9LFg7e+XOmllO0d/zJv6sYpZX9BcV/Y2O4W/nBnbx6eY9LG9jB8Y/RnPmCzVzF/7cAwAA","debug_symbols":"XYxLCoAwDAXvkrUn8Coi0k9aAqEpsRWk9O5+cCFdzhveNPBoa9woBdlhXhqwOFNI0k2tT2CVmCluw3wYJWMZPww1uZ8tZ8bhn1Uc+qr4lF7X134B","file_map":{},"names":["main"],"brillig_names":[]} \ No newline at end of file 
+{"noir_version":"1.0.0-beta.2+1a2a08cbcb68646ff1aaef383cfc1798933c1355","hash":2655773232820318361,"abi":{"parameters":[{"name":"external","type":{"kind":"array","length":2,"type":{"kind":"field"}},"visibility":"private"},{"name":"registers","type":{"kind":"array","length":2,"type":{"kind":"field"}},"visibility":"public"},{"name":"next_pc","type":{"kind":"field"},"visibility":"private"}],"return_type":{"abi_type":{"kind":"tuple","fields":[{"kind":"field"},{"kind":"array","length":2,"type":{"kind":"field"}}]},"visibility":"public"},"error_types":{}},"bytecode":"H4sIAAAAAAAA/9WTQQoDIQxFHZ3W6yTGjMmuV6nUuf8RWhmFobNsNn0gARM+/B+yuIP4ecFd8aM+RiXYcm4lNSR8QtIqDJnrJijIwq8kRE2yFK1aQDFTw52VdjhYT1rwG3iz04Kz52Ds2VLLG+Z3/5P8FkOtYJhfNMyve1y/8uvM/3mHfvTnTL+BvsforrwBE7EYeNwDAAA=","debug_symbols":"XYxLCoAwDAXvkrUn8Coi0k9aAqEpsRWk9O5+cCFdzhveNPBoa9woBdlhXhqwOFNI0k2tT2CVmCluw3wYJWMZPww1uZ8tZ8bhn1Uc+qr4lF7X134B","file_map":{},"names":["main"],"brillig_names":[]} \ No newline at end of file diff --git a/examples/square_zeroth/src/main.nr b/examples/square_zeroth/src/main.nr index cf215bf..a0a682b 100644 --- a/examples/square_zeroth/src/main.nr +++ b/examples/square_zeroth/src/main.nr @@ -1,4 +1,4 @@ /// Square only the first register. 
-pub fn main(registers: pub [Field; 2], next_pc: pub Field) -> pub (Field, [Field; 2]) { +pub fn main(registers: pub [Field; 2], next_pc: Field) -> pub (Field, [Field; 2]) { (next_pc, [registers[0] * registers[0], registers[1]]) } diff --git a/examples/square_zeroth/target/square_zeroth.json b/examples/square_zeroth/target/square_zeroth.json index 83c2059..e716cbe 100644 --- a/examples/square_zeroth/target/square_zeroth.json +++ b/examples/square_zeroth/target/square_zeroth.json @@ -1 +1 @@ -{"noir_version":"1.0.0-beta.2+1a2a08cbcb68646ff1aaef383cfc1798933c1355","hash":12964286917013245901,"abi":{"parameters":[{"name":"registers","type":{"kind":"array","length":2,"type":{"kind":"field"}},"visibility":"public"},{"name":"next_pc","type":{"kind":"field"},"visibility":"public"}],"return_type":{"abi_type":{"kind":"tuple","fields":[{"kind":"field"},{"kind":"array","length":2,"type":{"kind":"field"}}]},"visibility":"public"},"error_types":{}},"bytecode":"H4sIAAAAAAAA/7WSUQ7DIAiGtdb7gEiFt11lZvb+R9hMXeLWx9IvITxA/vAD3h3ETwR3Zhn5MTLBlnMrqSHhE5JWYchcN0FBFn4lIWqSpWjVAoqZGu6stMPBMmnBNTDYaUHX8MZevzv814Vr4Grs+65be0PP0dBzn2t1v8y/78cuwlTr/dGdeQN8wPcJQAMAAA==","debug_symbols":"XYxLCoAwDAXvkrUn8Coi0k9aAqEpsRWk9O5+cCFdzhveNPBoa9woBdlhXhqwOFNI0k2tT2CVmCluw3wYJWMZPww1uZ8tZ8bhn1Uc+qr4lF7X134B","file_map":{},"names":["main"],"brillig_names":[]} \ No newline at end of file 
+{"noir_version":"1.0.0-beta.2+1a2a08cbcb68646ff1aaef383cfc1798933c1355","hash":3633696500572180175,"abi":{"parameters":[{"name":"registers","type":{"kind":"array","length":2,"type":{"kind":"field"}},"visibility":"public"},{"name":"next_pc","type":{"kind":"field"},"visibility":"private"}],"return_type":{"abi_type":{"kind":"tuple","fields":[{"kind":"field"},{"kind":"array","length":2,"type":{"kind":"field"}}]},"visibility":"public"},"error_types":{}},"bytecode":"H4sIAAAAAAAA/7WTWwrEIAxFfdT9JMbU5G+2MjJ2/0uYKVUo7WczByQgcvEkxLuD9DvR3QmjvkYlWEvpNXckfEPWJgyF2yooyMKfLERdilRtWkGxUMeNlTY4CKcseAZGuyzYM7yx6+zhNReegYux979m7Q2dk6Hz/q/lMptw8p/3cx/ieJ/cnS/OjlpIQAMAAA==","debug_symbols":"XYxLCoAwDAXvkrUn8Coi0k9aAqEpsRWk9O5+cCFdzhveNPBoa9woBdlhXhqwOFNI0k2tT2CVmCluw3wYJWMZPww1uZ8tZ8bhn1Uc+qr4lF7X134B","file_map":{},"names":["main"],"brillig_names":[]} \ No newline at end of file diff --git a/examples/swap_memory/src/main.nr b/examples/swap_memory/src/main.nr index 5fca808..b199022 100644 --- a/examples/swap_memory/src/main.nr +++ b/examples/swap_memory/src/main.nr @@ -1,4 +1,4 @@ /// Swap the two registers. 
-pub fn main(registers: pub [Field; 2], next_pc: pub Field) -> pub (Field, [Field; 2]) { +pub fn main(registers: pub [Field; 2], next_pc: Field) -> pub (Field, [Field; 2]) { (next_pc, [registers[1], registers[0]]) } diff --git a/examples/swap_memory/target/swap_memory.json b/examples/swap_memory/target/swap_memory.json index 215f95c..2c6da9e 100644 --- a/examples/swap_memory/target/swap_memory.json +++ b/examples/swap_memory/target/swap_memory.json @@ -1 +1 @@ -{"noir_version":"1.0.0-beta.2+1a2a08cbcb68646ff1aaef383cfc1798933c1355","hash":15239032000248702557,"abi":{"parameters":[{"name":"registers","type":{"kind":"array","length":2,"type":{"kind":"field"}},"visibility":"public"},{"name":"next_pc","type":{"kind":"field"},"visibility":"public"}],"return_type":{"abi_type":{"kind":"tuple","fields":[{"kind":"field"},{"kind":"array","length":2,"type":{"kind":"field"}}]},"visibility":"public"},"error_types":{}},"bytecode":"H4sIAAAAAAAA/9WSMQ7DMAgAcRz/BwzEsPUrter8/wltFA+uMtYdchJiACEdEOAkfSLClaXnR8+Mm0gruRHTE7NXUxStm5GRmr6yMTcTK169oJNwo12ddzxZhln4GxTnzcJ/OoeJzutNnGGic5rofNxihW/Gvw99F3GoHf0JrrwBWeHc1TwDAAA=","debug_symbols":"XYxLCoAwDAXvkrUn8Coi0k9aAqEpsRWk9O5+cCFdzhveNPBoa9woBdlhXhqwOFNI0k2tT2CVmCluw3wYJWMZPww1uZ8tZ8bhn1Uc+qr4lF7X134B","file_map":{},"names":["main"],"brillig_names":[]} \ No newline at end of file 
+{"noir_version":"1.0.0-beta.2+1a2a08cbcb68646ff1aaef383cfc1798933c1355","hash":10660601329448082079,"abi":{"parameters":[{"name":"registers","type":{"kind":"array","length":2,"type":{"kind":"field"}},"visibility":"public"},{"name":"next_pc","type":{"kind":"field"},"visibility":"private"}],"return_type":{"abi_type":{"kind":"tuple","fields":[{"kind":"field"},{"kind":"array","length":2,"type":{"kind":"field"}}]},"visibility":"public"},"error_types":{}},"bytecode":"H4sIAAAAAAAA/9WTTQrEMAhG89PcR6M2upurTJj0/keYKc1AoMumiz4IQpAPnqJ3B+n3ojsTen31SrAyt5IbEr4hW1UBlroqKorKJytRU9Zi1QoYMjXcxGiDgzBkwTUwzsuCO539ROflIc5uonOa6LzvYulZfnAPwyz2//8txN6f3Jkv669xlDwDAAA=","debug_symbols":"XYxLCoAwDAXvkrUn8Coi0k9aAqEpsRWk9O5+cCFdzhveNPBoa9woBdlhXhqwOFNI0k2tT2CVmCluw3wYJWMZPww1uZ8tZ8bhn1Uc+qr4lF7X134B","file_map":{},"names":["main"],"brillig_names":[]} \ No newline at end of file diff --git a/frontend/src/noir.rs b/frontend/src/noir.rs index 3c9baaf..d0c8bc6 100644 --- a/frontend/src/noir.rs +++ b/frontend/src/noir.rs @@ -130,8 +130,8 @@ impl StepCircuit> for NoirProgram { None }; - // For folding in particular: - assert_eq!(self.circuit().return_values.0.len(), self.circuit().public_parameters.0.len()); + // TODO: This is a bit hacky. 
For NIVC folding in particular: + assert_eq!(self.circuit().return_values.0.len() - 1, self.circuit().public_parameters.0.len()); // TODO: we could probably avoid this but i'm lazy // Create a map to track allocated variables for the cs diff --git a/frontend/tests/ivc/mod.rs b/frontend/tests/ivc/mod.rs index e48e7fc..02f83eb 100644 --- a/frontend/tests/ivc/mod.rs +++ b/frontend/tests/ivc/mod.rs @@ -4,8 +4,41 @@ use super::*; #[test] #[traced_test] -fn test_mock_noir_ivc() { - let mut circuit = add_external(); +fn test_ivc() { + let circuit = square_zeroth(); + let witnesses = vec![ + SwitchboardWitness { witness: vec![], pc: 0 }, + SwitchboardWitness { witness: vec![], pc: 0 }, + SwitchboardWitness { witness: vec![], pc: 0 }, + ]; + + let memory = Switchboard { + circuits: vec![circuit], + public_input: vec![F::::from(2), F::::from(1)], + initial_circuit_index: 0, + witnesses, + }; + + let snark = run(&memory).unwrap(); + let zi = snark.zi_primary(); + dbg!(zi); + // First fold: + // step_out[0] == 3 * 1 + 2 + 1 == 6 + // step_out[1] == (3 + 3) * 2 + 1 == 13 + // Second fold: + // step_out[0] == 3 * 6 + 13 + 1 == 32 + // step_out[1] == (3 + 3) * 13 + 6 == 84 + // assert_eq!(zi[0], F::::from(32)); + // assert_eq!(zi[1], F::::from(84)); + // assert_eq!(zi[2], F::::from(2)); + // assert_eq!(zi[3], F::::from(0)); + // assert_eq!(zi[4], F::::from(0)); +} + +#[test] +#[traced_test] +fn test_ivc_private_inputs() { + let circuit = add_external(); let witnesses = vec![ SwitchboardWitness { witness: vec![F::::from(3), F::::from(3)], pc: 0 }, SwitchboardWitness { witness: vec![F::::from(5), F::::from(7)], pc: 0 }, From 9fc6f3e3fcb43d526950e935ccd5102cada7407e Mon Sep 17 00:00:00 2001 From: Colin Roberts Date: Fri, 28 Feb 2025 18:18:48 -0700 Subject: [PATCH 12/51] WIP: abi improvements --- Cargo.lock | 99 +- Cargo.toml | 2 +- examples/add_external/src/main.nr | 16 +- .../add_external/target/add_external.json | 2 +- examples/square_zeroth/src/main.nr | 9 +- 
.../square_zeroth/target/square_zeroth.json | 2 +- frontend/Cargo.lock | 2716 ----------------- frontend/Cargo.toml | 3 +- frontend/src/noir.rs | 390 ++- frontend/src/program/mod.rs | 3 +- frontend/tests/ivc/mod.rs | 6 +- 11 files changed, 407 insertions(+), 2841 deletions(-) delete mode 100644 frontend/Cargo.lock diff --git a/Cargo.lock b/Cargo.lock index 154bc40..786138e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -41,7 +41,7 @@ dependencies = [ "acvm_blackbox_solver", "brillig_vm", "fxhash", - "indexmap", + "indexmap 1.9.3", "serde", "thiserror", "tracing", @@ -641,6 +641,7 @@ dependencies = [ "hex", "itertools 0.13.0", "js-sys", + "noirc_abi", "num-bigint 0.4.6", "serde", "serde-wasm-bindgen", @@ -870,6 +871,12 @@ dependencies = [ "syn 2.0.98", ] +[[package]] +name = "equivalent" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" + [[package]] name = "errno" version = "0.3.9" @@ -1177,6 +1184,14 @@ dependencies = [ "hashbrown 0.12.3", ] +version = "2.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c9c992b02b5b4c94ea26e32fe5bccb7aa7d9f390ab5c1221ff895bc7ea8b652" +dependencies = [ + "equivalent", + "hashbrown 0.15.2", +] + [[package]] name = "is-terminal" version = "0.4.13" @@ -1188,6 +1203,11 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "iter-extended" +version = "1.0.0-beta.2" +source = "git+https://github.com/noir-lang/noir?rev=v1.0.0-beta.2#1a2a08cbcb68646ff1aaef383cfc1798933c1355" + [[package]] name = "itertools" version = "0.10.5" @@ -1352,6 +1372,31 @@ dependencies = [ "trait-set", ] +[[package]] +name = "noirc_abi" +version = "1.0.0-beta.2" +source = "git+https://github.com/noir-lang/noir?rev=v1.0.0-beta.2#1a2a08cbcb68646ff1aaef383cfc1798933c1355" +dependencies = [ + "acvm", + "iter-extended", + "noirc_printable_type", + "num-bigint 0.4.6", + "num-traits", + "serde", + "serde_json", + 
"thiserror", + "toml", +] + +[[package]] +name = "noirc_printable_type" +version = "1.0.0-beta.2" +source = "git+https://github.com/noir-lang/noir?rev=v1.0.0-beta.2#1a2a08cbcb68646ff1aaef383cfc1798933c1355" +dependencies = [ + "acvm", + "serde", +] + [[package]] name = "nu-ansi-term" version = "0.46.0" @@ -1996,6 +2041,15 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_spanned" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1" +dependencies = [ + "serde", +] + [[package]] name = "sha2" version = "0.10.8" @@ -2263,6 +2317,40 @@ dependencies = [ "syn 2.0.98", ] +[[package]] +name = "toml" +version = "0.7.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd79e69d3b627db300ff956027cc6c3798cef26d22526befdfcd12feeb6d2257" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit", +] + +[[package]] +name = "toml_datetime" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" +dependencies = [ + "serde", +] + +[[package]] +name = "toml_edit" +version = "0.19.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" +dependencies = [ + "indexmap 2.7.1", + "serde", + "serde_spanned", + "toml_datetime", + "winnow", +] + [[package]] name = "tracing" version = "0.1.40" @@ -2632,6 +2720,15 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" +[[package]] +name = "winnow" +version = "0.5.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f593a95398737aeed53e489c785df13f3618e41dbcd6718c6addbf1395aa6876" +dependencies = [ + "memchr", +] + [[package]] name = "wyz" 
version = "0.5.1" diff --git a/Cargo.toml b/Cargo.toml index 97ece87..bbf021c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -51,7 +51,7 @@ criterion={ version="0.5", features=["html_reports"] } # dev dependencies flate2 ="1.0" sha2 ="0.10.7" -tracing-test ="0.2.4" +tracing-test ={ version="0.2.4", features = ["no-env-filter"] } expect-test ="1.4.1" anyhow ="1.0.72" tap ="1.0.1" diff --git a/examples/add_external/src/main.nr b/examples/add_external/src/main.nr index c12b45f..f59053b 100644 --- a/examples/add_external/src/main.nr +++ b/examples/add_external/src/main.nr @@ -1,8 +1,12 @@ +pub struct FoldingIO { + pub registers: [Field; 2], + pub next_pc: Field, +} + /// Add two external values to two registers that are folded across circuits. -pub fn main( - external: [Field; 2], - registers: pub [Field; 2], - next_pc: Field, -) -> pub (Field, [Field; 2]) { - (next_pc, [external[0] + registers[0], external[1] + registers[1]]) +pub fn main(external: [Field; 2], registers: pub [Field; 2], next_pc: Field) -> pub FoldingIO { + FoldingIO { + registers: [external[0] + registers[0], external[1] + registers[1]], + next_pc: next_pc, + } } diff --git a/examples/add_external/target/add_external.json b/examples/add_external/target/add_external.json index deb6fee..a4cdda8 100644 --- a/examples/add_external/target/add_external.json +++ b/examples/add_external/target/add_external.json @@ -1 +1 @@ 
-{"noir_version":"1.0.0-beta.2+1a2a08cbcb68646ff1aaef383cfc1798933c1355","hash":2655773232820318361,"abi":{"parameters":[{"name":"external","type":{"kind":"array","length":2,"type":{"kind":"field"}},"visibility":"private"},{"name":"registers","type":{"kind":"array","length":2,"type":{"kind":"field"}},"visibility":"public"},{"name":"next_pc","type":{"kind":"field"},"visibility":"private"}],"return_type":{"abi_type":{"kind":"tuple","fields":[{"kind":"field"},{"kind":"array","length":2,"type":{"kind":"field"}}]},"visibility":"public"},"error_types":{}},"bytecode":"H4sIAAAAAAAA/9WTQQoDIQxFHZ3W6yTGjMmuV6nUuf8RWhmFobNsNn0gARM+/B+yuIP4ecFd8aM+RiXYcm4lNSR8QtIqDJnrJijIwq8kRE2yFK1aQDFTw52VdjhYT1rwG3iz04Kz52Ds2VLLG+Z3/5P8FkOtYJhfNMyve1y/8uvM/3mHfvTnTL+BvsforrwBE7EYeNwDAAA=","debug_symbols":"XYxLCoAwDAXvkrUn8Coi0k9aAqEpsRWk9O5+cCFdzhveNPBoa9woBdlhXhqwOFNI0k2tT2CVmCluw3wYJWMZPww1uZ8tZ8bhn1Uc+qr4lF7X134B","file_map":{},"names":["main"],"brillig_names":[]} \ No newline at end of file +{"noir_version":"1.0.0-beta.2+1a2a08cbcb68646ff1aaef383cfc1798933c1355","hash":2789485860577127199,"abi":{"parameters":[{"name":"external","type":{"kind":"array","length":2,"type":{"kind":"field"}},"visibility":"private"},{"name":"registers","type":{"kind":"array","length":2,"type":{"kind":"field"}},"visibility":"public"},{"name":"next_pc","type":{"kind":"field"},"visibility":"private"}],"return_type":{"abi_type":{"kind":"struct","path":"FoldingIO","fields":[{"name":"registers","type":{"kind":"array","length":2,"type":{"kind":"field"}}},{"name":"next_pc","type":{"kind":"field"}}]},"visibility":"public"},"error_types":{}},"bytecode":"H4sIAAAAAAAA/7WSUQrDIAyGrXbzOokxNXnbVSaz9z/CJl1B2semH4gQwg/5ksltxN8L7sxee/1/giXnVlJDwjckrcKQuS6Cgiz8SULUJEvRqgUUMzVcWWmFDcssP2TBNfBhlwV3+psMs4Khv+dN/ryxv9lw5mg4c9/rfLiZzl73g48w9PS77e6jO/MFM7orOdwDAAA=","debug_symbols":"XYxLCoAwDAXvkrUn8Coi0k9aAqEpsRWk9O5+cCFdzhveNPBoa9woBdlhXhqwOFNI0k2tT2CVmCluw3wYJWMZPww1uZ8tZ8bhn1Uc+qr4lF7X134B","file_map":{},"names":["main"],"brillig_n
ames":[]} \ No newline at end of file diff --git a/examples/square_zeroth/src/main.nr b/examples/square_zeroth/src/main.nr index a0a682b..96b7c50 100644 --- a/examples/square_zeroth/src/main.nr +++ b/examples/square_zeroth/src/main.nr @@ -1,4 +1,9 @@ +pub struct FoldingIO { + pub registers: [Field; 2], + pub next_pc: Field, +} + /// Square only the first register. -pub fn main(registers: pub [Field; 2], next_pc: Field) -> pub (Field, [Field; 2]) { - (next_pc, [registers[0] * registers[0], registers[1]]) +pub fn main(registers: pub [Field; 2], next_pc: Field) -> pub FoldingIO { + FoldingIO { registers: [registers[0] * registers[0], registers[1]], next_pc: next_pc } } diff --git a/examples/square_zeroth/target/square_zeroth.json b/examples/square_zeroth/target/square_zeroth.json index e716cbe..fb32a0e 100644 --- a/examples/square_zeroth/target/square_zeroth.json +++ b/examples/square_zeroth/target/square_zeroth.json @@ -1 +1 @@ -{"noir_version":"1.0.0-beta.2+1a2a08cbcb68646ff1aaef383cfc1798933c1355","hash":3633696500572180175,"abi":{"parameters":[{"name":"registers","type":{"kind":"array","length":2,"type":{"kind":"field"}},"visibility":"public"},{"name":"next_pc","type":{"kind":"field"},"visibility":"private"}],"return_type":{"abi_type":{"kind":"tuple","fields":[{"kind":"field"},{"kind":"array","length":2,"type":{"kind":"field"}}]},"visibility":"public"},"error_types":{}},"bytecode":"H4sIAAAAAAAA/7WTWwrEIAxFfdT9JMbU5G+2MjJ2/0uYKVUo7WczByQgcvEkxLuD9DvR3QmjvkYlWEvpNXckfEPWJgyF2yooyMKfLERdilRtWkGxUMeNlTY4CKcseAZGuyzYM7yx6+zhNReegYux979m7Q2dk6Hz/q/lMptw8p/3cx/ieJ/cnS/OjlpIQAMAAA==","debug_symbols":"XYxLCoAwDAXvkrUn8Coi0k9aAqEpsRWk9O5+cCFdzhveNPBoa9woBdlhXhqwOFNI0k2tT2CVmCluw3wYJWMZPww1uZ8tZ8bhn1Uc+qr4lF7X134B","file_map":{},"names":["main"],"brillig_names":[]} \ No newline at end of file 
+{"noir_version":"1.0.0-beta.2+1a2a08cbcb68646ff1aaef383cfc1798933c1355","hash":1249437751568206918,"abi":{"parameters":[{"name":"registers","type":{"kind":"array","length":2,"type":{"kind":"field"}},"visibility":"public"},{"name":"next_pc","type":{"kind":"field"},"visibility":"private"}],"return_type":{"abi_type":{"kind":"struct","path":"FoldingIO","fields":[{"name":"registers","type":{"kind":"array","length":2,"type":{"kind":"field"}}},{"name":"next_pc","type":{"kind":"field"}}]},"visibility":"public"},"error_types":{}},"bytecode":"H4sIAAAAAAAA/9WTSwoDMQhA85ncR6NOdNerNDRz/yO0ZTJ0aOmqbvpABBHhocawUx6Rw4s482VmgpV5tDqQ8ArVugqw9FVRUVRuVYmGsjbr1sCQaeAmRhvsfJsLv4HZbxac9ENy9o+OzsufOCdH5+Lo/NzF8naP6eR/1I9/yLO/hE/uE3pMEkADAAA=","debug_symbols":"XYxLCoAwDAXvkrUn8Coi0k9aAqEpsRWk9O5+cCFdzhveNPBoa9woBdlhXhqwOFNI0k2tT2CVmCluw3wYJWMZPww1uZ8tZ8bhn1Uc+qr4lF7X134B","file_map":{},"names":["main"],"brillig_names":[]} \ No newline at end of file diff --git a/frontend/Cargo.lock b/frontend/Cargo.lock deleted file mode 100644 index b693177..0000000 --- a/frontend/Cargo.lock +++ /dev/null @@ -1,2716 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. 
-version = 3 - -[[package]] -name = "addchain" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b2e69442aa5628ea6951fa33e24efe8313f4321a91bd729fc2f75bdfc858570" -dependencies = [ - "num-bigint 0.3.3", - "num-integer", - "num-traits", -] - -[[package]] -name = "ahash" -version = "0.8.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" -dependencies = [ - "cfg-if", - "once_cell", - "version_check", - "zerocopy", -] - -[[package]] -name = "aho-corasick" -version = "1.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" -dependencies = [ - "memchr", -] - -[[package]] -name = "alloy-rlp" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26154390b1d205a4a7ac7352aa2eb4f81f391399d4e2f546fb81a2f8bb383f62" -dependencies = [ - "arrayvec", - "bytes", -] - -[[package]] -name = "ansi_term" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" -dependencies = [ - "winapi", -] - -[[package]] -name = "anstream" -version = "0.6.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64e15c1ab1f89faffbf04a634d5e1962e9074f2741eef6d97f3c4e322426d526" -dependencies = [ - "anstyle", - "anstyle-parse", - "anstyle-query", - "anstyle-wincon", - "colorchoice", - "is_terminal_polyfill", - "utf8parse", -] - -[[package]] -name = "anstyle" -version = "1.0.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bec1de6f59aedf83baf9ff929c98f2ad654b97c9510f4e70cf6f661d49fd5b1" - -[[package]] -name = "anstyle-parse" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"eb47de1e80c2b463c735db5b217a0ddc39d612e7ac9e2e96a5aed1f57616c1cb" -dependencies = [ - "utf8parse", -] - -[[package]] -name = "anstyle-query" -version = "1.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d36fc52c7f6c869915e99412912f22093507da8d9e942ceaf66fe4b7c14422a" -dependencies = [ - "windows-sys 0.52.0", -] - -[[package]] -name = "anstyle-wincon" -version = "3.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bf74e1b6e971609db8ca7a9ce79fd5768ab6ae46441c572e46cf596f59e57f8" -dependencies = [ - "anstyle", - "windows-sys 0.52.0", -] - -[[package]] -name = "anyhow" -version = "1.0.89" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6" - -[[package]] -name = "ark-bn254" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a22f4561524cd949590d78d7d4c5df8f592430d221f7f3c9497bbafd8972120f" -dependencies = [ - "ark-ec", - "ark-ff 0.4.2", - "ark-std 0.4.0", -] - -[[package]] -name = "ark-ec" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "defd9a439d56ac24968cca0571f598a61bc8c55f71d50a89cda591cb750670ba" -dependencies = [ - "ark-ff 0.4.2", - "ark-poly", - "ark-serialize 0.4.2", - "ark-std 0.4.0", - "derivative", - "hashbrown 0.13.2", - "itertools 0.10.5", - "num-traits", - "zeroize", -] - -[[package]] -name = "ark-ff" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b3235cc41ee7a12aaaf2c575a2ad7b46713a8a50bda2fc3b003a04845c05dd6" -dependencies = [ - "ark-ff-asm 0.3.0", - "ark-ff-macros 0.3.0", - "ark-serialize 0.3.0", - "ark-std 0.3.0", - "derivative", - "num-bigint 0.4.6", - "num-traits", - "paste", - "rustc_version 0.3.3", - "zeroize", -] - -[[package]] -name = "ark-ff" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "ec847af850f44ad29048935519032c33da8aa03340876d351dfab5660d2966ba" -dependencies = [ - "ark-ff-asm 0.4.2", - "ark-ff-macros 0.4.2", - "ark-serialize 0.4.2", - "ark-std 0.4.0", - "derivative", - "digest 0.10.7", - "itertools 0.10.5", - "num-bigint 0.4.6", - "num-traits", - "paste", - "rustc_version 0.4.1", - "zeroize", -] - -[[package]] -name = "ark-ff-asm" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db02d390bf6643fb404d3d22d31aee1c4bc4459600aef9113833d17e786c6e44" -dependencies = [ - "quote", - "syn 1.0.109", -] - -[[package]] -name = "ark-ff-asm" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ed4aa4fe255d0bc6d79373f7e31d2ea147bcf486cba1be5ba7ea85abdb92348" -dependencies = [ - "quote", - "syn 1.0.109", -] - -[[package]] -name = "ark-ff-macros" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db2fd794a08ccb318058009eefdf15bcaaaaf6f8161eb3345f907222bac38b20" -dependencies = [ - "num-bigint 0.4.6", - "num-traits", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "ark-ff-macros" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7abe79b0e4288889c4574159ab790824d0033b9fdcb2a112a3182fac2e514565" -dependencies = [ - "num-bigint 0.4.6", - "num-traits", - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "ark-poly" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d320bfc44ee185d899ccbadfa8bc31aab923ce1558716e1997a1e74057fe86bf" -dependencies = [ - "ark-ff 0.4.2", - "ark-serialize 0.4.2", - "ark-std 0.4.0", - "derivative", - "hashbrown 0.13.2", -] - -[[package]] -name = "ark-serialize" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d6c2b318ee6e10f8c2853e73a83adc0ccb88995aa978d8a3408d492ab2ee671" -dependencies = [ - "ark-std 0.3.0", - "digest 0.9.0", -] 
- -[[package]] -name = "ark-serialize" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adb7b85a02b83d2f22f89bd5cac66c9c89474240cb6207cb1efc16d098e822a5" -dependencies = [ - "ark-serialize-derive", - "ark-std 0.4.0", - "digest 0.10.7", - "num-bigint 0.4.6", -] - -[[package]] -name = "ark-serialize-derive" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae3281bc6d0fd7e549af32b52511e1302185bd688fd3359fa36423346ff682ea" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "ark-std" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1df2c09229cbc5a028b1d70e00fdb2acee28b1055dfb5ca73eea49c5a25c4e7c" -dependencies = [ - "num-traits", - "rand 0.8.5", -] - -[[package]] -name = "ark-std" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94893f1e0c6eeab764ade8dc4c0db24caf4fe7cbbaafc0eba0a9030f447b5185" -dependencies = [ - "num-traits", - "rand 0.8.5", -] - -[[package]] -name = "arrayref" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb" - -[[package]] -name = "arrayvec" -version = "0.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" - -[[package]] -name = "ascii-canvas" -version = "3.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8824ecca2e851cec16968d54a01dd372ef8f95b244fb84b84e70128be347c3c6" -dependencies = [ - "term", -] - -[[package]] -name = "auto_impl" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.77", -] - -[[package]] -name 
= "autocfg" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dde43e75fd43e8a1bf86103336bc699aa8d17ad1be60c76c0bdfd4828e19b78" -dependencies = [ - "autocfg 1.3.0", -] - -[[package]] -name = "autocfg" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" - -[[package]] -name = "bellpepper" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ae286c2cb403324ab644c7cc68dceb25fe52ca9429908a726d7ed272c1edf7b" -dependencies = [ - "bellpepper-core", - "byteorder", - "ff", -] - -[[package]] -name = "bellpepper-core" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d8abb418570756396d722841b19edfec21d4e89e1cf8990610663040ecb1aea" -dependencies = [ - "blake2s_simd", - "byteorder", - "ff", - "serde", - "thiserror", -] - -[[package]] -name = "bincode" -version = "1.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" -dependencies = [ - "serde", -] - -[[package]] -name = "bindgen" -version = "0.69.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a00dc851838a2120612785d195287475a3ac45514741da670b735818822129a0" -dependencies = [ - "bitflags", - "cexpr", - "clang-sys", - "itertools 0.12.1", - "lazy_static", - "lazycell", - "log", - "prettyplease", - "proc-macro2", - "quote", - "regex", - "rustc-hash", - "shlex", - "syn 2.0.77", - "which", -] - -[[package]] -name = "bit-set" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0700ddab506f33b20a03b13996eccd309a48e5ff77d0d95926aa0210fb4e95f1" -dependencies = [ - "bit-vec", -] - -[[package]] -name = "bit-vec" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" - -[[package]] -name = "bitflags" -version = "2.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" - -[[package]] -name = "bitvec" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" -dependencies = [ - "funty", - "radium", - "tap", - "wyz", -] - -[[package]] -name = "blake2b_simd" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23285ad32269793932e830392f2fe2f83e26488fd3ec778883a93c8323735780" -dependencies = [ - "arrayref", - "arrayvec", - "constant_time_eq", -] - -[[package]] -name = "blake2s_simd" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94230421e395b9920d23df13ea5d77a20e1725331f90fbbf6df6040b33f756ae" -dependencies = [ - "arrayref", - "arrayvec", - "constant_time_eq", -] - -[[package]] -name = "block-buffer" -version = "0.10.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" -dependencies = [ - "generic-array 0.14.7", -] - -[[package]] -name = "blst" -version = "0.3.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4378725facc195f1a538864863f6de233b500a8862747e7f165078a419d5e874" -dependencies = [ - "cc", - "glob", - "threadpool", - "zeroize", -] - -[[package]] -name = "blstrs" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a8a8ed6fefbeef4a8c7b460e4110e12c5e22a5b7cf32621aae6ad650c4dcf29" -dependencies = [ - "blst", - "byte-slice-cast", - "ff", - "group", - "pairing", - "rand_core 0.6.4", - "serde", - "subtle", -] - -[[package]] -name = "bumpalo" -version = "3.16.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" - -[[package]] -name = "byte-slice-cast" -version = "1.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" - -[[package]] -name = "byteorder" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" - -[[package]] -name = "bytes" -version = "1.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" - -[[package]] -name = "cc" -version = "1.1.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45bcde016d64c21da4be18b655631e5ab6d3107607e71a73a9f53eb48aae23fb" -dependencies = [ - "shlex", -] - -[[package]] -name = "cexpr" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" -dependencies = [ - "nom", -] - -[[package]] -name = "cfg-if" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" - -[[package]] -name = "circom_algebra" -version = "2.1.4" -source = "git+https://github.com/olomix/circom.git?branch=master#22de6cb3200ed673c53201d9007ecdf21013783a" -dependencies = [ - "constant_tracking", - "num-bigint-dig", - "num-traits", -] - -[[package]] -name = "circom_witnesscalc" -version = "0.2.0" -source = "git+https://github.com/iden3/circom-witnesscalc#ec597bb986883416fd6be312b4fa044b8d6bcc25" -dependencies = [ - "ark-bn254", - "ark-ff 0.4.2", - "ark-serialize 0.4.2", - "bindgen", - "byteorder", - "code_producers", - "compiler", - "constraint_generation", - "hex", - "lazy_static", - "libc", - 
"parser", - "postcard", - "program_structure", - "prost", - "prost-build", - "rand 0.8.5", - "ruint", - "serde", - "serde_json", - "type_analysis", - "wtns-file", -] - -[[package]] -name = "clang-sys" -version = "1.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4" -dependencies = [ - "glob", - "libc", - "libloading", -] - -[[package]] -name = "clap" -version = "4.5.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e5a21b8495e732f1b3c364c9949b201ca7bae518c502c80256c96ad79eaf6ac" -dependencies = [ - "clap_builder", - "clap_derive", -] - -[[package]] -name = "clap_builder" -version = "4.5.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cf2dd12af7a047ad9d6da2b6b249759a22a7abc0f474c1dae1777afa4b21a73" -dependencies = [ - "anstream", - "anstyle", - "clap_lex", - "strsim", -] - -[[package]] -name = "clap_derive" -version = "4.5.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "501d359d5f3dcaf6ecdeee48833ae73ec6e42723a1e52419c79abf9507eec0a0" -dependencies = [ - "heck", - "proc-macro2", - "quote", - "syn 2.0.77", -] - -[[package]] -name = "clap_lex" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97" - -[[package]] -name = "cobs" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67ba02a97a2bd10f4b59b25c7973101c79642302776489e030cd13cdab09ed15" - -[[package]] -name = "code_producers" -version = "2.1.9" -source = "git+https://github.com/olomix/circom.git?branch=master#22de6cb3200ed673c53201d9007ecdf21013783a" -dependencies = [ - "handlebars", - "lz_fnv", - "num-bigint-dig", - "serde_json", -] - -[[package]] -name = "codespan" -version = "0.9.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "8ebaf6bb6a863ad6aa3a18729e9710c53d75df03306714d9cc1f7357a00cd789" -dependencies = [ - "codespan-reporting", -] - -[[package]] -name = "codespan-reporting" -version = "0.9.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e0762455306b1ed42bc651ef6a2197aabda5e1d4a43c34d5eab5c1a3634e81d" -dependencies = [ - "termcolor", - "unicode-width", -] - -[[package]] -name = "colorchoice" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3fd119d74b830634cea2a0f58bbd0d54540518a14397557951e79340abc28c0" - -[[package]] -name = "compiler" -version = "2.1.9" -source = "git+https://github.com/olomix/circom.git?branch=master#22de6cb3200ed673c53201d9007ecdf21013783a" -dependencies = [ - "code_producers", - "constant_tracking", - "num-bigint-dig", - "num-traits", - "program_structure", -] - -[[package]] -name = "constant_time_eq" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c74b8349d32d297c9134b8c88677813a227df8f779daa29bfc29c183fe3dca6" - -[[package]] -name = "constant_tracking" -version = "2.0.0" -source = "git+https://github.com/olomix/circom.git?branch=master#22de6cb3200ed673c53201d9007ecdf21013783a" - -[[package]] -name = "constraint_generation" -version = "2.1.9" -source = "git+https://github.com/olomix/circom.git?branch=master#22de6cb3200ed673c53201d9007ecdf21013783a" -dependencies = [ - "ansi_term", - "circom_algebra", - "compiler", - "constraint_list", - "constraint_writers", - "dag", - "num-bigint-dig", - "num-traits", - "program_structure", -] - -[[package]] -name = "constraint_list" -version = "2.1.8" -source = "git+https://github.com/olomix/circom.git?branch=master#22de6cb3200ed673c53201d9007ecdf21013783a" -dependencies = [ - "circom_algebra", - "constraint_writers", - "json", - "num_cpus", - "program_structure", - "threadpool", -] - -[[package]] -name = "constraint_writers" -version = "2.1.8" -source = 
"git+https://github.com/olomix/circom.git?branch=master#22de6cb3200ed673c53201d9007ecdf21013783a" -dependencies = [ - "circom_algebra", - "json", -] - -[[package]] -name = "cpufeatures" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "608697df725056feaccfa42cffdaeeec3fccc4ffc38358ecd19b243e716a78e0" -dependencies = [ - "libc", -] - -[[package]] -name = "crossbeam-deque" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d" -dependencies = [ - "crossbeam-epoch", - "crossbeam-utils", -] - -[[package]] -name = "crossbeam-epoch" -version = "0.9.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" -dependencies = [ - "crossbeam-utils", -] - -[[package]] -name = "crossbeam-utils" -version = "0.8.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" - -[[package]] -name = "crunchy" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" - -[[package]] -name = "crypto-common" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" -dependencies = [ - "generic-array 0.14.7", - "typenum", -] - -[[package]] -name = "dag" -version = "2.1.8" -source = "git+https://github.com/olomix/circom.git?branch=master#22de6cb3200ed673c53201d9007ecdf21013783a" -dependencies = [ - "circom_algebra", - "constraint_list", - "constraint_writers", - "json", - "program_structure", -] - -[[package]] -name = "derivative" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "diff" -version = "0.1.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8" - -[[package]] -name = "digest" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" -dependencies = [ - "generic-array 0.14.7", -] - -[[package]] -name = "digest" -version = "0.10.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" -dependencies = [ - "block-buffer", - "crypto-common", -] - -[[package]] -name = "dirs-next" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b98cf8ebf19c3d1b223e151f99a4f9f0690dca41414773390fc824184ac833e1" -dependencies = [ - "cfg-if", - "dirs-sys-next", -] - -[[package]] -name = "dirs-sys-next" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ebda144c4fe02d1f7ea1a7d9641b6fc6b580adcfa024ae48797ecdeb6825b4d" -dependencies = [ - "libc", - "redox_users", - "winapi", -] - -[[package]] -name = "either" -version = "1.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" - -[[package]] -name = "embedded-io" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef1a6892d9eef45c8fa6b9e0086428a2cca8491aca8f787c534a3d6d0bcb3ced" - -[[package]] -name = "embedded-io" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edd0f118536f44f5ccd48bcb8b111bdc3de888b58c74639dfb034a357d0f206d" - -[[package]] -name = "ena" -version = "0.14.3" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d248bdd43ce613d87415282f69b9bb99d947d290b10962dd6c56233312c2ad5" -dependencies = [ - "log", -] - -[[package]] -name = "equivalent" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" - -[[package]] -name = "errno" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba" -dependencies = [ - "libc", - "windows-sys 0.52.0", -] - -[[package]] -name = "fastrand" -version = "2.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6" - -[[package]] -name = "fastrlp" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "139834ddba373bbdd213dffe02c8d110508dcf1726c2be27e8d1f7d7e1856418" -dependencies = [ - "arrayvec", - "auto_impl", - "bytes", -] - -[[package]] -name = "ff" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449" -dependencies = [ - "bitvec", - "byteorder", - "ff_derive", - "rand_core 0.6.4", - "subtle", -] - -[[package]] -name = "ff_derive" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9f54704be45ed286151c5e11531316eaef5b8f5af7d597b806fdb8af108d84a" -dependencies = [ - "addchain", - "cfg-if", - "num-bigint 0.3.3", - "num-integer", - "num-traits", - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "fixed-hash" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "835c052cb0c08c1acf6ffd71c022172e18723949c8282f2b9f27efbc51e64534" -dependencies = [ - "byteorder", - "rand 0.8.5", - "rustc-hex", - "static_assertions", -] - 
-[[package]] -name = "fixedbitset" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" - -[[package]] -name = "funty" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" - -[[package]] -name = "generic-array" -version = "0.14.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" -dependencies = [ - "typenum", - "version_check", -] - -[[package]] -name = "generic-array" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96512db27971c2c3eece70a1e106fbe6c87760234e31e8f7e5634912fe52794a" -dependencies = [ - "typenum", -] - -[[package]] -name = "getrandom" -version = "0.1.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" -dependencies = [ - "cfg-if", - "libc", - "wasi 0.9.0+wasi-snapshot-preview1", -] - -[[package]] -name = "getrandom" -version = "0.2.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" -dependencies = [ - "cfg-if", - "js-sys", - "libc", - "wasi 0.11.0+wasi-snapshot-preview1", - "wasm-bindgen", -] - -[[package]] -name = "glob" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" - -[[package]] -name = "group" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" -dependencies = [ - "ff", - "rand 0.8.5", - "rand_core 0.6.4", - "rand_xorshift", - "subtle", -] - -[[package]] -name = 
"halo2curves" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db81d01d0bbfec9f624d7590fc6929ee2537a64ec1e080d8f8c9e2d2da291405" -dependencies = [ - "blake2b_simd", - "ff", - "group", - "hex", - "lazy_static", - "num-bigint 0.4.6", - "num-traits", - "pairing", - "pasta_curves", - "paste", - "rand 0.8.5", - "rand_core 0.6.4", - "rayon", - "serde", - "serde_arrays", - "static_assertions", - "subtle", -] - -[[package]] -name = "handlebars" -version = "4.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "faa67bab9ff362228eb3d00bd024a4965d8231bbb7921167f0cfa66c6626b225" -dependencies = [ - "log", - "pest", - "pest_derive", - "serde", - "serde_json", - "thiserror", -] - -[[package]] -name = "hashbrown" -version = "0.13.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" -dependencies = [ - "ahash", -] - -[[package]] -name = "hashbrown" -version = "0.14.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" - -[[package]] -name = "heck" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" - -[[package]] -name = "hermit-abi" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" - -[[package]] -name = "hermit-abi" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc" - -[[package]] -name = "hex" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" -dependencies = [ - 
"serde", -] - -[[package]] -name = "home" -version = "0.5.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5" -dependencies = [ - "windows-sys 0.52.0", -] - -[[package]] -name = "impl-codec" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" -dependencies = [ - "parity-scale-codec", -] - -[[package]] -name = "impl-trait-for-tuples" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "indexmap" -version = "2.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68b900aa2f7301e21c36462b170ee99994de34dff39a4a6a528e80e7376d07e5" -dependencies = [ - "equivalent", - "hashbrown 0.14.5", -] - -[[package]] -name = "is-terminal" -version = "0.4.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "261f68e344040fbd0edea105bef17c66edf46f984ddb1115b775ce31be948f4b" -dependencies = [ - "hermit-abi 0.4.0", - "libc", - "windows-sys 0.52.0", -] - -[[package]] -name = "is_terminal_polyfill" -version = "1.70.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" - -[[package]] -name = "itertools" -version = "0.10.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" -dependencies = [ - "either", -] - -[[package]] -name = "itertools" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" -dependencies = [ - "either", -] - 
-[[package]] -name = "itoa" -version = "1.0.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" - -[[package]] -name = "js-sys" -version = "0.3.70" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1868808506b929d7b0cfa8f75951347aa71bb21144b7791bae35d9bccfcfe37a" -dependencies = [ - "wasm-bindgen", -] - -[[package]] -name = "json" -version = "0.12.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "078e285eafdfb6c4b434e0d31e8cfcb5115b651496faca5749b88fafd4f23bfd" - -[[package]] -name = "keccak" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecc2af9a1119c51f12a14607e783cb977bde58bc069ff0c3da1095e635d70654" -dependencies = [ - "cpufeatures", -] - -[[package]] -name = "lalrpop" -version = "0.19.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a1cbf952127589f2851ab2046af368fd20645491bb4b376f04b7f94d7a9837b" -dependencies = [ - "ascii-canvas", - "bit-set", - "diff", - "ena", - "is-terminal", - "itertools 0.10.5", - "lalrpop-util", - "petgraph", - "regex", - "regex-syntax 0.6.29", - "string_cache", - "term", - "tiny-keccak", - "unicode-xid", -] - -[[package]] -name = "lalrpop-util" -version = "0.19.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3c48237b9604c5a4702de6b824e02006c3214327564636aef27c1028a8fa0ed" -dependencies = [ - "regex", -] - -[[package]] -name = "lazy_static" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" -dependencies = [ - "spin", -] - -[[package]] -name = "lazycell" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" - -[[package]] -name = "libc" 
-version = "0.2.158" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439" - -[[package]] -name = "libloading" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" -dependencies = [ - "cfg-if", - "windows-targets", -] - -[[package]] -name = "libm" -version = "0.2.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" - -[[package]] -name = "libredox" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" -dependencies = [ - "bitflags", - "libc", -] - -[[package]] -name = "linux-raw-sys" -version = "0.4.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" - -[[package]] -name = "lock_api" -version = "0.4.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" -dependencies = [ - "autocfg 1.3.0", - "scopeguard", -] - -[[package]] -name = "log" -version = "0.4.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" - -[[package]] -name = "lz_fnv" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bbb1b0dbe51f0976eaa466f4e0bdc11856fe8008aee26f30ccec8de15b28e38" - -[[package]] -name = "memchr" -version = "2.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" - -[[package]] -name = "minimal-lexical" -version = "0.2.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" - -[[package]] -name = "multimap" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "defc4c55412d89136f966bbb339008b474350e5e6e78d2714439c386b3137a03" - -[[package]] -name = "neptune" -version = "13.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06626c9ac04c894e9a23d061ba1309f28506cdc5fe64156d28a15fb57fc8e438" -dependencies = [ - "bellpepper", - "bellpepper-core", - "blake2s_simd", - "blstrs", - "byteorder", - "ff", - "generic-array 0.14.7", - "log", - "pasta_curves", - "serde", - "trait-set", -] - -[[package]] -name = "new_debug_unreachable" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "650eef8c711430f1a879fdd01d4745a7deea475becfb90269c06775983bbf086" - -[[package]] -name = "nom" -version = "7.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" -dependencies = [ - "memchr", - "minimal-lexical", -] - -[[package]] -name = "nova-snark" -version = "0.37.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69b80bc67f3e77ad68dec356b5df15e8ce30d8855fc76e92782945a5fa74d6fc" -dependencies = [ - "bellpepper", - "bellpepper-core", - "bincode", - "bitvec", - "byteorder", - "digest 0.10.7", - "ff", - "generic-array 1.1.0", - "getrandom 0.2.15", - "group", - "halo2curves", - "itertools 0.12.1", - "neptune", - "num-bigint 0.4.6", - "num-integer", - "num-traits", - "once_cell", - "pasta-msm", - "pasta_curves", - "rand_chacha 0.3.1", - "rand_core 0.6.4", - "rayon", - "serde", - "sha3", - "subtle", - "thiserror", -] - -[[package]] -name = "num-bigint" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5f6f7833f2cbf2360a6cfd58cd41a53aa7a90bd4c202f5b1c7dd2ed73c57b2c3" -dependencies = [ - "autocfg 1.3.0", - "num-integer", - "num-traits", -] - -[[package]] -name = "num-bigint" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" -dependencies = [ - "num-integer", - "num-traits", - "rand 0.8.5", - "serde", -] - -[[package]] -name = "num-bigint-dig" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d51546d704f52ef14b3c962b5776e53d5b862e5790e40a350d366c209bd7f7a" -dependencies = [ - "autocfg 0.1.8", - "byteorder", - "lazy_static", - "libm", - "num-integer", - "num-iter", - "num-traits", - "rand 0.7.3", - "serde", - "smallvec", -] - -[[package]] -name = "num-integer" -version = "0.1.46" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" -dependencies = [ - "num-traits", -] - -[[package]] -name = "num-iter" -version = "0.1.45" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf" -dependencies = [ - "autocfg 1.3.0", - "num-integer", - "num-traits", -] - -[[package]] -name = "num-traits" -version = "0.2.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" -dependencies = [ - "autocfg 1.3.0", - "libm", -] - -[[package]] -name = "num_cpus" -version = "1.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" -dependencies = [ - "hermit-abi 0.3.9", - "libc", -] - -[[package]] -name = "once_cell" -version = "1.19.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" - -[[package]] -name = "pairing" -version = "0.23.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81fec4625e73cf41ef4bb6846cafa6d44736525f442ba45e407c4a000a13996f" -dependencies = [ - "group", -] - -[[package]] -name = "parity-scale-codec" -version = "3.6.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "306800abfa29c7f16596b5970a588435e3d5b3149683d00c12b699cc19f895ee" -dependencies = [ - "arrayvec", - "bitvec", - "byte-slice-cast", - "impl-trait-for-tuples", - "parity-scale-codec-derive", - "serde", -] - -[[package]] -name = "parity-scale-codec-derive" -version = "3.6.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d830939c76d294956402033aee57a6da7b438f2294eb94864c37b0569053a42c" -dependencies = [ - "proc-macro-crate", - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "parking_lot" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" -dependencies = [ - "lock_api", - "parking_lot_core", -] - -[[package]] -name = "parking_lot_core" -version = "0.9.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" -dependencies = [ - "cfg-if", - "libc", - "redox_syscall", - "smallvec", - "windows-targets", -] - -[[package]] -name = "parser" -version = "2.1.9" -source = "git+https://github.com/olomix/circom.git?branch=master#22de6cb3200ed673c53201d9007ecdf21013783a" -dependencies = [ - "lalrpop", - "lalrpop-util", - "num-bigint-dig", - "num-traits", - "program_structure", - "regex", - "rustc-hex", - "serde", - "serde_derive", -] - -[[package]] -name = "pasta-msm" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9e85d75eba3e7e9ee3bd11342b669185e194dadda3557934bc1000d9b87159d3" -dependencies = [ - "cc", - "pasta_curves", - "semolina", - "sppark", - "which", -] - -[[package]] -name = "pasta_curves" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3e57598f73cc7e1b2ac63c79c517b31a0877cd7c402cdcaa311b5208de7a095" -dependencies = [ - "blake2b_simd", - "ff", - "group", - "hex", - "lazy_static", - "rand 0.8.5", - "serde", - "static_assertions", - "subtle", -] - -[[package]] -name = "paste" -version = "1.0.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" - -[[package]] -name = "pest" -version = "2.7.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c73c26c01b8c87956cea613c907c9d6ecffd8d18a2a5908e5de0adfaa185cea" -dependencies = [ - "memchr", - "thiserror", - "ucd-trie", -] - -[[package]] -name = "pest_derive" -version = "2.7.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "664d22978e2815783adbdd2c588b455b1bd625299ce36b2a99881ac9627e6d8d" -dependencies = [ - "pest", - "pest_generator", -] - -[[package]] -name = "pest_generator" -version = "2.7.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2d5487022d5d33f4c30d91c22afa240ce2a644e87fe08caad974d4eab6badbe" -dependencies = [ - "pest", - "pest_meta", - "proc-macro2", - "quote", - "syn 2.0.77", -] - -[[package]] -name = "pest_meta" -version = "2.7.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0091754bbd0ea592c4deb3a122ce8ecbb0753b738aa82bc055fcc2eccc8d8174" -dependencies = [ - "once_cell", - "pest", - "sha2", -] - -[[package]] -name = "petgraph" -version = "0.6.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" -dependencies = [ - "fixedbitset", - 
"indexmap", -] - -[[package]] -name = "phf_shared" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6796ad771acdc0123d2a88dc428b5e38ef24456743ddb1744ed628f9815c096" -dependencies = [ - "siphasher", -] - -[[package]] -name = "postcard" -version = "1.0.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f7f0a8d620d71c457dd1d47df76bb18960378da56af4527aaa10f515eee732e" -dependencies = [ - "cobs", - "embedded-io 0.4.0", - "embedded-io 0.6.1", - "serde", -] - -[[package]] -name = "ppv-lite86" -version = "0.2.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" -dependencies = [ - "zerocopy", -] - -[[package]] -name = "precomputed-hash" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c" - -[[package]] -name = "prettyplease" -version = "0.2.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "479cf940fbbb3426c32c5d5176f62ad57549a0bb84773423ba8be9d089f5faba" -dependencies = [ - "proc-macro2", - "syn 2.0.77", -] - -[[package]] -name = "primitive-types" -version = "0.12.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b34d9fd68ae0b74a41b21c03c2f62847aa0ffea044eee893b4c140b37e244e2" -dependencies = [ - "fixed-hash", - "impl-codec", - "uint", -] - -[[package]] -name = "proc-macro-crate" -version = "3.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ecf48c7ca261d60b74ab1a7b20da18bede46776b2e55535cb958eb595c5fa7b" -dependencies = [ - "toml_edit", -] - -[[package]] -name = "proc-macro2" -version = "1.0.86" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" -dependencies = [ - "unicode-ident", -] - 
-[[package]] -name = "program_structure" -version = "2.1.9" -source = "git+https://github.com/olomix/circom.git?branch=master#22de6cb3200ed673c53201d9007ecdf21013783a" -dependencies = [ - "codespan", - "codespan-reporting", - "num-bigint-dig", - "num-traits", - "regex", - "rustc-hex", - "serde", - "serde_derive", -] - -[[package]] -name = "proofs" -version = "0.1.0" -dependencies = [ - "bellpepper-core", - "byteorder", - "circom_witnesscalc", - "clap", - "ff", - "nova-snark", - "num-bigint 0.4.6", - "num-traits", - "serde", - "serde_json", -] - -[[package]] -name = "proptest" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4c2511913b88df1637da85cc8d96ec8e43a3f8bb8ccb71ee1ac240d6f3df58d" -dependencies = [ - "bitflags", - "lazy_static", - "num-traits", - "rand 0.8.5", - "rand_chacha 0.3.1", - "rand_xorshift", - "regex-syntax 0.8.4", - "unarray", -] - -[[package]] -name = "prost" -version = "0.13.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b2ecbe40f08db5c006b5764a2645f7f3f141ce756412ac9e1dd6087e6d32995" -dependencies = [ - "bytes", - "prost-derive", -] - -[[package]] -name = "prost-build" -version = "0.13.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8650aabb6c35b860610e9cff5dc1af886c9e25073b7b1712a68972af4281302" -dependencies = [ - "bytes", - "heck", - "itertools 0.12.1", - "log", - "multimap", - "once_cell", - "petgraph", - "prettyplease", - "prost", - "prost-types", - "regex", - "syn 2.0.77", - "tempfile", -] - -[[package]] -name = "prost-derive" -version = "0.13.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acf0c195eebb4af52c752bec4f52f645da98b6e92077a04110c7f349477ae5ac" -dependencies = [ - "anyhow", - "itertools 0.12.1", - "proc-macro2", - "quote", - "syn 2.0.77", -] - -[[package]] -name = "prost-types" -version = "0.13.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"60caa6738c7369b940c3d49246a8d1749323674c65cb13010134f5c9bad5b519" -dependencies = [ - "prost", -] - -[[package]] -name = "quote" -version = "1.0.37" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" -dependencies = [ - "proc-macro2", -] - -[[package]] -name = "radium" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" - -[[package]] -name = "rand" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" -dependencies = [ - "getrandom 0.1.16", - "libc", - "rand_chacha 0.2.2", - "rand_core 0.5.1", - "rand_hc", -] - -[[package]] -name = "rand" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" -dependencies = [ - "libc", - "rand_chacha 0.3.1", - "rand_core 0.6.4", -] - -[[package]] -name = "rand_chacha" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" -dependencies = [ - "ppv-lite86", - "rand_core 0.5.1", -] - -[[package]] -name = "rand_chacha" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" -dependencies = [ - "ppv-lite86", - "rand_core 0.6.4", -] - -[[package]] -name = "rand_core" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" -dependencies = [ - "getrandom 0.1.16", -] - -[[package]] -name = "rand_core" -version = "0.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" -dependencies = [ - "getrandom 0.2.15", -] - -[[package]] -name = "rand_hc" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" -dependencies = [ - "rand_core 0.5.1", -] - -[[package]] -name = "rand_xorshift" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" -dependencies = [ - "rand_core 0.6.4", -] - -[[package]] -name = "rayon" -version = "1.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" -dependencies = [ - "either", - "rayon-core", -] - -[[package]] -name = "rayon-core" -version = "1.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" -dependencies = [ - "crossbeam-deque", - "crossbeam-utils", -] - -[[package]] -name = "redox_syscall" -version = "0.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0884ad60e090bf1345b93da0a5de8923c93884cd03f40dfcfddd3b4bee661853" -dependencies = [ - "bitflags", -] - -[[package]] -name = "redox_users" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" -dependencies = [ - "getrandom 0.2.15", - "libredox", - "thiserror", -] - -[[package]] -name = "regex" -version = "1.10.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619" -dependencies = [ - "aho-corasick", - "memchr", - "regex-automata", - "regex-syntax 0.8.4", -] - -[[package]] -name = "regex-automata" -version = "0.4.7" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df" -dependencies = [ - "aho-corasick", - "memchr", - "regex-syntax 0.8.4", -] - -[[package]] -name = "regex-syntax" -version = "0.6.29" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" - -[[package]] -name = "regex-syntax" -version = "0.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b" - -[[package]] -name = "rlp" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb919243f34364b6bd2fc10ef797edbfa75f33c252e7998527479c6d6b47e1ec" -dependencies = [ - "bytes", - "rustc-hex", -] - -[[package]] -name = "ruint" -version = "1.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c3cc4c2511671f327125da14133d0c5c5d137f006a1017a16f557bc85b16286" -dependencies = [ - "alloy-rlp", - "ark-ff 0.3.0", - "ark-ff 0.4.2", - "bytes", - "fastrlp", - "num-bigint 0.4.6", - "num-traits", - "parity-scale-codec", - "primitive-types", - "proptest", - "rand 0.8.5", - "rlp", - "ruint-macro", - "serde", - "valuable", - "zeroize", -] - -[[package]] -name = "ruint-macro" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48fd7bd8a6377e15ad9d42a8ec25371b94ddc67abe7c8b9127bec79bebaaae18" - -[[package]] -name = "rustc-hash" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" - -[[package]] -name = "rustc-hex" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" - -[[package]] -name = "rustc_version" -version = "0.3.3" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0dfe2087c51c460008730de8b57e6a320782fbfb312e1f4d520e6c6fae155ee" -dependencies = [ - "semver 0.11.0", -] - -[[package]] -name = "rustc_version" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" -dependencies = [ - "semver 1.0.23", -] - -[[package]] -name = "rustix" -version = "0.38.37" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8acb788b847c24f28525660c4d7758620a7210875711f79e7f663cc152726811" -dependencies = [ - "bitflags", - "errno", - "libc", - "linux-raw-sys", - "windows-sys 0.52.0", -] - -[[package]] -name = "rustversion" -version = "1.0.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6" - -[[package]] -name = "ryu" -version = "1.0.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" - -[[package]] -name = "scopeguard" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" - -[[package]] -name = "semolina" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b0111fd4fa831becb0606b9a2285ef3bee3c6a70d690209b8ae9514e9befe23" -dependencies = [ - "cc", - "glob", -] - -[[package]] -name = "semver" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6" -dependencies = [ - "semver-parser", -] - -[[package]] -name = "semver" -version = "1.0.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" - -[[package]] 
-name = "semver-parser" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0bef5b7f9e0df16536d3961cfb6e84331c065b4066afb39768d0e319411f7" -dependencies = [ - "pest", -] - -[[package]] -name = "serde" -version = "1.0.210" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" -dependencies = [ - "serde_derive", -] - -[[package]] -name = "serde_arrays" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38636132857f68ec3d5f3eb121166d2af33cb55174c4d5ff645db6165cbef0fd" -dependencies = [ - "serde", -] - -[[package]] -name = "serde_derive" -version = "1.0.210" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.77", -] - -[[package]] -name = "serde_json" -version = "1.0.128" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" -dependencies = [ - "itoa", - "memchr", - "ryu", - "serde", -] - -[[package]] -name = "sha2" -version = "0.10.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" -dependencies = [ - "cfg-if", - "cpufeatures", - "digest 0.10.7", -] - -[[package]] -name = "sha3" -version = "0.10.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" -dependencies = [ - "digest 0.10.7", - "keccak", -] - -[[package]] -name = "shlex" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" - -[[package]] -name = "siphasher" -version = "0.3.11" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "38b58827f4464d87d377d175e90bf58eb00fd8716ff0a62f80356b5e61555d0d" - -[[package]] -name = "smallvec" -version = "1.13.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" - -[[package]] -name = "spin" -version = "0.9.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" - -[[package]] -name = "sppark" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c55f3833d30846a26110dccb1d5366314c2c52516a9173b74238c16b24b1a9f9" -dependencies = [ - "cc", - "which", -] - -[[package]] -name = "static_assertions" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" - -[[package]] -name = "string_cache" -version = "0.8.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f91138e76242f575eb1d3b38b4f1362f10d3a43f47d182a5b359af488a02293b" -dependencies = [ - "new_debug_unreachable", - "once_cell", - "parking_lot", - "phf_shared", - "precomputed-hash", -] - -[[package]] -name = "strsim" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" - -[[package]] -name = "subtle" -version = "2.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" - -[[package]] -name = "syn" -version = "1.0.109" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "syn" -version = "2.0.77" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f35bcdf61fd8e7be6caf75f429fdca8beb3ed76584befb503b1569faee373ed" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "tap" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" - -[[package]] -name = "tempfile" -version = "3.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04cbcdd0c794ebb0d4cf35e88edd2f7d2c4c3e9a5a6dab322839b321c6a87a64" -dependencies = [ - "cfg-if", - "fastrand", - "once_cell", - "rustix", - "windows-sys 0.59.0", -] - -[[package]] -name = "term" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c59df8ac95d96ff9bede18eb7300b0fda5e5d8d90960e76f8e14ae765eedbf1f" -dependencies = [ - "dirs-next", - "rustversion", - "winapi", -] - -[[package]] -name = "termcolor" -version = "1.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" -dependencies = [ - "winapi-util", -] - -[[package]] -name = "thiserror" -version = "1.0.63" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0342370b38b6a11b6cc11d6a805569958d54cfa061a29969c3b5ce2ea405724" -dependencies = [ - "thiserror-impl", -] - -[[package]] -name = "thiserror-impl" -version = "1.0.63" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.77", -] - -[[package]] -name = "threadpool" -version = "1.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d050e60b33d41c19108b32cea32164033a9013fe3b46cbd4457559bfbf77afaa" -dependencies = [ - "num_cpus", -] - -[[package]] -name = "tiny-keccak" -version = 
"2.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" -dependencies = [ - "crunchy", -] - -[[package]] -name = "toml_datetime" -version = "0.6.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" - -[[package]] -name = "toml_edit" -version = "0.22.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b072cee73c449a636ffd6f32bd8de3a9f7119139aff882f44943ce2986dc5cf" -dependencies = [ - "indexmap", - "toml_datetime", - "winnow", -] - -[[package]] -name = "trait-set" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b79e2e9c9ab44c6d7c20d5976961b47e8f49ac199154daa514b77cd1ab536625" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "type_analysis" -version = "2.1.9" -source = "git+https://github.com/olomix/circom.git?branch=master#22de6cb3200ed673c53201d9007ecdf21013783a" -dependencies = [ - "num-bigint-dig", - "num-traits", - "program_structure", -] - -[[package]] -name = "typenum" -version = "1.17.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" - -[[package]] -name = "ucd-trie" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" - -[[package]] -name = "uint" -version = "0.9.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76f64bba2c53b04fcab63c01a7d7427eadc821e3bc48c34dc9ba29c501164b52" -dependencies = [ - "byteorder", - "crunchy", - "hex", - "static_assertions", -] - -[[package]] -name = "unarray" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" - -[[package]] -name = "unicode-ident" -version = "1.0.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" - -[[package]] -name = "unicode-width" -version = "0.1.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0336d538f7abc86d282a4189614dfaa90810dfc2c6f6427eaf88e16311dd225d" - -[[package]] -name = "unicode-xid" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "229730647fbc343e3a80e463c1db7f78f3855d3f3739bee0dda773c9a037c90a" - -[[package]] -name = "utf8parse" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" - -[[package]] -name = "valuable" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" - -[[package]] -name = "version_check" -version = "0.9.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" - -[[package]] -name = "wasi" -version = "0.9.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" - -[[package]] -name = "wasi" -version = "0.11.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" - -[[package]] -name = "wasm-bindgen" -version = "0.2.93" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a82edfc16a6c469f5f44dc7b571814045d60404b55a0ee849f9bcfa2e63dd9b5" -dependencies = [ - "cfg-if", - "once_cell", - "wasm-bindgen-macro", -] - 
-[[package]] -name = "wasm-bindgen-backend" -version = "0.2.93" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9de396da306523044d3302746f1208fa71d7532227f15e347e2d93e4145dd77b" -dependencies = [ - "bumpalo", - "log", - "once_cell", - "proc-macro2", - "quote", - "syn 2.0.77", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-macro" -version = "0.2.93" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "585c4c91a46b072c92e908d99cb1dcdf95c5218eeb6f3bf1efa991ee7a68cccf" -dependencies = [ - "quote", - "wasm-bindgen-macro-support", -] - -[[package]] -name = "wasm-bindgen-macro-support" -version = "0.2.93" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.77", - "wasm-bindgen-backend", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-shared" -version = "0.2.93" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484" - -[[package]] -name = "which" -version = "4.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" -dependencies = [ - "either", - "home", - "once_cell", - "rustix", -] - -[[package]] -name = "winapi" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" -dependencies = [ - "winapi-i686-pc-windows-gnu", - "winapi-x86_64-pc-windows-gnu", -] - -[[package]] -name = "winapi-i686-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" - -[[package]] -name = "winapi-util" -version = "0.1.9" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" -dependencies = [ - "windows-sys 0.52.0", -] - -[[package]] -name = "winapi-x86_64-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" - -[[package]] -name = "windows-sys" -version = "0.52.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" -dependencies = [ - "windows-targets", -] - -[[package]] -name = "windows-sys" -version = "0.59.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" -dependencies = [ - "windows-targets", -] - -[[package]] -name = "windows-targets" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" -dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_gnullvm", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", -] - -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" - -[[package]] -name = "windows_i686_gnu" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" - -[[package]] -name = "windows_i686_gnullvm" 
-version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" - -[[package]] -name = "windows_i686_msvc" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" - -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" - -[[package]] -name = "winnow" -version = "0.6.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68a9bda4691f099d435ad181000724da8e5899daa10713c2d432552b9ccd3a6f" -dependencies = [ - "memchr", -] - -[[package]] -name = "wtns-file" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d3b856452298f68a5879e3901918bac5d753ca9fa4be8a983a37a3d25dabf0a" -dependencies = [ - "byteorder", -] - -[[package]] -name = "wyz" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed" -dependencies = [ - "tap", -] - -[[package]] -name = "zerocopy" -version = "0.7.35" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" -dependencies = [ - "byteorder", - "zerocopy-derive", -] - -[[package]] -name = "zerocopy-derive" -version 
= "0.7.35" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.77", -] - -[[package]] -name = "zeroize" -version = "1.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" -dependencies = [ - "zeroize_derive", -] - -[[package]] -name = "zeroize_derive" -version = "1.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.77", -] diff --git a/frontend/Cargo.toml b/frontend/Cargo.toml index a9c152c..b444777 100644 --- a/frontend/Cargo.toml +++ b/frontend/Cargo.toml @@ -22,6 +22,7 @@ bincode ={ workspace=true } # noir acvm ={ git="https://github.com/noir-lang/noir", rev="v1.0.0-beta.2" } +noirc_abi={ git="https://github.com/noir-lang/noir", rev="v1.0.0-beta.2" } ark-bn254="0.5" #- Wasm target configuration ----------------------------------------------------------------------# @@ -32,7 +33,7 @@ js-sys ="0.3.64" wasm-bindgen-futures="0.4.37" [dev-dependencies] -tracing-test="0.2.5" +tracing-test={workspace=true} tempdir ="0.3.7" tokio ={ version="1.43", features=["full"] } diff --git a/frontend/src/noir.rs b/frontend/src/noir.rs index d0c8bc6..adb65e4 100644 --- a/frontend/src/noir.rs +++ b/frontend/src/noir.rs @@ -16,6 +16,7 @@ use bellpepper_core::{ }; use client_side_prover::supernova::StepCircuit; use ff::PrimeField; +use noirc_abi::{Abi, AbiParameter, AbiType, AbiVisibility}; use tracing::trace; use super::*; @@ -29,7 +30,7 @@ pub struct NoirProgram { #[serde(rename = "noir_version")] pub version: String, pub hash: u64, - pub abi: NoirAbi, + pub abi: Abi, #[serde( serialize_with = "Program::serialize_program_base64", deserialize_with = "Program::deserialize_program_base64" 
@@ -45,50 +46,9 @@ pub struct NoirProgram { pub index: usize, } -#[derive(Clone, Serialize, Deserialize, Debug)] -pub struct NoirAbi { - pub parameters: Vec, - pub return_type: NoirReturnType, - pub error_types: HashMap, -} - -#[derive(Clone, Serialize, Deserialize, Debug)] -pub struct NoirParameter { - pub name: String, - #[serde(rename = "type")] - pub parameter_type: NoirType, - pub visibility: String, -} - -#[derive(Clone, Serialize, Deserialize, Debug)] -pub struct NoirReturnType { - pub abi_type: NoirType, - pub visibility: String, -} - -#[derive(Clone, Serialize, Deserialize, Debug)] -#[serde(untagged)] -pub enum NoirType { - Simple { - kind: String, - }, - Array { - kind: String, - length: usize, - #[serde(rename = "type")] - element_type: Box, - }, - Tuple { - kind: String, - fields: Vec, - }, -} - impl NoirProgram { pub fn new(bin: &[u8]) -> Self { serde_json::from_slice(bin).unwrap() } - pub fn arity(&self) -> usize { self.circuit().public_parameters.0.len() } - pub fn circuit(&self) -> &Circuit> { &self.bytecode.functions[0] } pub fn unconstrained_functions(&self) -> &Vec>> { @@ -101,15 +61,11 @@ impl NoirProgram { } impl StepCircuit> for NoirProgram { - // NOTE: +1 for the PC - fn arity(&self) -> usize { self.arity() + 1 } + // TODO: This is a bit hacky. 
We need to add 1 for the PC + fn arity(&self) -> usize { self.circuit().public_parameters.0.len() } fn circuit_index(&self) -> usize { self.index } - // TODO: we now need to shift this to use the `z` values as the sole public inputs, the struct - // should only hold witness - // TODO: We should check if the constraints for z are actually done properly - // tell clippy to shut up #[allow(clippy::too_many_lines)] fn synthesize>>( &self, @@ -117,8 +73,20 @@ impl StepCircuit> for NoirProgram { pc: Option<&AllocatedNum>>, z: &[AllocatedNum>], ) -> Result<(Option>>, Vec>>), SynthesisError> { - dbg!(z); + trace!("Synthesizing NoirProgram with {} inputs", z.len()); + trace!("Inner pc: {pc:?}"); + trace!("Circuit index: {}", self.index); + trace!("ABI parameters: {:?}", self.abi.parameters); + trace!("ABI return type: {:?}", self.abi.return_type); + trace!("Private parameters count: {}", self.circuit().private_parameters.len()); + trace!("Public parameters count: {}", self.circuit().public_parameters.0.len()); + trace!("Return values count: {}", self.circuit().return_values.0.len()); + + dbg!(&self); + + // Initialize ACVM with the circuit let mut acvm = if self.witness.is_some() { + trace!("Witness is present, initializing ACVM"); Some(ACVM::new( &StubbedBlackBoxSolver(false), &self.circuit().opcodes, @@ -127,63 +95,180 @@ impl StepCircuit> for NoirProgram { &[], )) } else { + trace!("No witness provided, skipping ACVM initialization"); None }; - // TODO: This is a bit hacky. For NIVC folding in particular: - assert_eq!(self.circuit().return_values.0.len() - 1, self.circuit().public_parameters.0.len()); - - // TODO: we could probably avoid this but i'm lazy // Create a map to track allocated variables for the cs let mut allocated_vars: HashMap>> = HashMap::new(); - // TODO: Hacking here to get the first index of public, assuming the come in a block. 
This is - // really dirty too - let num_private_inputs = dbg!(self.circuit().private_parameters.len()); - - // Set up public inputs - self.circuit().public_parameters.0.iter().for_each(|witness| { - println!("public instance: {witness:?}"); - let var = z[witness.as_usize() - num_private_inputs].clone(); - if self.witness.is_some() { - trace!("overwriting public {witness:?} with {var:?}"); - // TODO: This is a bit hacky and assumes private inputs come first. I don't like that - acvm - .as_mut() - .unwrap() - .overwrite_witness(*witness, convert_to_acir_field(var.get_value().unwrap())); - } - // TODO: Fix unwrap - // Alloc 1 for now and update later as needed - // let var = AllocatedNum::alloc(&mut *cs, || Ok(F::::ONE)).unwrap(); - // println!("AllocatedNum pub input: {var:?}"); + // Find the registers parameter in the ABI + let registers_param = match self.abi.parameters.iter().find(|p| p.name == "registers") { + Some(param) => { + trace!("Found registers parameter: {:?}", param); + param + }, + None => { + trace!("ERROR: No 'registers' parameter found in ABI"); + trace!( + "Available parameters: {:?}", + self.abi.parameters.iter().map(|p| &p.name).collect::>() + ); + panic!("Expected to find 'registers' parameter in ABI"); + }, + }; - allocated_vars.insert(*witness, var); - }); + // Get the length of registers array + let registers_length = match ®isters_param.typ { + AbiType::Array { length, .. 
} => { + trace!("Registers is an Array type with length {}", length); + *length + }, + _ => { + trace!("ERROR: Unexpected registers type: {:?}", registers_param.typ); + panic!("Expected 'registers' to be an array type, found {:?}", registers_param.typ); + }, + }; + + trace!("Using registers length: {}", registers_length); + + // Process private inputs first + trace!("Processing {} private inputs", self.circuit().private_parameters.len()); + + // Get only the private parameters from the ABI + let private_params: Vec<&AbiParameter> = + self.abi.parameters.iter().filter(|p| p.visibility == AbiVisibility::Private).collect(); + + trace!("Found {} private parameters in ABI", private_params.len()); + + for (i, witness) in self.circuit().private_parameters.iter().enumerate() { + let param = if i < private_params.len() { + private_params[i] + } else { + trace!( + "WARNING: Private parameter index {} exceeds private ABI parameters length {}", + i, + private_params.len() + ); + continue; + }; + + trace!( + "Processing private input '{}' (witness {:?}) of type {:?}", + param.name, + witness, + param.typ + ); - // Set up private inputs - self.circuit().private_parameters.iter().for_each(|witness| { let f = self.witness.as_ref().map(|inputs| { - let f = convert_to_acir_field(inputs.witness[witness.as_usize()]); - acvm.as_mut().unwrap().overwrite_witness(*witness, f); - f + trace!("Witness map size: {}", inputs.witness.len()); + if witness.as_usize() < inputs.witness.len() { + let f = convert_to_acir_field(inputs.witness[witness.as_usize()]); + trace!("Private input value: {:?}", f); + acvm.as_mut().unwrap().overwrite_witness(*witness, f); + f + } else { + trace!( + "ERROR: Witness index {} out of bounds (max: {})", + witness.as_usize(), + inputs.witness.len() - 1 + ); + GenericFieldElement::zero() + } }); - let var = AllocatedNum::alloc(&mut *cs, || Ok(convert_to_halo2_field(f.unwrap_or_default()))) - .unwrap(); + + let var = + AllocatedNum::alloc(&mut cs.namespace(|| 
format!("private_input_{}", param.name)), || { + let value = convert_to_halo2_field(f.unwrap_or_default()); + trace!("Allocated private input '{}' with value: {:?}", param.name, value); + Ok(value) + })?; + allocated_vars.insert(*witness, var); - }); + trace!( + "Added private input witness {:?} to allocated_vars (size now: {})", + witness, + allocated_vars.len() + ); + } + + // Process public inputs (registers) from z + trace!( + "Processing {} public inputs (registers) from z (z.len = {})", + self.circuit().public_parameters.0.len(), + z.len() + ); + + if z.len() != registers_length as usize { + trace!( + "WARNING: z.len() ({}) is not equal to registers_length ({})", + z.len(), + registers_length + ); + } + + for (i, witness) in self.circuit().public_parameters.0.iter().enumerate() { + if i < registers_length as usize && i < z.len() { + trace!("Processing public register at index {} (witness {:?})", i, witness); + + let var = z[i].clone(); + let value_str = var.get_value().map_or("None".to_string(), |v| format!("{:?}", v)); + trace!("Public input value from z[{}]: {}", i, value_str); + + if self.witness.is_some() { + if let Some(value) = var.get_value() { + trace!("Overwriting public witness {:?} with value from z: {:?}", witness, value); + acvm.as_mut().unwrap().overwrite_witness(*witness, convert_to_acir_field(value)); + } else { + trace!("WARNING: No value available for public input at index {}", i); + } + } + + allocated_vars.insert(*witness, var); + trace!( + "Added public input witness {:?} to allocated_vars (size now: {})", + witness, + allocated_vars.len() + ); + } else if i >= registers_length as usize { + trace!( + "Skipping public parameter at index {} as it exceeds registers_length {}", + i, + registers_length + ); + } else { + trace!("ERROR: Public parameter index {} exceeds z.len() {}", i, z.len()); + } + } + // Execute ACVM to get witness values if we have inputs let acir_witness_map = if self.witness.is_some() { - let _status = 
acvm.as_mut().unwrap().solve(); - Some(acvm.unwrap().finalize()) + trace!("Executing ACVM solve..."); + let status = acvm.as_mut().unwrap().solve(); + trace!("ACVM solve status: {:?}", status); + let witness_map = acvm.unwrap().finalize(); + Some(witness_map) } else { + trace!("Skipping ACVM execution (no witness)"); None }; + // Helper function to get witness values let get_witness_value = |witness: &Witness| -> F { - acir_witness_map.as_ref().map_or(F::::ONE, |map| { - map.get(witness).map_or(F::::ONE, |value| convert_to_halo2_field(*value)) - }) + let result = acir_witness_map.as_ref().map_or(F::::ONE, |map| { + map.get(witness).map_or_else( + || { + trace!("WARNING: Witness {witness:?} not found in ACVM witness map, using default"); + F::::ONE + }, + |value| { + let converted = convert_to_halo2_field(*value); + trace!("Got witness {:?} value: {:?}", witness, converted); + converted + }, + ) + }); + result }; // Helper to get or create a variable for a witness @@ -193,17 +278,27 @@ impl StepCircuit> for NoirProgram { gate_idx: usize| -> Result { if let Some(var) = allocated_vars.get(witness) { + trace!("Using existing variable for witness {:?}", witness); Ok(var.get_variable()) } else { + trace!("Allocating new variable for witness {:?} in gate {}", witness, gate_idx); let var = AllocatedNum::alloc(cs.namespace(|| format!("aux_{gate_idx}")), || { - Ok(get_witness_value(witness)) + let value = get_witness_value(witness); + trace!("Allocated auxiliary variable with value: {:?}", value); + Ok(value) })?; allocated_vars.insert(*witness, var.clone()); + trace!( + "Added auxiliary witness {:?} to allocated_vars (size now: {})", + witness, + allocated_vars.len() + ); Ok(var.get_variable()) } }; // Process gates + trace!("Processing {} gates", self.circuit().opcodes.len()); for (gate_idx, opcode) in self.circuit().opcodes.iter().enumerate() { if let Opcode::AssertZero(gate) = opcode { // Initialize empty linear combinations for each part of our R1CS constraint @@ 
-244,31 +339,81 @@ impl StepCircuit> for NoirProgram { |_| right_terms.clone(), |_| final_terms, ); + } else { + panic!("non-AssertZero gate {} of type {:?}", gate_idx, opcode); } } - let mut z_out = vec![]; - for ret in &self.circuit().return_values.0 { - z_out.push(allocated_vars.get(ret).unwrap().clone()); + // Prepare output values + trace!("Preparing return values"); + let mut return_values = vec![]; + for (i, ret) in self.circuit().return_values.0.iter().enumerate() { + trace!("Processing return value {} (witness {:?})", i, ret); + if let Some(var) = allocated_vars.get(ret) { + let value_str = var.get_value().map_or("None".to_string(), |v| format!("{:?}", v)); + trace!("Found allocated variable for return value {}: {}", i, value_str); + return_values.push(var.clone()); + } else { + trace!("ERROR: Return value {} (witness {:?}) not found in allocated variables", i, ret); + trace!("Available witnesses: {:?}", allocated_vars.keys().collect::>()); + return Err(SynthesisError::AssignmentMissing); + } } - // TODO: fix the pc - Ok((z_out.last().cloned(), z_out)) + trace!("Return values count: {}", return_values.len()); + trace!("Return values witnesses: {:?}", self.circuit().return_values.0); + + // Check if the return type is a struct as expected + if let Some(return_type) = &self.abi.return_type { + if let AbiType::Struct { fields, path } = &return_type.abi_type { + trace!("Return type is a struct: {} with {} fields", path, fields.len()); + + if path != "FoldingIO" { + panic!("Expected return type to be FoldingIO struct, found {}", path); + } + + // Find the registers field in the struct and get its length + let registers_length = fields + .iter() + .find(|(name, _)| name == "registers") + .map(|(_, typ)| match typ { + AbiType::Array { length, .. 
} => *length, + _ => panic!("Expected registers to be an array type, found {:?}", typ), + }) + .unwrap_or_else(|| panic!("Expected 'registers' field in FoldingIO struct")); + + trace!("registers_length: {}", registers_length); + + // The next_pc is after all the register values + let next_pc_index = registers_length as usize; + trace!("next_pc_index in flattened return values: {}", next_pc_index); + + if next_pc_index < return_values.len() { + let next_pc = Some(return_values[next_pc_index].clone()); + trace!("Using return value at index {} as next_pc", next_pc_index); + + trace!( + "Synthesis complete, returning next_pc and {} return values", + return_values[..registers_length as usize].to_vec().len() + ); + return Ok((next_pc, return_values[..registers_length as usize].to_vec())); + } else { + trace!( + "ERROR: next_pc index {} is out of bounds for return_values length {}", + next_pc_index, + return_values.len() + ); + panic!("next_pc index out of bounds"); + } + } else { + trace!("ERROR: Return type is not a struct: {:?}", return_type.abi_type); + panic!("Expected return type to be a struct, found {:?}", return_type.abi_type); + } + } else { + trace!("ERROR: No return type specified in ABI"); + panic!("Expected return type to be specified"); + } } - // TODO: fix the pc - // fn synthesize>>( - // &self, - // cs: &mut CS, - // pc: Option<&AllocatedNum>>, - // z: &[AllocatedNum>], - // ) -> Result<(Option>>, Vec>>), SynthesisError> { - // let rom_index = &z[self.arity()]; // jump to where we pushed pc data into CS - // let allocated_rom = &z[self.arity() + 1..]; // jump to where we pushed rom data into C - // let mut circuit_constraints = self.vanilla_synthesize(cs, z)?; - // circuit_constraints.push(rom_index_next); - // circuit_constraints.extend(z[self.arity() + 1..].iter().cloned()); - // Ok((Some(pc_next), circuit_constraints)) - // } } fn convert_to_halo2_field(f: GenericFieldElement) -> F { @@ -299,4 +444,33 @@ mod tests { let halo2_f = 
convert_to_halo2_field(f); assert_eq!(halo2_f, F::::from(3)); } + + #[test] + fn test_deserialize_abi() { + let json_path = "../examples/add_external/target/add_external.json"; + let json_data = std::fs::read(json_path).expect("Failed to read add_external.json"); + + let program: NoirProgram = + serde_json::from_slice(&json_data).expect("Failed to deserialize add_external.json"); + + // Verify basic structure + assert_eq!(program.version, "1.0.0-beta.2+1a2a08cbcb68646ff1aaef383cfc1798933c1355"); + assert_eq!(program.hash, 2789485860577127199); + + // Verify parameters + assert_eq!(program.abi.parameters.len(), 3); + assert_eq!(program.abi.parameters[0].name, "external"); + assert_eq!(program.abi.parameters[1].name, "registers"); + assert_eq!(program.abi.parameters[2].name, "next_pc"); + + // Verify return type + if let AbiType::Struct { fields, path } = &program.abi.return_type.as_ref().unwrap().abi_type { + assert_eq!(fields.len(), 2); + assert_eq!(path, "FoldingIO"); + assert_eq!(fields[0].0, "registers"); + assert_eq!(fields[1].0, "next_pc"); + } else { + panic!("Expected tuple return type, got {:?}", program.abi.return_type); + } + } } diff --git a/frontend/src/program/mod.rs b/frontend/src/program/mod.rs index 39462ae..33774cb 100644 --- a/frontend/src/program/mod.rs +++ b/frontend/src/program/mod.rs @@ -86,7 +86,8 @@ pub fn run(switchboard: &Switchboard) -> Result, ProofError> info!("Step {} of {} witnesses", idx, switchboard.witnesses.len()); debug!("Program counter = {:?}", switchboard_witness.pc); - let circuit_primary = switchboard.primary_circuit(switchboard_witness.pc); + let mut circuit_primary = switchboard.primary_circuit(switchboard_witness.pc); + circuit_primary.witness = Some(switchboard_witness.clone()); let circuit_secondary = switchboard.secondary_circuit(); let mut recursive_snark = recursive_snark_option.unwrap_or_else(|| { diff --git a/frontend/tests/ivc/mod.rs b/frontend/tests/ivc/mod.rs index 02f83eb..242075c 100644 --- 
a/frontend/tests/ivc/mod.rs +++ b/frontend/tests/ivc/mod.rs @@ -7,9 +7,9 @@ use super::*; fn test_ivc() { let circuit = square_zeroth(); let witnesses = vec![ - SwitchboardWitness { witness: vec![], pc: 0 }, - SwitchboardWitness { witness: vec![], pc: 0 }, - SwitchboardWitness { witness: vec![], pc: 0 }, + SwitchboardWitness { witness: vec![F::::from(0)], pc: 0 }, + SwitchboardWitness { witness: vec![F::::from(0)], pc: 0 }, + SwitchboardWitness { witness: vec![F::::from(0)], pc: 0 }, ]; let memory = Switchboard { From 1c935e21d713bd666bf617e353001d6e8e80c391 Mon Sep 17 00:00:00 2001 From: Colin Roberts Date: Fri, 28 Feb 2025 18:32:40 -0700 Subject: [PATCH 13/51] got it working again --- Cargo.lock | 2 ++ frontend/src/noir.rs | 20 +++++++------------- 2 files changed, 9 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 786138e..3f5fbaf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1184,6 +1184,8 @@ dependencies = [ "hashbrown 0.12.3", ] +[[package]] +name = "indexmap" version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c9c992b02b5b4c94ea26e32fe5bccb7aa7d9f390ab5c1221ff895bc7ea8b652" diff --git a/frontend/src/noir.rs b/frontend/src/noir.rs index adb65e4..42e8d1e 100644 --- a/frontend/src/noir.rs +++ b/frontend/src/noir.rs @@ -162,19 +162,13 @@ impl StepCircuit> for NoirProgram { let f = self.witness.as_ref().map(|inputs| { trace!("Witness map size: {}", inputs.witness.len()); - if witness.as_usize() < inputs.witness.len() { - let f = convert_to_acir_field(inputs.witness[witness.as_usize()]); - trace!("Private input value: {:?}", f); - acvm.as_mut().unwrap().overwrite_witness(*witness, f); - f - } else { - trace!( - "ERROR: Witness index {} out of bounds (max: {})", - witness.as_usize(), - inputs.witness.len() - 1 - ); - GenericFieldElement::zero() - } + // TODO: This is a bit hacky. We need to subtract the registers length from the witness + // index, and this assumes registers is the first parameter. 
+ let f = + convert_to_acir_field(inputs.witness[witness.as_usize() - registers_length as usize]); + trace!("Private input value: {:?}", f); + acvm.as_mut().unwrap().overwrite_witness(*witness, f); + f }); let var = From d11ae71df9664dfbe373be40525cf063d8cb3e11 Mon Sep 17 00:00:00 2001 From: Colin Roberts Date: Sat, 1 Mar 2025 09:23:54 -0700 Subject: [PATCH 14/51] WIP: better functions --- Nargo.toml | 3 +++ examples/add_external/Nargo.toml | 3 +++ examples/add_external/src/main.nr | 15 ++++++++------- examples/square_zeroth/Nargo.toml | 3 +++ examples/square_zeroth/src/main.nr | 13 +++++++------ examples/swap_memory/Nargo.toml | 3 +++ examples/swap_memory/src/main.nr | 6 ++++-- nivc/Nargo.toml | 6 ++++++ nivc/src/lib.nr | 8 ++++++++ 9 files changed, 45 insertions(+), 15 deletions(-) create mode 100644 Nargo.toml create mode 100644 nivc/Nargo.toml create mode 100644 nivc/src/lib.nr diff --git a/Nargo.toml b/Nargo.toml new file mode 100644 index 0000000..cf2964f --- /dev/null +++ b/Nargo.toml @@ -0,0 +1,3 @@ +[workspace] +members = ["nivc", "examples/add_external", "examples/square_zeroth", "examples/swap_memory"] + diff --git a/examples/add_external/Nargo.toml b/examples/add_external/Nargo.toml index 0b0a015..cd37b1b 100644 --- a/examples/add_external/Nargo.toml +++ b/examples/add_external/Nargo.toml @@ -4,3 +4,6 @@ compiler_version = ">=0.36.0" name = "add_external" type = "bin" version = "0.1.0" + +[dependencies] +nivc = { path = "../../nivc" } diff --git a/examples/add_external/src/main.nr b/examples/add_external/src/main.nr index f59053b..a6359e2 100644 --- a/examples/add_external/src/main.nr +++ b/examples/add_external/src/main.nr @@ -1,12 +1,13 @@ -pub struct FoldingIO { - pub registers: [Field; 2], - pub next_pc: Field, -} +use nivc::FoldingOutput; /// Add two external values to two registers that are folded across circuits. 
-pub fn main(external: [Field; 2], registers: pub [Field; 2], next_pc: Field) -> pub FoldingIO { - FoldingIO { +pub fn main( + registers: pub [Field; 2], + external: [Field; 2], + next_pc: Field, +) -> pub FoldingOutput<2> { + FoldingOutput { registers: [external[0] + registers[0], external[1] + registers[1]], - next_pc: next_pc, + next_pc: Option::some(next_pc), } } diff --git a/examples/square_zeroth/Nargo.toml b/examples/square_zeroth/Nargo.toml index 04e65ad..c8ab6e7 100644 --- a/examples/square_zeroth/Nargo.toml +++ b/examples/square_zeroth/Nargo.toml @@ -4,3 +4,6 @@ compiler_version = ">=0.36.0" name = "square_zeroth" type = "bin" version = "0.1.0" + +[dependencies] +nivc = { path = "../../nivc" } diff --git a/examples/square_zeroth/src/main.nr b/examples/square_zeroth/src/main.nr index 96b7c50..d0d0cc9 100644 --- a/examples/square_zeroth/src/main.nr +++ b/examples/square_zeroth/src/main.nr @@ -1,9 +1,10 @@ -pub struct FoldingIO { - pub registers: [Field; 2], - pub next_pc: Field, -} +use nivc::FoldingOutput; /// Square only the first register. 
-pub fn main(registers: pub [Field; 2], next_pc: Field) -> pub FoldingIO { - FoldingIO { registers: [registers[0] * registers[0], registers[1]], next_pc: next_pc } +pub fn main(registers: pub [Field; 2], next_pc: Field) -> pub FoldingOutput<2> { + FoldingOutput { + registers: [registers[0] * registers[0], registers[1]], + next_pc: Option::some(next_pc), + } } + diff --git a/examples/swap_memory/Nargo.toml b/examples/swap_memory/Nargo.toml index d37da1c..e495845 100644 --- a/examples/swap_memory/Nargo.toml +++ b/examples/swap_memory/Nargo.toml @@ -4,3 +4,6 @@ compiler_version = ">=0.36.0" name = "swap_memory" type = "bin" version = "0.1.0" + +[dependencies] +nivc = { path = "../../nivc" } diff --git a/examples/swap_memory/src/main.nr b/examples/swap_memory/src/main.nr index b199022..b7db79f 100644 --- a/examples/swap_memory/src/main.nr +++ b/examples/swap_memory/src/main.nr @@ -1,4 +1,6 @@ +use nivc::FoldingOutput; + /// Swap the two registers. -pub fn main(registers: pub [Field; 2], next_pc: Field) -> pub (Field, [Field; 2]) { - (next_pc, [registers[1], registers[0]]) +pub fn main(registers: pub [Field; 2], next_pc: Field) -> pub FoldingOutput<2> { + FoldingOutput { registers: [registers[1], registers[0]], next_pc: Option::some(next_pc) } } diff --git a/nivc/Nargo.toml b/nivc/Nargo.toml new file mode 100644 index 0000000..8de5e69 --- /dev/null +++ b/nivc/Nargo.toml @@ -0,0 +1,6 @@ +[package] +authors = ["Colin Roberts"] +compiler_version = ">=0.36.0" +name = "nivc" +type = "lib" +version = "0.1.0" diff --git a/nivc/src/lib.nr b/nivc/src/lib.nr new file mode 100644 index 0000000..7b65485 --- /dev/null +++ b/nivc/src/lib.nr @@ -0,0 +1,8 @@ +pub struct FoldingOutput { + pub registers: [Field; N], + pub next_pc: Option, +} + +// TODO: It would be nice to be able to force the `main` function to have a specific signature. In particular, we want: +// pub fn main(registers: pub [Field; N], input: T) -> pub FoldingOutput { .. 
} +// Perhaps this can be done with a macro almost like `#[nivc_main]`? From 28c479fdefdbd82b72dc8aac6b6d1437d94a5368 Mon Sep 17 00:00:00 2001 From: Colin Roberts Date: Sat, 1 Mar 2025 11:04:39 -0700 Subject: [PATCH 15/51] feat: use ACIR ABI --- examples/add_external/src/main.nr | 5 +- .../add_external/target/add_external.json | 1 - examples/square_zeroth/src/main.nr | 5 +- .../square_zeroth/target/square_zeroth.json | 1 - examples/swap_memory/src/main.nr | 2 +- examples/swap_memory/target/swap_memory.json | 1 - frontend/src/lib.rs | 41 +- frontend/src/noir.rs | 370 ++-- frontend/src/program/mod.rs | 28 +- frontend/src/proof.rs | 2 +- frontend/tests/ivc/mod.rs | 153 +- frontend/tests/lib.rs | 14 +- nivc/src/lib.nr | 2 +- src/lib.rs | 1669 ----------------- 14 files changed, 282 insertions(+), 2012 deletions(-) delete mode 100644 examples/add_external/target/add_external.json delete mode 100644 examples/square_zeroth/target/square_zeroth.json delete mode 100644 examples/swap_memory/target/swap_memory.json delete mode 100644 src/lib.rs diff --git a/examples/add_external/src/main.nr b/examples/add_external/src/main.nr index a6359e2..c9eaf44 100644 --- a/examples/add_external/src/main.nr +++ b/examples/add_external/src/main.nr @@ -6,8 +6,5 @@ pub fn main( external: [Field; 2], next_pc: Field, ) -> pub FoldingOutput<2> { - FoldingOutput { - registers: [external[0] + registers[0], external[1] + registers[1]], - next_pc: Option::some(next_pc), - } + FoldingOutput { registers: [external[0] + registers[0], external[1] + registers[1]], next_pc } } diff --git a/examples/add_external/target/add_external.json b/examples/add_external/target/add_external.json deleted file mode 100644 index a4cdda8..0000000 --- a/examples/add_external/target/add_external.json +++ /dev/null @@ -1 +0,0 @@ 
-{"noir_version":"1.0.0-beta.2+1a2a08cbcb68646ff1aaef383cfc1798933c1355","hash":2789485860577127199,"abi":{"parameters":[{"name":"external","type":{"kind":"array","length":2,"type":{"kind":"field"}},"visibility":"private"},{"name":"registers","type":{"kind":"array","length":2,"type":{"kind":"field"}},"visibility":"public"},{"name":"next_pc","type":{"kind":"field"},"visibility":"private"}],"return_type":{"abi_type":{"kind":"struct","path":"FoldingIO","fields":[{"name":"registers","type":{"kind":"array","length":2,"type":{"kind":"field"}}},{"name":"next_pc","type":{"kind":"field"}}]},"visibility":"public"},"error_types":{}},"bytecode":"H4sIAAAAAAAA/7WSUQrDIAyGrXbzOokxNXnbVSaz9z/CJl1B2semH4gQwg/5ksltxN8L7sxee/1/giXnVlJDwjckrcKQuS6Cgiz8SULUJEvRqgUUMzVcWWmFDcssP2TBNfBhlwV3+psMs4Khv+dN/ryxv9lw5mg4c9/rfLiZzl73g48w9PS77e6jO/MFM7orOdwDAAA=","debug_symbols":"XYxLCoAwDAXvkrUn8Coi0k9aAqEpsRWk9O5+cCFdzhveNPBoa9woBdlhXhqwOFNI0k2tT2CVmCluw3wYJWMZPww1uZ8tZ8bhn1Uc+qr4lF7X134B","file_map":{},"names":["main"],"brillig_names":[]} \ No newline at end of file diff --git a/examples/square_zeroth/src/main.nr b/examples/square_zeroth/src/main.nr index d0d0cc9..92ea52a 100644 --- a/examples/square_zeroth/src/main.nr +++ b/examples/square_zeroth/src/main.nr @@ -2,9 +2,6 @@ use nivc::FoldingOutput; /// Square only the first register. 
pub fn main(registers: pub [Field; 2], next_pc: Field) -> pub FoldingOutput<2> { - FoldingOutput { - registers: [registers[0] * registers[0], registers[1]], - next_pc: Option::some(next_pc), - } + FoldingOutput { registers: [registers[0] * registers[0], registers[1]], next_pc } } diff --git a/examples/square_zeroth/target/square_zeroth.json b/examples/square_zeroth/target/square_zeroth.json deleted file mode 100644 index fb32a0e..0000000 --- a/examples/square_zeroth/target/square_zeroth.json +++ /dev/null @@ -1 +0,0 @@ -{"noir_version":"1.0.0-beta.2+1a2a08cbcb68646ff1aaef383cfc1798933c1355","hash":1249437751568206918,"abi":{"parameters":[{"name":"registers","type":{"kind":"array","length":2,"type":{"kind":"field"}},"visibility":"public"},{"name":"next_pc","type":{"kind":"field"},"visibility":"private"}],"return_type":{"abi_type":{"kind":"struct","path":"FoldingIO","fields":[{"name":"registers","type":{"kind":"array","length":2,"type":{"kind":"field"}}},{"name":"next_pc","type":{"kind":"field"}}]},"visibility":"public"},"error_types":{}},"bytecode":"H4sIAAAAAAAA/9WTSwoDMQhA85ncR6NOdNerNDRz/yO0ZTJ0aOmqbvpABBHhocawUx6Rw4s482VmgpV5tDqQ8ArVugqw9FVRUVRuVYmGsjbr1sCQaeAmRhvsfJsLv4HZbxac9ENy9o+OzsufOCdH5+Lo/NzF8naP6eR/1I9/yLO/hE/uE3pMEkADAAA=","debug_symbols":"XYxLCoAwDAXvkrUn8Coi0k9aAqEpsRWk9O5+cCFdzhveNPBoa9woBdlhXhqwOFNI0k2tT2CVmCluw3wYJWMZPww1uZ8tZ8bhn1Uc+qr4lF7X134B","file_map":{},"names":["main"],"brillig_names":[]} \ No newline at end of file diff --git a/examples/swap_memory/src/main.nr b/examples/swap_memory/src/main.nr index b7db79f..9baf654 100644 --- a/examples/swap_memory/src/main.nr +++ b/examples/swap_memory/src/main.nr @@ -2,5 +2,5 @@ use nivc::FoldingOutput; /// Swap the two registers. 
pub fn main(registers: pub [Field; 2], next_pc: Field) -> pub FoldingOutput<2> { - FoldingOutput { registers: [registers[1], registers[0]], next_pc: Option::some(next_pc) } + FoldingOutput { registers: [registers[1], registers[0]], next_pc } } diff --git a/examples/swap_memory/target/swap_memory.json b/examples/swap_memory/target/swap_memory.json deleted file mode 100644 index 2c6da9e..0000000 --- a/examples/swap_memory/target/swap_memory.json +++ /dev/null @@ -1 +0,0 @@ -{"noir_version":"1.0.0-beta.2+1a2a08cbcb68646ff1aaef383cfc1798933c1355","hash":10660601329448082079,"abi":{"parameters":[{"name":"registers","type":{"kind":"array","length":2,"type":{"kind":"field"}},"visibility":"public"},{"name":"next_pc","type":{"kind":"field"},"visibility":"private"}],"return_type":{"abi_type":{"kind":"tuple","fields":[{"kind":"field"},{"kind":"array","length":2,"type":{"kind":"field"}}]},"visibility":"public"},"error_types":{}},"bytecode":"H4sIAAAAAAAA/9WTTQrEMAhG89PcR6M2upurTJj0/keYKc1AoMumiz4IQpAPnqJ3B+n3ojsTen31SrAyt5IbEr4hW1UBlroqKorKJytRU9Zi1QoYMjXcxGiDgzBkwTUwzsuCO539ROflIc5uonOa6LzvYulZfnAPwyz2//8txN6f3Jkv669xlDwDAAA=","debug_symbols":"XYxLCoAwDAXvkrUn8Coi0k9aAqEpsRWk9O5+cCFdzhveNPBoa9woBdlhXhqwOFNI0k2tT2CVmCluw3wYJWMZPww1uZ8tZ8bhn1Uc+qr4lF7X134B","file_map":{},"names":["main"],"brillig_names":[]} \ No newline at end of file diff --git a/frontend/src/lib.rs b/frontend/src/lib.rs index 19b0723..4be4416 100644 --- a/frontend/src/lib.rs +++ b/frontend/src/lib.rs @@ -33,8 +33,6 @@ //! - `ProverKey`: Represents the prover key needed to create a `CompressedSNARK`. //! - `VerifierKey`: Represents the verifier key needed to create a `CompressedSNARK`. 
-use std::{collections::HashMap, path::PathBuf, str::FromStr}; - use client_side_prover::{ provider::GrumpkinEngine, spartan::batched::BatchedRelaxedR1CSSNARK, @@ -43,8 +41,7 @@ use client_side_prover::{ }; use ff::Field; use serde::{Deserialize, Serialize}; -use serde_json::Value; -use tracing::{debug, error, info}; +use tracing::{debug, info}; use crate::error::ProofError; @@ -54,25 +51,8 @@ pub mod program; pub mod proof; pub mod setup; -/// Represents the first elliptic curve engine used in the proof system. -pub type E1 = client_side_prover::provider::Bn256EngineKZG; -/// Represents the second elliptic curve engine used in the proof system. -pub type E2 = GrumpkinEngine; -/// Represents the group associated with the first elliptic curve engine. -pub type G1 = ::GE; -/// Represents the group associated with the second elliptic curve engine. -pub type G2 = ::GE; -/// Represents the evaluation engine for the first elliptic curve. -pub type EE1 = - client_side_prover::provider::hyperkzg::EvaluationEngine; -/// Represents the evaluation engine for the second elliptic curve. -pub type EE2 = client_side_prover::provider::ipa_pc::EvaluationEngine; -/// Represents the SNARK for the first elliptic curve. -pub type S1 = BatchedRelaxedR1CSSNARK; -/// Represents the SNARK for the second elliptic curve. -pub type S2 = BatchedRelaxedR1CSSNARK; -/// Represents the scalar field associated with a given group. -pub type F = ::Scalar; +/// Represents the scalar field for the primary curve (bn254) +pub type Scalar = ::Scalar; /// Represents the params needed to create `PublicParams` alongside the /// circuits' R1CSs. Specifically typed to the `proofs` crate choices of curves @@ -84,3 +64,18 @@ pub type ProverKey = client_side_prover::supernova::snark::ProverKey /// The `VerifierKey` needed to create a `CompressedSNARK` using the `proofs` /// crate choices of curves and engines. 
pub type VerifierKey = client_side_prover::supernova::snark::VerifierKey; + +/// Represents the first elliptic curve engine used in the proof system. +type E1 = client_side_prover::provider::Bn256EngineKZG; +/// Represents the second elliptic curve engine used in the proof system. +type E2 = GrumpkinEngine; +/// Represents the group associated with the first elliptic curve engine. +type G1 = ::GE; +/// Represents the evaluation engine for the first elliptic curve. +type EE1 = client_side_prover::provider::hyperkzg::EvaluationEngine; +/// Represents the evaluation engine for the second elliptic curve. +type EE2 = client_side_prover::provider::ipa_pc::EvaluationEngine; +/// Represents the SNARK for the first elliptic curve. +type S1 = BatchedRelaxedR1CSSNARK; +/// Represents the SNARK for the second elliptic curve. +type S2 = BatchedRelaxedR1CSSNARK; diff --git a/frontend/src/noir.rs b/frontend/src/noir.rs index 42e8d1e..7160c91 100644 --- a/frontend/src/noir.rs +++ b/frontend/src/noir.rs @@ -16,11 +16,11 @@ use bellpepper_core::{ }; use client_side_prover::supernova::StepCircuit; use ff::PrimeField; -use noirc_abi::{Abi, AbiParameter, AbiType, AbiVisibility}; +use noirc_abi::{input_parser::InputValue, Abi, AbiParameter, AbiType, AbiVisibility}; use tracing::trace; use super::*; -use crate::program::SwitchboardWitness; +use crate::program::SwitchboardInputs; // TODO: If we deserialize more here and get metadata, we could more easily look at witnesses, etc. // Especially if we want to output a constraint to the PC. 
Using the abi would be handy for @@ -41,7 +41,7 @@ pub struct NoirProgram { pub names: Vec, pub brillig_names: Vec, #[serde(skip)] - pub witness: Option, + pub witness: Option, #[serde(skip)] pub index: usize, } @@ -55,219 +55,108 @@ impl NoirProgram { &self.bytecode.unconstrained_functions } - pub fn set_inputs(&mut self, switchboard_witness: SwitchboardWitness) { + pub fn set_inputs(&mut self, switchboard_witness: SwitchboardInputs) { self.witness = Some(switchboard_witness); } } -impl StepCircuit> for NoirProgram { +impl StepCircuit for NoirProgram { // TODO: This is a bit hacky. We need to add 1 for the PC fn arity(&self) -> usize { self.circuit().public_parameters.0.len() } fn circuit_index(&self) -> usize { self.index } #[allow(clippy::too_many_lines)] - fn synthesize>>( + #[allow(clippy::too_many_lines)] + fn synthesize>( &self, cs: &mut CS, - pc: Option<&AllocatedNum>>, - z: &[AllocatedNum>], - ) -> Result<(Option>>, Vec>>), SynthesisError> { + pc: Option<&AllocatedNum>, + z: &[AllocatedNum], + ) -> Result<(Option>, Vec>), SynthesisError> { trace!("Synthesizing NoirProgram with {} inputs", z.len()); - trace!("Inner pc: {pc:?}"); + trace!("Inner pc: {:?}", pc); trace!("Circuit index: {}", self.index); trace!("ABI parameters: {:?}", self.abi.parameters); trace!("ABI return type: {:?}", self.abi.return_type); - trace!("Private parameters count: {}", self.circuit().private_parameters.len()); - trace!("Public parameters count: {}", self.circuit().public_parameters.0.len()); - trace!("Return values count: {}", self.circuit().return_values.0.len()); + trace!("Private parameters: {:?}", self.circuit().private_parameters); + trace!("Public parameters: {:?}", self.circuit().public_parameters); + trace!("Return values: {:?}", self.circuit().return_values); - dbg!(&self); + // Create a map to track allocated variables for the cs + let mut allocated_vars: HashMap> = HashMap::new(); - // Initialize ACVM with the circuit - let mut acvm = if self.witness.is_some() { + // 
Initialize ACVM and populate witness map from inputs + let mut acvm_witness_map = if let Some(inputs) = &self.witness { trace!("Witness is present, initializing ACVM"); - Some(ACVM::new( + let mut acvm = ACVM::new( &StubbedBlackBoxSolver(false), &self.circuit().opcodes, WitnessMap::new(), self.unconstrained_functions(), &[], - )) - } else { - trace!("No witness provided, skipping ACVM initialization"); - None - }; - - // Create a map to track allocated variables for the cs - let mut allocated_vars: HashMap>> = HashMap::new(); - - // Find the registers parameter in the ABI - let registers_param = match self.abi.parameters.iter().find(|p| p.name == "registers") { - Some(param) => { - trace!("Found registers parameter: {:?}", param); - param - }, - None => { - trace!("ERROR: No 'registers' parameter found in ABI"); - trace!( - "Available parameters: {:?}", - self.abi.parameters.iter().map(|p| &p.name).collect::>() - ); - panic!("Expected to find 'registers' parameter in ABI"); - }, - }; - - // Get the length of registers array - let registers_length = match ®isters_param.typ { - AbiType::Array { length, .. 
} => { - trace!("Registers is an Array type with length {}", length); - *length - }, - _ => { - trace!("ERROR: Unexpected registers type: {:?}", registers_param.typ); - panic!("Expected 'registers' to be an array type, found {:?}", registers_param.typ); - }, - }; - - trace!("Using registers length: {}", registers_length); - - // Process private inputs first - trace!("Processing {} private inputs", self.circuit().private_parameters.len()); - - // Get only the private parameters from the ABI - let private_params: Vec<&AbiParameter> = - self.abi.parameters.iter().filter(|p| p.visibility == AbiVisibility::Private).collect(); - - trace!("Found {} private parameters in ABI", private_params.len()); - - for (i, witness) in self.circuit().private_parameters.iter().enumerate() { - let param = if i < private_params.len() { - private_params[i] - } else { - trace!( - "WARNING: Private parameter index {} exceeds private ABI parameters length {}", - i, - private_params.len() - ); - continue; - }; - - trace!( - "Processing private input '{}' (witness {:?}) of type {:?}", - param.name, - witness, - param.typ - ); - - let f = self.witness.as_ref().map(|inputs| { - trace!("Witness map size: {}", inputs.witness.len()); - // TODO: This is a bit hacky. We need to subtract the registers length from the witness - // index, and this assumes registers is the first parameter. 
- let f = - convert_to_acir_field(inputs.witness[witness.as_usize() - registers_length as usize]); - trace!("Private input value: {:?}", f); - acvm.as_mut().unwrap().overwrite_witness(*witness, f); - f - }); - - let var = - AllocatedNum::alloc(&mut cs.namespace(|| format!("private_input_{}", param.name)), || { - let value = convert_to_halo2_field(f.unwrap_or_default()); - trace!("Allocated private input '{}' with value: {:?}", param.name, value); - Ok(value) - })?; - - allocated_vars.insert(*witness, var); - trace!( - "Added private input witness {:?} to allocated_vars (size now: {})", - witness, - allocated_vars.len() ); - } - // Process public inputs (registers) from z - trace!( - "Processing {} public inputs (registers) from z (z.len = {})", - self.circuit().public_parameters.0.len(), - z.len() - ); - - if z.len() != registers_length as usize { - trace!( - "WARNING: z.len() ({}) is not equal to registers_length ({})", - z.len(), - registers_length + // Convert InputMap to ACVM witness map + // We need to convert from Scalar to GenericFieldElement + // TODO: Shouldn't clone here, but it works for now. 
+ let mut inputs = inputs.private_inputs.clone(); + inputs.insert( + "registers".to_string(), + InputValue::Vec( + z.iter() + .map(|z| InputValue::Field(convert_to_acir_field(z.get_value().unwrap()))) + .collect(), + ), ); - } - - for (i, witness) in self.circuit().public_parameters.0.iter().enumerate() { - if i < registers_length as usize && i < z.len() { - trace!("Processing public register at index {} (witness {:?})", i, witness); - - let var = z[i].clone(); - let value_str = var.get_value().map_or("None".to_string(), |v| format!("{:?}", v)); - trace!("Public input value from z[{}]: {}", i, value_str); - - if self.witness.is_some() { - if let Some(value) = var.get_value() { - trace!("Overwriting public witness {:?} with value from z: {:?}", witness, value); - acvm.as_mut().unwrap().overwrite_witness(*witness, convert_to_acir_field(value)); - } else { - trace!("WARNING: No value available for public input at index {}", i); - } + if let Ok(encoded_map) = self.abi.encode(&inputs, None) { + for (witness, value) in encoded_map { + // Convert FieldElement to GenericFieldElement + acvm.overwrite_witness(witness, value); } - - allocated_vars.insert(*witness, var); - trace!( - "Added public input witness {:?} to allocated_vars (size now: {})", - witness, - allocated_vars.len() - ); - } else if i >= registers_length as usize { - trace!( - "Skipping public parameter at index {} as it exceeds registers_length {}", - i, - registers_length - ); - } else { - trace!("ERROR: Public parameter index {} exceeds z.len() {}", i, z.len()); } - } - // Execute ACVM to get witness values if we have inputs - let acir_witness_map = if self.witness.is_some() { + // Execute ACVM to get witness values trace!("Executing ACVM solve..."); - let status = acvm.as_mut().unwrap().solve(); + let status = acvm.solve(); trace!("ACVM solve status: {:?}", status); - let witness_map = acvm.unwrap().finalize(); + + let witness_map = acvm.finalize(); Some(witness_map) } else { - trace!("Skipping ACVM 
execution (no witness)"); + trace!("No witness provided, skipping ACVM initialization"); None }; - // Helper function to get witness values - let get_witness_value = |witness: &Witness| -> F { - let result = acir_witness_map.as_ref().map_or(F::::ONE, |map| { - map.get(witness).map_or_else( - || { - trace!("WARNING: Witness {witness:?} not found in ACVM witness map, using default"); - F::::ONE - }, - |value| { - let converted = convert_to_halo2_field(*value); - trace!("Got witness {:?} value: {:?}", witness, converted); - converted - }, - ) - }); - result - }; + // Allocate public variables from z + for (i, witness) in self.circuit().public_parameters.0.iter().enumerate() { + if i < z.len() { + trace!("Allocating public input {} (witness {:?}) from z", i, witness); + allocated_vars.insert(*witness, z[i].clone()); + } + } + + // Allocate private variables + for &witness in &self.circuit().private_parameters { + if !allocated_vars.contains_key(&witness) { + let value = acvm_witness_map + .as_ref() + .and_then(|map| map.get(&witness)) + .map(|&v| convert_to_halo2_field(v)); + + trace!("Allocating private input (witness {:?}) with value: {:?}", witness, value); + let var = AllocatedNum::alloc( + &mut cs.namespace(|| format!("private_input_{}", witness.as_usize())), + || Ok(value.unwrap_or_else(Scalar::zero)), + )?; + + allocated_vars.insert(witness, var); + } + } - // Helper to get or create a variable for a witness + // Helper function to get or create a variable for a witness let get_var = |witness: &Witness, - allocated_vars: &mut HashMap>>, + allocated_vars: &mut HashMap>, cs: &mut CS, gate_idx: usize| -> Result { @@ -276,17 +165,21 @@ impl StepCircuit> for NoirProgram { Ok(var.get_variable()) } else { trace!("Allocating new variable for witness {:?} in gate {}", witness, gate_idx); - let var = AllocatedNum::alloc(cs.namespace(|| format!("aux_{gate_idx}")), || { - let value = get_witness_value(witness); - trace!("Allocated auxiliary variable with value: {:?}", 
value); - Ok(value) - })?; + + // Get value from ACVM if available + let value = acvm_witness_map + .as_ref() + .and_then(|map| map.get(witness)) + .map(|&v| convert_to_halo2_field(v)); + + let var = AllocatedNum::alloc( + cs.namespace(|| format!("aux_{}_w{}", gate_idx, witness.as_usize())), + || Ok(value.unwrap_or_else(Scalar::zero)), + )?; + allocated_vars.insert(*witness, var.clone()); - trace!( - "Added auxiliary witness {:?} to allocated_vars (size now: {})", - witness, - allocated_vars.len() - ); + trace!("Added auxiliary witness {:?} to allocated_vars", witness); + Ok(var.get_variable()) } }; @@ -309,7 +202,7 @@ impl StepCircuit> for NoirProgram { // Build Az (left terms) with coefficient left_terms = left_terms + (coeff, left_var); // Build Bz (right terms) with coefficient 1 - right_terms = right_terms + (F::::one(), right_var); + right_terms = right_terms + (Scalar::one(), right_var); } // Process addition terms (these contribute to the C matrix in R1CS) @@ -338,87 +231,90 @@ impl StepCircuit> for NoirProgram { } } - // Prepare output values + // Prepare return values trace!("Preparing return values"); let mut return_values = vec![]; + + // Ensure all return witnesses have allocated variables for (i, ret) in self.circuit().return_values.0.iter().enumerate() { - trace!("Processing return value {} (witness {:?})", i, ret); - if let Some(var) = allocated_vars.get(ret) { - let value_str = var.get_value().map_or("None".to_string(), |v| format!("{:?}", v)); - trace!("Found allocated variable for return value {}: {}", i, value_str); - return_values.push(var.clone()); - } else { - trace!("ERROR: Return value {} (witness {:?}) not found in allocated variables", i, ret); - trace!("Available witnesses: {:?}", allocated_vars.keys().collect::>()); - return Err(SynthesisError::AssignmentMissing); - } - } + if !allocated_vars.contains_key(ret) { + trace!("Return value {} (witness {:?}) not yet allocated, creating", i, ret); - trace!("Return values count: {}", 
return_values.len()); - trace!("Return values witnesses: {:?}", self.circuit().return_values.0); + // Get value from ACVM if available + let value = acvm_witness_map + .as_ref() + .and_then(|map| map.get(ret)) + .map(|&v| convert_to_halo2_field(v)); - // Check if the return type is a struct as expected - if let Some(return_type) = &self.abi.return_type { - if let AbiType::Struct { fields, path } = &return_type.abi_type { - trace!("Return type is a struct: {} with {} fields", path, fields.len()); + let var = AllocatedNum::alloc(&mut cs.namespace(|| format!("return_value_{}", i)), || { + Ok(value.unwrap_or_else(Scalar::zero)) + })?; - if path != "FoldingIO" { - panic!("Expected return type to be FoldingIO struct, found {}", path); - } + allocated_vars.insert(*ret, var); + } - // Find the registers field in the struct and get its length - let registers_length = fields - .iter() - .find(|(name, _)| name == "registers") - .map(|(_, typ)| match typ { - AbiType::Array { length, .. } => *length, - _ => panic!("Expected registers to be an array type, found {:?}", typ), - }) - .unwrap_or_else(|| panic!("Expected 'registers' field in FoldingIO struct")); + trace!("Adding return value {} (witness {:?}) to results", i, ret); + return_values.push(allocated_vars[ret].clone()); + } - trace!("registers_length: {}", registers_length); + // Extract return structure from ABI + if let Some(return_type) = &self.abi.return_type { + if let AbiType::Struct { fields, .. } = &return_type.abi_type { + // Find the registers field in the struct + let (registers_length, next_pc_index) = fields + .iter() + .find(|(name, _)| name == "registers") + .map(|(_, typ)| match typ { + AbiType::Array { length, .. 
} => (*length as usize, *length as usize), // next_pc follows registers + _ => panic!("Expected registers to be an array type, found {:?}", typ), + }) + .unwrap_or_else(|| panic!("Expected 'registers' field in return struct")); - // The next_pc is after all the register values - let next_pc_index = registers_length as usize; - trace!("next_pc_index in flattened return values: {}", next_pc_index); + trace!( + "Return struct has registers_length={}, next_pc_index={}", + registers_length, + next_pc_index + ); if next_pc_index < return_values.len() { + // Extract next_pc and registers let next_pc = Some(return_values[next_pc_index].clone()); - trace!("Using return value at index {} as next_pc", next_pc_index); + let registers = return_values[..registers_length].to_vec(); trace!( - "Synthesis complete, returning next_pc and {} return values", - return_values[..registers_length as usize].to_vec().len() + "Returning next_pc at index {} and {} register values", + next_pc_index, + registers.len() ); - return Ok((next_pc, return_values[..registers_length as usize].to_vec())); + return Ok((next_pc, registers)); } else { trace!( - "ERROR: next_pc index {} is out of bounds for return_values length {}", + "ERROR: next_pc_index {} out of bounds for return_values length {}", next_pc_index, return_values.len() ); - panic!("next_pc index out of bounds"); + return Err(SynthesisError::Unsatisfiable); } } else { - trace!("ERROR: Return type is not a struct: {:?}", return_type.abi_type); - panic!("Expected return type to be a struct, found {:?}", return_type.abi_type); + trace!("Return type is not a struct: {:?}", return_type.abi_type); + return Err(SynthesisError::Unsatisfiable); } } else { - trace!("ERROR: No return type specified in ABI"); - panic!("Expected return type to be specified"); + trace!("No return type specified"); + return Err(SynthesisError::Unsatisfiable); } } } -fn convert_to_halo2_field(f: GenericFieldElement) -> F { +fn convert_to_halo2_field(f: GenericFieldElement) 
-> Scalar { let bytes = f.to_be_bytes(); let mut arr = [0u8; 32]; arr.copy_from_slice(&bytes[..32]); arr.reverse(); - F::::from_repr(arr).unwrap() + Scalar::from_repr(arr).unwrap() } -fn convert_to_acir_field(f: F) -> GenericFieldElement { +fn convert_to_acir_field(f: Scalar) -> GenericFieldElement { let mut bytes = f.to_bytes(); bytes.reverse(); GenericFieldElement::from_be_bytes_reduce(&bytes) @@ -430,13 +326,13 @@ mod tests { #[test] fn test_conversions() { - let f = F::::from(5); + let f = Scalar::from(5); let acir_f = convert_to_acir_field(f); assert_eq!(acir_f, GenericFieldElement::from_repr(Fr::from(5))); let f = GenericFieldElement::from_repr(Fr::from(3)); let halo2_f = convert_to_halo2_field(f); - assert_eq!(halo2_f, F::::from(3)); + assert_eq!(halo2_f, Scalar::from(3)); } #[test] diff --git a/frontend/src/program/mod.rs b/frontend/src/program/mod.rs index 33774cb..9e4c6e9 100644 --- a/frontend/src/program/mod.rs +++ b/frontend/src/program/mod.rs @@ -1,7 +1,11 @@ +use std::collections::HashMap; + use client_side_prover::{ supernova::{NonUniformCircuit, RecursiveSNARK}, traits::{snark::default_ck_hint, Dual}, }; +use halo2curves::grumpkin; +use noirc_abi::InputMap; use proof::FoldingProof; use tracing::trace; @@ -14,25 +18,25 @@ pub mod data; // files should only be used to adjust the visibility of exported items. 
/// Compressed proof type -pub type CompressedProof = FoldingProof, F>; +pub type CompressedProof = FoldingProof, Scalar>; -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct SwitchboardWitness { - pub witness: Vec>, - pub pc: usize, +#[derive(Debug, Clone)] +pub struct SwitchboardInputs { + pub private_inputs: InputMap, + pub pc: usize, } -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone)] pub struct Switchboard { pub circuits: Vec, - pub public_input: Vec>, + pub public_input: Vec, pub initial_circuit_index: usize, - pub witnesses: Vec, + pub switchboard_inputs: Vec, } impl NonUniformCircuit for Switchboard { type C1 = NoirProgram; - type C2 = TrivialCircuit>; + type C2 = TrivialCircuit; fn num_circuits(&self) -> usize { self.circuits.len() } @@ -77,13 +81,13 @@ pub fn run(switchboard: &Switchboard) -> Result, ProofError> let public_params = PublicParams::setup(&memory_clone, &*default_ck_hint(), &*default_ck_hint()); let z0_primary = &switchboard.public_input; - let z0_secondary = &[F::::ZERO]; + let z0_secondary = &[grumpkin::Fr::ZERO]; let mut recursive_snark_option = None; let time = std::time::Instant::now(); - for (idx, switchboard_witness) in switchboard.witnesses.iter().enumerate() { - info!("Step {} of {} witnesses", idx, switchboard.witnesses.len()); + for (idx, switchboard_witness) in switchboard.switchboard_inputs.iter().enumerate() { + info!("Step {} of {} witnesses", idx, switchboard.switchboard_inputs.len()); debug!("Program counter = {:?}", switchboard_witness.pc); let mut circuit_primary = switchboard.primary_circuit(switchboard_witness.pc); diff --git a/frontend/src/proof.rs b/frontend/src/proof.rs index 4fced4c..2bc50de 100644 --- a/frontend/src/proof.rs +++ b/frontend/src/proof.rs @@ -54,7 +54,7 @@ impl FoldingProof, String> { Ok(FoldingProof { proof, - verifier_digest: F::::from_bytes( + verifier_digest: Scalar::from_bytes( &hex::decode(&self.verifier_digest).unwrap().try_into().unwrap(), ) .unwrap(), diff --git 
a/frontend/tests/ivc/mod.rs b/frontend/tests/ivc/mod.rs index 242075c..84516a3 100644 --- a/frontend/tests/ivc/mod.rs +++ b/frontend/tests/ivc/mod.rs @@ -1,78 +1,131 @@ -use client_side_prover_frontend::program::{run, Switchboard, SwitchboardWitness}; +use std::collections::HashMap; + +use acvm::acir::{acir_field::GenericFieldElement, circuit::Opcode}; +use client_side_prover_frontend::program::{run, Switchboard, SwitchboardInputs}; +use noirc_abi::{input_parser::InputValue, InputMap}; +use tracing::trace; use super::*; +fn debug_acir_circuit(circuit: &NoirProgram) { + trace!("=== ACIR Circuit Debug ==="); + trace!("ABI: {:?}", circuit.abi); + + trace!("Private parameters: {:?}", circuit.circuit().private_parameters); + trace!("Public parameters: {:?}", circuit.circuit().public_parameters); + trace!("Return values: {:?}", circuit.circuit().return_values); + + trace!("ACIR Opcodes:"); + for (i, op) in circuit.circuit().opcodes.iter().enumerate() { + if let Opcode::AssertZero(gate) = op { + trace!( + " Gate {}: mul_terms={:?}, linear_combinations={:?}, q_c={:?}", + i, + gate.mul_terms, + gate.linear_combinations, + gate.q_c + ); + } else { + trace!(" Opcode {}: {:?}", i, op); + } + } + trace!("=== End Debug ==="); +} + #[test] #[traced_test] fn test_ivc() { let circuit = square_zeroth(); - let witnesses = vec![ - SwitchboardWitness { witness: vec![F::::from(0)], pc: 0 }, - SwitchboardWitness { witness: vec![F::::from(0)], pc: 0 }, - SwitchboardWitness { witness: vec![F::::from(0)], pc: 0 }, + let switchboard_inputs = vec![ + SwitchboardInputs { + private_inputs: InputMap::from([( + "next_pc".to_string(), + InputValue::Field(GenericFieldElement::from(0_u64)), + )]), + pc: 0, + }, + SwitchboardInputs { + private_inputs: InputMap::from([( + "next_pc".to_string(), + InputValue::Field(GenericFieldElement::from(0_u64)), + )]), + pc: 0, + }, + SwitchboardInputs { + private_inputs: InputMap::from([( + "next_pc".to_string(), + 
InputValue::Field(GenericFieldElement::from(0_u64)), + )]), + pc: 0, + }, ]; let memory = Switchboard { circuits: vec![circuit], - public_input: vec![F::::from(2), F::::from(1)], + public_input: vec![Scalar::from(2), Scalar::from(1)], initial_circuit_index: 0, - witnesses, + switchboard_inputs, }; let snark = run(&memory).unwrap(); - let zi = snark.zi_primary(); - dbg!(zi); - // First fold: - // step_out[0] == 3 * 1 + 2 + 1 == 6 - // step_out[1] == (3 + 3) * 2 + 1 == 13 - // Second fold: - // step_out[0] == 3 * 6 + 13 + 1 == 32 - // step_out[1] == (3 + 3) * 13 + 6 == 84 - // assert_eq!(zi[0], F::::from(32)); - // assert_eq!(zi[1], F::::from(84)); - // assert_eq!(zi[2], F::::from(2)); - // assert_eq!(zi[3], F::::from(0)); - // assert_eq!(zi[4], F::::from(0)); + dbg!(&snark.zi_primary()); + assert_eq!(snark.zi_primary()[0], Scalar::from(256)); + assert_eq!(snark.zi_primary()[1], Scalar::from(1)); } #[test] #[traced_test] fn test_ivc_private_inputs() { let circuit = add_external(); - let witnesses = vec![ - SwitchboardWitness { witness: vec![F::::from(3), F::::from(3)], pc: 0 }, - SwitchboardWitness { witness: vec![F::::from(5), F::::from(7)], pc: 0 }, - SwitchboardWitness { witness: vec![F::::from(0), F::::from(2)], pc: 0 }, + debug_acir_circuit(&circuit); + let switchboard_inputs = vec![ + SwitchboardInputs { + private_inputs: InputMap::from([ + ("next_pc".to_string(), InputValue::Field(GenericFieldElement::from(0_u64))), + ( + "external".to_string(), + InputValue::Vec(vec![ + InputValue::Field(GenericFieldElement::from(3_u64)), + InputValue::Field(GenericFieldElement::from(3_u64)), + ]), + ), + ]), + pc: 0, + }, + SwitchboardInputs { + private_inputs: InputMap::from([ + ("next_pc".to_string(), InputValue::Field(GenericFieldElement::from(0_u64))), + ( + "external".to_string(), + InputValue::Vec(vec![ + InputValue::Field(GenericFieldElement::from(420_u64)), + InputValue::Field(GenericFieldElement::from(69_u64)), + ]), + ), + ]), + pc: 0, + }, ]; let memory = 
Switchboard { circuits: vec![circuit], - public_input: vec![F::::from(1), F::::from(2)], + public_input: vec![Scalar::from(1), Scalar::from(2)], initial_circuit_index: 0, - witnesses, + switchboard_inputs, }; let snark = run(&memory).unwrap(); let zi = snark.zi_primary(); dbg!(zi); - // First fold: - // step_out[0] == 3 * 1 + 2 + 1 == 6 - // step_out[1] == (3 + 3) * 2 + 1 == 13 - // Second fold: - // step_out[0] == 3 * 6 + 13 + 1 == 32 - // step_out[1] == (3 + 3) * 13 + 6 == 84 - // assert_eq!(zi[0], F::::from(32)); - // assert_eq!(zi[1], F::::from(84)); - // assert_eq!(zi[2], F::::from(2)); - // assert_eq!(zi[3], F::::from(0)); - // assert_eq!(zi[4], F::::from(0)); + assert_eq!(zi[0], Scalar::from(424)); + assert_eq!(zi[1], Scalar::from(74)); } // #[test] // #[traced_test] // fn test_mock_noir_nivc() { // let mut add_external = NoirProgram::new(ADD_EXTERNAL); -// add_external.set_private_inputs(vec![F::::from(5), F::::from(7)]); +// add_external.set_private_inputs(vec![Scalar::from(5), Scalar::from(7)]); // let add_external = // NoirRomCircuit { circuit: add_external, circuit_index: 0, rom_size: 3 }; @@ -91,12 +144,12 @@ fn test_ivc_private_inputs() { // circuits: vec![add_external, square_zeroth, swap_memory], // rom: vec![0, 1, 2], // public_input: vec![ -// F::::from(1), // Actual input -// F::::from(2), // Actual input -// F::::from(0), // PC -// F::::from(0), // ROM -// F::::from(1), // ROM -// F::::from(2), // ROM +// Scalar::from(1), // Actual input +// Scalar::from(2), // Actual input +// Scalar::from(0), // PC +// Scalar::from(0), // ROM +// Scalar::from(1), // ROM +// Scalar::from(2), // ROM // ], // }; @@ -112,10 +165,10 @@ fn test_ivc_private_inputs() { // // Third fold: // // step_out[0] == 9 // // step_out[1] == 36 -// assert_eq!(zi[0], F::::from(9)); -// assert_eq!(zi[1], F::::from(36)); -// assert_eq!(zi[2], F::::from(3)); -// assert_eq!(zi[3], F::::from(0)); -// assert_eq!(zi[4], F::::from(1)); -// assert_eq!(zi[5], F::::from(2)); +// 
assert_eq!(zi[0], Scalar::from(9)); +// assert_eq!(zi[1], Scalar::from(36)); +// assert_eq!(zi[2], Scalar::from(3)); +// assert_eq!(zi[3], Scalar::from(0)); +// assert_eq!(zi[4], Scalar::from(1)); +// assert_eq!(zi[5], Scalar::from(2)); // } diff --git a/frontend/tests/lib.rs b/frontend/tests/lib.rs index 6237866..4e70d39 100644 --- a/frontend/tests/lib.rs +++ b/frontend/tests/lib.rs @@ -1,22 +1,22 @@ -use client_side_prover_frontend::{noir::NoirProgram, F, G1}; +use client_side_prover_frontend::{noir::NoirProgram, Scalar}; use tracing_test::traced_test; mod ivc; pub fn add_external() -> NoirProgram { - let bytecode = std::fs::read("../examples/add_external/target/add_external.json") - .expect("Failed to read Noir program file"); + let bytecode = + std::fs::read("../target/add_external.json").expect("Failed to read Noir program file"); NoirProgram::new(&bytecode) } pub fn square_zeroth() -> NoirProgram { - let bytecode = std::fs::read("../examples/square_zeroth/target/square_zeroth.json") - .expect("Failed to read Noir program file"); + let bytecode = + std::fs::read("..//target/square_zeroth.json").expect("Failed to read Noir program file"); NoirProgram::new(&bytecode) } pub fn swap_memory() -> NoirProgram { - let bytecode = std::fs::read("../examples/swap_memory/target/swap_memory.json") - .expect("Failed to read Noir program file"); + let bytecode = + std::fs::read("../arget/swap_memory.json").expect("Failed to read Noir program file"); NoirProgram::new(&bytecode) } diff --git a/nivc/src/lib.nr b/nivc/src/lib.nr index 7b65485..d477996 100644 --- a/nivc/src/lib.nr +++ b/nivc/src/lib.nr @@ -1,6 +1,6 @@ pub struct FoldingOutput { pub registers: [Field; N], - pub next_pc: Option, + pub next_pc: Field, } // TODO: It would be nice to be able to force the `main` function to have a specific signature. 
In particular, we want: diff --git a/src/lib.rs b/src/lib.rs deleted file mode 100644 index 876578f..0000000 --- a/src/lib.rs +++ /dev/null @@ -1,1669 +0,0 @@ -#![allow(non_snake_case)] - -// private modules -mod bellpepper; -mod circuit; -mod digest; -mod nifs; - -// public modules -pub mod constants; -pub mod errors; -pub mod fast_serde; -pub mod gadgets; -pub mod provider; -pub mod r1cs; -pub mod spartan; -pub mod traits; - -pub mod cyclefold; -pub mod supernova; - -use std::sync::Arc; - -use bellpepper_core::{ConstraintSystem, SynthesisError}; -use circuit::{NovaAugmentedCircuit, NovaAugmentedCircuitInputs, NovaAugmentedCircuitParams}; -use constants::{BN_LIMB_WIDTH, BN_N_LIMBS, NUM_FE_WITHOUT_IO_FOR_CRHF, NUM_HASH_BITS}; -use errors::NovaError; -use ff::Field; -use gadgets::scalar_as_base; -use nifs::NIFS; -use once_cell::sync::OnceCell; -use r1cs::{ - CommitmentKeyHint, R1CSInstance, R1CSShape, R1CSWitness, RelaxedR1CSInstance, - RelaxedR1CSWitness, -}; -use serde::{Deserialize, Serialize}; -use supernova::StepCircuit; -use traits::{ - commitment::{CommitmentEngineTrait, CommitmentTrait}, - snark::RelaxedR1CSSNARKTrait, - AbsorbInROTrait, CurveCycleEquipped, Dual, Engine, ROConstants, ROConstantsCircuit, ROTrait, -}; - -use crate::{ - bellpepper::{ - r1cs::{NovaShape, NovaWitness}, - shape_cs::ShapeCS, - solver::SatisfyingAssignment, - }, - digest::{DigestComputer, SimpleDigestible}, - r1cs::R1CSResult, -}; - -/// A type that holds parameters for the primary and secondary circuits of Nova -/// and SuperNova -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct R1CSWithArity { - F_arity: usize, - r1cs_shape: R1CSShape, -} - -impl SimpleDigestible for R1CSWithArity {} - -impl R1CSWithArity { - /// Create a new `R1CSWithArity` - pub fn new(r1cs_shape: R1CSShape, F_arity: usize) -> Self { - Self { - F_arity, - r1cs_shape, - } - } - - /// Return the [`R1CSWithArity`]' digest. 
- pub fn digest(&self) -> E::Scalar { - let dc: DigestComputer<'_, ::Scalar, Self> = DigestComputer::new(self); - dc.digest().expect("Failure in computing digest") - } -} - -/// A type that holds public parameters of Nova -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct PublicParams -where - E: CurveCycleEquipped, -{ - F_arity_primary: usize, - F_arity_secondary: usize, - ro_consts_primary: ROConstants, - ro_consts_circuit_primary: ROConstantsCircuit>, - pub ck_primary: Arc>, - circuit_shape_primary: R1CSWithArity, - ro_consts_secondary: ROConstants>, - ro_consts_circuit_secondary: ROConstantsCircuit, - pub ck_secondary: Arc>>, - circuit_shape_secondary: R1CSWithArity>, - augmented_circuit_params_primary: NovaAugmentedCircuitParams, - augmented_circuit_params_secondary: NovaAugmentedCircuitParams, - #[serde(skip, default = "OnceCell::new")] - digest: OnceCell, -} - -impl SimpleDigestible for PublicParams where E1: CurveCycleEquipped {} - -impl PublicParams -where - E1: CurveCycleEquipped, -{ - /// Set up builder to create `PublicParams` for a pair of circuits `C1` and - /// `C2`. - /// - /// # Note - /// - /// Public parameters set up a number of bases for the homomorphic - /// commitment scheme of Nova. - /// - /// Some final compressing SNARKs, like variants of Spartan, use computation - /// commitments that require larger sizes for these parameters. These - /// SNARKs provide a hint for these values by implementing - /// `RelaxedR1CSSNARKTrait::ck_floor()`, which can be passed to this - /// function. - /// - /// If you're not using such a SNARK, pass - /// `arecibo::traits::snark::default_ck_hint()` instead. - /// - /// # Arguments - /// - /// * `c_primary`: The primary circuit of type `C1`. - /// * `c_secondary`: The secondary circuit of type `C2`. 
- /// * `ck_hint1`: A `CommitmentKeyHint` for `G1`, which is a function that - /// provides a hint for the number of generators required in the - /// commitment scheme for the primary circuit. - /// * `ck_hint2`: A `CommitmentKeyHint` for `G2`, similar to `ck_hint1`, but - /// for the secondary circuit. - /// - /// # Example - /// - /// ```rust - /// # use arecibo::spartan::ppsnark::RelaxedR1CSSNARK; - /// # use arecibo::provider::ipa_pc::EvaluationEngine; - /// # use arecibo::provider::{PallasEngine, VestaEngine}; - /// # use arecibo::traits::{circuit::TrivialCircuit, Engine, snark::RelaxedR1CSSNARKTrait}; - /// use arecibo::PublicParams; - /// - /// type E1 = PallasEngine; - /// type E2 = VestaEngine; - /// type EE = EvaluationEngine; - /// type SPrime = RelaxedR1CSSNARK>; - /// - /// let circuit1 = TrivialCircuit::<::Scalar>::default(); - /// let circuit2 = TrivialCircuit::<::Scalar>::default(); - /// // Only relevant for a SNARK using computation commitmnets, pass &(|_| 0) - /// // or &*nova_snark::traits::snark::default_ck_hint() otherwise. 
- /// let ck_hint1 = &*SPrime::::ck_floor(); - /// let ck_hint2 = &*SPrime::::ck_floor(); - /// - /// let pp = PublicParams::setup(&circuit1, &circuit2, ck_hint1, ck_hint2).unwrap(); - /// ``` - pub fn setup, C2: StepCircuit< as Engine>::Scalar>>( - c_primary: &C1, - c_secondary: &C2, - ck_hint1: &CommitmentKeyHint, - ck_hint2: &CommitmentKeyHint>, - ) -> Result { - let augmented_circuit_params_primary = - NovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, true); - let augmented_circuit_params_secondary = - NovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, false); - - let ro_consts_primary: ROConstants = ROConstants::::default(); - let ro_consts_secondary: ROConstants> = ROConstants::>::default(); - - let F_arity_primary = c_primary.arity(); - let F_arity_secondary = c_secondary.arity(); - - // ro_consts_circuit_primary are parameterized by E2 because the type alias uses - // E2::Base = E1::Scalar - let ro_consts_circuit_primary: ROConstantsCircuit> = - ROConstantsCircuit::>::default(); - let ro_consts_circuit_secondary: ROConstantsCircuit = - ROConstantsCircuit::::default(); - - // Initialize ck for the primary - let circuit_primary: NovaAugmentedCircuit<'_, Dual, C1> = NovaAugmentedCircuit::new( - &augmented_circuit_params_primary, - None, - c_primary, - ro_consts_circuit_primary.clone(), - ); - let mut cs: ShapeCS = ShapeCS::new(); - let _ = circuit_primary.synthesize(&mut cs); - let (r1cs_shape_primary, ck_primary) = cs.r1cs_shape_and_key(ck_hint1); - let ck_primary = Arc::new(ck_primary); - - // Initialize ck for the secondary - let circuit_secondary: NovaAugmentedCircuit<'_, E1, C2> = NovaAugmentedCircuit::new( - &augmented_circuit_params_secondary, - None, - c_secondary, - ro_consts_circuit_secondary.clone(), - ); - let mut cs: ShapeCS> = ShapeCS::new(); - let _ = circuit_secondary.synthesize(&mut cs); - let (r1cs_shape_secondary, ck_secondary) = cs.r1cs_shape_and_key(ck_hint2); - let ck_secondary = Arc::new(ck_secondary); - - if 
r1cs_shape_primary.num_io != 2 || r1cs_shape_secondary.num_io != 2 { - return Err(NovaError::InvalidStepCircuitIO); - } - - let circuit_shape_primary = R1CSWithArity::new(r1cs_shape_primary, F_arity_primary); - let circuit_shape_secondary = R1CSWithArity::new(r1cs_shape_secondary, F_arity_secondary); - - Ok(Self { - F_arity_primary, - F_arity_secondary, - ro_consts_primary, - ro_consts_circuit_primary, - ck_primary, - circuit_shape_primary, - ro_consts_secondary, - ro_consts_circuit_secondary, - ck_secondary, - circuit_shape_secondary, - augmented_circuit_params_primary, - augmented_circuit_params_secondary, - digest: OnceCell::new(), - }) - } - - /// Retrieve the digest of the public parameters. - pub fn digest(&self) -> E1::Scalar { - self.digest - .get_or_try_init(|| DigestComputer::new(self).digest()) - .cloned() - .expect("Failure in retrieving digest") - } - - /// Returns the number of constraints in the primary and secondary circuits - pub const fn num_constraints(&self) -> (usize, usize) { - ( - self.circuit_shape_primary.r1cs_shape.num_cons, - self.circuit_shape_secondary.r1cs_shape.num_cons, - ) - } - - /// Returns the number of variables in the primary and secondary circuits - pub const fn num_variables(&self) -> (usize, usize) { - ( - self.circuit_shape_primary.r1cs_shape.num_vars, - self.circuit_shape_secondary.r1cs_shape.num_vars, - ) - } -} - -/// A resource buffer for [`RecursiveSNARK`] for storing scratch values that are -/// computed by `prove_step`, which allows the reuse of memory allocations and -/// avoids unnecessary new allocations in the critical section. 
-#[derive(Clone, Debug, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct ResourceBuffer { - l_w: Option>, - l_u: Option>, - - ABC_Z_1: R1CSResult, - ABC_Z_2: R1CSResult, - - /// buffer for `commit_T` - T: Vec, -} - -/// A SNARK that proves the correct execution of an incremental computation -#[derive(Clone, Debug, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct RecursiveSNARK -where - E1: CurveCycleEquipped, -{ - z0_primary: Vec, - z0_secondary: Vec< as Engine>::Scalar>, - r_W_primary: RelaxedR1CSWitness, - r_U_primary: RelaxedR1CSInstance, - r_W_secondary: RelaxedR1CSWitness>, - r_U_secondary: RelaxedR1CSInstance>, - l_w_secondary: R1CSWitness>, - l_u_secondary: R1CSInstance>, - - /// Buffer for memory needed by the primary fold-step - buffer_primary: ResourceBuffer, - /// Buffer for memory needed by the secondary fold-step - buffer_secondary: ResourceBuffer>, - - i: usize, - zi_primary: Vec, - zi_secondary: Vec< as Engine>::Scalar>, -} - -impl RecursiveSNARK -where - E1: CurveCycleEquipped, -{ - /// Create new instance of recursive SNARK - pub fn new, C2: StepCircuit< as Engine>::Scalar>>( - pp: &PublicParams, - c_primary: &C1, - c_secondary: &C2, - z0_primary: &[E1::Scalar], - z0_secondary: &[ as Engine>::Scalar], - ) -> Result { - if z0_primary.len() != pp.F_arity_primary || z0_secondary.len() != pp.F_arity_secondary { - return Err(NovaError::InvalidInitialInputLength); - } - - let r1cs_primary = &pp.circuit_shape_primary.r1cs_shape; - let r1cs_secondary = &pp.circuit_shape_secondary.r1cs_shape; - - // base case for the primary - let mut cs_primary = SatisfyingAssignment::::new(); - let inputs_primary: NovaAugmentedCircuitInputs> = NovaAugmentedCircuitInputs::new( - scalar_as_base::(pp.digest()), - E1::Scalar::ZERO, - z0_primary.to_vec(), - None, - None, - None, - None, - ); - - let circuit_primary: NovaAugmentedCircuit<'_, Dual, C1> = NovaAugmentedCircuit::new( - &pp.augmented_circuit_params_primary, - Some(inputs_primary), - c_primary, - 
pp.ro_consts_circuit_primary.clone(), - ); - let zi_primary = circuit_primary.synthesize(&mut cs_primary)?; - let (u_primary, w_primary) = - cs_primary.r1cs_instance_and_witness(r1cs_primary, &pp.ck_primary)?; - - // base case for the secondary - let mut cs_secondary = SatisfyingAssignment::>::new(); - let inputs_secondary: NovaAugmentedCircuitInputs = NovaAugmentedCircuitInputs::new( - pp.digest(), - as Engine>::Scalar::ZERO, - z0_secondary.to_vec(), - None, - None, - Some(u_primary.clone()), - None, - ); - let circuit_secondary: NovaAugmentedCircuit<'_, E1, C2> = NovaAugmentedCircuit::new( - &pp.augmented_circuit_params_secondary, - Some(inputs_secondary), - c_secondary, - pp.ro_consts_circuit_secondary.clone(), - ); - let zi_secondary = circuit_secondary.synthesize(&mut cs_secondary)?; - let (u_secondary, w_secondary) = cs_secondary - .r1cs_instance_and_witness(&pp.circuit_shape_secondary.r1cs_shape, &pp.ck_secondary)?; - - // IVC proof for the primary circuit - let l_w_primary = w_primary; - let l_u_primary = u_primary; - let r_W_primary = RelaxedR1CSWitness::from_r1cs_witness(r1cs_primary, l_w_primary); - let r_U_primary = RelaxedR1CSInstance::from_r1cs_instance( - &*pp.ck_primary, - &pp.circuit_shape_primary.r1cs_shape, - l_u_primary, - ); - - // IVC proof for the secondary circuit - let l_w_secondary = w_secondary; - let l_u_secondary = u_secondary; - let r_W_secondary = RelaxedR1CSWitness::>::default(r1cs_secondary); - let r_U_secondary = - RelaxedR1CSInstance::>::default(&pp.ck_secondary, r1cs_secondary); - - assert!( - !(zi_primary.len() != pp.F_arity_primary || zi_secondary.len() != pp.F_arity_secondary), - "Invalid step length" - ); - - let zi_primary = zi_primary - .iter() - .map(|v| v.get_value().ok_or(SynthesisError::AssignmentMissing)) - .collect::::Scalar>, _>>()?; - - let zi_secondary = zi_secondary - .iter() - .map(|v| v.get_value().ok_or(SynthesisError::AssignmentMissing)) - .collect:: as Engine>::Scalar>, _>>()?; - - let buffer_primary = 
ResourceBuffer { - l_w: None, - l_u: None, - ABC_Z_1: R1CSResult::default(r1cs_primary.num_cons), - ABC_Z_2: R1CSResult::default(r1cs_primary.num_cons), - T: r1cs::default_T::(r1cs_primary.num_cons), - }; - - let buffer_secondary = ResourceBuffer { - l_w: None, - l_u: None, - ABC_Z_1: R1CSResult::default(r1cs_secondary.num_cons), - ABC_Z_2: R1CSResult::default(r1cs_secondary.num_cons), - T: r1cs::default_T::>(r1cs_secondary.num_cons), - }; - - Ok(Self { - z0_primary: z0_primary.to_vec(), - z0_secondary: z0_secondary.to_vec(), - r_W_primary, - r_U_primary, - r_W_secondary, - r_U_secondary, - l_w_secondary, - l_u_secondary, - - buffer_primary, - buffer_secondary, - i: 0, - zi_primary, - zi_secondary, - }) - } - - /// Inputs of the primary circuits - pub fn z0_primary(&self) -> &Vec { - &self.z0_primary - } - - /// Outputs of the primary circuits - pub fn zi_primary(&self) -> &Vec { - &self.zi_primary - } - - /// Create a new `RecursiveSNARK` (or updates the provided `RecursiveSNARK`) - /// by executing a step of the incremental computation - #[tracing::instrument(skip_all, name = "nova::RecursiveSNARK::prove_step")] - pub fn prove_step< - C1: StepCircuit, - C2: StepCircuit< as Engine>::Scalar>, - >( - &mut self, - pp: &PublicParams, - c_primary: &C1, - c_secondary: &C2, - ) -> Result<(), NovaError> { - // first step was already done in the constructor - if self.i == 0 { - self.i = 1; - return Ok(()); - } - - // save the inputs before proceeding to the `i+1`th step - let r_U_primary_i = self.r_U_primary.clone(); - let r_U_secondary_i = self.r_U_secondary.clone(); - let l_u_secondary_i = self.l_u_secondary.clone(); - - // fold the secondary circuit's instance - let (nifs_secondary, _) = NIFS::prove_mut( - &*pp.ck_secondary, - &pp.ro_consts_secondary, - &scalar_as_base::(pp.digest()), - &pp.circuit_shape_secondary.r1cs_shape, - &mut self.r_U_secondary, - &mut self.r_W_secondary, - &self.l_u_secondary, - &self.l_w_secondary, - &mut self.buffer_secondary.T, - &mut 
self.buffer_secondary.ABC_Z_1, - &mut self.buffer_secondary.ABC_Z_2, - )?; - - let mut cs_primary = SatisfyingAssignment::::with_capacity( - pp.circuit_shape_primary.r1cs_shape.num_io + 1, - pp.circuit_shape_primary.r1cs_shape.num_vars, - ); - let inputs_primary: NovaAugmentedCircuitInputs> = NovaAugmentedCircuitInputs::new( - scalar_as_base::(pp.digest()), - E1::Scalar::from(self.i as u64), - self.z0_primary.to_vec(), - Some(self.zi_primary.clone()), - Some(r_U_secondary_i), - Some(l_u_secondary_i), - Some(Commitment::>::decompress(&nifs_secondary.comm_T)?), - ); - - let circuit_primary: NovaAugmentedCircuit<'_, Dual, C1> = NovaAugmentedCircuit::new( - &pp.augmented_circuit_params_primary, - Some(inputs_primary), - c_primary, - pp.ro_consts_circuit_primary.clone(), - ); - - let zi_primary = circuit_primary.synthesize(&mut cs_primary)?; - - let (l_u_primary, l_w_primary) = cs_primary - .r1cs_instance_and_witness(&pp.circuit_shape_primary.r1cs_shape, &pp.ck_primary)?; - - // fold the primary circuit's instance - let (nifs_primary, _) = NIFS::prove_mut( - &*pp.ck_primary, - &pp.ro_consts_primary, - &pp.digest(), - &pp.circuit_shape_primary.r1cs_shape, - &mut self.r_U_primary, - &mut self.r_W_primary, - &l_u_primary, - &l_w_primary, - &mut self.buffer_primary.T, - &mut self.buffer_primary.ABC_Z_1, - &mut self.buffer_primary.ABC_Z_2, - )?; - - let mut cs_secondary = SatisfyingAssignment::>::with_capacity( - pp.circuit_shape_secondary.r1cs_shape.num_io + 1, - pp.circuit_shape_secondary.r1cs_shape.num_vars, - ); - let inputs_secondary: NovaAugmentedCircuitInputs = NovaAugmentedCircuitInputs::new( - pp.digest(), - as Engine>::Scalar::from(self.i as u64), - self.z0_secondary.to_vec(), - Some(self.zi_secondary.clone()), - Some(r_U_primary_i), - Some(l_u_primary), - Some(Commitment::::decompress(&nifs_primary.comm_T)?), - ); - - let circuit_secondary: NovaAugmentedCircuit<'_, E1, C2> = NovaAugmentedCircuit::new( - &pp.augmented_circuit_params_secondary, - 
Some(inputs_secondary), - c_secondary, - pp.ro_consts_circuit_secondary.clone(), - ); - let zi_secondary = circuit_secondary.synthesize(&mut cs_secondary)?; - - let (l_u_secondary, l_w_secondary) = cs_secondary - .r1cs_instance_and_witness(&pp.circuit_shape_secondary.r1cs_shape, &pp.ck_secondary) - .map_err(|_e| NovaError::UnSat)?; - - // update the running instances and witnesses - self.zi_primary = zi_primary - .iter() - .map(|v| v.get_value().ok_or(SynthesisError::AssignmentMissing)) - .collect::::Scalar>, _>>()?; - self.zi_secondary = zi_secondary - .iter() - .map(|v| v.get_value().ok_or(SynthesisError::AssignmentMissing)) - .collect:: as Engine>::Scalar>, _>>()?; - - self.l_u_secondary = l_u_secondary; - self.l_w_secondary = l_w_secondary; - - self.i += 1; - - Ok(()) - } - - /// Verify the correctness of the `RecursiveSNARK` - pub fn verify( - &self, - pp: &PublicParams, - num_steps: usize, - z0_primary: &[E1::Scalar], - z0_secondary: &[ as Engine>::Scalar], - ) -> Result<(Vec, Vec< as Engine>::Scalar>), NovaError> { - // number of steps cannot be zero - let is_num_steps_zero = num_steps == 0; - - // check if the provided proof has executed num_steps - let is_num_steps_not_match = self.i != num_steps; - - // check if the initial inputs match - let is_inputs_not_match = - self.z0_primary != z0_primary || self.z0_secondary != z0_secondary; - - // check if the (relaxed) R1CS instances have two public outputs - let is_instance_has_two_outputs = self.l_u_secondary.X.len() != 2 - || self.r_U_primary.X.len() != 2 - || self.r_U_secondary.X.len() != 2; - - if is_num_steps_zero - || is_num_steps_not_match - || is_inputs_not_match - || is_instance_has_two_outputs - { - return Err(NovaError::ProofVerifyError); - } - - // check if the output hashes in R1CS instances point to the right running - // instances - let (hash_primary, hash_secondary) = { - let mut hasher = as Engine>::RO::new( - pp.ro_consts_secondary.clone(), - NUM_FE_WITHOUT_IO_FOR_CRHF + 2 * 
pp.F_arity_primary, - ); - hasher.absorb(pp.digest()); - hasher.absorb(E1::Scalar::from(num_steps as u64)); - for e in z0_primary { - hasher.absorb(*e); - } - for e in &self.zi_primary { - hasher.absorb(*e); - } - self.r_U_secondary.absorb_in_ro(&mut hasher); - - let mut hasher2 = ::RO::new( - pp.ro_consts_primary.clone(), - NUM_FE_WITHOUT_IO_FOR_CRHF + 2 * pp.F_arity_secondary, - ); - hasher2.absorb(scalar_as_base::(pp.digest())); - hasher2.absorb( as Engine>::Scalar::from(num_steps as u64)); - for e in z0_secondary { - hasher2.absorb(*e); - } - for e in &self.zi_secondary { - hasher2.absorb(*e); - } - self.r_U_primary.absorb_in_ro(&mut hasher2); - - ( - hasher.squeeze(NUM_HASH_BITS), - hasher2.squeeze(NUM_HASH_BITS), - ) - }; - - if hash_primary != self.l_u_secondary.X[0] - || hash_secondary != scalar_as_base::>(self.l_u_secondary.X[1]) - { - return Err(NovaError::ProofVerifyError); - } - - // check the satisfiability of the provided instances - let (res_r_primary, (res_r_secondary, res_l_secondary)) = rayon::join( - || { - pp.circuit_shape_primary.r1cs_shape.is_sat_relaxed( - &pp.ck_primary, - &self.r_U_primary, - &self.r_W_primary, - ) - }, - || { - rayon::join( - || { - pp.circuit_shape_secondary.r1cs_shape.is_sat_relaxed( - &pp.ck_secondary, - &self.r_U_secondary, - &self.r_W_secondary, - ) - }, - || { - pp.circuit_shape_secondary.r1cs_shape.is_sat( - &pp.ck_secondary, - &self.l_u_secondary, - &self.l_w_secondary, - ) - }, - ) - }, - ); - - // check the returned res objects - res_r_primary?; - res_r_secondary?; - res_l_secondary?; - - Ok((self.zi_primary.clone(), self.zi_secondary.clone())) - } - - /// Get the outputs after the last step of computation. - pub fn outputs(&self) -> (&[E1::Scalar], &[ as Engine>::Scalar]) { - (&self.zi_primary, &self.zi_secondary) - } - - /// The number of steps which have been executed thus far. 
- pub fn num_steps(&self) -> usize { - self.i - } -} - -/// A type that holds the prover key for `CompressedSNARK` -#[derive(Clone, Debug)] -pub struct ProverKey -where - E1: CurveCycleEquipped, - S1: RelaxedR1CSSNARKTrait, - S2: RelaxedR1CSSNARKTrait>, -{ - pub pk_primary: S1::ProverKey, - pub pk_secondary: S2::ProverKey, -} - -/// A type that holds the verifier key for `CompressedSNARK` -#[derive(Debug, Clone, Serialize)] -#[serde(bound = "")] -pub struct VerifierKey -where - E1: CurveCycleEquipped, - S1: RelaxedR1CSSNARKTrait, - S2: RelaxedR1CSSNARKTrait>, -{ - F_arity_primary: usize, - F_arity_secondary: usize, - ro_consts_primary: ROConstants, - ro_consts_secondary: ROConstants>, - pp_digest: E1::Scalar, - vk_primary: S1::VerifierKey, - vk_secondary: S2::VerifierKey, -} - -/// A SNARK that proves the knowledge of a valid `RecursiveSNARK` -#[derive(Clone, Debug, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct CompressedSNARK -where - E1: CurveCycleEquipped, - S1: RelaxedR1CSSNARKTrait, - S2: RelaxedR1CSSNARKTrait>, -{ - r_U_primary: RelaxedR1CSInstance, - r_W_snark_primary: S1, - - r_U_secondary: RelaxedR1CSInstance>, - l_u_secondary: R1CSInstance>, - nifs_secondary: NIFS>, - f_W_snark_secondary: S2, - - zn_primary: Vec, - zn_secondary: Vec< as Engine>::Scalar>, -} - -impl CompressedSNARK -where - E1: CurveCycleEquipped, - S1: RelaxedR1CSSNARKTrait, - S2: RelaxedR1CSSNARKTrait>, -{ - /// Creates prover and verifier keys for `CompressedSNARK` - pub fn setup( - pp: &PublicParams, - ) -> Result<(ProverKey, VerifierKey), NovaError> { - let (pk_primary, vk_primary) = - S1::setup(pp.ck_primary.clone(), &pp.circuit_shape_primary.r1cs_shape)?; - let (pk_secondary, vk_secondary) = S2::setup( - pp.ck_secondary.clone(), - &pp.circuit_shape_secondary.r1cs_shape, - )?; - - let pk = ProverKey { - pk_primary, - pk_secondary, - }; - - let vk = VerifierKey { - F_arity_primary: pp.F_arity_primary, - F_arity_secondary: pp.F_arity_secondary, - ro_consts_primary: 
pp.ro_consts_primary.clone(), - ro_consts_secondary: pp.ro_consts_secondary.clone(), - pp_digest: pp.digest(), - vk_primary, - vk_secondary, - }; - - Ok((pk, vk)) - } - - /// Create a new `CompressedSNARK` - pub fn prove( - pp: &PublicParams, - pk: &ProverKey, - recursive_snark: &RecursiveSNARK, - ) -> Result { - // fold the secondary circuit's instance with its running instance - let (nifs_secondary, (f_U_secondary, f_W_secondary), _) = NIFS::prove( - &*pp.ck_secondary, - &pp.ro_consts_secondary, - &scalar_as_base::(pp.digest()), - &pp.circuit_shape_secondary.r1cs_shape, - &recursive_snark.r_U_secondary, - &recursive_snark.r_W_secondary, - &recursive_snark.l_u_secondary, - &recursive_snark.l_w_secondary, - )?; - - // create SNARKs proving the knowledge of f_W_primary and f_W_secondary - let (r_W_snark_primary, f_W_snark_secondary) = rayon::join( - || { - S1::prove( - &pp.ck_primary, - &pk.pk_primary, - &pp.circuit_shape_primary.r1cs_shape, - &recursive_snark.r_U_primary, - &recursive_snark.r_W_primary, - ) - }, - || { - S2::prove( - &pp.ck_secondary, - &pk.pk_secondary, - &pp.circuit_shape_secondary.r1cs_shape, - &f_U_secondary, - &f_W_secondary, - ) - }, - ); - - Ok(Self { - r_U_primary: recursive_snark.r_U_primary.clone(), - r_W_snark_primary: r_W_snark_primary?, - - r_U_secondary: recursive_snark.r_U_secondary.clone(), - l_u_secondary: recursive_snark.l_u_secondary.clone(), - nifs_secondary, - f_W_snark_secondary: f_W_snark_secondary?, - - zn_primary: recursive_snark.zi_primary.clone(), - zn_secondary: recursive_snark.zi_secondary.clone(), - }) - } - - /// Verify the correctness of the `CompressedSNARK` - pub fn verify( - &self, - vk: &VerifierKey, - num_steps: usize, - z0_primary: &[E1::Scalar], - z0_secondary: &[ as Engine>::Scalar], - ) -> Result<(Vec, Vec< as Engine>::Scalar>), NovaError> { - // the number of steps cannot be zero - if num_steps == 0 { - return Err(NovaError::ProofVerifyError); - } - - // check if the (relaxed) R1CS instances have two public 
outputs - if self.l_u_secondary.X.len() != 2 - || self.r_U_primary.X.len() != 2 - || self.r_U_secondary.X.len() != 2 - { - return Err(NovaError::ProofVerifyError); - } - - // check if the output hashes in R1CS instances point to the right running - // instances - let (hash_primary, hash_secondary) = { - let mut hasher = as Engine>::RO::new( - vk.ro_consts_secondary.clone(), - NUM_FE_WITHOUT_IO_FOR_CRHF + 2 * vk.F_arity_primary, - ); - hasher.absorb(vk.pp_digest); - hasher.absorb(E1::Scalar::from(num_steps as u64)); - for e in z0_primary { - hasher.absorb(*e); - } - for e in &self.zn_primary { - hasher.absorb(*e); - } - self.r_U_secondary.absorb_in_ro(&mut hasher); - - let mut hasher2 = ::RO::new( - vk.ro_consts_primary.clone(), - NUM_FE_WITHOUT_IO_FOR_CRHF + 2 * vk.F_arity_secondary, - ); - hasher2.absorb(scalar_as_base::(vk.pp_digest)); - hasher2.absorb( as Engine>::Scalar::from(num_steps as u64)); - for e in z0_secondary { - hasher2.absorb(*e); - } - for e in &self.zn_secondary { - hasher2.absorb(*e); - } - self.r_U_primary.absorb_in_ro(&mut hasher2); - - ( - hasher.squeeze(NUM_HASH_BITS), - hasher2.squeeze(NUM_HASH_BITS), - ) - }; - - if hash_primary != self.l_u_secondary.X[0] - || hash_secondary != scalar_as_base::>(self.l_u_secondary.X[1]) - { - return Err(NovaError::ProofVerifyError); - } - - // fold the secondary's running instance with the last instance to get a folded - // instance - let f_U_secondary = self.nifs_secondary.verify( - &vk.ro_consts_secondary, - &scalar_as_base::(vk.pp_digest), - &self.r_U_secondary, - &self.l_u_secondary, - )?; - - // check the satisfiability of the folded instances using - // SNARKs proving the knowledge of their satisfying witnesses - let (res_primary, res_secondary) = rayon::join( - || { - self.r_W_snark_primary - .verify(&vk.vk_primary, &self.r_U_primary) - }, - || { - self.f_W_snark_secondary - .verify(&vk.vk_secondary, &f_U_secondary) - }, - ); - - res_primary?; - res_secondary?; - - Ok((self.zn_primary.clone(), 
self.zn_secondary.clone())) - } -} - -/// Compute the circuit digest of a [`StepCircuit`]. -/// -/// Note for callers: This function should be called with its performance -/// characteristics in mind. It will synthesize and digest the full `circuit` -/// given. -pub fn circuit_digest>( - circuit: &C, -) -> E1::Scalar { - let augmented_circuit_params = NovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, true); - - // ro_consts_circuit are parameterized by G2 because the type alias uses - // G2::Base = G1::Scalar - let ro_consts_circuit: ROConstantsCircuit> = ROConstantsCircuit::>::default(); - - // Initialize ck for the primary - let augmented_circuit: NovaAugmentedCircuit<'_, Dual, C> = - NovaAugmentedCircuit::new(&augmented_circuit_params, None, circuit, ro_consts_circuit); - let mut cs: ShapeCS = ShapeCS::new(); - let _ = augmented_circuit.synthesize(&mut cs); - cs.r1cs_shape().digest() -} - -pub type CommitmentKey = <::CE as CommitmentEngineTrait>::CommitmentKey; -type Commitment = <::CE as CommitmentEngineTrait>::Commitment; -type CompressedCommitment = <<::CE as CommitmentEngineTrait>::Commitment as CommitmentTrait>::CompressedCommitment; -type CE = ::CE; - -// #[cfg(test)] -// mod tests { -// use core::{fmt::Write, marker::PhantomData}; - -// use ::bellpepper_core::{num::AllocatedNum, ConstraintSystem, -// SynthesisError}; use expect_test::{expect, Expect}; -// use ff::PrimeField; -// use halo2curves::bn256::Bn256; -// use traits::circuit::TrivialCircuit; - -// use self::traits::CurveCycleEquipped; -// use super::*; -// use crate::{ -// provider::{ -// non_hiding_zeromorph::ZMPCS, Bn256EngineIPA, Bn256EngineKZG, -// Bn256EngineZM, PallasEngine, Secp256k1Engine, -// }, -// traits::{evaluation::EvaluationEngineTrait, snark::default_ck_hint}, -// }; - -// type EE = provider::ipa_pc::EvaluationEngine; -// type S = spartan::snark::RelaxedR1CSSNARK; -// type SPrime = spartan::ppsnark::RelaxedR1CSSNARK; - -// #[derive(Clone, Debug, Default)] -// struct 
CubicCircuit { -// _p: PhantomData, -// } - -// impl StepCircuit for CubicCircuit { -// fn arity(&self) -> usize { -// 1 -// } - -// fn synthesize>( -// &self, -// cs: &mut CS, -// z: &[AllocatedNum], -// ) -> Result>, SynthesisError> { -// // Consider a cubic equation: `x^3 + x + 5 = y`, where `x` and -// `y` are // respectively the input and output. -// let x = &z[0]; -// let x_sq = x.square(cs.namespace(|| "x_sq"))?; -// let x_cu = x_sq.mul(cs.namespace(|| "x_cu"), x)?; -// let y = AllocatedNum::alloc(cs.namespace(|| "y"), || { -// Ok(x_cu.get_value().unwrap() + x.get_value().unwrap() + -// F::from(5u64)) })?; - -// cs.enforce( -// || "y = x^3 + x + 5", -// |lc| { -// lc + x_cu.get_variable() -// + x.get_variable() -// + CS::one() -// + CS::one() -// + CS::one() -// + CS::one() -// + CS::one() -// }, -// |lc| lc + CS::one(), -// |lc| lc + y.get_variable(), -// ); - -// Ok(vec![y]) -// } -// } - -// impl CubicCircuit { -// fn output(&self, z: &[F]) -> Vec { -// vec![z[0] * z[0] * z[0] + z[0] + F::from(5u64)] -// } -// } - -// fn test_pp_digest_with(circuit1: &T1, circuit2: -// &T2, expected: &Expect) where -// E1: CurveCycleEquipped, -// T1: StepCircuit, -// T2: StepCircuit< as Engine>::Scalar>, -// EE1: EvaluationEngineTrait, -// EE2: EvaluationEngineTrait>, -// // this is due to the reliance on Abomonation -// ::Repr: Abomonation, -// < as Engine>::Scalar as PrimeField>::Repr: Abomonation, -// { -// // this tests public parameters with a size specifically intended for -// a // spark-compressed SNARK -// let ck_hint1 = &*SPrime::::ck_floor(); -// let ck_hint2 = &*SPrime::, EE2>::ck_floor(); -// let pp = PublicParams::::setup(circuit1, circuit2, ck_hint1, -// ck_hint2).unwrap(); - -// let digest_str = -// pp.digest() -// .to_repr() -// .as_ref() -// .iter() -// .fold(String::new(), |mut output, b| { -// let _ = write!(output, "{b:02x}"); -// output -// }); - -// expected.assert_eq(&digest_str); -// } - -// #[test] -// fn test_pp_digest() { -// 
test_pp_digest_with::, EE<_>>( -// &TrivialCircuit::default(), -// &TrivialCircuit::default(), -// -// &expect!["e5a6a85b77f3fb958b69722a5a21bf656fd21a6b5a012708a4b086b6be6d2b03"], -// ); - -// test_pp_digest_with::, EE<_>>( -// &CubicCircuit::default(), -// &TrivialCircuit::default(), -// -// &expect!["ec707a8b822baebca114b6e61b238374f9ed358c542dd37ee73febb47832cd01"], -// ); - -// test_pp_digest_with::, EE<_>>( -// &TrivialCircuit::default(), -// &TrivialCircuit::default(), -// -// &expect!["df52de22456157eb056003d4dc580a167ab8ce40a151c9944ea09a6fd0028600"], -// ); - -// test_pp_digest_with::, EE<_>>( -// &CubicCircuit::default(), -// &TrivialCircuit::default(), -// -// &expect!["b3ad0f4b734c5bd2ab9e83be8ee0cbaaa120e5cd0270b51cb9d7778a33f0b801"], -// ); - -// test_pp_digest_with::, EE<_>>( -// &TrivialCircuit::default(), -// &TrivialCircuit::default(), -// -// &expect!["e1feca53664212ee750da857c726b2a09bb30b2964f22ea85a19b58c9eaf5701"], -// ); -// test_pp_digest_with::, EE<_>>( -// &CubicCircuit::default(), -// &TrivialCircuit::default(), -// -// &expect!["4ad6b10b6fd24fecba49f08d35bc874a6da9c77735bc0bcf4b78b1914a97e602"], -// ); -// } - -// fn test_ivc_trivial_with() -// where -// E1: CurveCycleEquipped, -// { -// let test_circuit1 = TrivialCircuit::<::Scalar>::default(); let test_circuit2 = -// TrivialCircuit::< as Engine>::Scalar>::default(); - -// // produce public parameters -// let pp = PublicParams::::setup( -// &test_circuit1, -// &test_circuit2, -// &*default_ck_hint(), -// &*default_ck_hint(), -// ) -// .unwrap(); -// let num_steps = 1; - -// // produce a recursive SNARK -// let mut recursive_snark = RecursiveSNARK::new( -// &pp, -// &test_circuit1, -// &test_circuit2, -// &[::Scalar::ZERO], -// &[ as Engine>::Scalar::ZERO], -// ) -// .unwrap(); - -// recursive_snark -// .prove_step(&pp, &test_circuit1, &test_circuit2) -// .unwrap(); - -// // verify the recursive SNARK -// recursive_snark -// .verify( -// &pp, -// num_steps, -// &[::Scalar::ZERO], -// 
&[ as Engine>::Scalar::ZERO], -// ) -// .unwrap(); -// } - -// #[test] -// fn test_ivc_trivial() { -// test_ivc_trivial_with::(); -// test_ivc_trivial_with::(); -// test_ivc_trivial_with::(); -// } - -// fn test_ivc_nontrivial_with() -// where -// E1: CurveCycleEquipped, -// { -// let circuit_primary = TrivialCircuit::default(); -// let circuit_secondary = CubicCircuit::default(); - -// // produce public parameters -// let pp = PublicParams::::setup( -// &circuit_primary, -// &circuit_secondary, -// &*default_ck_hint(), -// &*default_ck_hint(), -// ) -// .unwrap(); - -// let num_steps = 3; - -// // produce a recursive SNARK -// let mut recursive_snark = RecursiveSNARK::::new( -// &pp, -// &circuit_primary, -// &circuit_secondary, -// &[::Scalar::ONE], -// &[ as Engine>::Scalar::ZERO], -// ) -// .unwrap(); - -// for i in 0..num_steps { -// recursive_snark -// .prove_step(&pp, &circuit_primary, &circuit_secondary) -// .unwrap(); - -// // verify the recursive snark at each step of recursion -// recursive_snark -// .verify( -// &pp, -// i + 1, -// &[::Scalar::ONE], -// &[ as Engine>::Scalar::ZERO], -// ) -// .unwrap(); -// } - -// // verify the recursive SNARK -// let (zn_primary, zn_secondary) = recursive_snark -// .verify( -// &pp, -// num_steps, -// &[::Scalar::ONE], -// &[ as Engine>::Scalar::ZERO], -// ) -// .unwrap(); - -// // sanity: check the claimed output with a direct computation of the -// same assert_eq!(zn_primary, vec![::Scalar::ONE]); -// let mut zn_secondary_direct = vec![ as -// Engine>::Scalar::ZERO]; for _i in 0..num_steps { -// zn_secondary_direct = -// circuit_secondary.clone().output(&zn_secondary_direct); } -// assert_eq!(zn_secondary, zn_secondary_direct); -// assert_eq!( -// zn_secondary, -// vec![ as Engine>::Scalar::from(2460515u64)] -// ); -// } - -// #[test] -// fn test_ivc_nontrivial() { -// test_ivc_nontrivial_with::(); -// test_ivc_nontrivial_with::(); -// test_ivc_nontrivial_with::(); -// } - -// fn 
test_ivc_nontrivial_with_some_compression_with() -// where -// E1: CurveCycleEquipped, -// // this is due to the reliance on Abomonation -// ::Repr: Abomonation, -// < as Engine>::Scalar as PrimeField>::Repr: Abomonation, -// S1: RelaxedR1CSSNARKTrait, -// S2: RelaxedR1CSSNARKTrait>, -// { -// let circuit_primary = TrivialCircuit::default(); -// let circuit_secondary = CubicCircuit::default(); - -// // produce public parameters, which we'll maybe use with a -// preprocessing // compressed SNARK -// let pp = PublicParams::::setup( -// &circuit_primary, -// &circuit_secondary, -// &*S1::ck_floor(), -// &*S2::ck_floor(), -// ) -// .unwrap(); - -// let num_steps = 3; - -// // produce a recursive SNARK -// let mut recursive_snark = RecursiveSNARK::::new( -// &pp, -// &circuit_primary, -// &circuit_secondary, -// &[::Scalar::ONE], -// &[ as Engine>::Scalar::ZERO], -// ) -// .unwrap(); - -// for _i in 0..num_steps { -// recursive_snark -// .prove_step(&pp, &circuit_primary, &circuit_secondary) -// .unwrap(); -// } - -// // verify the recursive SNARK -// let (zn_primary, zn_secondary) = recursive_snark -// .verify( -// &pp, -// num_steps, -// &[::Scalar::ONE], -// &[ as Engine>::Scalar::ZERO], -// ) -// .unwrap(); - -// // sanity: check the claimed output with a direct computation of the -// same assert_eq!(zn_primary, vec![::Scalar::ONE]); -// let mut zn_secondary_direct = vec![ as -// Engine>::Scalar::ZERO]; for _i in 0..num_steps { -// zn_secondary_direct = -// circuit_secondary.clone().output(&zn_secondary_direct); } -// assert_eq!(zn_secondary, zn_secondary_direct); -// assert_eq!( -// zn_secondary, -// vec![ as Engine>::Scalar::from(2460515u64)] -// ); - -// // run the compressed snark -// // produce the prover and verifier keys for compressed snark -// let (pk, vk) = CompressedSNARK::<_, S1, S2>::setup(&pp).unwrap(); - -// // produce a compressed SNARK -// let compressed_snark = -// CompressedSNARK::<_, S1, S2>::prove(&pp, &pk, -// &recursive_snark).unwrap(); - -// 
// verify the compressed SNARK -// compressed_snark -// .verify( -// &vk, -// num_steps, -// &[::Scalar::ONE], -// &[ as Engine>::Scalar::ZERO], -// ) -// .unwrap(); -// } - -// fn test_ivc_nontrivial_with_compression_with() -// where -// E1: CurveCycleEquipped, -// EE1: EvaluationEngineTrait, -// EE2: EvaluationEngineTrait>, -// // this is due to the reliance on Abomonation -// ::Repr: Abomonation, -// < as Engine>::Scalar as PrimeField>::Repr: Abomonation, -// { -// test_ivc_nontrivial_with_some_compression_with::, S<_, -// EE2>>() } - -// #[test] -// fn test_ivc_nontrivial_with_compression() { -// test_ivc_nontrivial_with_compression_with::, -// EE<_>>(); test_ivc_nontrivial_with_compression_with::, EE<_>>(); -// test_ivc_nontrivial_with_compression_with::, -// EE<_>>(); test_ivc_nontrivial_with_compression_with::, EE<_>>(); -// test_ivc_nontrivial_with_compression_with::< -// Bn256EngineKZG, -// provider::hyperkzg::EvaluationEngine, -// EE<_>, -// >(); -// } - -// fn test_ivc_nontrivial_with_spark_compression_with() -// where -// E1: CurveCycleEquipped, -// EE1: EvaluationEngineTrait, -// EE2: EvaluationEngineTrait>, -// // this is due to the reliance on Abomonation -// ::Repr: Abomonation, -// < as Engine>::Scalar as PrimeField>::Repr: Abomonation, -// { -// test_ivc_nontrivial_with_some_compression_with::, -// SPrime<_, EE2>>() } - -// #[test] -// fn test_ivc_nontrivial_with_spark_compression() { -// test_ivc_nontrivial_with_spark_compression_with::, EE<_>>(); -// test_ivc_nontrivial_with_spark_compression_with::, EE<_>>(); -// test_ivc_nontrivial_with_spark_compression_with::, EE<_>>(); -// test_ivc_nontrivial_with_spark_compression_with::, EE<_>>(); -// test_ivc_nontrivial_with_spark_compression_with::< -// Bn256EngineKZG, -// provider::hyperkzg::EvaluationEngine, -// EE<_>, -// >(); -// } - -// type BatchedS = spartan::batched::BatchedRelaxedR1CSSNARK; -// type BatchedSPrime = spartan::batched::BatchedRelaxedR1CSSNARK; - -// fn 
test_ivc_nontrivial_with_batched_compression_with() -// where -// E1: CurveCycleEquipped, -// EE1: EvaluationEngineTrait, -// EE2: EvaluationEngineTrait>, -// // this is due to the reliance on Abomonation -// ::Repr: Abomonation, -// < as Engine>::Scalar as PrimeField>::Repr: Abomonation, -// { -// // this tests compatibility of the batched workflow with the -// non-batched one test_ivc_nontrivial_with_some_compression_with::, BatchedS<_, EE2>>() } - -// #[test] -// fn test_ivc_nontrivial_with_batched_compression() { -// test_ivc_nontrivial_with_batched_compression_with::, EE<_>>(); -// test_ivc_nontrivial_with_batched_compression_with::, EE<_>>(); -// test_ivc_nontrivial_with_batched_compression_with::, EE<_>>(); -// test_ivc_nontrivial_with_batched_compression_with::, EE<_>>( ); -// test_ivc_nontrivial_with_batched_compression_with::< -// Bn256EngineKZG, -// provider::hyperkzg::EvaluationEngine, -// EE<_>, -// >(); -// } - -// fn test_ivc_nontrivial_with_batched_spark_compression_with() where -// E1: CurveCycleEquipped, -// EE1: EvaluationEngineTrait, -// EE2: EvaluationEngineTrait>, -// // this is due to the reliance on Abomonation -// ::Repr: Abomonation, -// < as Engine>::Scalar as PrimeField>::Repr: Abomonation, -// { -// // this tests compatibility of the batched workflow with the -// non-batched one test_ivc_nontrivial_with_some_compression_with::< -// E1, -// BatchedSPrime<_, EE1>, -// BatchedSPrime<_, EE2>, -// >() -// } - -// #[test] -// fn test_ivc_nontrivial_with_batched_spark_compression() { -// test_ivc_nontrivial_with_batched_spark_compression_with::, EE<_>>(); -// test_ivc_nontrivial_with_batched_spark_compression_with::, EE<_>>(); -// test_ivc_nontrivial_with_batched_spark_compression_with::, EE<_>>(); -// test_ivc_nontrivial_with_batched_spark_compression_with::< -// Bn256EngineZM, -// ZMPCS, -// EE<_>, -// >(); -// test_ivc_nontrivial_with_batched_spark_compression_with::< -// Bn256EngineKZG, -// provider::hyperkzg::EvaluationEngine, -// EE<_>, 
-// >(); -// } - -// fn test_ivc_nondet_with_compression_with() -// where -// E1: CurveCycleEquipped, -// EE1: EvaluationEngineTrait, -// EE2: EvaluationEngineTrait>, -// // this is due to the reliance on Abomonation -// ::Repr: Abomonation, -// < as Engine>::Scalar as PrimeField>::Repr: Abomonation, -// { -// // y is a non-deterministic advice representing the fifth root of the -// input at a // step. -// #[derive(Clone, Debug)] -// struct FifthRootCheckingCircuit { -// y: F, -// } - -// impl FifthRootCheckingCircuit { -// fn new(num_steps: usize) -> (Vec, Vec) { -// let mut powers = Vec::new(); -// let rng = &mut rand::rngs::OsRng; -// let mut seed = F::random(rng); -// for _i in 0..num_steps + 1 { -// seed *= seed.clone().square().square(); - -// powers.push(Self { y: seed }); -// } - -// // reverse the powers to get roots -// let roots = powers.into_iter().rev().collect::>(); -// (vec![roots[0].y], roots[1..].to_vec()) -// } -// } - -// impl StepCircuit for FifthRootCheckingCircuit -// where -// F: PrimeField, -// { -// fn arity(&self) -> usize { -// 1 -// } - -// fn synthesize>( -// &self, -// cs: &mut CS, -// z: &[AllocatedNum], -// ) -> Result>, SynthesisError> { -// let x = &z[0]; - -// // we allocate a variable and set it to the provided -// non-deterministic advice. 
let y = -// AllocatedNum::alloc_infallible(cs.namespace(|| "y"), || self.y); - -// // We now check if y = x^{1/5} by checking if y^5 = x -// let y_sq = y.square(cs.namespace(|| "y_sq"))?; -// let y_quad = y_sq.square(cs.namespace(|| "y_quad"))?; -// let y_pow_5 = y_quad.mul(cs.namespace(|| "y_fifth"), &y)?; - -// cs.enforce( -// || "y^5 = x", -// |lc| lc + y_pow_5.get_variable(), -// |lc| lc + CS::one(), -// |lc| lc + x.get_variable(), -// ); - -// Ok(vec![y]) -// } -// } - -// let circuit_primary = FifthRootCheckingCircuit { -// y: ::Scalar::ZERO, -// }; - -// let circuit_secondary = TrivialCircuit::default(); - -// // produce public parameters -// let pp = PublicParams::::setup( -// &circuit_primary, -// &circuit_secondary, -// &*default_ck_hint(), -// &*default_ck_hint(), -// ) -// .unwrap(); - -// let num_steps = 3; - -// // produce non-deterministic advice -// let (z0_primary, roots) = FifthRootCheckingCircuit::new(num_steps); -// let z0_secondary = vec![ as Engine>::Scalar::ZERO]; - -// // produce a recursive SNARK -// let mut recursive_snark = RecursiveSNARK::::new( -// &pp, -// &roots[0], -// &circuit_secondary, -// &z0_primary, -// &z0_secondary, -// ) -// .unwrap(); - -// for circuit_primary in roots.iter().take(num_steps) { -// recursive_snark -// .prove_step(&pp, circuit_primary, &circuit_secondary) -// .unwrap(); -// } - -// // verify the recursive SNARK -// recursive_snark -// .verify(&pp, num_steps, &z0_primary, &z0_secondary) -// .unwrap(); - -// // produce the prover and verifier keys for compressed snark -// let (pk, vk) = CompressedSNARK::<_, S, S<_, -// EE2>>::setup(&pp).unwrap(); - -// // produce a compressed SNARK -// let compressed_snark = -// CompressedSNARK::<_, S, S<_, EE2>>::prove(&pp, &pk, -// &recursive_snark).unwrap(); - -// // verify the compressed SNARK -// compressed_snark -// .verify(&vk, num_steps, &z0_primary, &z0_secondary) -// .unwrap(); -// } - -// #[test] -// fn test_ivc_nondet_with_compression() { -// 
test_ivc_nondet_with_compression_with::, -// EE<_>>(); test_ivc_nondet_with_compression_with::, EE<_>>(); -// test_ivc_nondet_with_compression_with::, -// EE<_>>(); test_ivc_nondet_with_compression_with::, EE<_>>(); } - -// fn test_ivc_base_with() -// where -// E1: CurveCycleEquipped, -// { -// let test_circuit1 = TrivialCircuit::<::Scalar>::default(); let test_circuit2 = -// CubicCircuit::< as Engine>::Scalar>::default(); - -// // produce public parameters -// let pp = PublicParams::::setup( -// &test_circuit1, -// &test_circuit2, -// &*default_ck_hint(), -// &*default_ck_hint(), -// ) -// .unwrap(); - -// let num_steps = 1; - -// // produce a recursive SNARK -// let mut recursive_snark = RecursiveSNARK::::new( -// &pp, -// &test_circuit1, -// &test_circuit2, -// &[::Scalar::ONE], -// &[ as Engine>::Scalar::ZERO], -// ) -// .unwrap(); - -// // produce a recursive SNARK -// recursive_snark -// .prove_step(&pp, &test_circuit1, &test_circuit2) -// .unwrap(); - -// // verify the recursive SNARK -// let (zn_primary, zn_secondary) = recursive_snark -// .verify( -// &pp, -// num_steps, -// &[::Scalar::ONE], -// &[ as Engine>::Scalar::ZERO], -// ) -// .unwrap(); - -// assert_eq!(zn_primary, vec![::Scalar::ONE]); -// assert_eq!(zn_secondary, vec![ as -// Engine>::Scalar::from(5u64)]); } - -// #[test] -// fn test_ivc_base() { -// test_ivc_base_with::(); -// test_ivc_base_with::(); -// test_ivc_base_with::(); -// } - -// fn test_setup_with() { -// #[derive(Clone, Debug, Default)] -// struct CircuitWithInputize { -// _p: PhantomData, -// } - -// impl StepCircuit for CircuitWithInputize { -// fn arity(&self) -> usize { -// 1 -// } - -// fn synthesize>( -// &self, -// cs: &mut CS, -// z: &[AllocatedNum], -// ) -> Result>, SynthesisError> { -// let x = &z[0]; -// // a simplified version of this test would only have one -// input // but beside the Nova Public parameter requirement for -// a num_io = 2, being // probed in this test, we *also* require -// num_io to be even, so // 
negative testing requires at least 4 -// inputs let y = x.square(cs.namespace(|| "x_sq"))?; -// y.inputize(cs.namespace(|| "y"))?; // inputize y -// let y2 = x.square(cs.namespace(|| "x_sq2"))?; -// y2.inputize(cs.namespace(|| "y2"))?; // inputize y2 -// let y3 = x.square(cs.namespace(|| "x_sq3"))?; -// y3.inputize(cs.namespace(|| "y3"))?; // inputize y2 -// let y4 = x.square(cs.namespace(|| "x_sq4"))?; -// y4.inputize(cs.namespace(|| "y4"))?; // inputize y2 -// Ok(vec![y, y2, y3, y4]) -// } -// } - -// // produce public parameters with trivial secondary -// let circuit = CircuitWithInputize::<::Scalar>::default(); let pp = PublicParams::::setup( -// &circuit, -// &TrivialCircuit::default(), -// &*default_ck_hint(), -// &*default_ck_hint(), -// ); -// assert!(pp.is_err()); -// assert_eq!(pp.err(), Some(NovaError::InvalidStepCircuitIO)); - -// // produce public parameters with the trivial primary -// let circuit = CircuitWithInputize::< as -// Engine>::Scalar>::default(); let pp = PublicParams::::setup( -// &TrivialCircuit::default(), -// &circuit, -// &*default_ck_hint(), -// &*default_ck_hint(), -// ); -// assert!(pp.is_err()); -// assert_eq!(pp.err(), Some(NovaError::InvalidStepCircuitIO)); -// } - -// #[test] -// fn test_setup() { -// test_setup_with::(); -// } -// } From a4b14b6d055f7521f1e2df356cd4e11ea898bfbf Mon Sep 17 00:00:00 2001 From: Colin Roberts Date: Sun, 2 Mar 2025 06:11:44 -0700 Subject: [PATCH 16/51] cleanup --- frontend/src/noir.rs | 201 ++++++++++++------------------------------- 1 file changed, 57 insertions(+), 144 deletions(-) diff --git a/frontend/src/noir.rs b/frontend/src/noir.rs index 7160c91..91f1921 100644 --- a/frontend/src/noir.rs +++ b/frontend/src/noir.rs @@ -66,8 +66,6 @@ impl StepCircuit for NoirProgram { fn circuit_index(&self) -> usize { self.index } - #[allow(clippy::too_many_lines)] - #[allow(clippy::too_many_lines)] fn synthesize>( &self, cs: &mut CS, @@ -75,20 +73,10 @@ impl StepCircuit for NoirProgram { z: &[AllocatedNum], 
) -> Result<(Option>, Vec>), SynthesisError> { trace!("Synthesizing NoirProgram with {} inputs", z.len()); - trace!("Inner pc: {:?}", pc); - trace!("Circuit index: {}", self.index); - trace!("ABI parameters: {:?}", self.abi.parameters); - trace!("ABI return type: {:?}", self.abi.return_type); - trace!("Private parameters: {:?}", self.circuit().private_parameters); - trace!("Public parameters: {:?}", self.circuit().public_parameters); - trace!("Return values: {:?}", self.circuit().return_values); - - // Create a map to track allocated variables for the cs - let mut allocated_vars: HashMap> = HashMap::new(); - // Initialize ACVM and populate witness map from inputs - let mut acvm_witness_map = if let Some(inputs) = &self.witness { - trace!("Witness is present, initializing ACVM"); + // Create variable tracker and initialize ACVM + let mut allocated_vars: HashMap> = HashMap::new(); + let acvm_witness_map = if let Some(inputs) = &self.witness { let mut acvm = ACVM::new( &StubbedBlackBoxSolver(false), &self.circuit().opcodes, @@ -97,212 +85,137 @@ impl StepCircuit for NoirProgram { &[], ); - // Convert InputMap to ACVM witness map - // We need to convert from Scalar to GenericFieldElement - // TODO: Shouldn't clone here, but it works for now. 
- let mut inputs = inputs.private_inputs.clone(); - inputs.insert( + // Prepare inputs with registers + let mut inputs_with_registers = inputs.private_inputs.clone(); + inputs_with_registers.insert( "registers".to_string(), InputValue::Vec( z.iter() - .map(|z| InputValue::Field(convert_to_acir_field(z.get_value().unwrap()))) + .filter_map(|var| var.get_value().map(|v| InputValue::Field(convert_to_acir_field(v)))) .collect(), ), ); - if let Ok(encoded_map) = self.abi.encode(&inputs, None) { + + // Encode inputs through ABI + if let Ok(encoded_map) = self.abi.encode(&inputs_with_registers, None) { for (witness, value) in encoded_map { - // Convert FieldElement to GenericFieldElement acvm.overwrite_witness(witness, value); } } - // Execute ACVM to get witness values + // Solve and get resulting witness map trace!("Executing ACVM solve..."); - let status = acvm.solve(); - trace!("ACVM solve status: {:?}", status); - - let witness_map = acvm.finalize(); - Some(witness_map) + acvm.solve(); + Some(acvm.finalize()) } else { - trace!("No witness provided, skipping ACVM initialization"); None }; - // Allocate public variables from z + // Allocate variables from public inputs (z) for (i, witness) in self.circuit().public_parameters.0.iter().enumerate() { if i < z.len() { - trace!("Allocating public input {} (witness {:?}) from z", i, witness); allocated_vars.insert(*witness, z[i].clone()); } } - // Allocate private variables - for &witness in &self.circuit().private_parameters { - if !allocated_vars.contains_key(&witness) { - let value = acvm_witness_map - .as_ref() - .and_then(|map| map.get(&witness)) - .map(|&v| convert_to_halo2_field(v)); - - trace!("Allocating private input (witness {:?}) with value: {:?}", witness, value); - let var = AllocatedNum::alloc( - &mut cs.namespace(|| format!("private_input_{}", witness.as_usize())), - || Ok(value.unwrap_or_else(Scalar::zero)), - )?; - - allocated_vars.insert(witness, var); - } - } - - // Helper function to get or create a 
variable for a witness + // Helper for getting/creating variables let get_var = |witness: &Witness, - allocated_vars: &mut HashMap>, + vars: &mut HashMap>, cs: &mut CS, - gate_idx: usize| - -> Result { - if let Some(var) = allocated_vars.get(witness) { - trace!("Using existing variable for witness {:?}", witness); - Ok(var.get_variable()) + idx: usize| { + if let Some(var) = vars.get(witness) { + Ok::<_, SynthesisError>(var.get_variable()) } else { - trace!("Allocating new variable for witness {:?} in gate {}", witness, gate_idx); - - // Get value from ACVM if available let value = acvm_witness_map .as_ref() .and_then(|map| map.get(witness)) .map(|&v| convert_to_halo2_field(v)); - let var = AllocatedNum::alloc( - cs.namespace(|| format!("aux_{}_w{}", gate_idx, witness.as_usize())), - || Ok(value.unwrap_or_else(Scalar::zero)), - )?; - - allocated_vars.insert(*witness, var.clone()); - trace!("Added auxiliary witness {:?} to allocated_vars", witness); + let var = AllocatedNum::alloc(cs.namespace(|| format!("w{}", witness.as_usize())), || { + Ok(value.unwrap_or_else(Scalar::zero)) + })?; + vars.insert(*witness, var.clone()); Ok(var.get_variable()) } }; // Process gates - trace!("Processing {} gates", self.circuit().opcodes.len()); - for (gate_idx, opcode) in self.circuit().opcodes.iter().enumerate() { + for (idx, opcode) in self.circuit().opcodes.iter().enumerate() { if let Opcode::AssertZero(gate) = opcode { - // Initialize empty linear combinations for each part of our R1CS constraint let mut left_terms = LinearCombination::zero(); let mut right_terms = LinearCombination::zero(); let mut final_terms = LinearCombination::zero(); - // Process multiplication terms (these form the A and B matrices in R1CS) + // Process multiplication terms for mul_term in &gate.mul_terms { - let coeff = convert_to_halo2_field(mul_term.0); - let left_var = get_var(&mul_term.1, &mut allocated_vars, cs, gate_idx)?; - let right_var = get_var(&mul_term.2, &mut allocated_vars, cs, gate_idx)?; 
- - // Build Az (left terms) with coefficient - left_terms = left_terms + (coeff, left_var); - // Build Bz (right terms) with coefficient 1 + let left_var = get_var(&mul_term.1, &mut allocated_vars, cs, idx)?; + let right_var = get_var(&mul_term.2, &mut allocated_vars, cs, idx)?; + left_terms = left_terms + (convert_to_halo2_field(mul_term.0), left_var); right_terms = right_terms + (Scalar::one(), right_var); } - // Process addition terms (these contribute to the C matrix in R1CS) + // Process addition terms for add_term in &gate.linear_combinations { - let coeff = convert_to_halo2_field(add_term.0); - let var = get_var(&add_term.1, &mut allocated_vars, cs, gate_idx)?; - final_terms = final_terms + (coeff, var); + let var = get_var(&add_term.1, &mut allocated_vars, cs, idx)?; + final_terms = final_terms + (convert_to_halo2_field(add_term.0), var); } - // Handle constant term if present + // Handle constant term if !gate.q_c.is_zero() { - let const_coeff = convert_to_halo2_field(gate.q_c); - // Negate the constant term since we're moving it to the other side of the equation - final_terms = final_terms - (const_coeff, Variable::new_unchecked(Index::Input(0))); + final_terms = final_terms + - (convert_to_halo2_field(gate.q_c), Variable::new_unchecked(Index::Input(0))); } - // Enforce the R1CS constraint: Az ∘ Bz = Cz - cs.enforce( - || format!("gate_{gate_idx}"), - |_| left_terms.clone(), - |_| right_terms.clone(), - |_| final_terms, - ); + // Enforce constraint + cs.enforce(|| format!("g{}", idx), |_| left_terms, |_| right_terms, |_| final_terms); } else { - panic!("non-AssertZero gate {} of type {:?}", gate_idx, opcode); + panic!("non-AssertZero gate {} of type {:?}", idx, opcode); } } // Prepare return values - trace!("Preparing return values"); let mut return_values = vec![]; - - // Ensure all return witnesses have allocated variables - for (i, ret) in self.circuit().return_values.0.iter().enumerate() { + for ret in &self.circuit().return_values.0 { + // Ensure 
return witness has an allocated variable if !allocated_vars.contains_key(ret) { - trace!("Return value {} (witness {:?}) not yet allocated, creating", i, ret); - - // Get value from ACVM if available let value = acvm_witness_map .as_ref() .and_then(|map| map.get(ret)) .map(|&v| convert_to_halo2_field(v)); - let var = AllocatedNum::alloc(&mut cs.namespace(|| format!("return_value_{}", i)), || { + let var = AllocatedNum::alloc(cs.namespace(|| format!("ret{}", ret.as_usize())), || { Ok(value.unwrap_or_else(Scalar::zero)) })?; allocated_vars.insert(*ret, var); } - - trace!("Adding return value {} (witness {:?}) to results", i, ret); return_values.push(allocated_vars[ret].clone()); } - // Extract return structure from ABI - if let Some(return_type) = &self.abi.return_type { - if let AbiType::Struct { fields, .. } = &return_type.abi_type { - // Find the registers field in the struct - let (registers_length, next_pc_index) = fields + // Extract return structure (registers and next_pc) + if let Some(noirc_abi::AbiReturnType { abi_type: AbiType::Struct { fields, .. }, .. }) = + &self.abi.return_type + { + let registers_length = fields .iter() .find(|(name, _)| name == "registers") .map(|(_, typ)| match typ { - AbiType::Array { length, .. } => (*length as usize, *length as usize), // next_pc follows registers - _ => panic!("Expected registers to be an array type, found {:?}", typ), + AbiType::Array { length, .. 
} => *length as usize, + _ => panic!("Expected registers to be an array type"), }) - .unwrap_or_else(|| panic!("Expected 'registers' field in return struct")); - - trace!( - "Return struct has registers_length={}, next_pc_index={}", - registers_length, - next_pc_index - ); - - if next_pc_index < return_values.len() { - // Extract next_pc and registers - let next_pc = Some(return_values[next_pc_index].clone()); - let registers = return_values[..registers_length].to_vec(); - - trace!( - "Returning next_pc at index {} and {} register values", - next_pc_index, - registers.len() - ); - return Ok((next_pc, registers)); - } else { - trace!( - "ERROR: next_pc_index {} out of bounds for return_values length {}", - next_pc_index, - return_values.len() - ); - return Err(SynthesisError::Unsatisfiable); - } - } else { - trace!("Return type is not a struct: {:?}", return_type.abi_type); - return Err(SynthesisError::Unsatisfiable); + .unwrap_or_else(|| panic!("Missing 'registers' field")); + + let next_pc_index = registers_length; + + if next_pc_index < return_values.len() { + let next_pc = Some(return_values[next_pc_index].clone()); + let registers = return_values[..registers_length].to_vec(); + return Ok((next_pc, registers)); } - } else { - trace!("No return type specified"); - return Err(SynthesisError::Unsatisfiable); } + + Err(SynthesisError::Unsatisfiable) } } From 41a22f75fbefa78d6cc718628bec77d6f814c234 Mon Sep 17 00:00:00 2001 From: Colin Roberts Date: Sun, 2 Mar 2025 06:19:52 -0700 Subject: [PATCH 17/51] feat: program counter getter --- prover/src/supernova/mod.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/prover/src/supernova/mod.rs b/prover/src/supernova/mod.rs index a7b20a5..ad8a65d 100644 --- a/prover/src/supernova/mod.rs +++ b/prover/src/supernova/mod.rs @@ -760,6 +760,9 @@ where E1: CurveCycleEquipped /// Outputs of the primary circuits pub fn zi_primary(&self) -> &Vec { &self.zi_primary } + /// Current program counter + pub fn program_counter(&self) -> 
E1::Scalar { self.program_counter } + /// executing a step of the incremental computation #[allow(clippy::too_many_arguments)] #[tracing::instrument(skip_all, name = "supernova::RecursiveSNARK::prove_step")] From 7a9de5f1bdbb93975ad170f42ed64624dcffcdd4 Mon Sep 17 00:00:00 2001 From: Colin Roberts Date: Sun, 2 Mar 2025 06:46:06 -0700 Subject: [PATCH 18/51] cleanup --- frontend/src/noir.rs | 26 +++++++------- frontend/src/program/mod.rs | 68 ++++++++++++++++++++++++++----------- 2 files changed, 61 insertions(+), 33 deletions(-) diff --git a/frontend/src/noir.rs b/frontend/src/noir.rs index 91f1921..4180764 100644 --- a/frontend/src/noir.rs +++ b/frontend/src/noir.rs @@ -16,7 +16,7 @@ use bellpepper_core::{ }; use client_side_prover::supernova::StepCircuit; use ff::PrimeField; -use noirc_abi::{input_parser::InputValue, Abi, AbiParameter, AbiType, AbiVisibility}; +use noirc_abi::{input_parser::InputValue, Abi, AbiType}; use tracing::trace; use super::*; @@ -66,6 +66,7 @@ impl StepCircuit for NoirProgram { fn circuit_index(&self) -> usize { self.index } + #[allow(clippy::too_many_lines)] fn synthesize>( &self, cs: &mut CS, @@ -76,7 +77,7 @@ impl StepCircuit for NoirProgram { // Create variable tracker and initialize ACVM let mut allocated_vars: HashMap> = HashMap::new(); - let acvm_witness_map = if let Some(inputs) = &self.witness { + let acvm_witness_map = self.witness.as_ref().map(|inputs| { let mut acvm = ACVM::new( &StubbedBlackBoxSolver(false), &self.circuit().opcodes, @@ -86,6 +87,7 @@ impl StepCircuit for NoirProgram { ); // Prepare inputs with registers + // TODO: Can we reove this clone? 
let mut inputs_with_registers = inputs.private_inputs.clone(); inputs_with_registers.insert( "registers".to_string(), @@ -106,10 +108,8 @@ impl StepCircuit for NoirProgram { // Solve and get resulting witness map trace!("Executing ACVM solve..."); acvm.solve(); - Some(acvm.finalize()) - } else { - None - }; + acvm.finalize() + }); // Allocate variables from public inputs (z) for (i, witness) in self.circuit().public_parameters.0.iter().enumerate() { @@ -121,8 +121,7 @@ impl StepCircuit for NoirProgram { // Helper for getting/creating variables let get_var = |witness: &Witness, vars: &mut HashMap>, - cs: &mut CS, - idx: usize| { + cs: &mut CS| { if let Some(var) = vars.get(witness) { Ok::<_, SynthesisError>(var.get_variable()) } else { @@ -149,15 +148,15 @@ impl StepCircuit for NoirProgram { // Process multiplication terms for mul_term in &gate.mul_terms { - let left_var = get_var(&mul_term.1, &mut allocated_vars, cs, idx)?; - let right_var = get_var(&mul_term.2, &mut allocated_vars, cs, idx)?; + let left_var = get_var(&mul_term.1, &mut allocated_vars, cs)?; + let right_var = get_var(&mul_term.2, &mut allocated_vars, cs)?; left_terms = left_terms + (convert_to_halo2_field(mul_term.0), left_var); right_terms = right_terms + (Scalar::one(), right_var); } // Process addition terms for add_term in &gate.linear_combinations { - let var = get_var(&add_term.1, &mut allocated_vars, cs, idx)?; + let var = get_var(&add_term.1, &mut allocated_vars, cs)?; final_terms = final_terms + (convert_to_halo2_field(add_term.0), var); } @@ -168,9 +167,9 @@ impl StepCircuit for NoirProgram { } // Enforce constraint - cs.enforce(|| format!("g{}", idx), |_| left_terms, |_| right_terms, |_| final_terms); + cs.enforce(|| format!("g{idx}"), |_| left_terms, |_| right_terms, |_| final_terms); } else { - panic!("non-AssertZero gate {} of type {:?}", idx, opcode); + panic!("non-AssertZero gate {idx} of type {opcode:?}"); } } @@ -197,6 +196,7 @@ impl StepCircuit for NoirProgram { if let 
Some(noirc_abi::AbiReturnType { abi_type: AbiType::Struct { fields, .. }, .. }) = &self.abi.return_type { + // TODO: This should be an error. let registers_length = fields .iter() .find(|(name, _)| name == "registers") diff --git a/frontend/src/program/mod.rs b/frontend/src/program/mod.rs index 9e4c6e9..37fea54 100644 --- a/frontend/src/program/mod.rs +++ b/frontend/src/program/mod.rs @@ -1,8 +1,6 @@ -use std::collections::HashMap; - use client_side_prover::{ supernova::{NonUniformCircuit, RecursiveSNARK}, - traits::{snark::default_ck_hint, Dual}, + traits::snark::default_ck_hint, }; use halo2curves::grumpkin; use noirc_abi::InputMap; @@ -26,6 +24,7 @@ pub struct SwitchboardInputs { pub pc: usize, } +// TODO: Use a mapping of program counter to circuit index #[derive(Debug, Clone)] pub struct Switchboard { pub circuits: Vec, @@ -74,8 +73,7 @@ pub fn run(switchboard: &Switchboard) -> Result, ProofError> info!("Starting SuperNova program..."); info!("Setting up PublicParams..."); - // TODO: This is stupid to do, but I need to get around the original setting of the witness. 
- // Having separate setup is the way (we already know this) + // Create a witness-free clone for setup let mut memory_clone = switchboard.clone(); memory_clone.circuits.iter_mut().for_each(|circ| circ.witness = None); let public_params = PublicParams::setup(&memory_clone, &*default_ck_hint(), &*default_ck_hint()); @@ -83,44 +81,74 @@ pub fn run(switchboard: &Switchboard) -> Result, ProofError> let z0_primary = &switchboard.public_input; let z0_secondary = &[grumpkin::Fr::ZERO]; - let mut recursive_snark_option = None; - let time = std::time::Instant::now(); + + // Initialize recursive SNARK as None + let mut recursive_snark: Option> = None; + for (idx, switchboard_witness) in switchboard.switchboard_inputs.iter().enumerate() { info!("Step {} of {} witnesses", idx, switchboard.switchboard_inputs.len()); - debug!("Program counter = {:?}", switchboard_witness.pc); - let mut circuit_primary = switchboard.primary_circuit(switchboard_witness.pc); + // Determine program counter based on current state + let program_counter = match &recursive_snark { + None => switchboard.initial_circuit_index(), + Some(snark) => { + // TODO: I honestly am surprised that the prover chose to use a usize instead of a field + // element for the PC, it would be cleaner to do otherwise + let pc_bytes = snark.program_counter().to_bytes(); + + // Check if higher bytes are non-zero (which would be truncated in usize conversion) + let usize_size = std::mem::size_of::(); + if pc_bytes[usize_size..].iter().any(|&b| b != 0) { + return Err(ProofError::Other("Program counter value too large for usize".into())); + } + + // Convert the relevant bytes to usize (using little-endian order) + let mut pc_value = 0usize; + for (i, &b) in pc_bytes.iter().take(usize_size).enumerate() { + pc_value |= (b as usize) << (i * 8); + } + + pc_value + }, + }; + + debug!("Program counter = {:?}", program_counter); + + // Prepare circuits for this step + let mut circuit_primary = 
switchboard.primary_circuit(program_counter); circuit_primary.witness = Some(switchboard_witness.clone()); let circuit_secondary = switchboard.secondary_circuit(); - let mut recursive_snark = recursive_snark_option.unwrap_or_else(|| { - RecursiveSNARK::new( + // Initialize or update the recursive SNARK + if recursive_snark.is_none() { + // Initialize a new recursive SNARK for the first step + recursive_snark = Some(RecursiveSNARK::new( &public_params, switchboard, &circuit_primary, &circuit_secondary, z0_primary, z0_secondary, - ) - })?; + )?); + } + // Prove the next step info!("Proving single step..."); - recursive_snark.prove_step(&public_params, &circuit_primary, &circuit_secondary)?; + let snark = recursive_snark.as_mut().unwrap(); + snark.prove_step(&public_params, &circuit_primary, &circuit_secondary)?; info!("Done proving single step..."); // TODO: For some reason this is failing // info!("Verifying single step..."); - // recursive_snark.verify(&public_params, recursive_snark.z0_primary(), z0_secondary)?; + // snark.verify(&public_params, snark.z0_primary(), z0_secondary)?; // info!("Single step verification done"); - - recursive_snark_option = Some(Ok(recursive_snark)); } - // Note, this unwrap cannot fail - let recursive_snark = recursive_snark_option.unwrap(); + trace!("Recursive loop of `program::run()` elapsed: {:?}", time.elapsed()); - Ok(recursive_snark?) + // Return the completed recursive SNARK + Ok(recursive_snark.unwrap()) } // /// Compresses a proof without performing the setup step. 
From 9e6f58b29de7f06f0a37c56657297aabd7d728f0 Mon Sep 17 00:00:00 2001 From: Colin Roberts Date: Sun, 2 Mar 2025 06:50:58 -0700 Subject: [PATCH 19/51] refactor: remove `SwitchboardInputs` --- frontend/src/noir.rs | 9 ++-- frontend/src/program/mod.rs | 8 +-- frontend/tests/ivc/mod.rs | 103 +++++++++--------------------------- 3 files changed, 30 insertions(+), 90 deletions(-) diff --git a/frontend/src/noir.rs b/frontend/src/noir.rs index 4180764..87b9be5 100644 --- a/frontend/src/noir.rs +++ b/frontend/src/noir.rs @@ -16,11 +16,10 @@ use bellpepper_core::{ }; use client_side_prover::supernova::StepCircuit; use ff::PrimeField; -use noirc_abi::{input_parser::InputValue, Abi, AbiType}; +use noirc_abi::{input_parser::InputValue, Abi, AbiType, InputMap}; use tracing::trace; use super::*; -use crate::program::SwitchboardInputs; // TODO: If we deserialize more here and get metadata, we could more easily look at witnesses, etc. // Especially if we want to output a constraint to the PC. Using the abi would be handy for @@ -41,7 +40,7 @@ pub struct NoirProgram { pub names: Vec, pub brillig_names: Vec, #[serde(skip)] - pub witness: Option, + pub witness: Option, #[serde(skip)] pub index: usize, } @@ -55,7 +54,7 @@ impl NoirProgram { &self.bytecode.unconstrained_functions } - pub fn set_inputs(&mut self, switchboard_witness: SwitchboardInputs) { + pub fn set_inputs(&mut self, switchboard_witness: InputMap) { self.witness = Some(switchboard_witness); } } @@ -88,7 +87,7 @@ impl StepCircuit for NoirProgram { // Prepare inputs with registers // TODO: Can we reove this clone? 
- let mut inputs_with_registers = inputs.private_inputs.clone(); + let mut inputs_with_registers = inputs.clone(); inputs_with_registers.insert( "registers".to_string(), InputValue::Vec( diff --git a/frontend/src/program/mod.rs b/frontend/src/program/mod.rs index 37fea54..15f3ed5 100644 --- a/frontend/src/program/mod.rs +++ b/frontend/src/program/mod.rs @@ -18,19 +18,13 @@ pub mod data; /// Compressed proof type pub type CompressedProof = FoldingProof, Scalar>; -#[derive(Debug, Clone)] -pub struct SwitchboardInputs { - pub private_inputs: InputMap, - pub pc: usize, -} - // TODO: Use a mapping of program counter to circuit index #[derive(Debug, Clone)] pub struct Switchboard { pub circuits: Vec, pub public_input: Vec, pub initial_circuit_index: usize, - pub switchboard_inputs: Vec, + pub switchboard_inputs: Vec, } impl NonUniformCircuit for Switchboard { diff --git a/frontend/tests/ivc/mod.rs b/frontend/tests/ivc/mod.rs index 84516a3..7c593cc 100644 --- a/frontend/tests/ivc/mod.rs +++ b/frontend/tests/ivc/mod.rs @@ -1,63 +1,17 @@ -use std::collections::HashMap; - -use acvm::acir::{acir_field::GenericFieldElement, circuit::Opcode}; -use client_side_prover_frontend::program::{run, Switchboard, SwitchboardInputs}; +use acvm::acir::acir_field::GenericFieldElement; +use client_side_prover_frontend::program::{run, Switchboard}; use noirc_abi::{input_parser::InputValue, InputMap}; -use tracing::trace; use super::*; -fn debug_acir_circuit(circuit: &NoirProgram) { - trace!("=== ACIR Circuit Debug ==="); - trace!("ABI: {:?}", circuit.abi); - - trace!("Private parameters: {:?}", circuit.circuit().private_parameters); - trace!("Public parameters: {:?}", circuit.circuit().public_parameters); - trace!("Return values: {:?}", circuit.circuit().return_values); - - trace!("ACIR Opcodes:"); - for (i, op) in circuit.circuit().opcodes.iter().enumerate() { - if let Opcode::AssertZero(gate) = op { - trace!( - " Gate {}: mul_terms={:?}, linear_combinations={:?}, q_c={:?}", - i, - 
gate.mul_terms, - gate.linear_combinations, - gate.q_c - ); - } else { - trace!(" Opcode {}: {:?}", i, op); - } - } - trace!("=== End Debug ==="); -} - #[test] #[traced_test] fn test_ivc() { let circuit = square_zeroth(); let switchboard_inputs = vec![ - SwitchboardInputs { - private_inputs: InputMap::from([( - "next_pc".to_string(), - InputValue::Field(GenericFieldElement::from(0_u64)), - )]), - pc: 0, - }, - SwitchboardInputs { - private_inputs: InputMap::from([( - "next_pc".to_string(), - InputValue::Field(GenericFieldElement::from(0_u64)), - )]), - pc: 0, - }, - SwitchboardInputs { - private_inputs: InputMap::from([( - "next_pc".to_string(), - InputValue::Field(GenericFieldElement::from(0_u64)), - )]), - pc: 0, - }, + InputMap::from([("next_pc".to_string(), InputValue::Field(GenericFieldElement::from(0_u64)))]), + InputMap::from([("next_pc".to_string(), InputValue::Field(GenericFieldElement::from(0_u64)))]), + InputMap::from([("next_pc".to_string(), InputValue::Field(GenericFieldElement::from(0_u64)))]), ]; let memory = Switchboard { @@ -77,34 +31,27 @@ fn test_ivc() { #[traced_test] fn test_ivc_private_inputs() { let circuit = add_external(); - debug_acir_circuit(&circuit); let switchboard_inputs = vec![ - SwitchboardInputs { - private_inputs: InputMap::from([ - ("next_pc".to_string(), InputValue::Field(GenericFieldElement::from(0_u64))), - ( - "external".to_string(), - InputValue::Vec(vec![ - InputValue::Field(GenericFieldElement::from(3_u64)), - InputValue::Field(GenericFieldElement::from(3_u64)), - ]), - ), - ]), - pc: 0, - }, - SwitchboardInputs { - private_inputs: InputMap::from([ - ("next_pc".to_string(), InputValue::Field(GenericFieldElement::from(0_u64))), - ( - "external".to_string(), - InputValue::Vec(vec![ - InputValue::Field(GenericFieldElement::from(420_u64)), - InputValue::Field(GenericFieldElement::from(69_u64)), - ]), - ), - ]), - pc: 0, - }, + InputMap::from([ + ("next_pc".to_string(), InputValue::Field(GenericFieldElement::from(0_u64))), + ( 
+ "external".to_string(), + InputValue::Vec(vec![ + InputValue::Field(GenericFieldElement::from(3_u64)), + InputValue::Field(GenericFieldElement::from(3_u64)), + ]), + ), + ]), + InputMap::from([ + ("next_pc".to_string(), InputValue::Field(GenericFieldElement::from(0_u64))), + ( + "external".to_string(), + InputValue::Vec(vec![ + InputValue::Field(GenericFieldElement::from(420_u64)), + InputValue::Field(GenericFieldElement::from(69_u64)), + ]), + ), + ]), ]; let memory = Switchboard { From 30c1e45a833ab8359b153fde6570cefa745d29d4 Mon Sep 17 00:00:00 2001 From: Colin Roberts Date: Sun, 2 Mar 2025 07:06:08 -0700 Subject: [PATCH 20/51] cleanup --- frontend/src/noir.rs | 1 + frontend/tests/ivc/mod.rs | 96 ++++++++++++++++++--------------------- frontend/tests/lib.rs | 4 +- 3 files changed, 46 insertions(+), 55 deletions(-) diff --git a/frontend/src/noir.rs b/frontend/src/noir.rs index 87b9be5..441a9cb 100644 --- a/frontend/src/noir.rs +++ b/frontend/src/noir.rs @@ -209,6 +209,7 @@ impl StepCircuit for NoirProgram { if next_pc_index < return_values.len() { let next_pc = Some(return_values[next_pc_index].clone()); + dbg!(&next_pc); let registers = return_values[..registers_length].to_vec(); return Ok((next_pc, registers)); } diff --git a/frontend/tests/ivc/mod.rs b/frontend/tests/ivc/mod.rs index 7c593cc..c3a549f 100644 --- a/frontend/tests/ivc/mod.rs +++ b/frontend/tests/ivc/mod.rs @@ -7,7 +7,6 @@ use super::*; #[test] #[traced_test] fn test_ivc() { - let circuit = square_zeroth(); let switchboard_inputs = vec![ InputMap::from([("next_pc".to_string(), InputValue::Field(GenericFieldElement::from(0_u64)))]), InputMap::from([("next_pc".to_string(), InputValue::Field(GenericFieldElement::from(0_u64)))]), @@ -15,7 +14,7 @@ fn test_ivc() { ]; let memory = Switchboard { - circuits: vec![circuit], + circuits: vec![square_zeroth()], public_input: vec![Scalar::from(2), Scalar::from(1)], initial_circuit_index: 0, switchboard_inputs, @@ -30,7 +29,6 @@ fn test_ivc() { #[test] 
#[traced_test] fn test_ivc_private_inputs() { - let circuit = add_external(); let switchboard_inputs = vec![ InputMap::from([ ("next_pc".to_string(), InputValue::Field(GenericFieldElement::from(0_u64))), @@ -55,7 +53,7 @@ fn test_ivc_private_inputs() { ]; let memory = Switchboard { - circuits: vec![circuit], + circuits: vec![add_external()], public_input: vec![Scalar::from(1), Scalar::from(2)], initial_circuit_index: 0, switchboard_inputs, @@ -68,54 +66,46 @@ fn test_ivc_private_inputs() { assert_eq!(zi[1], Scalar::from(74)); } -// #[test] -// #[traced_test] -// fn test_mock_noir_nivc() { -// let mut add_external = NoirProgram::new(ADD_EXTERNAL); -// add_external.set_private_inputs(vec![Scalar::from(5), Scalar::from(7)]); -// let add_external = -// NoirRomCircuit { circuit: add_external, circuit_index: 0, rom_size: 3 }; - -// // TODO: The issue is the private inputs need to be an empty vector or else this isn't computed -// at // all. Be careful, this is insanely touchy and I hate that it is this way. 
-// let mut square_zeroth = NoirProgram::new(SQUARE_ZEROTH); -// square_zeroth.set_private_inputs(vec![]); -// let square_zeroth = -// NoirRomCircuit { circuit: square_zeroth, circuit_index: 1, rom_size: 3 }; -// let mut swap_memory = NoirProgram::new(SWAP_MEMORY); -// swap_memory.set_private_inputs(vec![]); -// let swap_memory = -// NoirRomCircuit { circuit: swap_memory, circuit_index: 2, rom_size: 3 }; +#[test] +#[traced_test] +fn test_mock_noir_nivc() { + let switchboard_inputs = vec![ + InputMap::from([ + ("next_pc".to_string(), InputValue::Field(GenericFieldElement::from(1_u64))), + ( + "external".to_string(), + InputValue::Vec(vec![ + InputValue::Field(GenericFieldElement::from(3_u64)), + InputValue::Field(GenericFieldElement::from(3_u64)), + ]), + ), + ]), + InputMap::from([("next_pc".to_string(), InputValue::Field(GenericFieldElement::from(2_u64)))]), + InputMap::from([( + "next_pc".to_string(), + InputValue::Field(GenericFieldElement::from(-1_i128)), + )]), + ]; -// let memory = NoirMemory { -// circuits: vec![add_external, square_zeroth, swap_memory], -// rom: vec![0, 1, 2], -// public_input: vec![ -// Scalar::from(1), // Actual input -// Scalar::from(2), // Actual input -// Scalar::from(0), // PC -// Scalar::from(0), // ROM -// Scalar::from(1), // ROM -// Scalar::from(2), // ROM -// ], -// }; + let memory = Switchboard { + circuits: vec![add_external(), square_zeroth(), swap_memory()], + public_input: vec![Scalar::from(1), Scalar::from(2)], + initial_circuit_index: 0, + switchboard_inputs, + }; -// let snark = run(&memory).unwrap(); -// let zi = snark.zi_primary(); -// dbg!(zi); -// // First fold: -// // step_out[0] == 1 + 5 == 6 -// // step_out[1] == 2 + 7 == 9 -// // Second fold: -// // step_out[0] == 6 ** 2 == 36 -// // step_out[1] == 9 -// // Third fold: -// // step_out[0] == 9 -// // step_out[1] == 36 -// assert_eq!(zi[0], Scalar::from(9)); -// assert_eq!(zi[1], Scalar::from(36)); -// assert_eq!(zi[2], Scalar::from(3)); -// assert_eq!(zi[3], 
Scalar::from(0)); -// assert_eq!(zi[4], Scalar::from(1)); -// assert_eq!(zi[5], Scalar::from(2)); -// } + let snark = run(&memory).unwrap(); + let zi = snark.zi_primary(); + dbg!(zi); + // First fold: + // step_out[0] == 1 + 5 == 6 + // step_out[1] == 2 + 7 == 9 + // Second fold: + // step_out[0] == 6 ** 2 == 36 + // step_out[1] == 9 + // Third fold: + // step_out[0] == 9 + // step_out[1] == 36 + assert_eq!(zi[0], Scalar::from(9)); + assert_eq!(zi[1], Scalar::from(36)); +} diff --git a/frontend/tests/lib.rs b/frontend/tests/lib.rs index 4e70d39..d5e0404 100644 --- a/frontend/tests/lib.rs +++ b/frontend/tests/lib.rs @@ -11,12 +11,12 @@ pub fn add_external() -> NoirProgram { pub fn square_zeroth() -> NoirProgram { let bytecode = - std::fs::read("..//target/square_zeroth.json").expect("Failed to read Noir program file"); + std::fs::read("../target/square_zeroth.json").expect("Failed to read Noir program file"); NoirProgram::new(&bytecode) } pub fn swap_memory() -> NoirProgram { let bytecode = - std::fs::read("../arget/swap_memory.json").expect("Failed to read Noir program file"); + std::fs::read("../target/swap_memory.json").expect("Failed to read Noir program file"); NoirProgram::new(&bytecode) } From faca08035e2eaa6362f063a2b2fb8d68d4e98347 Mon Sep 17 00:00:00 2001 From: Colin Roberts Date: Sun, 2 Mar 2025 07:16:20 -0700 Subject: [PATCH 21/51] working NIVC --- frontend/src/program/mod.rs | 23 ++++++++++++++++++----- frontend/tests/ivc/mod.rs | 34 +++++++++++++++++----------------- 2 files changed, 35 insertions(+), 22 deletions(-) diff --git a/frontend/src/program/mod.rs b/frontend/src/program/mod.rs index 15f3ed5..f47e1d5 100644 --- a/frontend/src/program/mod.rs +++ b/frontend/src/program/mod.rs @@ -21,10 +21,23 @@ pub type CompressedProof = FoldingProof, Scalar>; // TODO: Use a mapping of program counter to circuit index #[derive(Debug, Clone)] pub struct Switchboard { - pub circuits: Vec, - pub public_input: Vec, - pub initial_circuit_index: usize, - pub 
switchboard_inputs: Vec, + pub(crate) circuits: Vec, + pub(crate) public_input: Vec, + pub(crate) initial_circuit_index: usize, + pub(crate) switchboard_inputs: Vec, +} + +impl Switchboard { + pub fn new( + mut circuits: Vec, + switchboard_inputs: Vec, + public_input: Vec, + initial_circuit_index: usize, + ) -> Self { + // Set the index of each circuit given the order they are passed in + circuits.iter_mut().enumerate().for_each(|(i, c)| c.index = i); + Self { circuits, public_input, initial_circuit_index, switchboard_inputs } + } } impl NonUniformCircuit for Switchboard { @@ -81,7 +94,7 @@ pub fn run(switchboard: &Switchboard) -> Result, ProofError> let mut recursive_snark: Option> = None; for (idx, switchboard_witness) in switchboard.switchboard_inputs.iter().enumerate() { - info!("Step {} of {} witnesses", idx, switchboard.switchboard_inputs.len()); + info!("Step {} of {} witnesses", idx + 1, switchboard.switchboard_inputs.len()); // Determine program counter based on current state let program_counter = match &recursive_snark { diff --git a/frontend/tests/ivc/mod.rs b/frontend/tests/ivc/mod.rs index c3a549f..4bbd7d9 100644 --- a/frontend/tests/ivc/mod.rs +++ b/frontend/tests/ivc/mod.rs @@ -13,12 +13,12 @@ fn test_ivc() { InputMap::from([("next_pc".to_string(), InputValue::Field(GenericFieldElement::from(0_u64)))]), ]; - let memory = Switchboard { - circuits: vec![square_zeroth()], - public_input: vec![Scalar::from(2), Scalar::from(1)], - initial_circuit_index: 0, + let memory = Switchboard::new( + vec![square_zeroth()], switchboard_inputs, - }; + vec![Scalar::from(2), Scalar::from(1)], + 0, + ); let snark = run(&memory).unwrap(); dbg!(&snark.zi_primary()); @@ -52,12 +52,12 @@ fn test_ivc_private_inputs() { ]), ]; - let memory = Switchboard { - circuits: vec![add_external()], - public_input: vec![Scalar::from(1), Scalar::from(2)], - initial_circuit_index: 0, + let memory = Switchboard::new( + vec![add_external()], switchboard_inputs, - }; + vec![Scalar::from(1), 
Scalar::from(2)], + 0, + ); let snark = run(&memory).unwrap(); let zi = snark.zi_primary(); @@ -75,8 +75,8 @@ fn test_mock_noir_nivc() { ( "external".to_string(), InputValue::Vec(vec![ - InputValue::Field(GenericFieldElement::from(3_u64)), - InputValue::Field(GenericFieldElement::from(3_u64)), + InputValue::Field(GenericFieldElement::from(5_u64)), + InputValue::Field(GenericFieldElement::from(7_u64)), ]), ), ]), @@ -87,12 +87,12 @@ fn test_mock_noir_nivc() { )]), ]; - let memory = Switchboard { - circuits: vec![add_external(), square_zeroth(), swap_memory()], - public_input: vec![Scalar::from(1), Scalar::from(2)], - initial_circuit_index: 0, + let memory = Switchboard::new( + vec![add_external(), square_zeroth(), swap_memory()], switchboard_inputs, - }; + vec![Scalar::from(1), Scalar::from(2)], + 0, + ); let snark = run(&memory).unwrap(); let zi = snark.zi_primary(); From 55cffa0bee098bed61fbecf2c08d02dced024151 Mon Sep 17 00:00:00 2001 From: Colin Roberts Date: Sun, 2 Mar 2025 07:19:42 -0700 Subject: [PATCH 22/51] comments --- frontend/src/program/mod.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/frontend/src/program/mod.rs b/frontend/src/program/mod.rs index f47e1d5..b865511 100644 --- a/frontend/src/program/mod.rs +++ b/frontend/src/program/mod.rs @@ -18,7 +18,8 @@ pub mod data; /// Compressed proof type pub type CompressedProof = FoldingProof, Scalar>; -// TODO: Use a mapping of program counter to circuit index +// NOTE: These are `pub(crate)` to avoid exposing the `index` field to the +// outside world. 
#[derive(Debug, Clone)] pub struct Switchboard { pub(crate) circuits: Vec, @@ -34,7 +35,8 @@ impl Switchboard { public_input: Vec, initial_circuit_index: usize, ) -> Self { - // Set the index of each circuit given the order they are passed in + // Set the index of each circuit given the order they are passed in since this is skipped in + // serde circuits.iter_mut().enumerate().for_each(|(i, c)| c.index = i); Self { circuits, public_input, initial_circuit_index, switchboard_inputs } } From 97443bf1413e04c6466bdf69a40a6b4eb1e60e2c Mon Sep 17 00:00:00 2001 From: Colin Roberts Date: Sun, 2 Mar 2025 07:48:20 -0700 Subject: [PATCH 23/51] fix: constraints --- frontend/src/noir.rs | 56 +++++++++++++++++++++++++++++-------- frontend/src/program/mod.rs | 6 ++-- 2 files changed, 47 insertions(+), 15 deletions(-) diff --git a/frontend/src/noir.rs b/frontend/src/noir.rs index 441a9cb..20e36eb 100644 --- a/frontend/src/noir.rs +++ b/frontend/src/noir.rs @@ -17,7 +17,7 @@ use bellpepper_core::{ use client_side_prover::supernova::StepCircuit; use ff::PrimeField; use noirc_abi::{input_parser::InputValue, Abi, AbiType, InputMap}; -use tracing::trace; +use tracing::{debug, error, info, trace}; use super::*; @@ -141,32 +141,64 @@ impl StepCircuit for NoirProgram { // Process gates for (idx, opcode) in self.circuit().opcodes.iter().enumerate() { if let Opcode::AssertZero(gate) = opcode { - let mut left_terms = LinearCombination::zero(); - let mut right_terms = LinearCombination::zero(); - let mut final_terms = LinearCombination::zero(); + // In noir/ACVM, the constraint is (left*right + linear_terms + constant = 0) + // We need to build this as a single LinearCombination that must equal zero + let mut constraint = LinearCombination::zero(); - // Process multiplication terms + // For multiplication terms, we should create intermediate variables for mul_term in &gate.mul_terms { + // Get variables for the factors let left_var = get_var(&mul_term.1, &mut allocated_vars, cs)?; let 
right_var = get_var(&mul_term.2, &mut allocated_vars, cs)?; - left_terms = left_terms + (convert_to_halo2_field(mul_term.0), left_var); - right_terms = right_terms + (Scalar::one(), right_var); + + // Create a variable for their product (done implicitly by bellpepper) + let product = cs.alloc( + || format!("mul_term_product_g{idx}"), + || { + // Retrieve witness values if available (or default to zero) + let left_val = acvm_witness_map + .as_ref() + .and_then(|map| map.get(&mul_term.1).copied()) + .unwrap_or_default(); + let right_val = acvm_witness_map + .as_ref() + .and_then(|map| map.get(&mul_term.2).copied()) + .unwrap_or_default(); + Ok(convert_to_halo2_field(left_val * right_val)) + }, + )?; + + // Add a constraint that product = left * right + cs.enforce( + || format!("mul_constraint_g{idx}"), + |lc| lc + left_var, + |lc| lc + right_var, + |lc| lc + product, + ); + + // Add this product term to our main constraint + constraint = constraint + (convert_to_halo2_field(mul_term.0), product); } // Process addition terms for add_term in &gate.linear_combinations { let var = get_var(&add_term.1, &mut allocated_vars, cs)?; - final_terms = final_terms + (convert_to_halo2_field(add_term.0), var); + constraint = constraint + (convert_to_halo2_field(add_term.0), var); } // Handle constant term if !gate.q_c.is_zero() { - final_terms = final_terms - - (convert_to_halo2_field(gate.q_c), Variable::new_unchecked(Index::Input(0))); + constraint = constraint + + (convert_to_halo2_field(gate.q_c), Variable::new_unchecked(Index::Input(0))); } - // Enforce constraint - cs.enforce(|| format!("g{idx}"), |_| left_terms, |_| right_terms, |_| final_terms); + // Enforce constraint: 1 * 0 = constraint (i.e., constraint must be zero) + cs.enforce( + || format!("gate_constraint_g{idx}"), + |lc| lc + Variable::new_unchecked(Index::Input(0)), // 1 + |lc| lc, // 0 + |_| constraint, + ); } else { panic!("non-AssertZero gate {idx} of type {opcode:?}"); } diff --git 
a/frontend/src/program/mod.rs b/frontend/src/program/mod.rs index b865511..fba6aa0 100644 --- a/frontend/src/program/mod.rs +++ b/frontend/src/program/mod.rs @@ -149,9 +149,9 @@ pub fn run(switchboard: &Switchboard) -> Result, ProofError> info!("Done proving single step..."); // TODO: For some reason this is failing - // info!("Verifying single step..."); - // snark.verify(&public_params, snark.z0_primary(), z0_secondary)?; - // info!("Single step verification done"); + info!("Verifying single step..."); + snark.verify(&public_params, snark.z0_primary(), z0_secondary)?; + info!("Single step verification done"); } trace!("Recursive loop of `program::run()` elapsed: {:?}", time.elapsed()); From d075dc461cbe79270a6a483b46e64e28984e7a4c Mon Sep 17 00:00:00 2001 From: Colin Roberts Date: Sun, 2 Mar 2025 08:11:35 -0700 Subject: [PATCH 24/51] tests: fix and add --- frontend/src/noir.rs | 35 ++++++++++++++++++++++++++--------- frontend/src/program/mod.rs | 8 ++++---- 2 files changed, 30 insertions(+), 13 deletions(-) diff --git a/frontend/src/noir.rs b/frontend/src/noir.rs index 20e36eb..5f87fd5 100644 --- a/frontend/src/noir.rs +++ b/frontend/src/noir.rs @@ -267,8 +267,17 @@ fn convert_to_acir_field(f: Scalar) -> GenericFieldElement { #[cfg(test)] mod tests { + use client_side_prover::bellpepper::shape_cs::ShapeCS; + use super::*; + fn add_external() -> NoirProgram { + let json_path = "../target/add_external.json"; + let json_data = std::fs::read(json_path).expect("Failed to read add_external.json"); + + serde_json::from_slice(&json_data).expect("Failed to deserialize add_external.json") + } + #[test] fn test_conversions() { let f = Scalar::from(5); @@ -282,30 +291,38 @@ mod tests { #[test] fn test_deserialize_abi() { - let json_path = "../examples/add_external/target/add_external.json"; - let json_data = std::fs::read(json_path).expect("Failed to read add_external.json"); - - let program: NoirProgram = - serde_json::from_slice(&json_data).expect("Failed to deserialize 
add_external.json"); + let program = add_external(); // Verify basic structure assert_eq!(program.version, "1.0.0-beta.2+1a2a08cbcb68646ff1aaef383cfc1798933c1355"); - assert_eq!(program.hash, 2789485860577127199); + assert_eq!(program.hash, 4842196402509912449); // Verify parameters assert_eq!(program.abi.parameters.len(), 3); - assert_eq!(program.abi.parameters[0].name, "external"); - assert_eq!(program.abi.parameters[1].name, "registers"); + assert_eq!(program.abi.parameters[0].name, "registers"); + assert_eq!(program.abi.parameters[1].name, "external"); assert_eq!(program.abi.parameters[2].name, "next_pc"); // Verify return type if let AbiType::Struct { fields, path } = &program.abi.return_type.as_ref().unwrap().abi_type { assert_eq!(fields.len(), 2); - assert_eq!(path, "FoldingIO"); + assert_eq!(path, "nivc::FoldingOutput"); assert_eq!(fields[0].0, "registers"); assert_eq!(fields[1].0, "next_pc"); } else { panic!("Expected tuple return type, got {:?}", program.abi.return_type); } } + + #[test] + fn test_constraint_system() { + let program = add_external(); + + let mut cs = ShapeCS::::new(); + let pc = Some(AllocatedNum::alloc(&mut cs, || Ok(Scalar::from(0))).unwrap()); + let z = vec![AllocatedNum::alloc(&mut cs, || Ok(Scalar::from(1))).unwrap()]; + + let _ = program.synthesize(&mut cs, pc.as_ref(), z.as_ref()).unwrap(); + assert_eq!(cs.num_constraints(), 3); + } } diff --git a/frontend/src/program/mod.rs b/frontend/src/program/mod.rs index fba6aa0..1405aac 100644 --- a/frontend/src/program/mod.rs +++ b/frontend/src/program/mod.rs @@ -148,10 +148,10 @@ pub fn run(switchboard: &Switchboard) -> Result, ProofError> snark.prove_step(&public_params, &circuit_primary, &circuit_secondary)?; info!("Done proving single step..."); - // TODO: For some reason this is failing - info!("Verifying single step..."); - snark.verify(&public_params, snark.z0_primary(), z0_secondary)?; - info!("Single step verification done"); + // TODO: Feature gate this or just remove it + // 
info!("Verifying single step..."); + // snark.verify(&public_params, snark.z0_primary(), z0_secondary)?; + // info!("Single step verification done"); } trace!("Recursive loop of `program::run()` elapsed: {:?}", time.elapsed()); From 6e62a017b3c9510159155df7eb708cfecf2639ae Mon Sep 17 00:00:00 2001 From: Colin Roberts Date: Sun, 2 Mar 2025 09:24:42 -0700 Subject: [PATCH 25/51] feat: basic `Setup` --- Cargo.lock | 1 + Cargo.toml | 5 + frontend/Cargo.toml | 3 + frontend/src/lib.rs | 23 + frontend/src/{program/mod.rs => program.rs} | 34 +- frontend/src/program/data.rs | 575 -------------------- frontend/src/setup.rs | 112 ++-- frontend/tests/ivc/mod.rs | 68 ++- frontend/tests/lib.rs | 21 +- frontend/tests/setup/mod.rs | 3 + prover/src/supernova/mod.rs | 6 + 11 files changed, 131 insertions(+), 720 deletions(-) rename frontend/src/{program/mod.rs => program.rs} (88%) delete mode 100644 frontend/src/program/data.rs create mode 100644 frontend/tests/setup/mod.rs diff --git a/Cargo.lock b/Cargo.lock index 3f5fbaf..48b383c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -636,6 +636,7 @@ dependencies = [ "bincode", "byteorder", "client-side-prover", + "client-side-prover-frontend", "ff 0.13.0", "halo2curves", "hex", diff --git a/Cargo.toml b/Cargo.toml index bbf021c..f6e35c0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -64,3 +64,8 @@ serde_json ="1.0.1" lto =true codegen-units=1 panic ="abort" + +[profile.dev] +opt-level =1 +split-debuginfo="unpacked" +incremental =true \ No newline at end of file diff --git a/frontend/Cargo.toml b/frontend/Cargo.toml index b444777..7bc497b 100644 --- a/frontend/Cargo.toml +++ b/frontend/Cargo.toml @@ -36,6 +36,9 @@ wasm-bindgen-futures="0.4.37" tracing-test={workspace=true} tempdir ="0.3.7" tokio ={ version="1.43", features=["full"] } +client-side-prover-frontend = { path = ".", features = ["demo"] } [features] verify-steps=[] +demo = [] + diff --git a/frontend/src/lib.rs b/frontend/src/lib.rs index 4be4416..183c4da 100644 --- 
a/frontend/src/lib.rs +++ b/frontend/src/lib.rs @@ -79,3 +79,26 @@ type EE2 = client_side_prover::provider::ipa_pc::EvaluationEngine; type S1 = BatchedRelaxedR1CSSNARK; /// Represents the SNARK for the second elliptic curve. type S2 = BatchedRelaxedR1CSSNARK; + +#[cfg(any(test, feature = "demo"))] +pub mod demo { + use crate::noir::NoirProgram; + + pub fn add_external() -> NoirProgram { + let bytecode = + std::fs::read("../target/add_external.json").expect("Failed to read Noir program file"); + NoirProgram::new(&bytecode) + } + + pub fn square_zeroth() -> NoirProgram { + let bytecode = + std::fs::read("../target/square_zeroth.json").expect("Failed to read Noir program file"); + NoirProgram::new(&bytecode) + } + + pub fn swap_memory() -> NoirProgram { + let bytecode = + std::fs::read("../target/swap_memory.json").expect("Failed to read Noir program file"); + NoirProgram::new(&bytecode) + } +} diff --git a/frontend/src/program/mod.rs b/frontend/src/program.rs similarity index 88% rename from frontend/src/program/mod.rs rename to frontend/src/program.rs index 1405aac..7d2efc2 100644 --- a/frontend/src/program/mod.rs +++ b/frontend/src/program.rs @@ -8,9 +8,7 @@ use proof::FoldingProof; use tracing::trace; use super::*; -use crate::noir::NoirProgram; - -pub mod data; +use crate::{noir::NoirProgram, setup::Setup}; // TODO: Consider moving contents of mod.rs files to a separate files. mod.rs // files should only be used to adjust the visibility of exported items. @@ -57,35 +55,9 @@ impl NonUniformCircuit for Switchboard { fn initial_circuit_index(&self) -> usize { self.initial_circuit_index } } -// TODO: This is like a one-time use setup that overlaps some with -// `ProgramData::into_online()`. Worth checking out how to make this simpler, -// clearer, more efficient. 
-// Setup function -// pub fn setup(setup_data: &UninitializedSetup) -> PublicParams { -// // Optionally time the setup stage for the program -// let time = std::time::Instant::now(); - -// // TODO: I don't think we want to have to call `initialize_circuit_list` more -// // than once on setup ever and it seems like it may get used more -// // frequently. -// let initilized_setup = initialize_setup_data(setup_data).unwrap(); -// let circuits = initialize_circuit_list(&initilized_setup); // TODO, change the type signature -// of trait to use arbitrary error types. let memory = Switchboard { circuits }; -// let public_params = PublicParams::setup(&memory, &*default_ck_hint(), &*default_ck_hint()); - -// trace!("`PublicParams::setup()` elapsed: {:?}", time.elapsed()); - -// public_params -// } - -pub fn run(switchboard: &Switchboard) -> Result, ProofError> { +pub fn run(setup: Setup, switchboard: &Switchboard) -> Result, ProofError> { info!("Starting SuperNova program..."); - - info!("Setting up PublicParams..."); - // Create a witness-free clone for setup - let mut memory_clone = switchboard.clone(); - memory_clone.circuits.iter_mut().for_each(|circ| circ.witness = None); - let public_params = PublicParams::setup(&memory_clone, &*default_ck_hint(), &*default_ck_hint()); + let public_params = setup.into_public_params(&switchboard.circuits); let z0_primary = &switchboard.public_input; let z0_secondary = &[grumpkin::Fr::ZERO]; diff --git a/frontend/src/program/data.rs b/frontend/src/program/data.rs deleted file mode 100644 index 5a96c35..0000000 --- a/frontend/src/program/data.rs +++ /dev/null @@ -1,575 +0,0 @@ -// //! # Data Module -// //! -// //! The `data` module contains data structures and types used in the proof system. -// //! -// //! ## Structs -// //! -// //! - `FoldInput`: Represents the fold input for any circuit containing signal names and values. -// //! - `R1CSType`: Represents the R1CS file type, which can be either a file path or raw bytes. 
- -// use std::{ -// fs::{self, File}, -// io::Write, -// sync::Arc, -// }; - -// use client_side_prover::{fast_serde::FastSerde, supernova::get_circuit_shapes}; -// use serde_json::json; - -// use super::*; -// use crate::setup::ProvingParams; - -// /// Fold input for any circuit containing signals name and vector of values. Inputs are -// distributed /// evenly across folds after the ROM is finalised by the prover. -// #[derive(Clone, Debug, Serialize, Deserialize)] -// pub struct FoldInput { -// /// circuit name and consolidated values -// #[serde(flatten)] -// pub value: HashMap>, -// } - -// impl FoldInput { -// /// splits the inputs evenly across folds as per instruction frequency -// pub fn split(&self, freq: usize) -> Vec> { -// let mut res = vec![HashMap::new(); freq]; - -// for (key, value) in self.value.clone().into_iter() { -// debug!("key: {:?}, freq: {}, value_len: {}", key, freq, value.len()); -// assert_eq!(value.len() % freq, 0); -// let chunk_size = value.len() / freq; -// let chunks: Vec> = value.chunks(chunk_size).map(|chunk| -// chunk.to_vec()).collect(); for i in 0..freq { -// res[i].insert(key.clone(), json!(chunks[i].clone())); -// } -// } - -// res -// } -// } - -// // Note, the below are typestates that prevent misuse of our current API. 
-// /// Setup status trait -// pub trait SetupStatus { -// /// Public parameters type -// type PublicParams; -// /// Setup data type -// type SetupData; -// } - -// /// Online setup status -// #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] -// pub struct Online; -// impl SetupStatus for Online { -// type PublicParams = Arc>; -// type SetupData = Arc; -// } - -// /// Offline setup status -// #[derive(Serialize, Deserialize, Debug, PartialEq)] -// pub struct Offline; -// impl SetupStatus for Offline { -// type PublicParams = Vec; -// type SetupData = UninitializedSetup; -// } - -// /// Witness status trait -// pub trait WitnessStatus { -// /// Private input for a circuit containing signals name and vector of values -// /// - For [`Expanded`] status, it is a vector of private inputs for each fold of a circuit -// /// - For [`NotExpanded`] status, it is a tuple of private input and fold input of a circuit -// type PrivateInputs; -// } - -// /// Expanded witness status -// pub struct Expanded; -// impl WitnessStatus for Expanded { -// /// expanded input for each fold of each circuit in the ROM -// type PrivateInputs = Vec>; -// } - -// /// Not expanded witness status -// pub struct NotExpanded; -// impl WitnessStatus for NotExpanded { -// /// Private input and fold input for each circuit in the ROM -// type PrivateInputs = (Vec>, HashMap); -// } - -// /// Auxiliary circuit data required to execute the ROM -// #[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] -// pub struct CircuitData { -// /// circuit instruction opcode in [`S::SetupData`] -// pub opcode: u64, -// } - -// /// ROM data type -// pub type RomData = HashMap; -// /// ROM type -// pub type Rom = Vec; -// /// NIVC input type -// pub type NivcInput = Vec>; - -// /// Represents configuration and circuit data required for initializing the proving system. -// #[derive(Serialize, Deserialize, Clone, Debug)] -// pub struct SetupParams { -// /// Public parameters of the proving system. 
Maps to the client-side prover parameters. -// pub public_params: S::PublicParams, -// // TODO: Refactor this onto the PublicParams object and share the ProvingParams abstraction -// /// Setup-specific verification key digest for the primary elliptic curve. -// pub vk_digest_primary: ::Scalar, -// /// Setup-specific verification key digest for the secondary elliptic curve. -// pub vk_digest_secondary: as Engine>::Scalar, -// /// Describes R1CS configurations used in proving setup. -// pub setup_data: S::SetupData, -// /// A mapping between ROM opcodes and circuit configuration. -// pub rom_data: RomData, -// } - -// impl PartialEq for SetupParams -// where S::SetupData: PartialEq -// { -// fn eq(&self, other: &Self) -> bool { -// // TODO: Supernova types are not supporting PartialEq -// // self.public_params == other.public_params && -// self.vk_digest_primary == other.vk_digest_primary -// && self.vk_digest_secondary == other.vk_digest_secondary -// && self.setup_data == other.setup_data -// && self.rom_data == other.rom_data -// } -// } - -// /// Contains inputs and state specific to a single proof generation instance. -// #[derive(Debug)] -// pub struct InstanceParams { -// /// Initial public input for NIVC -// pub nivc_input: NivcInput, -// /// Private inputs for each fold -// pub private_inputs: W::PrivateInputs, -// } - -// impl InstanceParams { -// /// Converts proving instance parameters into an expanded form by distributing fold inputs -// across /// their corresponding circuit instances in the ROM. -// /// -// /// This method performs the following steps: -// /// 1. Creates a map of circuit names to their positions in the ROM -// /// 2. Collects private inputs from each ROM opcode configuration -// /// 3. Distributes fold inputs across matching circuit instances based on their labels -// /// 4. 
Combines the distributed inputs with existing private inputs for each ROM position -// /// -// /// # Arguments -// /// -// /// * `self` - The program data instance to expand -// /// -// /// # Returns -// /// -// /// Returns a `Result` containing either: -// /// * `Ok(InstanceParams)` - The expanded instance with distributed inputs -// /// * `Err(ProofError)` - If the expansion process fails -// /// -// /// # Errors -// /// -// /// This function will return an error if: -// /// * A circuit label in the inputs is not found in the ROM -// /// * Input distribution fails -// /// -// /// # Details -// /// -// /// The expansion process handles fold inputs, which are inputs that need to be distributed -// across /// multiple instances of the same circuit in the ROM. For each circuit label in the -// inputs: /// 1. Finds all positions of that circuit in the ROM -// /// 2. Splits the fold inputs into equal parts -// /// 3. Assigns each part to the corresponding circuit instance -// /// -// /// The resulting expanded form contains individual private inputs for each ROM position, with -// /// fold inputs properly distributed according to circuit usage. 
-// pub fn into_expanded( -// self, -// proof_params: &ProofParams, -// ) -> Result, ProofError> { -// assert_eq!(self.private_inputs.0.len(), proof_params.rom.len()); - -// let mut instruction_usage: HashMap> = HashMap::new(); -// for (index, circuit) in proof_params.rom.iter().enumerate() { -// if let Some(usage) = instruction_usage.get_mut(circuit.as_str()) { -// usage.push(index); -// } else { -// instruction_usage.insert(circuit.clone(), vec![index]); -// } -// } -// let mut private_inputs: Vec> = self.private_inputs.0; - -// // add fold input sliced to chunks and add to private input -// for (circuit_label, fold_inputs) in self.private_inputs.1.iter() { -// let inputs = match instruction_usage.get(circuit_label) { -// Some(inputs) => inputs, -// None => -// Err(ProofError::Other(format!("Circuit label '{}' not found in rom", circuit_label)))?, -// }; -// let split_inputs = fold_inputs.split(inputs.len()); -// for (idx, input) in inputs.iter().zip(split_inputs) { -// private_inputs[*idx].extend(input); -// } -// } - -// assert_eq!(private_inputs.len(), proof_params.rom.len()); - -// let Self { nivc_input: initial_nivc_input, .. } = self; -// Ok(InstanceParams { nivc_input: initial_nivc_input, private_inputs }) -// } -// } - -// impl SetupParams { -// /// Converts an offline setup parameters instance back into an online version by decompressing -// and /// deserializing the public parameters and reconstructing the circuit shapes. -// /// -// /// This method performs the following steps: -// /// 1. Deserializes raw bytes into an AuxParams object -// /// 2. Initializes the circuit list from setup data -// /// 3. Generates circuit shapes from the initialized memory -// /// 4. Reconstructs full public parameters from circuit shapes and auxiliary parameters -// /// 5. 
Constructs a new online program data instance -// /// -// /// # Arguments -// /// -// /// * `self` - The offline program data instance to convert -// /// -// /// # Returns -// /// -// /// Returns a `Result` containing either: -// /// * `Ok(SetupParams)` - The converted online program data -// /// * `Err(ProofError)` - If any step in the conversion process fails -// /// -// /// # Errors -// /// -// /// This function will return an error if: -// /// * Circuit initialization fails -// /// * Circuit shape generation fails -// /// -// /// # Features -// /// -// /// When compiled with the "timing" feature, this function will output timing information for: -// /// * Reading and deserializing auxiliary parameters -// /// * Generating circuit shapes -// pub fn into_online(self) -> Result, ProofError> { -// debug!("init proving params, proving_param_bytes={:?}", self.public_params.len()); -// let proving_params = ProvingParams::from_bytes(&self.public_params).unwrap(); - -// info!("init setup"); -// let initialized_setup = initialize_setup_data(&self.setup_data).unwrap(); - -// let circuits = initialize_circuit_list(&initialized_setup); -// let memory = Switchboard { circuits }; - -// // TODO: This converts the r1cs memory into sparse matrices, which doubles -// // the memory usage. Can we re-used these sparse matrices in our constraint -// // system? -// info!("init circuit shapes"); -// let circuit_shapes = get_circuit_shapes(&memory); - -// info!("init public params from parts"); -// let public_params = -// PublicParams::::from_parts_unchecked(circuit_shapes, proving_params.aux_params); -// let Self { rom_data, .. 
} = self; - -// Ok(SetupParams { -// public_params: Arc::new(public_params), -// vk_digest_primary: proving_params.vk_digest_primary, -// vk_digest_secondary: proving_params.vk_digest_secondary, -// setup_data: Arc::new(initialized_setup), -// rom_data, -// }) -// } -// } - -// impl SetupParams { -// /// Converts an online setup parameters instance into an offline version by serializing -// /// the public parameters to disk. -// /// -// /// This method performs the following steps: -// /// 1. Extracts auxiliary parameters from the public parameters -// /// 2. Serializes the auxiliary parameters to bytes -// /// 3. Writes the compressed data to the specified path -// /// 4. Constructs a new offline program data instance -// /// -// /// # Arguments -// /// -// /// * `self` - The online program data instance to convert -// /// * `path` - The file path where compressed public parameters will be saved -// /// -// /// # Returns -// /// -// /// Returns a `Result` containing either: -// /// * `Ok(SetupParams)` - The converted offline program data -// /// * `Err(ProofError)` - If any step in the conversion process fails -// /// -// /// # Errors -// /// -// /// This function will return an error if: -// /// * Bytes serialization fails -// /// * File system operations fail (creating directories or writing file) -// pub fn into_offline(self, path: PathBuf) -> Result, ProofError> { -// let exclusive = Arc::try_unwrap(self.public_params).unwrap(); -// let (_, aux_params) = exclusive.into_parts(); -// let vk_digest_primary = self.vk_digest_primary; -// let vk_digest_secondary = self.vk_digest_secondary; -// let proving_param_bytes = -// ProvingParams { aux_params, vk_digest_primary, vk_digest_secondary }.to_bytes(); - -// if let Some(parent) = path.parent() { -// fs::create_dir_all(parent)?; -// } - -// let bytes_path = path.with_extension("bytes"); -// debug!("bytes_path={:?}", bytes_path); -// File::create(&bytes_path)?.write_all(&proving_param_bytes)?; - -// let Self { 
rom_data, .. } = self; -// Ok(SetupParams { -// public_params: proving_param_bytes, -// vk_digest_primary, -// vk_digest_secondary, -// // TODO: This approach is odd, refactor with #375 -// setup_data: Default::default(), -// rom_data, -// }) -// } - -// /// Extends and prepares the public inputs for the zero-knowledge proof circuits. -// /// -// /// This function performs two main operations: -// /// 1. Expands the ROM (Read-Only Memory) to the maximum length specified in `setup_data` -// /// 2. Constructs the primary public input vector `z0_primary` by combining: -// /// - The initial NIVC (Non-Interactive Verifiable Computation) input -// /// - An initial ROM index of zero -// /// - The expanded ROM opcodes converted to field elements -// /// -// /// # Arguments -// /// -// /// * `rom` - A reference to the ROM (sequence of circuit operations) containing circuit -// /// configurations. -// /// * `initial_nivc_input` - The initial public input required for NIVC. -// /// -// /// # Returns -// /// -// /// Returns a tuple containing: -// /// - `Vec>`: The extended primary public input vector (z0_primary) -// /// - `Vec`: The expanded ROM containing opcodes -// /// -// /// # Errors -// /// -// /// Returns a `ProofError` if: -// /// - Any opcode configuration specified in the ROM is not found in `rom_data` -// pub fn extend_public_inputs( -// &self, -// rom: &Rom, -// initial_nivc_input: &NivcInput, -// ) -> Result<(Vec>, Vec), ProofError> { -// // TODO: This is currently enabled for _either_ Expanded or NotExpanded -// let mut rom = rom -// .iter() -// .map(|opcode_config| { -// self -// .rom_data -// .get(opcode_config) -// .ok_or_else(|| { -// ProofError::Other(format!("Opcode config '{}' not found in rom_data", opcode_config)) -// }) -// .map(|config| config.opcode) -// }) -// .collect::, ProofError>>()?; - -// rom.resize(self.setup_data.max_rom_length, u64::MAX); - -// let mut z0_primary: Vec> = initial_nivc_input.clone(); -// z0_primary.push(F::::ZERO); // 
rom_index = 0 -// z0_primary.extend(rom.iter().map(|opcode| ::Scalar::from(*opcode))); -// debug!("z0_primary={:?}", z0_primary); -// Ok((z0_primary, rom.clone())) -// } -// } - -// impl SetupParams { -// /// Generates NIVC proof from [`InstanceParams`] -// /// - run NIVC recursive proving -// /// - run CompressedSNARK to compress proof -// /// - serialize proof -// pub async fn generate_proof( -// &self, -// proof_params: &ProofParams, -// instance_params: &InstanceParams, -// ) -> Result, String>, ProofError> { -// debug!("starting recursive proving"); -// let program_output = program::run(self, proof_params, instance_params).await?; - -// debug!("starting proof compression"); -// let compressed_snark_proof = program::compress_proof_no_setup( -// &program_output, -// &self.public_params, -// self.vk_digest_primary, -// self.vk_digest_secondary, -// )?; -// compressed_snark_proof.serialize() -// } -// } - -// #[cfg(test)] -// mod tests { -// use super::*; - -// const JSON: &str = r#" -// { -// "input": [ -// [ -// {},{},{} -// ], -// { -// "CIRCUIT_1": { -// "external": [5,7], -// "plaintext": [1,2,3,4] -// }, -// "CIRCUIT_2": { -// "ciphertext": [1, 2, 3, 4], -// "external": [2, 4] -// }, -// "CIRCUIT_3": { -// "key": [2, 3], -// "value": [4, 5] -// } -// } -// ] -// }"#; - -// #[derive(Debug, Deserialize)] -// struct MockInputs { -// input: (Vec>, HashMap), -// } - -// // Helper function to create test program data -// fn create_test_program_data() -> (SetupParams, ProofParams, InstanceParams) { -// // Load add.r1cs from examples -// let add_r1cs = crate::tests::inputs::ADD_EXTERNAL_R1CS.to_vec(); -// let r1cs = R1CSType::Raw(add_r1cs.to_vec()); -// // Create ROM data with proper circuit data -// let mut rom_data = HashMap::new(); -// rom_data.insert("add".to_string(), CircuitData { opcode: 1u64 }); -// rom_data.insert("mul".to_string(), CircuitData { opcode: 2u64 }); - -// // Rest of the function remains same -// let rom: Vec = vec!["add".to_string(), 
"mul".to_string()]; - -// let setup_data = UninitializedSetup { -// max_rom_length: 4, -// r1cs_types: vec![r1cs], -// witness_generator_types: vec![WitnessGeneratorType::Raw(vec![])], -// }; -// let initialized_setup = initialize_setup_data(&setup_data).unwrap(); - -// let public_params = program::setup(&setup_data); -// let (prover_key, _vk) = CompressedSNARK::::setup(&public_params).unwrap(); - -// let setup_params = SetupParams { -// public_params: Arc::new(public_params), -// setup_data: Arc::new(initialized_setup), -// vk_digest_primary: prover_key.pk_primary.vk_digest, -// vk_digest_secondary: prover_key.pk_secondary.vk_digest, -// rom_data, -// }; -// let proof_params = ProofParams { rom }; -// let instance_params = InstanceParams { -// nivc_input: vec![F::::ONE], -// private_inputs: vec![HashMap::new(), HashMap::new()], -// }; - -// (setup_params, proof_params, instance_params) -// } - -// #[test] -// fn test_extend_public_inputs() { -// // Setup test data -// let (setup_params, proof_params, instance_params) = create_test_program_data(); - -// // Test successful case -// let result = setup_params.extend_public_inputs(&proof_params.rom, -// &instance_params.nivc_input); assert!(result.is_ok()); - -// let (z0_primary, expanded_rom) = result.unwrap(); - -// // Verify z0_primary structure -// assert_eq!( -// z0_primary.len(), -// instance_params.nivc_input.len() + 1 + setup_params.setup_data.max_rom_length -// ); -// assert_eq!(z0_primary[instance_params.nivc_input.len()], F::::ZERO); // Check ROM index -// is 0 - -// // Verify ROM expansion -// assert_eq!(expanded_rom.len(), setup_params.setup_data.max_rom_length); -// assert_eq!(expanded_rom[0], 1u64); // First opcode -// assert_eq!(expanded_rom[1], 2u64); // Second opcode -// assert_eq!(expanded_rom[2], u64::MAX); // Padding -// } - -// #[test] -// fn test_extend_public_inputs_missing_opcode() { -// let (setup_params, mut proof_params, instance_params) = create_test_program_data(); - -// // Add an opcode 
config that doesn't exist in rom_data -// proof_params.rom.push("nonexistent".to_string()); - -// let result = setup_params.extend_public_inputs(&proof_params.rom, -// &instance_params.nivc_input); assert!(result.is_err()); -// assert!(matches!( -// result.unwrap_err(), -// ProofError::Other(e) if e.contains("not found in rom_data") -// )); -// } - -// #[test] -// #[tracing_test::traced_test] -// fn test_deserialize_inputs() { -// let mock_inputs: MockInputs = serde_json::from_str(JSON).unwrap(); -// dbg!(&mock_inputs.input); -// assert!(mock_inputs.input.1.contains_key("CIRCUIT_1")); -// assert!(mock_inputs.input.1.contains_key("CIRCUIT_2")); -// assert!(mock_inputs.input.1.contains_key("CIRCUIT_3")); -// } - -// #[test] -// #[tracing_test::traced_test] -// fn test_expand_private_inputs() { -// let mock_inputs: MockInputs = serde_json::from_str(JSON).unwrap(); -// let proof_params = ProofParams { -// rom: vec![String::from("CIRCUIT_1"), String::from("CIRCUIT_2"), String::from("CIRCUIT_3")], -// }; -// let instance_params = -// InstanceParams:: { nivc_input: vec![], private_inputs: mock_inputs.input } -// .into_expanded(&proof_params) -// .unwrap(); -// dbg!(&instance_params.private_inputs); -// assert!(!instance_params.private_inputs[0].is_empty()); -// assert!(!instance_params.private_inputs[1].is_empty()); -// assert!(!instance_params.private_inputs[2].is_empty()); -// } - -// #[test] -// fn test_online_to_offline_serialization_round_trip() { -// let temp_dir = tempdir::TempDir::new("setup").unwrap(); -// let offline_path = temp_dir.path().join("offline"); - -// let (setup_params_online, ..) 
= create_test_program_data(); -// let setup_params_offline = setup_params_online.into_offline(offline_path).unwrap(); - -// // Matches itself -// assert_eq!(setup_params_offline, setup_params_offline); - -// // Verify round-trip serialization for `Offline` -// let serialized_offline = serde_json::to_string(&setup_params_offline).unwrap(); -// let deserialized_offline: SetupParams = -// serde_json::from_str(&serialized_offline).unwrap(); -// assert_eq!(setup_params_offline, deserialized_offline); - -// // Can be "onlined" -// let result = deserialized_offline.into_online(); -// assert!(result.is_ok()); -// } -// } diff --git a/frontend/src/setup.rs b/frontend/src/setup.rs index 9dee833..967b356 100644 --- a/frontend/src/setup.rs +++ b/frontend/src/setup.rs @@ -1,39 +1,16 @@ -//! # Setup Module -//! -//! The `setup` module contains utilities and structures for setting up the proof system. -//! -//! ## Structs -//! -//! - `ProvingParams`: Represents the parameters needed for proving, including auxiliary parameters -//! and verification key digests. -//! -//! ## Functions -//! -//! - `from_bytes`: Initializes `ProvingParams` from an efficiently serializable data format. -//! - `to_bytes`: Converts `ProvingParams` to an efficient serialization. -//! -//! ## Types -//! -//! - `AuxParams`: Represents the auxiliary parameters needed to create `PublicParams`. -//! - `ProverKey`: Represents the prover key needed to create a `CompressedSNARK`. -//! - `UninitializedSetup`: Represents the uninitialized setup data for circuits, including R1CS and -//! witness generator types. -//! - `WitnessGeneratorType`: Represents the type of witness generator, including raw bytes and -//! paths to Wasm binaries. 
- use std::io::Cursor; use client_side_prover::{ fast_serde::{self, FastSerde, SerdeByteError, SerdeByteTypes}, - supernova::snark::CompressedSNARK, - traits::{Dual, Engine}, + supernova::{get_circuit_shapes, snark::CompressedSNARK, PublicParams}, + traits::{snark::default_ck_hint, Dual, Engine}, }; -use crate::{error::ProofError, program, AuxParams, ProverKey, E1, S1, S2}; +use crate::{error::ProofError, noir::NoirProgram, program, AuxParams, ProverKey, E1, S1, S2}; -/// Proving parameters -#[derive(Debug)] -pub struct ProvingParams { +// TODO: This could probably just store the programs with it +#[derive(Clone, Debug)] +pub struct Setup { /// Auxiliary parameters pub aux_params: AuxParams, /// Primary verification key digest @@ -42,7 +19,29 @@ pub struct ProvingParams { pub vk_digest_secondary: as Engine>::Scalar, } -impl FastSerde for ProvingParams { +impl Setup { + pub fn new(programs: &[NoirProgram]) -> Self { + let switchboard = program::Switchboard::new(programs.to_vec(), vec![], vec![], 0); + let public_params = PublicParams::setup(&switchboard, &*default_ck_hint(), &*default_ck_hint()); + let (pk, _vk) = CompressedSNARK::::setup(&public_params).unwrap(); + let (_, aux_params) = public_params.into_parts(); + + Setup { + aux_params, + vk_digest_primary: pk.pk_primary.vk_digest, + vk_digest_secondary: pk.pk_secondary.vk_digest, + } + } + + pub fn into_public_params(self, programs: &[NoirProgram]) -> PublicParams { + let switchboard = program::Switchboard::new(programs.to_vec(), vec![], vec![], 0); + // TODO: This can print out the constraints and variables for each circuit + PublicParams::from_parts(get_circuit_shapes(&switchboard), self.aux_params) + } +} + +// TODO: We may be able to just use rkyv +impl FastSerde for Setup { /// Initialize ProvingParams from an efficiently serializable data format. 
fn from_bytes(bytes: &[u8]) -> Result { let mut cursor = Cursor::new(bytes); @@ -63,7 +62,7 @@ impl FastSerde for ProvingParams { .into_option() .ok_or(SerdeByteError::G1DecodeError)?; - Ok(ProvingParams { aux_params, vk_digest_primary, vk_digest_secondary }) + Ok(Setup { aux_params, vk_digest_primary, vk_digest_secondary }) } /// Convert ProvingParams to an efficient serialization. @@ -81,47 +80,14 @@ impl FastSerde for ProvingParams { } } -impl ProvingParams { - /// Method used externally to initialize all the backend data needed to create a verifiable proof - /// with [`client_side_prover`] and `proofs` crate. Intended to be used in combination with setup, - /// which creates these values offline to be loaded at or before proof creation or verification. - /// - /// # Arguments - /// - `aux_params`: the data that defines what types of supernova programs can be run, i.e., - /// specified by a list of circuit R1CS and max ROM length. - /// - `prover_key`: The key used for generating proofs, allows us to pin a specific verifier. - pub fn new(aux_params: AuxParams, prover_key: ProverKey) -> Result { - Ok(ProvingParams { - aux_params, - vk_digest_primary: prover_key.pk_primary.vk_digest, - vk_digest_secondary: prover_key.pk_secondary.vk_digest, - }) +#[cfg(test)] +mod tests { + use super::*; + use crate::demo::square_zeroth; + + #[test] + fn test_setup() { + let setup = Setup::new(&[square_zeroth()]); + todo!("Actually do something here. "); } } - -// /// Create a setup for a given list of R1CS files including the necessary -// /// setup for compressed proving. 
-// /// -// /// # Arguments -// /// - `r1cs_files`: A list of r1cs files that are accessible by the program using the setup -// /// -// /// # Returns -// /// * `Result, ProofError>` - Bytes ready to be written to disk -// pub fn setup(r1cs_files: &[R1CSType], rom_length: usize) -> Vec { -// let setup_data = UninitializedSetup { -// r1cs_types: r1cs_files.to_vec(), -// witness_generator_types: vec![WitnessGeneratorType::Browser; r1cs_files.len()], -// max_rom_length: rom_length, -// }; - -// let public_params = program::setup(&setup_data); -// let (pk, _vk) = CompressedSNARK::::setup(&public_params).unwrap(); -// let (_, aux_params) = public_params.into_parts(); - -// ProvingParams { -// aux_params, -// vk_digest_primary: pk.pk_primary.vk_digest, -// vk_digest_secondary: pk.pk_secondary.vk_digest, -// } -// .to_bytes() -// } diff --git a/frontend/tests/ivc/mod.rs b/frontend/tests/ivc/mod.rs index 4bbd7d9..5007a6d 100644 --- a/frontend/tests/ivc/mod.rs +++ b/frontend/tests/ivc/mod.rs @@ -1,5 +1,12 @@ use acvm::acir::acir_field::GenericFieldElement; -use client_side_prover_frontend::program::{run, Switchboard}; +use client_side_prover::provider::GrumpkinEngine; +use client_side_prover_frontend::{ + program::{run, Switchboard}, + setup::Setup, + Scalar, +}; +use ff::Field; +use halo2curves::grumpkin; use noirc_abi::{input_parser::InputValue, InputMap}; use super::*; @@ -7,20 +14,18 @@ use super::*; #[test] #[traced_test] fn test_ivc() { + let programs = vec![square_zeroth()]; + let setup = Setup::new(&programs); let switchboard_inputs = vec![ InputMap::from([("next_pc".to_string(), InputValue::Field(GenericFieldElement::from(0_u64)))]), InputMap::from([("next_pc".to_string(), InputValue::Field(GenericFieldElement::from(0_u64)))]), InputMap::from([("next_pc".to_string(), InputValue::Field(GenericFieldElement::from(0_u64)))]), ]; - let memory = Switchboard::new( - vec![square_zeroth()], - switchboard_inputs, - vec![Scalar::from(2), Scalar::from(1)], - 0, - ); + let 
memory = + Switchboard::new(programs, switchboard_inputs, vec![Scalar::from(2), Scalar::from(1)], 0); - let snark = run(&memory).unwrap(); + let snark = run(setup, &memory).unwrap(); dbg!(&snark.zi_primary()); assert_eq!(snark.zi_primary()[0], Scalar::from(256)); assert_eq!(snark.zi_primary()[1], Scalar::from(1)); @@ -29,6 +34,8 @@ fn test_ivc() { #[test] #[traced_test] fn test_ivc_private_inputs() { + let programs = vec![add_external()]; + let setup = Setup::new(&programs); let switchboard_inputs = vec![ InputMap::from([ ("next_pc".to_string(), InputValue::Field(GenericFieldElement::from(0_u64))), @@ -52,14 +59,10 @@ fn test_ivc_private_inputs() { ]), ]; - let memory = Switchboard::new( - vec![add_external()], - switchboard_inputs, - vec![Scalar::from(1), Scalar::from(2)], - 0, - ); + let memory = + Switchboard::new(programs, switchboard_inputs, vec![Scalar::from(1), Scalar::from(2)], 0); - let snark = run(&memory).unwrap(); + let snark = run(setup, &memory).unwrap(); let zi = snark.zi_primary(); dbg!(zi); assert_eq!(zi[0], Scalar::from(424)); @@ -69,6 +72,8 @@ fn test_ivc_private_inputs() { #[test] #[traced_test] fn test_mock_noir_nivc() { + let programs = vec![add_external(), square_zeroth(), swap_memory()]; + let setup = Setup::new(&programs); let switchboard_inputs = vec![ InputMap::from([ ("next_pc".to_string(), InputValue::Field(GenericFieldElement::from(1_u64))), @@ -87,14 +92,10 @@ fn test_mock_noir_nivc() { )]), ]; - let memory = Switchboard::new( - vec![add_external(), square_zeroth(), swap_memory()], - switchboard_inputs, - vec![Scalar::from(1), Scalar::from(2)], - 0, - ); + let memory = + Switchboard::new(programs, switchboard_inputs, vec![Scalar::from(1), Scalar::from(2)], 0); - let snark = run(&memory).unwrap(); + let snark = run(setup, &memory).unwrap(); let zi = snark.zi_primary(); dbg!(zi); // First fold: @@ -109,3 +110,26 @@ fn test_mock_noir_nivc() { assert_eq!(zi[0], Scalar::from(9)); assert_eq!(zi[1], Scalar::from(36)); } + +#[test] 
+#[traced_test] +fn test_ivc_verify() { + let programs = vec![square_zeroth()]; + let setup = Setup::new(&programs); + let pp = setup.clone().into_public_params(&programs); + let switchboard_inputs = vec![InputMap::from([( + "next_pc".to_string(), + InputValue::Field(GenericFieldElement::from(0_u64)), + )])]; + + let memory = + Switchboard::new(programs, switchboard_inputs, vec![Scalar::from(2), Scalar::from(1)], 0); + + let snark = run(setup, &memory).unwrap(); + let (z1_primary, z1_secondary) = + snark.verify(&pp, &snark.z0_primary(), &snark.z0_secondary()).unwrap(); + assert_eq!(&z1_primary, snark.zi_primary()); + assert_eq!(&z1_secondary, snark.zi_secondary()); + assert_eq!(z1_primary, vec![Scalar::from(4), Scalar::from(1)]); + assert_eq!(z1_secondary, vec![grumpkin::Fr::ZERO]); +} diff --git a/frontend/tests/lib.rs b/frontend/tests/lib.rs index d5e0404..c4675f8 100644 --- a/frontend/tests/lib.rs +++ b/frontend/tests/lib.rs @@ -1,22 +1,5 @@ -use client_side_prover_frontend::{noir::NoirProgram, Scalar}; +use client_side_prover_frontend::demo::*; use tracing_test::traced_test; mod ivc; - -pub fn add_external() -> NoirProgram { - let bytecode = - std::fs::read("../target/add_external.json").expect("Failed to read Noir program file"); - NoirProgram::new(&bytecode) -} - -pub fn square_zeroth() -> NoirProgram { - let bytecode = - std::fs::read("../target/square_zeroth.json").expect("Failed to read Noir program file"); - NoirProgram::new(&bytecode) -} - -pub fn swap_memory() -> NoirProgram { - let bytecode = - std::fs::read("../target/swap_memory.json").expect("Failed to read Noir program file"); - NoirProgram::new(&bytecode) -} +mod setup; diff --git a/frontend/tests/setup/mod.rs b/frontend/tests/setup/mod.rs new file mode 100644 index 0000000..7cec84d --- /dev/null +++ b/frontend/tests/setup/mod.rs @@ -0,0 +1,3 @@ +use client_side_prover_frontend::setup::Setup; + +use super::*; diff --git a/prover/src/supernova/mod.rs b/prover/src/supernova/mod.rs index 
ad8a65d..7944679 100644 --- a/prover/src/supernova/mod.rs +++ b/prover/src/supernova/mod.rs @@ -760,6 +760,12 @@ where E1: CurveCycleEquipped /// Outputs of the primary circuits pub fn zi_primary(&self) -> &Vec { &self.zi_primary } + /// Inputs of the secondary circuits + pub fn z0_secondary(&self) -> &Vec< as Engine>::Scalar> { &self.z0_secondary } + + /// Outputs of the secondary circuits + pub fn zi_secondary(&self) -> &Vec< as Engine>::Scalar> { &self.zi_secondary } + /// Current program counter pub fn program_counter(&self) -> E1::Scalar { self.program_counter } From 35447ba77902de3fdac9fc18d5670ce41eef2d0d Mon Sep 17 00:00:00 2001 From: Colin Roberts Date: Sun, 2 Mar 2025 09:41:14 -0700 Subject: [PATCH 26/51] feat: store setup --- Cargo.lock | 58 +++++++++++++++++++++++++++++++++---------- frontend/Cargo.toml | 1 + frontend/src/setup.rs | 44 ++++++++++++++++++++++++++++++-- 3 files changed, 88 insertions(+), 15 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 48b383c..95a13b1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -592,7 +592,7 @@ dependencies = [ "ff 0.13.0", "flate2", "generic-array 1.1.0", - "getrandom", + "getrandom 0.2.15", "group 0.13.0", "grumpkin-msm", "halo2curves", @@ -648,6 +648,7 @@ dependencies = [ "serde-wasm-bindgen", "serde_json", "tempdir", + "tempfile", "thiserror", "tokio", "tracing", @@ -880,12 +881,12 @@ checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" [[package]] name = "errno" -version = "0.3.9" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba" +checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d" dependencies = [ "libc", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -1008,10 +1009,22 @@ dependencies = [ "cfg-if", "js-sys", "libc", - "wasi", + "wasi 0.11.0+wasi-snapshot-preview1", "wasm-bindgen", ] +[[package]] +name = "getrandom" +version = 
"0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43a49c392881ce6d5c3b8cb70f98717b7c07aabbdff06687b9030dbfbe2725f8" +dependencies = [ + "cfg-if", + "libc", + "wasi 0.13.3+wasi-0.2.2", + "windows-targets", +] + [[package]] name = "gimli" version = "0.31.1" @@ -1055,7 +1068,7 @@ source = "git+https://github.com/argumentcomputer/grumpkin-msm?branch=dev#414da3 dependencies = [ "blst", "cc", - "getrandom", + "getrandom 0.2.15", "halo2curves", "pasta_curves", "rand 0.8.5", @@ -1354,7 +1367,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" dependencies = [ "libc", - "wasi", + "wasi 0.11.0+wasi-snapshot-preview1", "windows-sys 0.52.0", ] @@ -1758,7 +1771,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom", + "getrandom 0.2.15", ] [[package]] @@ -1909,15 +1922,15 @@ checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" [[package]] name = "rustix" -version = "0.38.37" +version = "0.38.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8acb788b847c24f28525660c4d7758620a7210875711f79e7f663cc152726811" +checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" dependencies = [ "bitflags", "errno", "libc", "linux-raw-sys", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -2221,12 +2234,13 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.13.0" +version = "3.17.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0f2c9fc62d0beef6951ccffd757e241266a2c833136efbe35af6cd2567dca5b" +checksum = "22e5a0acb1f3f55f65cc4a866c361b2fb2a0ff6366785ae6fbb5f85df07ba230" dependencies = [ "cfg-if", "fastrand", + "getrandom 0.3.1", "once_cell", "rustix", "windows-sys 0.59.0", @@ -2521,6 
+2535,15 @@ version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +[[package]] +name = "wasi" +version = "0.13.3+wasi-0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26816d2e1a4a36a2940b96c5296ce403917633dff8f3440e9b236ed6f6bacad2" +dependencies = [ + "wit-bindgen-rt", +] + [[package]] name = "wasm-bindgen" version = "0.2.93" @@ -2732,6 +2755,15 @@ dependencies = [ "memchr", ] +[[package]] +name = "wit-bindgen-rt" +version = "0.33.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3268f3d866458b787f390cf61f4bbb563b922d091359f9608842999eaee3943c" +dependencies = [ + "bitflags", +] + [[package]] name = "wyz" version = "0.5.1" diff --git a/frontend/Cargo.toml b/frontend/Cargo.toml index 7bc497b..df49e2d 100644 --- a/frontend/Cargo.toml +++ b/frontend/Cargo.toml @@ -37,6 +37,7 @@ tracing-test={workspace=true} tempdir ="0.3.7" tokio ={ version="1.43", features=["full"] } client-side-prover-frontend = { path = ".", features = ["demo"] } +tempfile = "3.17" [features] verify-steps=[] diff --git a/frontend/src/setup.rs b/frontend/src/setup.rs index 967b356..22598f1 100644 --- a/frontend/src/setup.rs +++ b/frontend/src/setup.rs @@ -5,6 +5,7 @@ use client_side_prover::{ supernova::{get_circuit_shapes, snark::CompressedSNARK, PublicParams}, traits::{snark::default_ck_hint, Dual, Engine}, }; +use tracing::debug; use crate::{error::ProofError, noir::NoirProgram, program, AuxParams, ProverKey, E1, S1, S2}; @@ -19,6 +20,14 @@ pub struct Setup { pub vk_digest_secondary: as Engine>::Scalar, } +#[cfg(test)] +impl PartialEq for Setup { + fn eq(&self, other: &Self) -> bool { + self.vk_digest_primary == other.vk_digest_primary + && self.vk_digest_secondary == other.vk_digest_secondary + } +} + impl Setup { pub fn new(programs: &[NoirProgram]) -> Self { let switchboard = 
program::Switchboard::new(programs.to_vec(), vec![], vec![], 0); @@ -38,6 +47,18 @@ impl Setup { // TODO: This can print out the constraints and variables for each circuit PublicParams::from_parts(get_circuit_shapes(&switchboard), self.aux_params) } + + pub fn store_file(&self, path: &std::path::PathBuf) -> Result, ProofError> { + let bytes = self.to_bytes(); + if let Some(parent) = path.parent() { + std::fs::create_dir_all(parent)?; + } + + debug!("using path={:?}", path); + std::io::Write::write_all(&mut std::fs::File::create(path)?, &bytes)?; + + Ok(bytes) + } } // TODO: We may be able to just use rkyv @@ -86,8 +107,27 @@ mod tests { use crate::demo::square_zeroth; #[test] - fn test_setup() { + fn test_setup_and_params() { + let setup = Setup::new(&[square_zeroth()]); + let _ = setup.into_public_params(&[square_zeroth()]); + } + + #[test] + fn test_setup_serialize() { + let setup = Setup::new(&[square_zeroth()]); + let serialized = setup.to_bytes(); + let deserialized = Setup::from_bytes(&serialized).unwrap(); + assert_eq!(setup, deserialized); + } + + #[test] + fn test_setup_store_file() { let setup = Setup::new(&[square_zeroth()]); - todo!("Actually do something here. 
"); + let path = tempfile::tempdir().unwrap().into_path(); + let bytes = setup.store_file(&path.join("setup.bytes")).unwrap(); + assert!(!bytes.is_empty()); + let stored_bytes = std::fs::read(path.join("setup.bytes")).unwrap(); + let deserialized = Setup::from_bytes(&stored_bytes).unwrap(); + assert_eq!(setup, deserialized); } } From 8fd06ca014cdfa29860cae100b47d98a260ebde4 Mon Sep 17 00:00:00 2001 From: Colin Roberts Date: Sun, 2 Mar 2025 10:01:45 -0700 Subject: [PATCH 27/51] test: compress verification --- frontend/src/error.rs | 4 + frontend/src/program.rs | 200 ++++---------------- frontend/src/setup.rs | 2 +- frontend/tests/{setup => end_to_end}/mod.rs | 0 frontend/tests/ivc/mod.rs | 30 ++- frontend/tests/lib.rs | 2 +- 6 files changed, 69 insertions(+), 169 deletions(-) rename frontend/tests/{setup => end_to_end}/mod.rs (100%) diff --git a/frontend/src/error.rs b/frontend/src/error.rs index b4b8c6c..74751a2 100644 --- a/frontend/src/error.rs +++ b/frontend/src/error.rs @@ -54,6 +54,10 @@ pub enum ProofError { #[error(transparent)] Bincode(#[from] Box), + /// The error is a client_side_prover::errors::NovaError + #[error(transparent)] + Nova(#[from] client_side_prover::errors::NovaError), + /// The error is a client_side_prover::supernova::error::SuperNovaError #[error(transparent)] SuperNova(#[from] client_side_prover::supernova::error::SuperNovaError), diff --git a/frontend/src/program.rs b/frontend/src/program.rs index 7d2efc2..6e4e51a 100644 --- a/frontend/src/program.rs +++ b/frontend/src/program.rs @@ -1,6 +1,6 @@ use client_side_prover::{ supernova::{NonUniformCircuit, RecursiveSNARK}, - traits::snark::default_ck_hint, + traits::snark::{default_ck_hint, BatchedRelaxedR1CSSNARKTrait}, }; use halo2curves::grumpkin; use noirc_abi::InputMap; @@ -132,169 +132,39 @@ pub fn run(setup: Setup, switchboard: &Switchboard) -> Result Ok(recursive_snark.unwrap()) } -// /// Compresses a proof without performing the setup step. 
-// /// -// /// This function takes an existing `RecursiveSNARK` and compresses it into a -// /// `CompressedProof` using pre-initialized proving keys. This is useful when -// /// the setup step has already been performed and the proving keys are -// /// available, allowing for more efficient proof generation. -// /// -// /// # Arguments -// /// -// /// * `recursive_snark` - A reference to the `RecursiveSNARK` that needs to be compressed. -// /// * `public_params` - The public parameters required for the proof system. -// /// * `vk_digest_primary` - The primary verification key digest. -// /// * `vk_digest_secondary` - The secondary verification key digest. -// /// -// /// # Returns -// /// -// /// A `Result` containing the `CompressedProof` if successful, or a `ProofError` -// /// if an error occurs. -// /// -// /// # Errors -// /// -// /// This function will return a `ProofError` if the compression process fails at -// /// any step. -// pub fn compress_proof_no_setup( -// recursive_snark: &RecursiveSNARK, -// public_params: &PublicParams, -// vk_digest_primary: ::Scalar, -// vk_digest_secondary: as Engine>::Scalar, -// ) -> Result { -// let pk = CompressedSNARK::::initialize_pk( -// public_params, -// vk_digest_primary, -// vk_digest_secondary, -// ) -// .unwrap(); -// debug!( -// "initialized pk pk_primary.digest={:?}, pk_secondary.digest={:?}", -// pk.pk_primary.vk_digest, pk.pk_secondary.vk_digest -// ); - -// debug!("`CompressedSNARK::prove STARTING PROVING!"); -// let proof = FoldingProof { -// proof: CompressedSNARK::::prove(public_params, &pk, recursive_snark)?, -// verifier_digest: pk.pk_primary.vk_digest, -// }; -// debug!("`CompressedSNARK::prove completed!"); - -// Ok(proof) -// } - -// /// Compresses a proof by performing the setup step and generating a compressed -// /// proof. 
-// /// -// /// This function initializes the proving keys by performing the setup step, and -// /// then uses these keys to generate a compressed proof from an existing -// /// `RecursiveSNARK`. This is useful when the setup step has not been performed -// /// yet, and the proving keys need to be initialized before generating the -// /// proof. -// /// -// /// # Arguments -// /// -// /// * `recursive_snark` - A reference to the `RecursiveSNARK` that needs to be compressed. -// /// * `public_params` - The public parameters required for the proof system. -// /// -// /// # Returns -// /// -// /// A `Result` containing the `CompressedProof` if successful, or a `ProofError` -// /// if an error occurs. -// /// -// /// # Errors -// /// -// /// This function will return a `ProofError` if the setup or compression process -// /// fails at any step. -// pub fn compress_proof( -// recursive_snark: &RecursiveSNARK, -// public_params: &PublicParams, -// ) -> Result { -// debug!("Setting up `CompressedSNARK`"); -// let time = std::time::Instant::now(); -// let (pk, _vk) = CompressedSNARK::::setup(public_params)?; -// debug!("Done setting up `CompressedSNARK`"); -// trace!("`CompressedSNARK::setup` elapsed: {:?}", time.elapsed()); - -// let time = std::time::Instant::now(); - -// let proof = FoldingProof { -// proof: CompressedSNARK::::prove(public_params, &pk, recursive_snark)?, -// verifier_digest: pk.pk_primary.vk_digest, -// }; -// debug!("`CompressedSNARK::prove completed!"); - -// trace!("`CompressedSNARK::prove` elapsed: {:?}", time.elapsed()); - -// Ok(proof) -// } - -// /// Initializes the setup data for the program. -// /// -// /// This function takes an `UninitializedSetup` and converts it into an -// /// `InitializedSetup` by iterating over the R1CS types and witness generator -// /// types, creating `R1CS` instances and collecting them into vectors. 
It then -// /// returns an `InitializedSetup` containing the R1CS and witness generator -// /// types, along with the maximum ROM length. -// /// -// /// # Arguments -// /// -// /// * `setup_data` - The `UninitializedSetup` to initialize. -// /// -// /// # Returns -// /// -// /// A `Result` containing the `InitializedSetup` if successful, or a -// /// `ProofError` if an error occurs. -// pub fn initialize_setup_data( -// setup_data: &UninitializedSetup, -// ) -> Result { -// let (r1cs, witness_generator_types) = setup_data -// .r1cs_types -// .iter() -// .zip(setup_data.witness_generator_types.iter()) -// .map(|(r1cs_type, generator)| { -// let r1cs = R1CS::try_from(r1cs_type)?; -// Ok::<(Arc, data::WitnessGeneratorType), ProofError>(( -// Arc::new(r1cs), -// generator.clone(), -// )) -// }) -// .collect::, _>>()? -// .into_iter() -// .unzip(); +// TODO: We need to make this not take in the programs +pub fn compress( + setup: Setup, + recursive_snark: &RecursiveSNARK, + programs: &[NoirProgram], +) -> Result { + let pk = ProverKey { + pk_primary: S1::initialize_pk(setup.aux_params.ck_primary.clone(), setup.vk_digest_primary)?, + pk_secondary: S2::initialize_pk( + setup.aux_params.ck_secondary.clone(), + setup.vk_digest_secondary, + )?, + }; + // let pk: = CompressedSNARK::::initialize_pk( + // public_params, + // vk_digest_primary, + // vk_digest_secondary, + // ) + // .unwrap(); + debug!( + "initialized pk pk_primary.digest={:?}, pk_secondary.digest={:?}", + pk.pk_primary.vk_digest, pk.pk_secondary.vk_digest + ); + let public_params = setup.into_public_params(programs); + + debug!("`CompressedSNARK::prove STARTING PROVING!"); + let proof = FoldingProof { + proof: CompressedSNARK::::prove(&public_params, &pk, recursive_snark)?, + verifier_digest: pk.pk_primary.vk_digest, + }; + debug!("`CompressedSNARK::prove completed!"); + + Ok(proof) +} -// Ok(InitializedSetup { r1cs, witness_generator_types, max_rom_length: setup_data.max_rom_length -// }) } -// /// Initializes 
a list of ROM circuits from the provided setup data. -// /// -// /// This function takes an `InitializedSetup` and creates a vector of -// /// `RomCircuit` instances. Each `RomCircuit` is constructed using the R1CS and -// /// witness generator types from the setup data, and is assigned a unique -// /// circuit index and the maximum ROM length. -// /// -// /// # Arguments -// /// -// /// * `setup_data` - The `InitializedSetup` containing the R1CS and witness generator types. -// /// -// /// # Returns -// /// -// /// A vector of `RomCircuit` instances initialized with the provided setup data. -// pub fn initialize_circuit_list(setup_data: &InitializedSetup) -> Vec { -// setup_data -// .r1cs -// .iter() -// .zip(setup_data.witness_generator_types.iter()) -// .enumerate() -// .map(|(i, (r1cs, generator))| { -// let circuit = circom::CircomCircuit { r1cs: r1cs.clone(), witness: None }; -// RomCircuit { -// circuit, -// circuit_index: i, -// rom_size: setup_data.max_rom_length, -// nivc_io: None, -// private_input: None, -// witness_generator_type: generator.clone(), -// } -// }) -// .collect::>() -// } diff --git a/frontend/src/setup.rs b/frontend/src/setup.rs index 22598f1..cf26131 100644 --- a/frontend/src/setup.rs +++ b/frontend/src/setup.rs @@ -7,7 +7,7 @@ use client_side_prover::{ }; use tracing::debug; -use crate::{error::ProofError, noir::NoirProgram, program, AuxParams, ProverKey, E1, S1, S2}; +use crate::{error::ProofError, noir::NoirProgram, program, AuxParams, E1, S1, S2}; // TODO: This could probably just store the programs with it #[derive(Clone, Debug)] diff --git a/frontend/tests/setup/mod.rs b/frontend/tests/end_to_end/mod.rs similarity index 100% rename from frontend/tests/setup/mod.rs rename to frontend/tests/end_to_end/mod.rs diff --git a/frontend/tests/ivc/mod.rs b/frontend/tests/ivc/mod.rs index 5007a6d..97232ac 100644 --- a/frontend/tests/ivc/mod.rs +++ b/frontend/tests/ivc/mod.rs @@ -1,7 +1,7 @@ use acvm::acir::acir_field::GenericFieldElement; 
-use client_side_prover::provider::GrumpkinEngine; +use client_side_prover::{provider::GrumpkinEngine, supernova::snark::CompressedSNARK}; use client_side_prover_frontend::{ - program::{run, Switchboard}, + program::{compress, run, Switchboard}, setup::Setup, Scalar, }; @@ -133,3 +133,29 @@ fn test_ivc_verify() { assert_eq!(z1_primary, vec![Scalar::from(4), Scalar::from(1)]); assert_eq!(z1_secondary, vec![grumpkin::Fr::ZERO]); } + +// TODO: Lots of clones here now. +#[test] +#[traced_test] +fn test_ivc_compression() { + let programs = vec![square_zeroth()]; + let setup = Setup::new(&programs); + let pp = setup.clone().into_public_params(&programs); + let switchboard_inputs = vec![InputMap::from([( + "next_pc".to_string(), + InputValue::Field(GenericFieldElement::from(0_u64)), + )])]; + + let memory = Switchboard::new( + programs.clone(), + switchboard_inputs, + vec![Scalar::from(2), Scalar::from(1)], + 0, + ); + + let snark = run(setup.clone(), &memory).unwrap(); + let compressed_proof = compress(setup, &snark, &programs).unwrap(); + + let (_, vk) = CompressedSNARK::setup(&pp).unwrap(); + compressed_proof.proof.verify(&pp, &vk, &snark.z0_primary(), &snark.z0_secondary()).unwrap(); +} diff --git a/frontend/tests/lib.rs b/frontend/tests/lib.rs index c4675f8..e15f91a 100644 --- a/frontend/tests/lib.rs +++ b/frontend/tests/lib.rs @@ -1,5 +1,5 @@ use client_side_prover_frontend::demo::*; use tracing_test::traced_test; +mod end_to_end; mod ivc; -mod setup; From 2088c6791298cf20532e069d88e35db8ff38eae7 Mon Sep 17 00:00:00 2001 From: Colin Roberts Date: Sun, 2 Mar 2025 11:00:56 -0700 Subject: [PATCH 28/51] cleanup api --- frontend/src/program.rs | 54 ++++++++------------ frontend/src/setup.rs | 103 +++++++++++++++++++++++++++----------- frontend/tests/ivc/mod.rs | 59 ++++++++++------------ 3 files changed, 123 insertions(+), 93 deletions(-) diff --git a/frontend/src/program.rs b/frontend/src/program.rs index 6e4e51a..c816f30 100644 --- a/frontend/src/program.rs +++ 
b/frontend/src/program.rs @@ -8,7 +8,10 @@ use proof::FoldingProof; use tracing::trace; use super::*; -use crate::{noir::NoirProgram, setup::Setup}; +use crate::{ + noir::NoirProgram, + setup::{Ready, Setup}, +}; // TODO: Consider moving contents of mod.rs files to a separate files. mod.rs // files should only be used to adjust the visibility of exported items. @@ -55,11 +58,10 @@ impl NonUniformCircuit for Switchboard { fn initial_circuit_index(&self) -> usize { self.initial_circuit_index } } -pub fn run(setup: Setup, switchboard: &Switchboard) -> Result, ProofError> { +pub fn run(setup: &Setup) -> Result, ProofError> { info!("Starting SuperNova program..."); - let public_params = setup.into_public_params(&switchboard.circuits); - let z0_primary = &switchboard.public_input; + let z0_primary = &setup.switchboard.public_input; let z0_secondary = &[grumpkin::Fr::ZERO]; let time = std::time::Instant::now(); @@ -67,12 +69,12 @@ pub fn run(setup: Setup, switchboard: &Switchboard) -> Result // Initialize recursive SNARK as None let mut recursive_snark: Option> = None; - for (idx, switchboard_witness) in switchboard.switchboard_inputs.iter().enumerate() { - info!("Step {} of {} witnesses", idx + 1, switchboard.switchboard_inputs.len()); + for (idx, switchboard_witness) in setup.switchboard.switchboard_inputs.iter().enumerate() { + info!("Step {} of {} witnesses", idx + 1, setup.switchboard.switchboard_inputs.len()); // Determine program counter based on current state let program_counter = match &recursive_snark { - None => switchboard.initial_circuit_index(), + None => setup.switchboard.initial_circuit_index(), Some(snark) => { // TODO: I honestly am surprised that the prover chose to use a usize instead of a field // element for the PC, it would be cleaner to do otherwise @@ -97,16 +99,16 @@ pub fn run(setup: Setup, switchboard: &Switchboard) -> Result debug!("Program counter = {:?}", program_counter); // Prepare circuits for this step - let mut circuit_primary = 
switchboard.primary_circuit(program_counter); + let mut circuit_primary = setup.switchboard.primary_circuit(program_counter); circuit_primary.witness = Some(switchboard_witness.clone()); - let circuit_secondary = switchboard.secondary_circuit(); + let circuit_secondary = setup.switchboard.secondary_circuit(); // Initialize or update the recursive SNARK if recursive_snark.is_none() { // Initialize a new recursive SNARK for the first step recursive_snark = Some(RecursiveSNARK::new( - &public_params, - switchboard, + &setup.params, + &setup.switchboard, &circuit_primary, &circuit_secondary, z0_primary, @@ -117,7 +119,7 @@ pub fn run(setup: Setup, switchboard: &Switchboard) -> Result // Prove the next step info!("Proving single step..."); let snark = recursive_snark.as_mut().unwrap(); - snark.prove_step(&public_params, &circuit_primary, &circuit_secondary)?; + snark.prove_step(&setup.params, &circuit_primary, &circuit_secondary)?; info!("Done proving single step..."); // TODO: Feature gate this or just remove it @@ -132,39 +134,27 @@ pub fn run(setup: Setup, switchboard: &Switchboard) -> Result Ok(recursive_snark.unwrap()) } -// TODO: We need to make this not take in the programs pub fn compress( - setup: Setup, + setup: &Setup, recursive_snark: &RecursiveSNARK, - programs: &[NoirProgram], ) -> Result { - let pk = ProverKey { - pk_primary: S1::initialize_pk(setup.aux_params.ck_primary.clone(), setup.vk_digest_primary)?, - pk_secondary: S2::initialize_pk( - setup.aux_params.ck_secondary.clone(), - setup.vk_digest_secondary, - )?, - }; - // let pk: = CompressedSNARK::::initialize_pk( - // public_params, - // vk_digest_primary, - // vk_digest_secondary, - // ) - // .unwrap(); + let pk = CompressedSNARK::::initialize_pk( + &setup.params, + setup.vk_digest_primary, + setup.vk_digest_secondary, + ) + .unwrap(); debug!( "initialized pk pk_primary.digest={:?}, pk_secondary.digest={:?}", pk.pk_primary.vk_digest, pk.pk_secondary.vk_digest ); - let public_params = 
setup.into_public_params(programs); debug!("`CompressedSNARK::prove STARTING PROVING!"); let proof = FoldingProof { - proof: CompressedSNARK::::prove(&public_params, &pk, recursive_snark)?, + proof: CompressedSNARK::::prove(&setup.params, &pk, recursive_snark)?, verifier_digest: pk.pk_primary.vk_digest, }; debug!("`CompressedSNARK::prove completed!"); Ok(proof) } - - diff --git a/frontend/src/setup.rs b/frontend/src/setup.rs index cf26131..2e52161 100644 --- a/frontend/src/setup.rs +++ b/frontend/src/setup.rs @@ -7,49 +7,80 @@ use client_side_prover::{ }; use tracing::debug; -use crate::{error::ProofError, noir::NoirProgram, program, AuxParams, E1, S1, S2}; +use crate::{ + error::ProofError, + noir::NoirProgram, + program::{self, Switchboard}, + AuxParams, E1, S1, S2, +}; + +// TODO: Seal this +pub trait Status { + type Switchboard; + type PublicParams; +} + +#[derive(Debug, Clone)] +pub struct Ready; + +impl Status for Ready { + type PublicParams = PublicParams; + type Switchboard = Switchboard; +} + +#[derive(Debug, Clone)] +pub struct Empty; + +impl Status for Empty { + type PublicParams = AuxParams; + type Switchboard = (); +} // TODO: This could probably just store the programs with it #[derive(Clone, Debug)] -pub struct Setup { +pub struct Setup { /// Auxiliary parameters - pub aux_params: AuxParams, + pub params: S::PublicParams, /// Primary verification key digest pub vk_digest_primary: ::Scalar, /// Secondary verification key digest pub vk_digest_secondary: as Engine>::Scalar, + + pub switchboard: S::Switchboard, } #[cfg(test)] -impl PartialEq for Setup { +impl PartialEq for Setup { fn eq(&self, other: &Self) -> bool { self.vk_digest_primary == other.vk_digest_primary && self.vk_digest_secondary == other.vk_digest_secondary } } -impl Setup { - pub fn new(programs: &[NoirProgram]) -> Self { - let switchboard = program::Switchboard::new(programs.to_vec(), vec![], vec![], 0); +impl Setup { + pub fn new(switchboard: Switchboard) -> Self { let public_params = 
PublicParams::setup(&switchboard, &*default_ck_hint(), &*default_ck_hint()); let (pk, _vk) = CompressedSNARK::::setup(&public_params).unwrap(); - let (_, aux_params) = public_params.into_parts(); Setup { - aux_params, + params: public_params, vk_digest_primary: pk.pk_primary.vk_digest, vk_digest_secondary: pk.pk_secondary.vk_digest, + switchboard, } } - pub fn into_public_params(self, programs: &[NoirProgram]) -> PublicParams { - let switchboard = program::Switchboard::new(programs.to_vec(), vec![], vec![], 0); - // TODO: This can print out the constraints and variables for each circuit - PublicParams::from_parts(get_circuit_shapes(&switchboard), self.aux_params) + fn into_empty(self) -> Setup { + Setup { + params: self.params.into_parts().1, + vk_digest_primary: self.vk_digest_primary, + vk_digest_secondary: self.vk_digest_secondary, + switchboard: (), + } } - pub fn store_file(&self, path: &std::path::PathBuf) -> Result, ProofError> { - let bytes = self.to_bytes(); + pub fn store_file(self, path: &std::path::PathBuf) -> Result, ProofError> { + let bytes = self.into_empty().to_bytes(); if let Some(parent) = path.parent() { std::fs::create_dir_all(parent)?; } @@ -61,14 +92,24 @@ impl Setup { } } +impl Setup { + pub fn into_ready(self, switchboard: Switchboard) -> Setup { + Setup { + params: PublicParams::from_parts(get_circuit_shapes(&switchboard), self.params), + vk_digest_primary: self.vk_digest_primary, + vk_digest_secondary: self.vk_digest_secondary, + switchboard, + } + } +} // TODO: We may be able to just use rkyv -impl FastSerde for Setup { +impl FastSerde for Setup { /// Initialize ProvingParams from an efficiently serializable data format. 
fn from_bytes(bytes: &[u8]) -> Result { let mut cursor = Cursor::new(bytes); Self::validate_header(&mut cursor, SerdeByteTypes::ProverParams, 3)?; - let aux_params = + let params = Self::read_section_bytes(&mut cursor, 1).map(|bytes| AuxParams::from_bytes(&bytes))??; let vk_digest_primary = Self::read_section_bytes(&mut cursor, 2) @@ -83,7 +124,7 @@ impl FastSerde for Setup { .into_option() .ok_or(SerdeByteError::G1DecodeError)?; - Ok(Setup { aux_params, vk_digest_primary, vk_digest_secondary }) + Ok(Setup { params, vk_digest_primary, vk_digest_secondary, switchboard: () }) } /// Convert ProvingParams to an efficient serialization. @@ -93,7 +134,7 @@ impl FastSerde for Setup { out.push(SerdeByteTypes::ProverParams as u8); out.push(3); // num_sections - Self::write_section_bytes(&mut out, 1, &self.aux_params.to_bytes()); + Self::write_section_bytes(&mut out, 1, &self.params.to_bytes()); Self::write_section_bytes(&mut out, 2, &self.vk_digest_primary.to_bytes()); Self::write_section_bytes(&mut out, 3, &self.vk_digest_secondary.to_bytes()); @@ -108,26 +149,32 @@ mod tests { #[test] fn test_setup_and_params() { - let setup = Setup::new(&[square_zeroth()]); - let _ = setup.into_public_params(&[square_zeroth()]); + let setup = Setup::new(Switchboard::new(vec![square_zeroth()], vec![], vec![], 0)); + assert_eq!(setup.params.num_constraints_and_variables(0), (10008, 10001)); } #[test] fn test_setup_serialize() { - let setup = Setup::new(&[square_zeroth()]); - let serialized = setup.to_bytes(); - let deserialized = Setup::from_bytes(&serialized).unwrap(); - assert_eq!(setup, deserialized); + let setup = Setup::new(Switchboard::new(vec![square_zeroth()], vec![], vec![], 0)); + let empty_setup = setup.into_empty(); + let serialized = empty_setup.to_bytes(); + let deserialized = Setup::::from_bytes(&serialized).unwrap(); + assert_eq!(empty_setup, deserialized); } #[test] fn test_setup_store_file() { - let setup = Setup::new(&[square_zeroth()]); + let switchboard = 
Switchboard::new(vec![square_zeroth()], vec![], vec![], 0); + let setup = Setup::new(switchboard.clone()); + let vk_digest_primary = setup.vk_digest_primary; + let vk_digest_secondary = setup.vk_digest_secondary; let path = tempfile::tempdir().unwrap().into_path(); let bytes = setup.store_file(&path.join("setup.bytes")).unwrap(); assert!(!bytes.is_empty()); let stored_bytes = std::fs::read(path.join("setup.bytes")).unwrap(); - let deserialized = Setup::from_bytes(&stored_bytes).unwrap(); - assert_eq!(setup, deserialized); + let deserialized = Setup::::from_bytes(&stored_bytes).unwrap(); + let ready_setup = deserialized.into_ready(switchboard); + assert_eq!(vk_digest_primary, ready_setup.vk_digest_primary); + assert_eq!(vk_digest_secondary, ready_setup.vk_digest_secondary); } } diff --git a/frontend/tests/ivc/mod.rs b/frontend/tests/ivc/mod.rs index 97232ac..208bf35 100644 --- a/frontend/tests/ivc/mod.rs +++ b/frontend/tests/ivc/mod.rs @@ -1,5 +1,5 @@ use acvm::acir::acir_field::GenericFieldElement; -use client_side_prover::{provider::GrumpkinEngine, supernova::snark::CompressedSNARK}; +use client_side_prover::supernova::snark::CompressedSNARK; use client_side_prover_frontend::{ program::{compress, run, Switchboard}, setup::Setup, @@ -15,17 +15,15 @@ use super::*; #[traced_test] fn test_ivc() { let programs = vec![square_zeroth()]; - let setup = Setup::new(&programs); let switchboard_inputs = vec![ InputMap::from([("next_pc".to_string(), InputValue::Field(GenericFieldElement::from(0_u64)))]), InputMap::from([("next_pc".to_string(), InputValue::Field(GenericFieldElement::from(0_u64)))]), InputMap::from([("next_pc".to_string(), InputValue::Field(GenericFieldElement::from(0_u64)))]), ]; - - let memory = + let switchboard = Switchboard::new(programs, switchboard_inputs, vec![Scalar::from(2), Scalar::from(1)], 0); - - let snark = run(setup, &memory).unwrap(); + let setup = Setup::new(switchboard); + let snark = run(&setup).unwrap(); dbg!(&snark.zi_primary()); 
assert_eq!(snark.zi_primary()[0], Scalar::from(256)); assert_eq!(snark.zi_primary()[1], Scalar::from(1)); @@ -35,7 +33,6 @@ fn test_ivc() { #[traced_test] fn test_ivc_private_inputs() { let programs = vec![add_external()]; - let setup = Setup::new(&programs); let switchboard_inputs = vec![ InputMap::from([ ("next_pc".to_string(), InputValue::Field(GenericFieldElement::from(0_u64))), @@ -58,11 +55,10 @@ fn test_ivc_private_inputs() { ), ]), ]; - - let memory = + let switchboard = Switchboard::new(programs, switchboard_inputs, vec![Scalar::from(1), Scalar::from(2)], 0); - - let snark = run(setup, &memory).unwrap(); + let setup = Setup::new(switchboard); + let snark = run(&setup).unwrap(); let zi = snark.zi_primary(); dbg!(zi); assert_eq!(zi[0], Scalar::from(424)); @@ -73,7 +69,6 @@ fn test_ivc_private_inputs() { #[traced_test] fn test_mock_noir_nivc() { let programs = vec![add_external(), square_zeroth(), swap_memory()]; - let setup = Setup::new(&programs); let switchboard_inputs = vec![ InputMap::from([ ("next_pc".to_string(), InputValue::Field(GenericFieldElement::from(1_u64))), @@ -91,11 +86,10 @@ fn test_mock_noir_nivc() { InputValue::Field(GenericFieldElement::from(-1_i128)), )]), ]; - - let memory = + let switchboard = Switchboard::new(programs, switchboard_inputs, vec![Scalar::from(1), Scalar::from(2)], 0); - - let snark = run(setup, &memory).unwrap(); + let setup = Setup::new(switchboard); + let snark = run(&setup).unwrap(); let zi = snark.zi_primary(); dbg!(zi); // First fold: @@ -115,19 +109,16 @@ fn test_mock_noir_nivc() { #[traced_test] fn test_ivc_verify() { let programs = vec![square_zeroth()]; - let setup = Setup::new(&programs); - let pp = setup.clone().into_public_params(&programs); let switchboard_inputs = vec![InputMap::from([( "next_pc".to_string(), InputValue::Field(GenericFieldElement::from(0_u64)), )])]; - - let memory = + let switchboard = Switchboard::new(programs, switchboard_inputs, vec![Scalar::from(2), Scalar::from(1)], 0); - - let snark 
= run(setup, &memory).unwrap(); + let setup = Setup::new(switchboard); + let snark = run(&setup).unwrap(); let (z1_primary, z1_secondary) = - snark.verify(&pp, &snark.z0_primary(), &snark.z0_secondary()).unwrap(); + snark.verify(&setup.params, &snark.z0_primary(), &snark.z0_secondary()).unwrap(); assert_eq!(&z1_primary, snark.zi_primary()); assert_eq!(&z1_secondary, snark.zi_secondary()); assert_eq!(z1_primary, vec![Scalar::from(4), Scalar::from(1)]); @@ -139,23 +130,25 @@ fn test_ivc_verify() { #[traced_test] fn test_ivc_compression() { let programs = vec![square_zeroth()]; - let setup = Setup::new(&programs); - let pp = setup.clone().into_public_params(&programs); let switchboard_inputs = vec![InputMap::from([( "next_pc".to_string(), InputValue::Field(GenericFieldElement::from(0_u64)), )])]; - - let memory = Switchboard::new( + let switchboard = Switchboard::new( programs.clone(), switchboard_inputs, vec![Scalar::from(2), Scalar::from(1)], 0, ); - - let snark = run(setup.clone(), &memory).unwrap(); - let compressed_proof = compress(setup, &snark, &programs).unwrap(); - - let (_, vk) = CompressedSNARK::setup(&pp).unwrap(); - compressed_proof.proof.verify(&pp, &vk, &snark.z0_primary(), &snark.z0_secondary()).unwrap(); + let setup = Setup::new(switchboard); + let snark = run(&setup).unwrap(); + let compressed_proof = compress(&setup, &snark).unwrap(); + + let (_, vk) = CompressedSNARK::setup(&setup.params).unwrap(); + compressed_proof + .proof + .verify(&setup.params, &vk, &snark.z0_primary(), &snark.z0_secondary()) + .unwrap(); } + +// TODO: Add some fail cases for circuits. 
From 2adca43810fb9acd4247b0e799418763814b021e Mon Sep 17 00:00:00 2001 From: Colin Roberts Date: Sun, 2 Mar 2025 11:20:27 -0700 Subject: [PATCH 29/51] fix: constraints --- Cargo.toml | 4 --- frontend/src/noir.rs | 78 +++++++++++++++++++------------------------ frontend/src/setup.rs | 2 +- 3 files changed, 35 insertions(+), 49 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index f6e35c0..b39a4d6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -65,7 +65,3 @@ lto =true codegen-units=1 panic ="abort" -[profile.dev] -opt-level =1 -split-debuginfo="unpacked" -incremental =true \ No newline at end of file diff --git a/frontend/src/noir.rs b/frontend/src/noir.rs index 5f87fd5..47c31ea 100644 --- a/frontend/src/noir.rs +++ b/frontend/src/noir.rs @@ -138,66 +138,43 @@ impl StepCircuit for NoirProgram { } }; - // Process gates + // Process gates using R1CS approach for (idx, opcode) in self.circuit().opcodes.iter().enumerate() { if let Opcode::AssertZero(gate) = opcode { - // In noir/ACVM, the constraint is (left*right + linear_terms + constant = 0) - // We need to build this as a single LinearCombination that must equal zero - let mut constraint = LinearCombination::zero(); + // Convert the gate to R1CS form: A * B - C = 0 + // First, build the A, B, and C linear combinations + let mut a_lc = LinearCombination::zero(); + let mut b_lc = LinearCombination::zero(); + let mut c_lc = LinearCombination::zero(); - // For multiplication terms, we should create intermediate variables + // Handle mul terms (a product of two variables) for mul_term in &gate.mul_terms { - // Get variables for the factors let left_var = get_var(&mul_term.1, &mut allocated_vars, cs)?; let right_var = get_var(&mul_term.2, &mut allocated_vars, cs)?; - // Create a variable for their product (done implicitly by bellpepper) - let product = cs.alloc( - || format!("mul_term_product_g{idx}"), - || { - // Retrieve witness values if available (or default to zero) - let left_val = acvm_witness_map - .as_ref() - 
.and_then(|map| map.get(&mul_term.1).copied()) - .unwrap_or_default(); - let right_val = acvm_witness_map - .as_ref() - .and_then(|map| map.get(&mul_term.2).copied()) - .unwrap_or_default(); - Ok(convert_to_halo2_field(left_val * right_val)) - }, - )?; - - // Add a constraint that product = left * right - cs.enforce( - || format!("mul_constraint_g{idx}"), - |lc| lc + left_var, - |lc| lc + right_var, - |lc| lc + product, - ); - - // Add this product term to our main constraint - constraint = constraint + (convert_to_halo2_field(mul_term.0), product); + // Add to A and B linear combinations (negated due to the `AssertZero` gate versus the A*B + // = C form) + a_lc = a_lc + (-convert_to_halo2_field(mul_term.0), left_var); + b_lc = b_lc + (Scalar::one(), right_var); } - // Process addition terms + // Handle linear terms (these go into C with negative coefficients) for add_term in &gate.linear_combinations { let var = get_var(&add_term.1, &mut allocated_vars, cs)?; - constraint = constraint + (convert_to_halo2_field(add_term.0), var); + c_lc = c_lc + (convert_to_halo2_field(add_term.0), var); } - // Handle constant term + // Handle constant term (this goes into C as well) if !gate.q_c.is_zero() { - constraint = constraint - + (convert_to_halo2_field(gate.q_c), Variable::new_unchecked(Index::Input(0))); + c_lc = c_lc + (convert_to_halo2_field(gate.q_c), CS::one()); } - // Enforce constraint: 1 * 0 = constraint (i.e., constraint must be zero) + // Enforce A * B - C = 0 cs.enforce( - || format!("gate_constraint_g{idx}"), - |lc| lc + Variable::new_unchecked(Index::Input(0)), // 1 - |lc| lc, // 0 - |_| constraint, + || format!("constraint_g{idx}"), + |_| a_lc.clone(), + |_| b_lc.clone(), + |_| c_lc.clone(), ); } else { panic!("non-AssertZero gate {idx} of type {opcode:?}"); @@ -270,6 +247,7 @@ mod tests { use client_side_prover::bellpepper::shape_cs::ShapeCS; use super::*; + use crate::demo::square_zeroth; fn add_external() -> NoirProgram { let json_path = 
"../target/add_external.json"; @@ -315,7 +293,7 @@ mod tests { } #[test] - fn test_constraint_system() { + fn test_constraint_system_add_external() { let program = add_external(); let mut cs = ShapeCS::::new(); @@ -325,4 +303,16 @@ mod tests { let _ = program.synthesize(&mut cs, pc.as_ref(), z.as_ref()).unwrap(); assert_eq!(cs.num_constraints(), 3); } + + #[test] + fn test_constraint_system_square_zeroth() { + let program = square_zeroth(); + + let mut cs = ShapeCS::::new(); + let pc = Some(AllocatedNum::alloc(&mut cs, || Ok(Scalar::from(0))).unwrap()); + let z = vec![AllocatedNum::alloc(&mut cs, || Ok(Scalar::from(1))).unwrap()]; + + let _ = program.synthesize(&mut cs, pc.as_ref(), z.as_ref()).unwrap(); + assert_eq!(cs.num_constraints(), 3); + } } diff --git a/frontend/src/setup.rs b/frontend/src/setup.rs index 2e52161..94d00b4 100644 --- a/frontend/src/setup.rs +++ b/frontend/src/setup.rs @@ -150,7 +150,7 @@ mod tests { #[test] fn test_setup_and_params() { let setup = Setup::new(Switchboard::new(vec![square_zeroth()], vec![], vec![], 0)); - assert_eq!(setup.params.num_constraints_and_variables(0), (10008, 10001)); + assert_eq!(setup.params.num_constraints_and_variables(0), (10008, 10000)); } #[test] From bb7c6f83d4d4b2d11faebf44d9d0190eb66b29f4 Mon Sep 17 00:00:00 2001 From: Colin Roberts Date: Sun, 2 Mar 2025 12:07:11 -0700 Subject: [PATCH 30/51] bug: only poseidon fails --- Nargo.toml | 2 +- examples/basic/Nargo.toml | 9 +++ examples/basic/src/main.nr | 17 +++++ examples/poseidon/Nargo.toml | 9 +++ examples/poseidon/src/main.nr | 6 ++ frontend/src/lib.rs | 11 ++++ frontend/src/noir.rs | 55 +++++++++++++--- frontend/tests/ivc/mod.rs | 115 ++++++++++++++++++++++++++++++++-- 8 files changed, 209 insertions(+), 15 deletions(-) create mode 100644 examples/basic/Nargo.toml create mode 100644 examples/basic/src/main.nr create mode 100644 examples/poseidon/Nargo.toml create mode 100644 examples/poseidon/src/main.nr diff --git a/Nargo.toml b/Nargo.toml index 
cf2964f..6663264 100644 --- a/Nargo.toml +++ b/Nargo.toml @@ -1,3 +1,3 @@ [workspace] -members = ["nivc", "examples/add_external", "examples/square_zeroth", "examples/swap_memory"] +members = ["nivc", "examples/add_external", "examples/square_zeroth", "examples/swap_memory", "examples/poseidon", "examples/basic"] diff --git a/examples/basic/Nargo.toml b/examples/basic/Nargo.toml new file mode 100644 index 0000000..02c2dda --- /dev/null +++ b/examples/basic/Nargo.toml @@ -0,0 +1,9 @@ +[package] +authors = ["Colin Roberts"] +compiler_version = ">=0.36.0" +name = "basic" +type = "bin" +version = "0.1.0" + +[dependencies] +nivc = { path = "../../nivc" } diff --git a/examples/basic/src/main.nr b/examples/basic/src/main.nr new file mode 100644 index 0000000..5d7b67f --- /dev/null +++ b/examples/basic/src/main.nr @@ -0,0 +1,17 @@ +use nivc::FoldingOutput; + +/// Checks general arithmetic operations. +pub fn main( + registers: pub [Field; 2], + external_mul: [Field; 2], + external_add: [Field; 2], + next_pc: Field, +) -> pub FoldingOutput<2> { + FoldingOutput { + registers: [ + external_mul[0] * registers[0] + external_add[0] + 420, + external_mul[1] * registers[1] + external_add[1] + 69, + ], + next_pc, + } +} diff --git a/examples/poseidon/Nargo.toml b/examples/poseidon/Nargo.toml new file mode 100644 index 0000000..50dc97b --- /dev/null +++ b/examples/poseidon/Nargo.toml @@ -0,0 +1,9 @@ +[package] +authors = ["Colin Roberts"] +compiler_version = ">=0.36.0" +name = "poseidon" +type = "bin" +version = "0.1.0" + +[dependencies] +nivc = { path = "../../nivc" } diff --git a/examples/poseidon/src/main.nr b/examples/poseidon/src/main.nr new file mode 100644 index 0000000..df76667 --- /dev/null +++ b/examples/poseidon/src/main.nr @@ -0,0 +1,6 @@ +use nivc::FoldingOutput; +use std::hash::poseidon::bn254::hash_2; + +pub fn main(registers: pub [Field; 2], next_pc: Field) -> pub FoldingOutput<2> { + FoldingOutput { registers: [hash_2(registers), 0], next_pc } +} diff --git 
a/frontend/src/lib.rs b/frontend/src/lib.rs index 183c4da..edf93ef 100644 --- a/frontend/src/lib.rs +++ b/frontend/src/lib.rs @@ -84,6 +84,11 @@ type S2 = BatchedRelaxedR1CSSNARK; pub mod demo { use crate::noir::NoirProgram; + pub fn basic() -> NoirProgram { + let bytecode = std::fs::read("../target/basic.json").expect("Failed to read Noir program file"); + NoirProgram::new(&bytecode) + } + pub fn add_external() -> NoirProgram { let bytecode = std::fs::read("../target/add_external.json").expect("Failed to read Noir program file"); @@ -101,4 +106,10 @@ pub mod demo { std::fs::read("../target/swap_memory.json").expect("Failed to read Noir program file"); NoirProgram::new(&bytecode) } + + pub fn poseidon() -> NoirProgram { + let bytecode = + std::fs::read("../target/poseidon.json").expect("Failed to read Noir program file"); + NoirProgram::new(&bytecode) + } } diff --git a/frontend/src/noir.rs b/frontend/src/noir.rs index 47c31ea..2874e5e 100644 --- a/frontend/src/noir.rs +++ b/frontend/src/noir.rs @@ -35,8 +35,11 @@ pub struct NoirProgram { deserialize_with = "Program::deserialize_program_base64" )] pub bytecode: Program>, - pub debug_symbols: String, - pub file_map: HashMap, + // TODO: We likely don't need these. + pub debug_symbols: serde_json::Value, + // TODO: We likely don't need these. 
+ pub file_map: serde_json::Value, + pub names: Vec, pub brillig_names: Vec, #[serde(skip)] @@ -152,8 +155,7 @@ impl StepCircuit for NoirProgram { let left_var = get_var(&mul_term.1, &mut allocated_vars, cs)?; let right_var = get_var(&mul_term.2, &mut allocated_vars, cs)?; - // Add to A and B linear combinations (negated due to the `AssertZero` gate versus the A*B - // = C form) + // Add to A and B (negated due to the `AssertZero` gate versus the A*B = C form) a_lc = a_lc + (-convert_to_halo2_field(mul_term.0), left_var); b_lc = b_lc + (Scalar::one(), right_var); } @@ -169,7 +171,7 @@ impl StepCircuit for NoirProgram { c_lc = c_lc + (convert_to_halo2_field(gate.q_c), CS::one()); } - // Enforce A * B - C = 0 + // Enforce A * B = C cs.enforce( || format!("constraint_g{idx}"), |_| a_lc.clone(), @@ -247,7 +249,7 @@ mod tests { use client_side_prover::bellpepper::shape_cs::ShapeCS; use super::*; - use crate::demo::square_zeroth; + use crate::demo::{basic, poseidon, square_zeroth}; fn add_external() -> NoirProgram { let json_path = "../target/add_external.json"; @@ -292,13 +294,32 @@ mod tests { } } + // TODO: Worth checking here that each gate has mul, add, and constant terms. 
+ #[test] + fn test_constraint_system_basic() { + let program = basic(); + + let mut cs = ShapeCS::::new(); + let pc = Some(AllocatedNum::alloc(&mut cs, || Ok(Scalar::from(0))).unwrap()); + let z = vec![ + AllocatedNum::alloc(&mut cs, || Ok(Scalar::from(2))).unwrap(), + AllocatedNum::alloc(&mut cs, || Ok(Scalar::from(1))).unwrap(), + ]; + + let _ = program.synthesize(&mut cs, pc.as_ref(), z.as_ref()).unwrap(); + assert_eq!(cs.num_constraints(), 3); + } + #[test] fn test_constraint_system_add_external() { let program = add_external(); let mut cs = ShapeCS::::new(); let pc = Some(AllocatedNum::alloc(&mut cs, || Ok(Scalar::from(0))).unwrap()); - let z = vec![AllocatedNum::alloc(&mut cs, || Ok(Scalar::from(1))).unwrap()]; + let z = vec![ + AllocatedNum::alloc(&mut cs, || Ok(Scalar::from(2))).unwrap(), + AllocatedNum::alloc(&mut cs, || Ok(Scalar::from(1))).unwrap(), + ]; let _ = program.synthesize(&mut cs, pc.as_ref(), z.as_ref()).unwrap(); assert_eq!(cs.num_constraints(), 3); @@ -310,9 +331,27 @@ mod tests { let mut cs = ShapeCS::::new(); let pc = Some(AllocatedNum::alloc(&mut cs, || Ok(Scalar::from(0))).unwrap()); - let z = vec![AllocatedNum::alloc(&mut cs, || Ok(Scalar::from(1))).unwrap()]; + let z = vec![ + AllocatedNum::alloc(&mut cs, || Ok(Scalar::from(2))).unwrap(), + AllocatedNum::alloc(&mut cs, || Ok(Scalar::from(1))).unwrap(), + ]; let _ = program.synthesize(&mut cs, pc.as_ref(), z.as_ref()).unwrap(); assert_eq!(cs.num_constraints(), 3); } + + #[test] + fn test_constraint_system_poseidon() { + let program = poseidon(); + + let mut cs = ShapeCS::::new(); + let pc = Some(AllocatedNum::alloc(&mut cs, || Ok(Scalar::from(0))).unwrap()); + let z = vec![ + AllocatedNum::alloc(&mut cs, || Ok(Scalar::from(2))).unwrap(), + AllocatedNum::alloc(&mut cs, || Ok(Scalar::from(1))).unwrap(), + ]; + + let _ = program.synthesize(&mut cs, pc.as_ref(), z.as_ref()).unwrap(); + assert_eq!(cs.num_constraints(), 320); + } } diff --git a/frontend/tests/ivc/mod.rs 
b/frontend/tests/ivc/mod.rs index 208bf35..1ae6fa9 100644 --- a/frontend/tests/ivc/mod.rs +++ b/frontend/tests/ivc/mod.rs @@ -134,12 +134,115 @@ fn test_ivc_compression() { "next_pc".to_string(), InputValue::Field(GenericFieldElement::from(0_u64)), )])]; - let switchboard = Switchboard::new( - programs.clone(), - switchboard_inputs, - vec![Scalar::from(2), Scalar::from(1)], - 0, - ); + let switchboard = + Switchboard::new(programs, switchboard_inputs, vec![Scalar::from(2), Scalar::from(1)], 0); + let setup = Setup::new(switchboard); + let snark = run(&setup).unwrap(); + let compressed_proof = compress(&setup, &snark).unwrap(); + + let (_, vk) = CompressedSNARK::setup(&setup.params).unwrap(); + compressed_proof + .proof + .verify(&setup.params, &vk, &snark.z0_primary(), &snark.z0_secondary()) + .unwrap(); +} + +#[test] +#[traced_test] +fn test_ivc_verify_basic() { + let programs = vec![basic()]; + let switchboard_inputs = vec![InputMap::from([ + ("next_pc".to_string(), InputValue::Field(GenericFieldElement::from(0_u64))), + ( + "external_mul".to_string(), + InputValue::Vec(vec![ + InputValue::Field(GenericFieldElement::from(3_u64)), + InputValue::Field(GenericFieldElement::from(2_u64)), + ]), + ), + ( + "external_add".to_string(), + InputValue::Vec(vec![ + InputValue::Field(GenericFieldElement::from(10_u64)), + InputValue::Field(GenericFieldElement::from(7_u64)), + ]), + ), + ])]; + let switchboard = + Switchboard::new(programs, switchboard_inputs, vec![Scalar::from(2), Scalar::from(1)], 0); + let setup = Setup::new(switchboard); + let snark = run(&setup).unwrap(); + let (z1_primary, z1_secondary) = + snark.verify(&setup.params, &snark.z0_primary(), &snark.z0_secondary()).unwrap(); + assert_eq!(&z1_primary, snark.zi_primary()); + assert_eq!(&z1_secondary, snark.zi_secondary()); + assert_eq!(z1_primary, vec![Scalar::from(436), Scalar::from(78)]); + assert_eq!(z1_secondary, vec![grumpkin::Fr::ZERO]); +} + +#[test] +#[traced_test] +fn test_ivc_compression_basic() { + 
let programs = vec![basic()]; + let switchboard_inputs = vec![InputMap::from([ + ("next_pc".to_string(), InputValue::Field(GenericFieldElement::from(0_u64))), + ( + "external_mul".to_string(), + InputValue::Vec(vec![ + InputValue::Field(GenericFieldElement::from(3_u64)), + InputValue::Field(GenericFieldElement::from(2_u64)), + ]), + ), + ( + "external_add".to_string(), + InputValue::Vec(vec![ + InputValue::Field(GenericFieldElement::from(10_u64)), + InputValue::Field(GenericFieldElement::from(7_u64)), + ]), + ), + ])]; + let switchboard = + Switchboard::new(programs, switchboard_inputs, vec![Scalar::from(2), Scalar::from(1)], 0); + let setup = Setup::new(switchboard); + let snark = run(&setup).unwrap(); + let compressed_proof = compress(&setup, &snark).unwrap(); + let (_, vk) = CompressedSNARK::setup(&setup.params).unwrap(); + compressed_proof + .proof + .verify(&setup.params, &vk, &snark.z0_primary(), &snark.z0_secondary()) + .unwrap(); +} + +#[test] +#[traced_test] +fn test_ivc_verify_poseidon() { + let programs = vec![poseidon()]; + let switchboard_inputs = vec![InputMap::from([( + "next_pc".to_string(), + InputValue::Field(GenericFieldElement::from(0_u64)), + )])]; + let switchboard = + Switchboard::new(programs, switchboard_inputs, vec![Scalar::from(2), Scalar::from(1)], 0); + let setup = Setup::new(switchboard); + let snark = run(&setup).unwrap(); + let (z1_primary, z1_secondary) = + snark.verify(&setup.params, &snark.z0_primary(), &snark.z0_secondary()).unwrap(); + assert_eq!(&z1_primary, snark.zi_primary()); + assert_eq!(&z1_secondary, snark.zi_secondary()); + assert_eq!(z1_primary, vec![Scalar::from(4), Scalar::from(1)]); + assert_eq!(z1_secondary, vec![grumpkin::Fr::ZERO]); +} + +#[test] +#[traced_test] +fn test_ivc_compression_poseidon() { + let programs = vec![poseidon()]; + let switchboard_inputs = vec![InputMap::from([( + "next_pc".to_string(), + InputValue::Field(GenericFieldElement::from(0_u64)), + )])]; + let switchboard = + 
Switchboard::new(programs, switchboard_inputs, vec![Scalar::from(2), Scalar::from(1)], 0); let setup = Setup::new(switchboard); let snark = run(&setup).unwrap(); let compressed_proof = compress(&setup, &snark).unwrap(); From 42142eb1f273b1b1473bca1f2f7f5accf91ea49c Mon Sep 17 00:00:00 2001 From: Colin Roberts Date: Sun, 2 Mar 2025 17:09:32 -0700 Subject: [PATCH 31/51] fix: all passing again --- Cargo.toml | 9 +++- Nargo.toml | 10 ++++- examples/add_external/Nargo.toml | 12 +++--- examples/basic/Nargo.toml | 12 +++--- examples/poseidon/Nargo.toml | 12 +++--- examples/square_zeroth/Nargo.toml | 12 +++--- examples/swap_memory/Nargo.toml | 12 +++--- frontend/Cargo.toml | 13 +++--- frontend/src/lib.rs | 5 +++ frontend/src/noir.rs | 71 +++++++++++++++++++++---------- frontend/src/setup.rs | 2 +- frontend/tests/ivc/mod.rs | 4 -- nivc/Nargo.toml | 10 ++--- 13 files changed, 111 insertions(+), 73 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index b39a4d6..b016d36 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -51,7 +51,7 @@ criterion={ version="0.5", features=["html_reports"] } # dev dependencies flate2 ="1.0" sha2 ="0.10.7" -tracing-test ={ version="0.2.4", features = ["no-env-filter"] } +tracing-test ={ version="0.2.4", features=["no-env-filter"] } expect-test ="1.4.1" anyhow ="1.0.72" tap ="1.0.1" @@ -65,3 +65,10 @@ lto =true codegen-units=1 panic ="abort" +[profile.dev] +opt-level =1 +debug =true +incremental =true +codegen-units=256 +lto =false +panic ="unwind" diff --git a/Nargo.toml b/Nargo.toml index 6663264..b2adf58 100644 --- a/Nargo.toml +++ b/Nargo.toml @@ -1,3 +1,9 @@ [workspace] -members = ["nivc", "examples/add_external", "examples/square_zeroth", "examples/swap_memory", "examples/poseidon", "examples/basic"] - +members=[ + "nivc", + "examples/add_external", + "examples/square_zeroth", + "examples/swap_memory", + "examples/poseidon", + "examples/basic", +] diff --git a/examples/add_external/Nargo.toml b/examples/add_external/Nargo.toml index 
cd37b1b..4fde61f 100644 --- a/examples/add_external/Nargo.toml +++ b/examples/add_external/Nargo.toml @@ -1,9 +1,9 @@ [package] -authors = ["Colin Roberts"] -compiler_version = ">=0.36.0" -name = "add_external" -type = "bin" -version = "0.1.0" +authors =["Colin Roberts"] +compiler_version=">=0.36.0" +name ="add_external" +type ="bin" +version ="0.1.0" [dependencies] -nivc = { path = "../../nivc" } +nivc={ path="../../nivc" } diff --git a/examples/basic/Nargo.toml b/examples/basic/Nargo.toml index 02c2dda..a09a1c4 100644 --- a/examples/basic/Nargo.toml +++ b/examples/basic/Nargo.toml @@ -1,9 +1,9 @@ [package] -authors = ["Colin Roberts"] -compiler_version = ">=0.36.0" -name = "basic" -type = "bin" -version = "0.1.0" +authors =["Colin Roberts"] +compiler_version=">=0.36.0" +name ="basic" +type ="bin" +version ="0.1.0" [dependencies] -nivc = { path = "../../nivc" } +nivc={ path="../../nivc" } diff --git a/examples/poseidon/Nargo.toml b/examples/poseidon/Nargo.toml index 50dc97b..472e553 100644 --- a/examples/poseidon/Nargo.toml +++ b/examples/poseidon/Nargo.toml @@ -1,9 +1,9 @@ [package] -authors = ["Colin Roberts"] -compiler_version = ">=0.36.0" -name = "poseidon" -type = "bin" -version = "0.1.0" +authors =["Colin Roberts"] +compiler_version=">=0.36.0" +name ="poseidon" +type ="bin" +version ="0.1.0" [dependencies] -nivc = { path = "../../nivc" } +nivc={ path="../../nivc" } diff --git a/examples/square_zeroth/Nargo.toml b/examples/square_zeroth/Nargo.toml index c8ab6e7..f203d4a 100644 --- a/examples/square_zeroth/Nargo.toml +++ b/examples/square_zeroth/Nargo.toml @@ -1,9 +1,9 @@ [package] -authors = ["Colin Roberts"] -compiler_version = ">=0.36.0" -name = "square_zeroth" -type = "bin" -version = "0.1.0" +authors =["Colin Roberts"] +compiler_version=">=0.36.0" +name ="square_zeroth" +type ="bin" +version ="0.1.0" [dependencies] -nivc = { path = "../../nivc" } +nivc={ path="../../nivc" } diff --git a/examples/swap_memory/Nargo.toml b/examples/swap_memory/Nargo.toml 
index e495845..27b99ed 100644 --- a/examples/swap_memory/Nargo.toml +++ b/examples/swap_memory/Nargo.toml @@ -1,9 +1,9 @@ [package] -authors = ["Colin Roberts"] -compiler_version = ">=0.36.0" -name = "swap_memory" -type = "bin" -version = "0.1.0" +authors =["Colin Roberts"] +compiler_version=">=0.36.0" +name ="swap_memory" +type ="bin" +version ="0.1.0" [dependencies] -nivc = { path = "../../nivc" } +nivc={ path="../../nivc" } diff --git a/frontend/Cargo.toml b/frontend/Cargo.toml index df49e2d..62029ea 100644 --- a/frontend/Cargo.toml +++ b/frontend/Cargo.toml @@ -33,13 +33,12 @@ js-sys ="0.3.64" wasm-bindgen-futures="0.4.37" [dev-dependencies] -tracing-test={workspace=true} -tempdir ="0.3.7" -tokio ={ version="1.43", features=["full"] } -client-side-prover-frontend = { path = ".", features = ["demo"] } -tempfile = "3.17" +tracing-test ={ workspace=true } +tempdir ="0.3.7" +tokio ={ version="1.43", features=["full"] } +client-side-prover-frontend={ path=".", features=["demo"] } +tempfile ="3.17" [features] verify-steps=[] -demo = [] - +demo =[] diff --git a/frontend/src/lib.rs b/frontend/src/lib.rs index edf93ef..f98f1a6 100644 --- a/frontend/src/lib.rs +++ b/frontend/src/lib.rs @@ -112,4 +112,9 @@ pub mod demo { std::fs::read("../target/poseidon.json").expect("Failed to read Noir program file"); NoirProgram::new(&bytecode) } + + pub fn http() -> NoirProgram { + let bytecode = std::fs::read("../target/http.json").expect("Failed to read Noir program file"); + NoirProgram::new(&bytecode) + } } diff --git a/frontend/src/noir.rs b/frontend/src/noir.rs index 2874e5e..294ec4e 100644 --- a/frontend/src/noir.rs +++ b/frontend/src/noir.rs @@ -17,7 +17,7 @@ use bellpepper_core::{ use client_side_prover::supernova::StepCircuit; use ff::PrimeField; use noirc_abi::{input_parser::InputValue, Abi, AbiType, InputMap}; -use tracing::{debug, error, info, trace}; +use tracing::{debug, error, info, trace, warn}; use super::*; @@ -144,42 +144,67 @@ impl StepCircuit for NoirProgram { 
// Process gates using R1CS approach for (idx, opcode) in self.circuit().opcodes.iter().enumerate() { if let Opcode::AssertZero(gate) = opcode { - // Convert the gate to R1CS form: A * B - C = 0 - // First, build the A, B, and C linear combinations - let mut a_lc = LinearCombination::zero(); - let mut b_lc = LinearCombination::zero(); - let mut c_lc = LinearCombination::zero(); + // Create a single linear combination that will be constrained to zero + let mut zero_lc = LinearCombination::zero(); - // Handle mul terms (a product of two variables) + // Handle mul terms by creating intermediate variables for each product for mul_term in &gate.mul_terms { let left_var = get_var(&mul_term.1, &mut allocated_vars, cs)?; let right_var = get_var(&mul_term.2, &mut allocated_vars, cs)?; - // Add to A and B (negated due to the `AssertZero` gate versus the A*B = C form) - a_lc = a_lc + (-convert_to_halo2_field(mul_term.0), left_var); - b_lc = b_lc + (Scalar::one(), right_var); + // Get the values if available + let left_val = acvm_witness_map + .as_ref() + .and_then(|map| map.get(&mul_term.1)) + .map(|&v| convert_to_halo2_field(v)); + + let right_val = acvm_witness_map + .as_ref() + .and_then(|map| map.get(&mul_term.2)) + .map(|&v| convert_to_halo2_field(v)); + + // Create a new variable for the product + let product = AllocatedNum::alloc( + cs.namespace(|| format!("prod_g{idx}_t{}", mul_term.1.as_usize())), + || { + let l = left_val.unwrap_or_else(Scalar::zero); + let r = right_val.unwrap_or_else(Scalar::zero); + Ok(l * r) + }, + )?; + + // Enforce that this is indeed the product + cs.enforce( + || format!("prod_constraint_g{idx}_t{}", mul_term.1.as_usize()), + |lc| lc + left_var, + |lc| lc + right_var, + |lc| lc + product.get_variable(), + ); + + // Add this product to our zero linear combination with the coefficient + zero_lc = zero_lc + (convert_to_halo2_field(mul_term.0), product.get_variable()); } - // Handle linear terms (these go into C with negative coefficients) + // 
Handle linear terms (these go into the zero linear combination) for add_term in &gate.linear_combinations { let var = get_var(&add_term.1, &mut allocated_vars, cs)?; - c_lc = c_lc + (convert_to_halo2_field(add_term.0), var); + zero_lc = zero_lc + (convert_to_halo2_field(add_term.0), var); } - // Handle constant term (this goes into C as well) + // Handle constant term if !gate.q_c.is_zero() { - c_lc = c_lc + (convert_to_halo2_field(gate.q_c), CS::one()); + zero_lc = zero_lc + (convert_to_halo2_field(gate.q_c), CS::one()); } - // Enforce A * B = C + // Enforce that the entire expression equals zero cs.enforce( || format!("constraint_g{idx}"), - |_| a_lc.clone(), - |_| b_lc.clone(), - |_| c_lc.clone(), + |_| LinearCombination::zero() + CS::one(), + |_| zero_lc.clone(), + |_| LinearCombination::zero(), ); } else { - panic!("non-AssertZero gate {idx} of type {opcode:?}"); + warn!("non-AssertZero gate {idx} of type {opcode:?}"); } } @@ -249,7 +274,7 @@ mod tests { use client_side_prover::bellpepper::shape_cs::ShapeCS; use super::*; - use crate::demo::{basic, poseidon, square_zeroth}; + use crate::demo::{basic, http, poseidon, square_zeroth}; fn add_external() -> NoirProgram { let json_path = "../target/add_external.json"; @@ -307,7 +332,7 @@ mod tests { ]; let _ = program.synthesize(&mut cs, pc.as_ref(), z.as_ref()).unwrap(); - assert_eq!(cs.num_constraints(), 3); + assert_eq!(cs.num_constraints(), 5); } #[test] @@ -337,7 +362,7 @@ mod tests { ]; let _ = program.synthesize(&mut cs, pc.as_ref(), z.as_ref()).unwrap(); - assert_eq!(cs.num_constraints(), 3); + assert_eq!(cs.num_constraints(), 4); } #[test] @@ -352,6 +377,6 @@ mod tests { ]; let _ = program.synthesize(&mut cs, pc.as_ref(), z.as_ref()).unwrap(); - assert_eq!(cs.num_constraints(), 320); + assert_eq!(cs.num_constraints(), 560); } } diff --git a/frontend/src/setup.rs b/frontend/src/setup.rs index 94d00b4..7c04397 100644 --- a/frontend/src/setup.rs +++ b/frontend/src/setup.rs @@ -150,7 +150,7 @@ mod tests { 
#[test] fn test_setup_and_params() { let setup = Setup::new(Switchboard::new(vec![square_zeroth()], vec![], vec![], 0)); - assert_eq!(setup.params.num_constraints_and_variables(0), (10008, 10000)); + assert_eq!(setup.params.num_constraints_and_variables(0), (10009, 10001)); } #[test] diff --git a/frontend/tests/ivc/mod.rs b/frontend/tests/ivc/mod.rs index 1ae6fa9..55012f1 100644 --- a/frontend/tests/ivc/mod.rs +++ b/frontend/tests/ivc/mod.rs @@ -229,8 +229,6 @@ fn test_ivc_verify_poseidon() { snark.verify(&setup.params, &snark.z0_primary(), &snark.z0_secondary()).unwrap(); assert_eq!(&z1_primary, snark.zi_primary()); assert_eq!(&z1_secondary, snark.zi_secondary()); - assert_eq!(z1_primary, vec![Scalar::from(4), Scalar::from(1)]); - assert_eq!(z1_secondary, vec![grumpkin::Fr::ZERO]); } #[test] @@ -253,5 +251,3 @@ fn test_ivc_compression_poseidon() { .verify(&setup.params, &vk, &snark.z0_primary(), &snark.z0_secondary()) .unwrap(); } - -// TODO: Add some fail cases for circuits. diff --git a/nivc/Nargo.toml b/nivc/Nargo.toml index 8de5e69..8cff867 100644 --- a/nivc/Nargo.toml +++ b/nivc/Nargo.toml @@ -1,6 +1,6 @@ [package] -authors = ["Colin Roberts"] -compiler_version = ">=0.36.0" -name = "nivc" -type = "lib" -version = "0.1.0" +authors =["Colin Roberts"] +compiler_version=">=0.36.0" +name ="nivc" +type ="lib" +version ="0.1.0" From 393e0bcbd073d8a0c67b95ef1daa49e41f16b9ea Mon Sep 17 00:00:00 2001 From: Colin Roberts Date: Sun, 2 Mar 2025 18:02:51 -0700 Subject: [PATCH 32/51] feat: clean af --- examples/add_external/src/main.nr | 14 ++- examples/basic/src/main.nr | 18 ++-- examples/poseidon/src/main.nr | 9 +- examples/square_zeroth/src/main.nr | 12 ++- examples/swap_memory/src/main.nr | 9 +- frontend/src/noir.rs | 142 ++++++++++++++++++++++++----- frontend/tests/ivc/mod.rs | 55 +++-------- nivc/src/lib.nr | 4 +- 8 files changed, 170 insertions(+), 93 deletions(-) diff --git a/examples/add_external/src/main.nr b/examples/add_external/src/main.nr index 
c9eaf44..0452191 100644 --- a/examples/add_external/src/main.nr +++ b/examples/add_external/src/main.nr @@ -1,10 +1,16 @@ -use nivc::FoldingOutput; +use nivc::FoldingVariables; /// Add two external values to two registers that are folded across circuits. pub fn main( - registers: pub [Field; 2], + folding_variables: pub FoldingVariables<2>, external: [Field; 2], next_pc: Field, -) -> pub FoldingOutput<2> { - FoldingOutput { registers: [external[0] + registers[0], external[1] + registers[1]], next_pc } +) -> pub FoldingVariables<2> { + FoldingVariables { + registers: [ + external[0] + folding_variables.registers[0], + external[1] + folding_variables.registers[1], + ], + program_counter: next_pc, + } } diff --git a/examples/basic/src/main.nr b/examples/basic/src/main.nr index 5d7b67f..02d122a 100644 --- a/examples/basic/src/main.nr +++ b/examples/basic/src/main.nr @@ -1,17 +1,15 @@ -use nivc::FoldingOutput; +use nivc::FoldingVariables; /// Checks general arithmetic operations. pub fn main( - registers: pub [Field; 2], - external_mul: [Field; 2], - external_add: [Field; 2], - next_pc: Field, -) -> pub FoldingOutput<2> { - FoldingOutput { + folding_variables: pub FoldingVariables<1>, + external_mul: Field, + external_add: Field, +) -> pub FoldingVariables<1> { + FoldingVariables { registers: [ - external_mul[0] * registers[0] + external_add[0] + 420, - external_mul[1] * registers[1] + external_add[1] + 69, + external_mul * folding_variables.registers[0] + external_add + 420, ], - next_pc, + program_counter: folding_variables.program_counter, } } diff --git a/examples/poseidon/src/main.nr b/examples/poseidon/src/main.nr index df76667..11914c6 100644 --- a/examples/poseidon/src/main.nr +++ b/examples/poseidon/src/main.nr @@ -1,6 +1,9 @@ -use nivc::FoldingOutput; +use nivc::FoldingVariables; use std::hash::poseidon::bn254::hash_2; -pub fn main(registers: pub [Field; 2], next_pc: Field) -> pub FoldingOutput<2> { - FoldingOutput { registers: [hash_2(registers), 0], next_pc 
} +pub fn main(folding_variables: pub FoldingVariables<2>) -> pub FoldingVariables<2> { + FoldingVariables { + registers: [hash_2(folding_variables.registers), 0], + program_counter: folding_variables.program_counter, + } } diff --git a/examples/square_zeroth/src/main.nr b/examples/square_zeroth/src/main.nr index 92ea52a..e5e030e 100644 --- a/examples/square_zeroth/src/main.nr +++ b/examples/square_zeroth/src/main.nr @@ -1,7 +1,13 @@ -use nivc::FoldingOutput; +use nivc::FoldingVariables; /// Square only the first register. -pub fn main(registers: pub [Field; 2], next_pc: Field) -> pub FoldingOutput<2> { - FoldingOutput { registers: [registers[0] * registers[0], registers[1]], next_pc } +pub fn main(folding_variables: pub FoldingVariables<2>, next_pc: Field) -> pub FoldingVariables<2> { + FoldingVariables { + registers: [ + folding_variables.registers[0] * folding_variables.registers[0], + folding_variables.registers[1], + ], + program_counter: next_pc, + } } diff --git a/examples/swap_memory/src/main.nr b/examples/swap_memory/src/main.nr index 9baf654..dfaea29 100644 --- a/examples/swap_memory/src/main.nr +++ b/examples/swap_memory/src/main.nr @@ -1,6 +1,9 @@ -use nivc::FoldingOutput; +use nivc::FoldingVariables; /// Swap the two registers. 
-pub fn main(registers: pub [Field; 2], next_pc: Field) -> pub FoldingOutput<2> { - FoldingOutput { registers: [registers[1], registers[0]], next_pc } +pub fn main(folding_variables: pub FoldingVariables<2>, next_pc: Field) -> pub FoldingVariables<2> { + FoldingVariables { + registers: [folding_variables.registers[1], folding_variables.registers[0]], + program_counter: next_pc, + } } diff --git a/frontend/src/noir.rs b/frontend/src/noir.rs index 294ec4e..af010bb 100644 --- a/frontend/src/noir.rs +++ b/frontend/src/noir.rs @@ -1,4 +1,4 @@ -use std::collections::HashMap; +use std::collections::{BTreeMap, HashMap}; use acvm::{ acir::{ @@ -63,8 +63,68 @@ impl NoirProgram { } impl StepCircuit for NoirProgram { - // TODO: This is a bit hacky. We need to add 1 for the PC - fn arity(&self) -> usize { self.circuit().public_parameters.0.len() } + fn arity(&self) -> usize { + // Find input type with FoldingVariables type (regardless of parameter name) + let input_type = self + .abi + .parameters + .iter() + .find(|param| { + if let AbiType::Struct { path, .. } = ¶m.typ { + path == "nivc::FoldingVariables" + } else { + false + } + }) + .map(|param| ¶m.typ); + + // Get the return type + let return_type = self.abi.return_type.as_ref().map(|ret| &ret.abi_type); + + trace!("Input type: {:?}", input_type); + trace!("Return type: {:?}", return_type); + + // Extract register length from a FoldingVariables struct + let get_register_length = |typ: &AbiType| -> usize { + if let AbiType::Struct { fields, .. } = typ { + if let Some((_, AbiType::Array { length, .. 
})) = + fields.iter().find(|(name, _)| name == "registers") + { + *length as usize + } else { + panic!("FoldingVariables missing registers array or invalid type") + } + } else { + panic!("Expected struct type for FoldingVariables") + } + }; + + // Check types and extract register length + match (input_type, return_type) { + (Some(input), Some(output)) => { + // Check that both are FoldingVariables + if let (AbiType::Struct { path: in_path, .. }, AbiType::Struct { path: out_path, .. }) = + (input, output) + { + if in_path == "nivc::FoldingVariables" && out_path == "nivc::FoldingVariables" { + let in_len = get_register_length(input); + let out_len = get_register_length(output); + + if in_len != out_len { + panic!( + "Input and output must have same number of registers: {} vs {}", + in_len, out_len + ); + } + + return in_len; + } + } + panic!("Both input and output must be nivc::FoldingVariables structs") + }, + _ => panic!("Missing input or output FoldingVariables type"), + } + } fn circuit_index(&self) -> usize { self.index } @@ -89,19 +149,30 @@ impl StepCircuit for NoirProgram { ); // Prepare inputs with registers - // TODO: Can we reove this clone? - let mut inputs_with_registers = inputs.clone(); - inputs_with_registers.insert( - "registers".to_string(), - InputValue::Vec( - z.iter() - .filter_map(|var| var.get_value().map(|v| InputValue::Field(convert_to_acir_field(v)))) - .collect(), + // TODO: Can we remove this clone since it may be a lot of data? 
+ let mut inputs_with_folding_variables = inputs.clone(); + // Create folding variables + let folding_variables = InputValue::Struct(BTreeMap::from([ + ( + "registers".to_string(), + InputValue::Vec( + z.iter() + .filter_map(|var| { + var.get_value().map(|v| InputValue::Field(convert_to_acir_field(v))) + }) + .collect(), + ), ), - ); + ( + // TODO: This is a bit hacky with unwraps + "program_counter".to_string(), + InputValue::Field(convert_to_acir_field(pc.unwrap().get_value().unwrap())), + ), + ])); + inputs_with_folding_variables.insert("folding_variables".to_string(), folding_variables); // Encode inputs through ABI - if let Ok(encoded_map) = self.abi.encode(&inputs_with_registers, None) { + if let Ok(encoded_map) = self.abi.encode(&inputs_with_folding_variables, None) { for (witness, value) in encoded_map { acvm.overwrite_witness(witness, value); } @@ -231,23 +302,46 @@ impl StepCircuit for NoirProgram { if let Some(noirc_abi::AbiReturnType { abi_type: AbiType::Struct { fields, .. }, .. }) = &self.abi.return_type { - // TODO: This should be an error. - let registers_length = fields + // Print debug information + trace!("Return type fields: {:?}", fields.iter().map(|(name, _)| name).collect::>()); + trace!("Return values length: {}", return_values.len()); + + // Check if we have the expected FoldingVariables structure + let registers_field = fields .iter() .find(|(name, _)| name == "registers") - .map(|(_, typ)| match typ { - AbiType::Array { length, .. } => *length as usize, - _ => panic!("Expected registers to be an array type"), - }) .unwrap_or_else(|| panic!("Missing 'registers' field")); - let next_pc_index = registers_length; + let registers_length = match ®isters_field.1 { + AbiType::Array { length, .. 
} => *length as usize, + _ => panic!("Expected registers to be an array type"), + }; + + trace!("Registers length from ABI: {}", registers_length); + + // Find the index of the program_counter in the return values + let pc_index = fields + .iter() + .position(|(name, _)| name == "program_counter") + .unwrap_or_else(|| panic!("Missing 'program_counter' field")); - if next_pc_index < return_values.len() { - let next_pc = Some(return_values[next_pc_index].clone()); - dbg!(&next_pc); - let registers = return_values[..registers_length].to_vec(); + trace!("Program counter index: {}", pc_index); + + // The Noir ABI returns fields in order, so we can directly map them to return_values + // First n values are the registers, followed by program_counter + if return_values.len() >= registers_length + 1 { + let registers = return_values[0..registers_length].to_vec(); + let next_pc = Some(return_values[registers_length].clone()); + + trace!("Extracted {} registers and program counter", registers.len()); return Ok((next_pc, registers)); + } else { + error!( + "Not enough return values. Expected at least {}, got {}", + registers_length + 1, + return_values.len() + ); + return Err(SynthesisError::Unsatisfiable); } } diff --git a/frontend/tests/ivc/mod.rs b/frontend/tests/ivc/mod.rs index 55012f1..5d6e336 100644 --- a/frontend/tests/ivc/mod.rs +++ b/frontend/tests/ivc/mod.rs @@ -15,6 +15,7 @@ use super::*; #[traced_test] fn test_ivc() { let programs = vec![square_zeroth()]; + // TODO: This is a hack to get the correct number of folds when there are no external inputs. 
let switchboard_inputs = vec![ InputMap::from([("next_pc".to_string(), InputValue::Field(GenericFieldElement::from(0_u64)))]), InputMap::from([("next_pc".to_string(), InputValue::Field(GenericFieldElement::from(0_u64)))]), @@ -67,7 +68,7 @@ fn test_ivc_private_inputs() { #[test] #[traced_test] -fn test_mock_noir_nivc() { +fn test_nivc() { let programs = vec![add_external(), square_zeroth(), swap_memory()]; let switchboard_inputs = vec![ InputMap::from([ @@ -152,31 +153,17 @@ fn test_ivc_compression() { fn test_ivc_verify_basic() { let programs = vec![basic()]; let switchboard_inputs = vec![InputMap::from([ - ("next_pc".to_string(), InputValue::Field(GenericFieldElement::from(0_u64))), - ( - "external_mul".to_string(), - InputValue::Vec(vec![ - InputValue::Field(GenericFieldElement::from(3_u64)), - InputValue::Field(GenericFieldElement::from(2_u64)), - ]), - ), - ( - "external_add".to_string(), - InputValue::Vec(vec![ - InputValue::Field(GenericFieldElement::from(10_u64)), - InputValue::Field(GenericFieldElement::from(7_u64)), - ]), - ), + ("external_mul".to_string(), InputValue::Field(GenericFieldElement::from(3_u64))), + ("external_add".to_string(), InputValue::Field(GenericFieldElement::from(10_u64))), ])]; - let switchboard = - Switchboard::new(programs, switchboard_inputs, vec![Scalar::from(2), Scalar::from(1)], 0); + let switchboard = Switchboard::new(programs, switchboard_inputs, vec![Scalar::from(2)], 0); let setup = Setup::new(switchboard); let snark = run(&setup).unwrap(); let (z1_primary, z1_secondary) = snark.verify(&setup.params, &snark.z0_primary(), &snark.z0_secondary()).unwrap(); assert_eq!(&z1_primary, snark.zi_primary()); assert_eq!(&z1_secondary, snark.zi_secondary()); - assert_eq!(z1_primary, vec![Scalar::from(436), Scalar::from(78)]); + assert_eq!(z1_primary, vec![Scalar::from(436)]); assert_eq!(z1_secondary, vec![grumpkin::Fr::ZERO]); } @@ -185,24 +172,10 @@ fn test_ivc_verify_basic() { fn test_ivc_compression_basic() { let programs = 
vec![basic()]; let switchboard_inputs = vec![InputMap::from([ - ("next_pc".to_string(), InputValue::Field(GenericFieldElement::from(0_u64))), - ( - "external_mul".to_string(), - InputValue::Vec(vec![ - InputValue::Field(GenericFieldElement::from(3_u64)), - InputValue::Field(GenericFieldElement::from(2_u64)), - ]), - ), - ( - "external_add".to_string(), - InputValue::Vec(vec![ - InputValue::Field(GenericFieldElement::from(10_u64)), - InputValue::Field(GenericFieldElement::from(7_u64)), - ]), - ), + ("external_mul".to_string(), InputValue::Field(GenericFieldElement::from(3_u64))), + ("external_add".to_string(), InputValue::Field(GenericFieldElement::from(10_u64))), ])]; - let switchboard = - Switchboard::new(programs, switchboard_inputs, vec![Scalar::from(2), Scalar::from(1)], 0); + let switchboard = Switchboard::new(programs, switchboard_inputs, vec![Scalar::from(2)], 0); let setup = Setup::new(switchboard); let snark = run(&setup).unwrap(); let compressed_proof = compress(&setup, &snark).unwrap(); @@ -217,10 +190,7 @@ fn test_ivc_compression_basic() { #[traced_test] fn test_ivc_verify_poseidon() { let programs = vec![poseidon()]; - let switchboard_inputs = vec![InputMap::from([( - "next_pc".to_string(), - InputValue::Field(GenericFieldElement::from(0_u64)), - )])]; + let switchboard_inputs = vec![InputMap::new()]; let switchboard = Switchboard::new(programs, switchboard_inputs, vec![Scalar::from(2), Scalar::from(1)], 0); let setup = Setup::new(switchboard); @@ -235,10 +205,7 @@ fn test_ivc_verify_poseidon() { #[traced_test] fn test_ivc_compression_poseidon() { let programs = vec![poseidon()]; - let switchboard_inputs = vec![InputMap::from([( - "next_pc".to_string(), - InputValue::Field(GenericFieldElement::from(0_u64)), - )])]; + let switchboard_inputs = vec![InputMap::new()]; let switchboard = Switchboard::new(programs, switchboard_inputs, vec![Scalar::from(2), Scalar::from(1)], 0); let setup = Setup::new(switchboard); diff --git a/nivc/src/lib.nr 
b/nivc/src/lib.nr index d477996..49ac633 100644 --- a/nivc/src/lib.nr +++ b/nivc/src/lib.nr @@ -1,6 +1,6 @@ -pub struct FoldingOutput { +pub struct FoldingVariables { pub registers: [Field; N], - pub next_pc: Field, + pub program_counter: Field, } // TODO: It would be nice to be able to force the `main` function to have a specific signature. In particular, we want: From fbf9b2ffdab6738891d7fff5a5e767cb01cf61aa Mon Sep 17 00:00:00 2001 From: Colin Roberts Date: Tue, 4 Mar 2025 05:34:53 -0700 Subject: [PATCH 33/51] example: collatz vm --- Nargo.toml | 2 ++ examples/collatz_even/Nargo.toml | 9 +++++++++ examples/collatz_even/src/main.nr | 25 +++++++++++++++++++++++++ examples/collatz_odd/Nargo.toml | 9 +++++++++ examples/collatz_odd/src/main.nr | 14 ++++++++++++++ frontend/src/lib.rs | 11 +++++++++-- frontend/tests/ivc/mod.rs | 16 ++++++++++++++++ 7 files changed, 84 insertions(+), 2 deletions(-) create mode 100644 examples/collatz_even/Nargo.toml create mode 100644 examples/collatz_even/src/main.nr create mode 100644 examples/collatz_odd/Nargo.toml create mode 100644 examples/collatz_odd/src/main.nr diff --git a/Nargo.toml b/Nargo.toml index b2adf58..d6dd3b6 100644 --- a/Nargo.toml +++ b/Nargo.toml @@ -6,4 +6,6 @@ members=[ "examples/swap_memory", "examples/poseidon", "examples/basic", + "examples/collatz_even", + "examples/collatz_odd", ] diff --git a/examples/collatz_even/Nargo.toml b/examples/collatz_even/Nargo.toml new file mode 100644 index 0000000..45b7a6c --- /dev/null +++ b/examples/collatz_even/Nargo.toml @@ -0,0 +1,9 @@ +[package] +authors =["Colin Roberts"] +compiler_version=">=0.36.0" +name ="collatz_even" +type ="bin" +version ="0.1.0" + +[dependencies] +nivc={ path="../../nivc" } diff --git a/examples/collatz_even/src/main.nr b/examples/collatz_even/src/main.nr new file mode 100644 index 0000000..27c35c3 --- /dev/null +++ b/examples/collatz_even/src/main.nr @@ -0,0 +1,25 @@ +use nivc::FoldingVariables; + +/// When the value is even, divide it by 2. 
+/// If the value is 1, return -1 to indicate the end of the sequence. +/// Otherwise, return 0 or 1 to indicate the next program counter, i.e., collatz_even or collatz_odd. +pub fn main(folding_variables: pub FoldingVariables<1>) -> pub FoldingVariables<1> { + // Get the value from the first register as a u64 for easier manipulation. + let value = folding_variables.registers[0] as u64; + + // Run the even-value transformation. { f(n) = n / 2 } + let next_value = value / 2; + + // If the next value is 1, we've reached the end of the sequence (the fixed cycle predicted by the Collatz conjecture). + // We return -1 to indicate that the sequence has ended. + let next_pc = if next_value % 2 == 0 { + 0 + } else if next_value == 1 { + -1 + } else { + 1 + }; + + // Return the updated folding variables with the new value and program counter. + FoldingVariables { registers: [next_value as Field], program_counter: next_pc } +} diff --git a/examples/collatz_odd/Nargo.toml b/examples/collatz_odd/Nargo.toml new file mode 100644 index 0000000..8761dd1 --- /dev/null +++ b/examples/collatz_odd/Nargo.toml @@ -0,0 +1,9 @@ +[package] +authors =["Colin Roberts"] +compiler_version=">=0.36.0" +name ="collatz_odd" +type ="bin" +version ="0.1.0" + +[dependencies] +nivc={ path="../../nivc" } diff --git a/examples/collatz_odd/src/main.nr b/examples/collatz_odd/src/main.nr new file mode 100644 index 0000000..44e5eee --- /dev/null +++ b/examples/collatz_odd/src/main.nr @@ -0,0 +1,14 @@ +use nivc::FoldingVariables; + +/// When the value is odd, multiply it by 3 and add 1. +/// If the result is even, return 0 to indicate the next program counter, i.e., collatz_even. +/// Otherwise, return 1 to indicate the next program counter, i.e., collatz_odd. +/// Note, { f(n) = 3n + 1 } can never be 1, so we don't need to check for that. 
+pub fn main(folding_variables: pub FoldingVariables<1>) -> pub FoldingVariables<1> { + let value = folding_variables.registers[0] as u64; + + let next_value = 3 * value + 1; + let next_pc = if next_value % 2 == 0 { 0 } else { 1 }; + + FoldingVariables { registers: [next_value as Field], program_counter: next_pc } +} diff --git a/frontend/src/lib.rs b/frontend/src/lib.rs index f98f1a6..7f3c0bd 100644 --- a/frontend/src/lib.rs +++ b/frontend/src/lib.rs @@ -113,8 +113,15 @@ pub mod demo { NoirProgram::new(&bytecode) } - pub fn http() -> NoirProgram { - let bytecode = std::fs::read("../target/http.json").expect("Failed to read Noir program file"); + pub fn collatz_even() -> NoirProgram { + let bytecode = + std::fs::read("../target/collatz_even.json").expect("Failed to read Noir program file"); + NoirProgram::new(&bytecode) + } + + pub fn collatz_odd() -> NoirProgram { + let bytecode = + std::fs::read("../target/collatz_odd.json").expect("Failed to read Noir program file"); NoirProgram::new(&bytecode) } } diff --git a/frontend/tests/ivc/mod.rs b/frontend/tests/ivc/mod.rs index 5d6e336..f386f02 100644 --- a/frontend/tests/ivc/mod.rs +++ b/frontend/tests/ivc/mod.rs @@ -218,3 +218,19 @@ fn test_ivc_compression_poseidon() { .verify(&setup.params, &vk, &snark.z0_primary(), &snark.z0_secondary()) .unwrap(); } + +#[test] +#[traced_test] +fn test_collatz() { + let programs = vec![collatz_even(), collatz_odd()]; + let switchboard_inputs = vec![InputMap::new()]; + let switchboard = Switchboard::new(programs, switchboard_inputs, vec![Scalar::from(2)], 0); + let setup = Setup::new(switchboard); + let snark = run(&setup).unwrap(); + let (z1_primary, z1_secondary) = + snark.verify(&setup.params, &snark.z0_primary(), &snark.z0_secondary()).unwrap(); + dbg!(&z1_primary); + dbg!(&snark.program_counter()); + assert_eq!(&z1_primary, snark.zi_primary()); + assert_eq!(&z1_secondary, snark.zi_secondary()); +} From e03fc1e4394bca66b3a431a3ee46f37830d925d1 Mon Sep 17 00:00:00 2001 From: Colin 
Roberts Date: Tue, 4 Mar 2025 06:32:33 -0700 Subject: [PATCH 34/51] feat: RAM and ROM variants --- frontend/src/noir.rs | 5 -- frontend/src/program.rs | 124 +++++++++++++++++++++++++++++++++++--- frontend/src/setup.rs | 28 +++++---- frontend/tests/ivc/mod.rs | 74 +++++++++++++++++------ 4 files changed, 187 insertions(+), 44 deletions(-) diff --git a/frontend/src/noir.rs b/frontend/src/noir.rs index af010bb..0c286c4 100644 --- a/frontend/src/noir.rs +++ b/frontend/src/noir.rs @@ -81,9 +81,6 @@ impl StepCircuit for NoirProgram { // Get the return type let return_type = self.abi.return_type.as_ref().map(|ret| &ret.abi_type); - trace!("Input type: {:?}", input_type); - trace!("Return type: {:?}", return_type); - // Extract register length from a FoldingVariables struct let get_register_length = |typ: &AbiType| -> usize { if let AbiType::Struct { fields, .. } = typ { @@ -274,8 +271,6 @@ impl StepCircuit for NoirProgram { |_| zero_lc.clone(), |_| LinearCombination::zero(), ); - } else { - warn!("non-AssertZero gate {idx} of type {opcode:?}"); } } diff --git a/frontend/src/program.rs b/frontend/src/program.rs index c816f30..02caf5d 100644 --- a/frontend/src/program.rs +++ b/frontend/src/program.rs @@ -19,20 +19,36 @@ use crate::{ /// Compressed proof type pub type CompressedProof = FoldingProof, Scalar>; +pub trait Memory { + type Data; +} + +#[derive(Debug, Clone)] +pub struct ROM {} +impl Memory for ROM { + type Data = Vec; +} + +#[derive(Debug, Clone)] +pub struct RAM {} +impl Memory for RAM { + type Data = (); +} + // NOTE: These are `pub(crate)` to avoid exposing the `index` field to the // outside world. 
#[derive(Debug, Clone)] -pub struct Switchboard { +pub struct Switchboard { pub(crate) circuits: Vec, pub(crate) public_input: Vec, pub(crate) initial_circuit_index: usize, - pub(crate) switchboard_inputs: Vec, + pub(crate) switchboard_inputs: M::Data, } -impl Switchboard { +impl Switchboard { pub fn new( mut circuits: Vec, - switchboard_inputs: Vec, + switchboard_inputs: M::Data, public_input: Vec, initial_circuit_index: usize, ) -> Self { @@ -43,7 +59,7 @@ impl Switchboard { } } -impl NonUniformCircuit for Switchboard { +impl NonUniformCircuit for Switchboard { type C1 = NoirProgram; type C2 = TrivialCircuit; @@ -58,7 +74,7 @@ impl NonUniformCircuit for Switchboard { fn initial_circuit_index(&self) -> usize { self.initial_circuit_index } } -pub fn run(setup: &Setup) -> Result, ProofError> { +pub fn run_rom(setup: &Setup>) -> Result, ProofError> { info!("Starting SuperNova program..."); let z0_primary = &setup.switchboard.public_input; @@ -134,8 +150,100 @@ pub fn run(setup: &Setup) -> Result, ProofError> { Ok(recursive_snark.unwrap()) } -pub fn compress( - setup: &Setup, +pub fn run_ram(setup: &Setup>) -> Result, ProofError> { + info!("Starting SuperNova program..."); + + let z0_primary = &setup.switchboard.public_input; + let z0_secondary = &[grumpkin::Fr::ZERO]; + + let time = std::time::Instant::now(); + + // Initialize recursive SNARK as None + let mut recursive_snark: Option> = None; + let termination_pc = Scalar::ZERO - Scalar::ONE; + + loop { + // Determine program counter based on current state + let program_counter = match &recursive_snark { + None => setup.switchboard.initial_circuit_index(), + Some(snark) => { + dbg!(&snark.program_counter()); + let current_pc = snark.program_counter(); + if current_pc == termination_pc { + break; + } + + // Convert Scalar to usize for circuit indexing + let pc_bytes = current_pc.to_bytes(); + + // Check if higher bytes are non-zero (which would be truncated in usize conversion) + let usize_size = 
std::mem::size_of::(); + if pc_bytes[usize_size..].iter().any(|&b| b != 0) { + return Err(ProofError::Other("Program counter value too large for usize".into())); + } + + // Convert the relevant bytes to usize (using little-endian order) + let mut pc_value = 0usize; + for (i, &b) in pc_bytes.iter().take(usize_size).enumerate() { + pc_value |= (b as usize) << (i * 8); + } + + pc_value + }, + }; + + debug!("Program counter = {:?}", program_counter); + + // Prepare circuits for this step + dbg!(&program_counter); + let mut circuit_primary = setup.switchboard.primary_circuit(program_counter); + // TODO: This is a hack to get the witness to be non-empty so ACVM is spawned + circuit_primary.witness = Some(InputMap::new()); + let circuit_secondary = setup.switchboard.secondary_circuit(); + + // Initialize or update the recursive SNARK + if recursive_snark.is_none() { + // Initialize a new recursive SNARK for the first step + recursive_snark = Some(RecursiveSNARK::new( + &setup.params, + &setup.switchboard, + &circuit_primary, + &circuit_secondary, + z0_primary, + z0_secondary, + )?); + } + + // Prove the next step + info!("Proving single step..."); + let snark = recursive_snark.as_mut().unwrap(); + snark.prove_step(&setup.params, &circuit_primary, &circuit_secondary)?; + info!("Done proving single step..."); + dbg!(snark.program_counter()); + } + + trace!("Recursive loop of `program::run()` elapsed: {:?}", time.elapsed()); + + // Return the completed recursive SNARK + Ok(recursive_snark.unwrap()) +} + +pub fn run(setup: &Setup>) -> Result, ProofError> { + if std::any::type_name::() == std::any::type_name::() { + // Safety: We've verified the type matches ROM + let setup = unsafe { std::mem::transmute::<&Setup>, &Setup>>(setup) }; + run_rom(setup) + } else if std::any::type_name::() == std::any::type_name::() { + // Safety: We've verified the type matches RAM + let setup = unsafe { std::mem::transmute::<&Setup>, &Setup>>(setup) }; + run_ram(setup) + } else { + 
Err(ProofError::Other("Unsupported memory type".into())) + } +} + +pub fn compress( + setup: &Setup>, recursive_snark: &RecursiveSNARK, ) -> Result { let pk = CompressedSNARK::::initialize_pk( diff --git a/frontend/src/setup.rs b/frontend/src/setup.rs index 7c04397..19eede7 100644 --- a/frontend/src/setup.rs +++ b/frontend/src/setup.rs @@ -10,7 +10,7 @@ use tracing::debug; use crate::{ error::ProofError, noir::NoirProgram, - program::{self, Switchboard}, + program::{self, Memory, Switchboard}, AuxParams, E1, S1, S2, }; @@ -21,17 +21,21 @@ pub trait Status { } #[derive(Debug, Clone)] -pub struct Ready; +pub struct Ready { + _marker: std::marker::PhantomData, +} -impl Status for Ready { +impl Status for Ready { type PublicParams = PublicParams; - type Switchboard = Switchboard; + type Switchboard = Switchboard; } #[derive(Debug, Clone)] -pub struct Empty; +pub struct Empty { + _marker: std::marker::PhantomData, +} -impl Status for Empty { +impl Status for Empty { type PublicParams = AuxParams; type Switchboard = (); } @@ -57,8 +61,8 @@ impl PartialEq for Setup { } } -impl Setup { - pub fn new(switchboard: Switchboard) -> Self { +impl Setup> { + pub fn new(switchboard: Switchboard) -> Self { let public_params = PublicParams::setup(&switchboard, &*default_ck_hint(), &*default_ck_hint()); let (pk, _vk) = CompressedSNARK::::setup(&public_params).unwrap(); @@ -70,7 +74,7 @@ impl Setup { } } - fn into_empty(self) -> Setup { + fn into_empty(self) -> Setup> { Setup { params: self.params.into_parts().1, vk_digest_primary: self.vk_digest_primary, @@ -92,8 +96,8 @@ impl Setup { } } -impl Setup { - pub fn into_ready(self, switchboard: Switchboard) -> Setup { +impl Setup> { + pub fn into_ready(self, switchboard: Switchboard) -> Setup> { Setup { params: PublicParams::from_parts(get_circuit_shapes(&switchboard), self.params), vk_digest_primary: self.vk_digest_primary, @@ -103,7 +107,7 @@ impl Setup { } } // TODO: We may be able to just use rkyv -impl FastSerde for Setup { +impl 
FastSerde for Setup> { /// Initialize ProvingParams from an efficiently serializable data format. fn from_bytes(bytes: &[u8]) -> Result { let mut cursor = Cursor::new(bytes); diff --git a/frontend/tests/ivc/mod.rs b/frontend/tests/ivc/mod.rs index f386f02..37c5d1b 100644 --- a/frontend/tests/ivc/mod.rs +++ b/frontend/tests/ivc/mod.rs @@ -1,7 +1,7 @@ use acvm::acir::acir_field::GenericFieldElement; use client_side_prover::supernova::snark::CompressedSNARK; use client_side_prover_frontend::{ - program::{compress, run, Switchboard}, + program::{compress, run, Switchboard, RAM, ROM}, setup::Setup, Scalar, }; @@ -21,8 +21,12 @@ fn test_ivc() { InputMap::from([("next_pc".to_string(), InputValue::Field(GenericFieldElement::from(0_u64)))]), InputMap::from([("next_pc".to_string(), InputValue::Field(GenericFieldElement::from(0_u64)))]), ]; - let switchboard = - Switchboard::new(programs, switchboard_inputs, vec![Scalar::from(2), Scalar::from(1)], 0); + let switchboard = Switchboard::::new( + programs, + switchboard_inputs, + vec![Scalar::from(2), Scalar::from(1)], + 0, + ); let setup = Setup::new(switchboard); let snark = run(&setup).unwrap(); dbg!(&snark.zi_primary()); @@ -56,8 +60,12 @@ fn test_ivc_private_inputs() { ), ]), ]; - let switchboard = - Switchboard::new(programs, switchboard_inputs, vec![Scalar::from(1), Scalar::from(2)], 0); + let switchboard = Switchboard::::new( + programs, + switchboard_inputs, + vec![Scalar::from(1), Scalar::from(2)], + 0, + ); let setup = Setup::new(switchboard); let snark = run(&setup).unwrap(); let zi = snark.zi_primary(); @@ -87,8 +95,12 @@ fn test_nivc() { InputValue::Field(GenericFieldElement::from(-1_i128)), )]), ]; - let switchboard = - Switchboard::new(programs, switchboard_inputs, vec![Scalar::from(1), Scalar::from(2)], 0); + let switchboard = Switchboard::::new( + programs, + switchboard_inputs, + vec![Scalar::from(1), Scalar::from(2)], + 0, + ); let setup = Setup::new(switchboard); let snark = run(&setup).unwrap(); let zi = 
snark.zi_primary(); @@ -114,8 +126,12 @@ fn test_ivc_verify() { "next_pc".to_string(), InputValue::Field(GenericFieldElement::from(0_u64)), )])]; - let switchboard = - Switchboard::new(programs, switchboard_inputs, vec![Scalar::from(2), Scalar::from(1)], 0); + let switchboard = Switchboard::::new( + programs, + switchboard_inputs, + vec![Scalar::from(2), Scalar::from(1)], + 0, + ); let setup = Setup::new(switchboard); let snark = run(&setup).unwrap(); let (z1_primary, z1_secondary) = @@ -135,8 +151,12 @@ fn test_ivc_compression() { "next_pc".to_string(), InputValue::Field(GenericFieldElement::from(0_u64)), )])]; - let switchboard = - Switchboard::new(programs, switchboard_inputs, vec![Scalar::from(2), Scalar::from(1)], 0); + let switchboard = Switchboard::::new( + programs, + switchboard_inputs, + vec![Scalar::from(2), Scalar::from(1)], + 0, + ); let setup = Setup::new(switchboard); let snark = run(&setup).unwrap(); let compressed_proof = compress(&setup, &snark).unwrap(); @@ -156,7 +176,7 @@ fn test_ivc_verify_basic() { ("external_mul".to_string(), InputValue::Field(GenericFieldElement::from(3_u64))), ("external_add".to_string(), InputValue::Field(GenericFieldElement::from(10_u64))), ])]; - let switchboard = Switchboard::new(programs, switchboard_inputs, vec![Scalar::from(2)], 0); + let switchboard = Switchboard::::new(programs, switchboard_inputs, vec![Scalar::from(2)], 0); let setup = Setup::new(switchboard); let snark = run(&setup).unwrap(); let (z1_primary, z1_secondary) = @@ -175,7 +195,7 @@ fn test_ivc_compression_basic() { ("external_mul".to_string(), InputValue::Field(GenericFieldElement::from(3_u64))), ("external_add".to_string(), InputValue::Field(GenericFieldElement::from(10_u64))), ])]; - let switchboard = Switchboard::new(programs, switchboard_inputs, vec![Scalar::from(2)], 0); + let switchboard = Switchboard::::new(programs, switchboard_inputs, vec![Scalar::from(2)], 0); let setup = Setup::new(switchboard); let snark = run(&setup).unwrap(); let 
compressed_proof = compress(&setup, &snark).unwrap(); @@ -191,8 +211,12 @@ fn test_ivc_compression_basic() { fn test_ivc_verify_poseidon() { let programs = vec![poseidon()]; let switchboard_inputs = vec![InputMap::new()]; - let switchboard = - Switchboard::new(programs, switchboard_inputs, vec![Scalar::from(2), Scalar::from(1)], 0); + let switchboard = Switchboard::::new( + programs, + switchboard_inputs, + vec![Scalar::from(2), Scalar::from(1)], + 0, + ); let setup = Setup::new(switchboard); let snark = run(&setup).unwrap(); let (z1_primary, z1_secondary) = @@ -206,8 +230,12 @@ fn test_ivc_verify_poseidon() { fn test_ivc_compression_poseidon() { let programs = vec![poseidon()]; let switchboard_inputs = vec![InputMap::new()]; - let switchboard = - Switchboard::new(programs, switchboard_inputs, vec![Scalar::from(2), Scalar::from(1)], 0); + let switchboard = Switchboard::::new( + programs, + switchboard_inputs, + vec![Scalar::from(2), Scalar::from(1)], + 0, + ); let setup = Setup::new(switchboard); let snark = run(&setup).unwrap(); let compressed_proof = compress(&setup, &snark).unwrap(); @@ -223,8 +251,16 @@ fn test_ivc_compression_poseidon() { #[traced_test] fn test_collatz() { let programs = vec![collatz_even(), collatz_odd()]; - let switchboard_inputs = vec![InputMap::new()]; - let switchboard = Switchboard::new(programs, switchboard_inputs, vec![Scalar::from(2)], 0); + let collatz_start = 19; + let initial_circuit_index = collatz_start % 2; + let switchboard = Switchboard::::new( + programs, + (), + vec![Scalar::from(collatz_start)], + initial_circuit_index as usize, + ); + // let switchboard = + // Switchboard::::new(programs, vec![InputMap::new()], vec![Scalar::from(2)], 0); let setup = Setup::new(switchboard); let snark = run(&setup).unwrap(); let (z1_primary, z1_secondary) = From f5aca444cbf92f331964eb42ac6e5fa6e3737cff Mon Sep 17 00:00:00 2001 From: Colin Roberts Date: Thu, 6 Mar 2025 05:36:58 -0700 Subject: [PATCH 35/51] cleanup --- frontend/src/error.rs | 
14 ++++---- frontend/src/noir.rs | 2 +- frontend/src/program.rs | 70 ++++++++++++++++++++++++--------------- frontend/src/setup.rs | 25 ++++++++------ frontend/tests/ivc/mod.rs | 3 -- frontend/tests/lib.rs | 2 ++ 6 files changed, 69 insertions(+), 47 deletions(-) diff --git a/frontend/src/error.rs b/frontend/src/error.rs index 74751a2..1e59dd4 100644 --- a/frontend/src/error.rs +++ b/frontend/src/error.rs @@ -22,15 +22,15 @@ use thiserror::Error; /// crate. #[derive(Debug, Error)] pub enum ProofError { - /// The error is a bellpepper_core::SynthesisError + /// The error is a `bellpepper_core::SynthesisError` #[error(transparent)] Synthesis(#[from] bellpepper_core::SynthesisError), - /// The error is a std::io::Error + /// The error is a `std::io::Error` #[error(transparent)] Io(#[from] std::io::Error), - /// The error is a serde_json::Error + /// The error is a `serde_json::Error` #[error(transparent)] Serde(#[from] serde_json::Error), @@ -42,7 +42,7 @@ pub enum ProofError { #[error("Failed to verify proof: {0}")] VerifyFailed(String), - /// The error is a num_bigint::ParseBigIntError + /// The error is a `num_bigint::ParseBigIntError` #[error(transparent)] Parse(#[from] num_bigint::ParseBigIntError), @@ -50,15 +50,15 @@ pub enum ProofError { #[error("Missing header section")] MissingSection, - /// The error is a bincode::ErrorKind + /// The error is a `bincode::ErrorKind` #[error(transparent)] Bincode(#[from] Box), - /// The error is a client_side_prover::errors::NovaError + /// The error is a `client_side_prover::errors::NovaError` #[error(transparent)] Nova(#[from] client_side_prover::errors::NovaError), - /// The error is a client_side_prover::supernova::error::SuperNovaError + /// The error is a `client_side_prover::supernova::error::SuperNovaError` #[error(transparent)] SuperNova(#[from] client_side_prover::supernova::error::SuperNovaError), diff --git a/frontend/src/noir.rs b/frontend/src/noir.rs index 0c286c4..17ac912 100644 --- a/frontend/src/noir.rs +++ 
b/frontend/src/noir.rs @@ -363,7 +363,7 @@ mod tests { use client_side_prover::bellpepper::shape_cs::ShapeCS; use super::*; - use crate::demo::{basic, http, poseidon, square_zeroth}; + use crate::demo::{basic, poseidon, square_zeroth}; fn add_external() -> NoirProgram { let json_path = "../target/add_external.json"; diff --git a/frontend/src/program.rs b/frontend/src/program.rs index 02caf5d..273f71c 100644 --- a/frontend/src/program.rs +++ b/frontend/src/program.rs @@ -1,7 +1,4 @@ -use client_side_prover::{ - supernova::{NonUniformCircuit, RecursiveSNARK}, - traits::snark::{default_ck_hint, BatchedRelaxedR1CSSNARKTrait}, -}; +use client_side_prover::supernova::{NonUniformCircuit, RecursiveSNARK}; use halo2curves::grumpkin; use noirc_abi::InputMap; use proof::FoldingProof; @@ -19,10 +16,17 @@ use crate::{ /// Compressed proof type pub type CompressedProof = FoldingProof, Scalar>; -pub trait Memory { +pub trait Memory: private::Sealed { type Data; } +mod private { + use super::{RAM, ROM}; + pub trait Sealed {} + impl Sealed for ROM {} + impl Sealed for RAM {} +} + #[derive(Debug, Clone)] pub struct ROM {} impl Memory for ROM { @@ -45,10 +49,10 @@ pub struct Switchboard { pub(crate) switchboard_inputs: M::Data, } -impl Switchboard { +impl Switchboard { pub fn new( mut circuits: Vec, - switchboard_inputs: M::Data, + switchboard_inputs: Vec, public_input: Vec, initial_circuit_index: usize, ) -> Self { @@ -59,6 +63,19 @@ impl Switchboard { } } +impl Switchboard { + pub fn new( + mut circuits: Vec, + public_input: Vec, + initial_circuit_index: usize, + ) -> Self { + // Set the index of each circuit given the order they are passed in since this is skipped in + // serde + circuits.iter_mut().enumerate().for_each(|(i, c)| c.index = i); + Self { circuits, public_input, initial_circuit_index, switchboard_inputs: () } + } +} + impl NonUniformCircuit for Switchboard { type C1 = NoirProgram; type C2 = TrivialCircuit; @@ -74,6 +91,26 @@ impl NonUniformCircuit for Switchboard { fn 
initial_circuit_index(&self) -> usize { self.initial_circuit_index } } +pub fn run(setup: &Setup>) -> Result, ProofError> { + if std::any::type_name::() == std::any::type_name::() { + // Safety: We've verified the type matches ROM + let setup = unsafe { + &*std::ptr::from_ref::>>(setup) + .cast::>>() + }; + run_rom(setup) + } else if std::any::type_name::() == std::any::type_name::() { + // Safety: We've verified the type matches RAM + let setup = unsafe { + &*std::ptr::from_ref::>>(setup) + .cast::>>() + }; + run_ram(setup) + } else { + unreachable!("The trait `Memory` is sealed, so you cannot reach this point") + } +} + pub fn run_rom(setup: &Setup>) -> Result, ProofError> { info!("Starting SuperNova program..."); @@ -137,11 +174,6 @@ pub fn run_rom(setup: &Setup>) -> Result, ProofErr let snark = recursive_snark.as_mut().unwrap(); snark.prove_step(&setup.params, &circuit_primary, &circuit_secondary)?; info!("Done proving single step..."); - - // TODO: Feature gate this or just remove it - // info!("Verifying single step..."); - // snark.verify(&public_params, snark.z0_primary(), z0_secondary)?; - // info!("Single step verification done"); } trace!("Recursive loop of `program::run()` elapsed: {:?}", time.elapsed()); @@ -228,20 +260,6 @@ pub fn run_ram(setup: &Setup>) -> Result, ProofErr Ok(recursive_snark.unwrap()) } -pub fn run(setup: &Setup>) -> Result, ProofError> { - if std::any::type_name::() == std::any::type_name::() { - // Safety: We've verified the type matches ROM - let setup = unsafe { std::mem::transmute::<&Setup>, &Setup>>(setup) }; - run_rom(setup) - } else if std::any::type_name::() == std::any::type_name::() { - // Safety: We've verified the type matches RAM - let setup = unsafe { std::mem::transmute::<&Setup>, &Setup>>(setup) }; - run_ram(setup) - } else { - Err(ProofError::Other("Unsupported memory type".into())) - } -} - pub fn compress( setup: &Setup>, recursive_snark: &RecursiveSNARK, diff --git a/frontend/src/setup.rs b/frontend/src/setup.rs 
index 19eede7..e6bb064 100644 --- a/frontend/src/setup.rs +++ b/frontend/src/setup.rs @@ -9,17 +9,22 @@ use tracing::debug; use crate::{ error::ProofError, - noir::NoirProgram, - program::{self, Memory, Switchboard}, + program::{Memory, Switchboard}, AuxParams, E1, S1, S2, }; -// TODO: Seal this -pub trait Status { +pub trait Status: private::Sealed { type Switchboard; type PublicParams; } +mod private { + use super::{Empty, Ready}; + pub trait Sealed {} + impl Sealed for Ready {} + impl Sealed for Empty {} +} + #[derive(Debug, Clone)] pub struct Ready { _marker: std::marker::PhantomData, @@ -149,26 +154,26 @@ impl FastSerde for Setup> { #[cfg(test)] mod tests { use super::*; - use crate::demo::square_zeroth; + use crate::{demo::square_zeroth, program::RAM}; #[test] fn test_setup_and_params() { - let setup = Setup::new(Switchboard::new(vec![square_zeroth()], vec![], vec![], 0)); + let setup = Setup::new(Switchboard::::new(vec![square_zeroth()], vec![], 0)); assert_eq!(setup.params.num_constraints_and_variables(0), (10009, 10001)); } #[test] fn test_setup_serialize() { - let setup = Setup::new(Switchboard::new(vec![square_zeroth()], vec![], vec![], 0)); + let setup = Setup::new(Switchboard::::new(vec![square_zeroth()], vec![], 0)); let empty_setup = setup.into_empty(); let serialized = empty_setup.to_bytes(); - let deserialized = Setup::::from_bytes(&serialized).unwrap(); + let deserialized = Setup::>::from_bytes(&serialized).unwrap(); assert_eq!(empty_setup, deserialized); } #[test] fn test_setup_store_file() { - let switchboard = Switchboard::new(vec![square_zeroth()], vec![], vec![], 0); + let switchboard = Switchboard::::new(vec![square_zeroth()], vec![], 0); let setup = Setup::new(switchboard.clone()); let vk_digest_primary = setup.vk_digest_primary; let vk_digest_secondary = setup.vk_digest_secondary; @@ -176,7 +181,7 @@ mod tests { let bytes = setup.store_file(&path.join("setup.bytes")).unwrap(); assert!(!bytes.is_empty()); let stored_bytes = 
std::fs::read(path.join("setup.bytes")).unwrap(); - let deserialized = Setup::::from_bytes(&stored_bytes).unwrap(); + let deserialized = Setup::>::from_bytes(&stored_bytes).unwrap(); let ready_setup = deserialized.into_ready(switchboard); assert_eq!(vk_digest_primary, ready_setup.vk_digest_primary); assert_eq!(vk_digest_secondary, ready_setup.vk_digest_secondary); diff --git a/frontend/tests/ivc/mod.rs b/frontend/tests/ivc/mod.rs index 37c5d1b..86869d7 100644 --- a/frontend/tests/ivc/mod.rs +++ b/frontend/tests/ivc/mod.rs @@ -255,12 +255,9 @@ fn test_collatz() { let initial_circuit_index = collatz_start % 2; let switchboard = Switchboard::::new( programs, - (), vec![Scalar::from(collatz_start)], initial_circuit_index as usize, ); - // let switchboard = - // Switchboard::::new(programs, vec![InputMap::new()], vec![Scalar::from(2)], 0); let setup = Setup::new(switchboard); let snark = run(&setup).unwrap(); let (z1_primary, z1_secondary) = diff --git a/frontend/tests/lib.rs b/frontend/tests/lib.rs index e15f91a..3fc2f0f 100644 --- a/frontend/tests/lib.rs +++ b/frontend/tests/lib.rs @@ -1,3 +1,5 @@ +#![warn(missing_docs, clippy::missing_docs_in_private_items)] + use client_side_prover_frontend::demo::*; use tracing_test::traced_test; From 74252c12a0d9ea3cf84bca1ca8486604b6536989 Mon Sep 17 00:00:00 2001 From: Colin Roberts Date: Thu, 6 Mar 2025 05:55:02 -0700 Subject: [PATCH 36/51] docs --- frontend/src/error.rs | 22 +------- frontend/src/lib.rs | 108 ++++++++++++++++++++++++++-------------- frontend/src/program.rs | 12 ++--- frontend/src/proof.rs | 4 +- frontend/src/setup.rs | 4 +- frontend/tests/lib.rs | 2 - 6 files changed, 83 insertions(+), 69 deletions(-) diff --git a/frontend/src/error.rs b/frontend/src/error.rs index 1e59dd4..6dc08d6 100644 --- a/frontend/src/error.rs +++ b/frontend/src/error.rs @@ -1,27 +1,7 @@ -//! Error type for the `proofs` crate. -//! -//! This enum represents the various error conditions that can occur within the -//! `proofs` crate. 
It provides a unified way to handle and propagate errors -//! throughout the crate. -//! -//! The possible error variants include: -//! -//! - `Synthesis`: Represents an error that occurred during the synthesis process. -//! - `Io`: Represents an I/O error. -//! - `Serde`: Represents a serialization or deserialization error. -//! - `Other`: Represents any other error with a custom error message. -//! - `VerifyFailed`: Indicates that the proof verification failed. -//! - `Parse`: Represents an error that occurred while parsing a big integer. -//! - `WitnessCalc`: Represents an error that occurred during witness calculation (only available -//! when not targeting `wasm32`). -//! - `MissingSection`: Indicates that a required section is missing. -//! - `Bincode`: Represents a Bincode serialization or deserialization error. use thiserror::Error; -/// Represents the various error conditions that can occur within the `proofs` -/// crate. #[derive(Debug, Error)] -pub enum ProofError { +pub enum FrontendError { /// The error is a `bellpepper_core::SynthesisError` #[error(transparent)] Synthesis(#[from] bellpepper_core::SynthesisError), diff --git a/frontend/src/lib.rs b/frontend/src/lib.rs index 7f3c0bd..c7e4bc1 100644 --- a/frontend/src/lib.rs +++ b/frontend/src/lib.rs @@ -1,49 +1,48 @@ -// TODO: Add back missing docs -// #![warn(missing_docs, clippy::missing_docs_in_private_items)] +#![warn(missing_docs, clippy::missing_docs_in_private_items)] -//! # Proofs Crate +//! # NIVC Folding for Noir Circuits //! -//! The `proofs` crate provides a comprehensive framework for creating and -//! verifying zero-knowledge proofs. It includes various modules and utilities -//! to facilitate the construction of proofs, circuits, and the necessary -//! cryptographic primitives. +//! This crate provides an frontend implementation to use Non-uniform Incrementally Verifiable +//! Computation (NIVC) folding for Noir circuits. NIVC allows for incremental verification of +//! 
computations across different circuit types, enabling complex proof systems that can switch +//! between different circuit implementations during execution. //! -//! ## Modules +//! ## Key Components //! -//! - `circom`: Contains utilities for working with Circom circuits. -//! - `circuits`: Provides the implementation of various circuits used in the proof system. -//! - `errors`: Defines error types used throughout the crate. -//! - `program`: Contains the core logic for setting up and running the proof system. -//! - `proof`: Provides the implementation of the proof generation and verification. -//! - `setup`: Contains utilities for setting up the proof system. -//! - `tests`: Contains tests for the proof system. +//! - **Noir Programs**: Representation and handling of Noir language programs +//! - **Switchboard**: Manages the flow between different circuit implementations +//! - **Setup**: Handles parameter generation and initialization for the proof system +//! - **Proof Generation**: Creation and verification of folding proofs //! -//! ## Types +//! ## Cryptographic Backends //! -//! - `E1`: Represents the first elliptic curve engine used in the proof system. -//! - `E2`: Represents the second elliptic curve engine used in the proof system. -//! - `G1`: Represents the group associated with the first elliptic curve engine. -//! - `G2`: Represents the group associated with the second elliptic curve engine. -//! - `EE1`: Represents the evaluation engine for the first elliptic curve. -//! - `EE2`: Represents the evaluation engine for the second elliptic curve. -//! - `S1`: Represents the SNARK for the first elliptic curve. -//! - `S2`: Represents the SNARK for the second elliptic curve. -//! - `F`: Represents the scalar field associated with a given group. -//! - `AuxParams`: Represents the auxiliary parameters needed to create `PublicParams`. -//! - `ProverKey`: Represents the prover key needed to create a `CompressedSNARK`. -//! 
- `VerifierKey`: Represents the verifier key needed to create a `CompressedSNARK`. +//! The crate uses several cryptographic backends: +//! - Primary curve: bn254 (also known as BN256) +//! - Secondary curve: Grumpkin +//! - Proof systems: SuperNova, Spartan R1CS SNARKs +//! +//! ## Memory Models +//! +//! The crate supports two memory models: +//! - **ROM (Read-Only Memory)**: All computation steps are known in advance +//! - **RAM (Random Access Memory)**: Computation steps are determined dynamically +//! +//! ## Example Usage +//! +//! The crate provides demo implementations accessible via the `demo` module when +//! built with the `demo` feature. use client_side_prover::{ provider::GrumpkinEngine, spartan::batched::BatchedRelaxedR1CSSNARK, - supernova::{snark::CompressedSNARK, PublicParams, TrivialCircuit}, + supernova::{snark::CompressedSNARK, TrivialCircuit}, traits::{Engine, Group}, }; use ff::Field; use serde::{Deserialize, Serialize}; use tracing::{debug, info}; -use crate::error::ProofError; +use crate::error::FrontendError; pub mod error; pub mod noir; @@ -52,73 +51,110 @@ pub mod proof; pub mod setup; /// Represents the scalar field for the primary curve (bn254) +/// +/// This type is used for representing field elements in the scalar field +/// of the primary elliptic curve used in the proof system. pub type Scalar = ::Scalar; /// Represents the params needed to create `PublicParams` alongside the -/// circuits' R1CSs. Specifically typed to the `proofs` crate choices of curves -/// and engines. +/// circuits' R1CSs. +/// +/// These auxiliary parameters contain the cryptographic context needed for +/// setting up the proof system. pub type AuxParams = client_side_prover::supernova::AuxParams; -/// The `ProverKey` needed to create a `CompressedSNARK` using the `proofs` -/// crate choices of curves and engines. +/// The `ProverKey` needed to create a `CompressedSNARK`. +/// +/// This key is used by the prover to generate cryptographic proofs. 
pub type ProverKey = client_side_prover::supernova::snark::ProverKey; -/// The `VerifierKey` needed to create a `CompressedSNARK` using the `proofs` -/// crate choices of curves and engines. +/// The `VerifierKey` needed to create a `CompressedSNARK`. +/// +/// This key is used by the verifier to validate cryptographic proofs. pub type VerifierKey = client_side_prover::supernova::snark::VerifierKey; /// Represents the first elliptic curve engine used in the proof system. +/// +/// The primary engine uses BN256 with KZG polynomial commitments. type E1 = client_side_prover::provider::Bn256EngineKZG; /// Represents the second elliptic curve engine used in the proof system. +/// +/// The secondary engine uses the Grumpkin curve, which is cycle-friendly with BN256. type E2 = GrumpkinEngine; /// Represents the group associated with the first elliptic curve engine. +/// +/// This group is used for cryptographic operations in the primary curve. type G1 = ::GE; /// Represents the evaluation engine for the first elliptic curve. +/// +/// This evaluation engine handles polynomial evaluations for the primary curve. type EE1 = client_side_prover::provider::hyperkzg::EvaluationEngine; /// Represents the evaluation engine for the second elliptic curve. +/// +/// This evaluation engine handles polynomial evaluations for the secondary curve. type EE2 = client_side_prover::provider::ipa_pc::EvaluationEngine; /// Represents the SNARK for the first elliptic curve. +/// +/// This SNARK implementation is used for generating proofs on the primary curve. type S1 = BatchedRelaxedR1CSSNARK; /// Represents the SNARK for the second elliptic curve. +/// +/// This SNARK implementation is used for generating proofs on the secondary curve. type S2 = BatchedRelaxedR1CSSNARK; #[cfg(any(test, feature = "demo"))] +/// Demo module providing example Noir programs for testing and demonstration +/// +/// This module is only available when the crate is built with the `demo` feature +/// or in test mode. 
It is also used to test the crate's functionality. pub mod demo { use crate::noir::NoirProgram; + /// Creates a basic Noir program example + /// + /// Loads a compiled Noir program that performs simple operations that comprise a single ACIR + /// gate. pub fn basic() -> NoirProgram { let bytecode = std::fs::read("../target/basic.json").expect("Failed to read Noir program file"); NoirProgram::new(&bytecode) } + /// Loads a compiled Noir program that demonstrates adding external private values to the running + /// state. pub fn add_external() -> NoirProgram { let bytecode = std::fs::read("../target/add_external.json").expect("Failed to read Noir program file"); NoirProgram::new(&bytecode) } + /// Creates a Noir program that squares the zeroth element of its input pub fn square_zeroth() -> NoirProgram { let bytecode = std::fs::read("../target/square_zeroth.json").expect("Failed to read Noir program file"); NoirProgram::new(&bytecode) } + /// Creates a Noir program that demonstrates memory swapping between the running state and the + /// folding memory. pub fn swap_memory() -> NoirProgram { let bytecode = std::fs::read("../target/swap_memory.json").expect("Failed to read Noir program file"); NoirProgram::new(&bytecode) } + /// Creates a Noir program implementing the Poseidon hash function on the running state. pub fn poseidon() -> NoirProgram { let bytecode = std::fs::read("../target/poseidon.json").expect("Failed to read Noir program file"); NoirProgram::new(&bytecode) } + /// Creates a Noir program that is the even case of the function in the Collatz conjecture. pub fn collatz_even() -> NoirProgram { let bytecode = std::fs::read("../target/collatz_even.json").expect("Failed to read Noir program file"); NoirProgram::new(&bytecode) } + /// Creates a Noir program that is the odd case of the function in the Collatz conjecture. 
pub fn collatz_odd() -> NoirProgram { let bytecode = std::fs::read("../target/collatz_odd.json").expect("Failed to read Noir program file"); diff --git a/frontend/src/program.rs b/frontend/src/program.rs index 273f71c..b8aabb6 100644 --- a/frontend/src/program.rs +++ b/frontend/src/program.rs @@ -91,7 +91,7 @@ impl NonUniformCircuit for Switchboard { fn initial_circuit_index(&self) -> usize { self.initial_circuit_index } } -pub fn run(setup: &Setup>) -> Result, ProofError> { +pub fn run(setup: &Setup>) -> Result, FrontendError> { if std::any::type_name::() == std::any::type_name::() { // Safety: We've verified the type matches ROM let setup = unsafe { @@ -111,7 +111,7 @@ pub fn run(setup: &Setup>) -> Result, Pro } } -pub fn run_rom(setup: &Setup>) -> Result, ProofError> { +pub fn run_rom(setup: &Setup>) -> Result, FrontendError> { info!("Starting SuperNova program..."); let z0_primary = &setup.switchboard.public_input; @@ -136,7 +136,7 @@ pub fn run_rom(setup: &Setup>) -> Result, ProofErr // Check if higher bytes are non-zero (which would be truncated in usize conversion) let usize_size = std::mem::size_of::(); if pc_bytes[usize_size..].iter().any(|&b| b != 0) { - return Err(ProofError::Other("Program counter value too large for usize".into())); + return Err(FrontendError::Other("Program counter value too large for usize".into())); } // Convert the relevant bytes to usize (using little-endian order) @@ -182,7 +182,7 @@ pub fn run_rom(setup: &Setup>) -> Result, ProofErr Ok(recursive_snark.unwrap()) } -pub fn run_ram(setup: &Setup>) -> Result, ProofError> { +pub fn run_ram(setup: &Setup>) -> Result, FrontendError> { info!("Starting SuperNova program..."); let z0_primary = &setup.switchboard.public_input; @@ -211,7 +211,7 @@ pub fn run_ram(setup: &Setup>) -> Result, ProofErr // Check if higher bytes are non-zero (which would be truncated in usize conversion) let usize_size = std::mem::size_of::(); if pc_bytes[usize_size..].iter().any(|&b| b != 0) { - return 
Err(ProofError::Other("Program counter value too large for usize".into())); + return Err(FrontendError::Other("Program counter value too large for usize".into())); } // Convert the relevant bytes to usize (using little-endian order) @@ -263,7 +263,7 @@ pub fn run_ram(setup: &Setup>) -> Result, ProofErr pub fn compress( setup: &Setup>, recursive_snark: &RecursiveSNARK, -) -> Result { +) -> Result { let pk = CompressedSNARK::::initialize_pk( &setup.params, setup.vk_digest_primary, diff --git a/frontend/src/proof.rs b/frontend/src/proof.rs index 2bc50de..17adb2e 100644 --- a/frontend/src/proof.rs +++ b/frontend/src/proof.rs @@ -35,7 +35,7 @@ impl CompressedProof { /// # Returns /// /// A `FoldingProof` with a `Vec` proof and a `String` verifier digest. - pub fn serialize(self) -> Result, String>, ProofError> { + pub fn serialize(self) -> Result, String>, FrontendError> { let proof = bincode::serialize(&self.proof)?; Ok(FoldingProof { proof, verifier_digest: hex::encode(self.verifier_digest.to_bytes()) }) @@ -49,7 +49,7 @@ impl FoldingProof, String> { /// # Returns /// /// A `FoldingProof` with a `CompressedSNARK` proof and a `F` verifier digest. 
- pub fn deserialize(self) -> Result { + pub fn deserialize(self) -> Result { let proof = bincode::deserialize(&self.proof[..])?; Ok(FoldingProof { diff --git a/frontend/src/setup.rs b/frontend/src/setup.rs index e6bb064..70a637f 100644 --- a/frontend/src/setup.rs +++ b/frontend/src/setup.rs @@ -8,7 +8,7 @@ use client_side_prover::{ use tracing::debug; use crate::{ - error::ProofError, + error::FrontendError, program::{Memory, Switchboard}, AuxParams, E1, S1, S2, }; @@ -88,7 +88,7 @@ impl Setup> { } } - pub fn store_file(self, path: &std::path::PathBuf) -> Result, ProofError> { + pub fn store_file(self, path: &std::path::PathBuf) -> Result, FrontendError> { let bytes = self.into_empty().to_bytes(); if let Some(parent) = path.parent() { std::fs::create_dir_all(parent)?; diff --git a/frontend/tests/lib.rs b/frontend/tests/lib.rs index 3fc2f0f..e15f91a 100644 --- a/frontend/tests/lib.rs +++ b/frontend/tests/lib.rs @@ -1,5 +1,3 @@ -#![warn(missing_docs, clippy::missing_docs_in_private_items)] - use client_side_prover_frontend::demo::*; use tracing_test::traced_test; From 679039fb2dfc4ee8b6ea5a457ba69a67b2177126 Mon Sep 17 00:00:00 2001 From: Colin Roberts Date: Thu, 6 Mar 2025 05:55:45 -0700 Subject: [PATCH 37/51] docs: `error.rs` --- frontend/src/error.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/frontend/src/error.rs b/frontend/src/error.rs index 6dc08d6..bf830d5 100644 --- a/frontend/src/error.rs +++ b/frontend/src/error.rs @@ -1,5 +1,16 @@ +//! # Error Handling +//! +//! This module defines the error types used throughout the crate. +//! `FrontendError` is the primary error type that encapsulates various +//! lower-level errors that might occur during proof generation, verification, +//! and other operations. + use thiserror::Error; +/// Represents errors that can occur in the frontend operations of the NIVC system. 
+/// +/// This enum encapsulates various error types from dependent libraries as well as +/// custom error conditions specific to this crate. #[derive(Debug, Error)] pub enum FrontendError { /// The error is a `bellpepper_core::SynthesisError` From 0fbd4321994170e9ba6e679fc69e116024556261 Mon Sep 17 00:00:00 2001 From: Colin Roberts Date: Thu, 6 Mar 2025 06:03:31 -0700 Subject: [PATCH 38/51] continue cleanup --- frontend/src/error.rs | 4 ++++ frontend/src/noir.rs | 12 ++++------ frontend/src/proof.rs | 51 ++++++++++++++++++++++++++++++++----------- 3 files changed, 46 insertions(+), 21 deletions(-) diff --git a/frontend/src/error.rs b/frontend/src/error.rs index bf830d5..3fbc23a 100644 --- a/frontend/src/error.rs +++ b/frontend/src/error.rs @@ -69,4 +69,8 @@ pub enum FrontendError { /// The error is an invalid manifest #[error("Invalid manifest: {0}")] InvalidManifest(String), + + /// The error is an invalid hex string + #[error(transparent)] + FromHex(#[from] hex::FromHexError), } diff --git a/frontend/src/noir.rs b/frontend/src/noir.rs index 17ac912..a701647 100644 --- a/frontend/src/noir.rs +++ b/frontend/src/noir.rs @@ -387,22 +387,18 @@ mod tests { fn test_deserialize_abi() { let program = add_external(); - // Verify basic structure - assert_eq!(program.version, "1.0.0-beta.2+1a2a08cbcb68646ff1aaef383cfc1798933c1355"); - assert_eq!(program.hash, 4842196402509912449); - // Verify parameters assert_eq!(program.abi.parameters.len(), 3); - assert_eq!(program.abi.parameters[0].name, "registers"); + assert_eq!(program.abi.parameters[0].name, "folding_variables"); assert_eq!(program.abi.parameters[1].name, "external"); assert_eq!(program.abi.parameters[2].name, "next_pc"); // Verify return type if let AbiType::Struct { fields, path } = &program.abi.return_type.as_ref().unwrap().abi_type { assert_eq!(fields.len(), 2); - assert_eq!(path, "nivc::FoldingOutput"); + assert_eq!(path, "nivc::FoldingVariables"); assert_eq!(fields[0].0, "registers"); - assert_eq!(fields[1].0, 
"next_pc"); + assert_eq!(fields[1].0, "program_counter"); } else { panic!("Expected tuple return type, got {:?}", program.abi.return_type); } @@ -421,7 +417,7 @@ mod tests { ]; let _ = program.synthesize(&mut cs, pc.as_ref(), z.as_ref()).unwrap(); - assert_eq!(cs.num_constraints(), 5); + assert_eq!(cs.num_constraints(), 3); } #[test] diff --git a/frontend/src/proof.rs b/frontend/src/proof.rs index 17adb2e..d7333db 100644 --- a/frontend/src/proof.rs +++ b/frontend/src/proof.rs @@ -20,21 +20,32 @@ use hex; use super::*; use crate::program::CompressedProof; -/// Folding proof`` +/// Represents a folding proof with associated verifier digest +/// +/// A folding proof contains the actual cryptographic proof data along with +/// the verifier digest needed for verification. This is a generic structure +/// that can work with different proof and digest formats. #[derive(Debug, Serialize, Deserialize, Clone)] pub struct FoldingProof { - /// Proof + /// The cryptographic proof data pub proof: T, - /// Verifier digest + /// Digest used by the verifier to check the proof's validity pub verifier_digest: V, } impl CompressedProof { - /// Serializes a `FoldingProof` into a format suitable for storage or transmission. + /// Serializes a `CompressedProof` into a format suitable for storage or transmission. + /// + /// Converts the internal proof representation to a binary format and + /// encodes the verifier digest as a hexadecimal string. /// /// # Returns /// /// A `FoldingProof` with a `Vec` proof and a `String` verifier digest. + /// + /// # Errors + /// + /// Returns a `FrontendError` if serialization fails. pub fn serialize(self) -> Result, String>, FrontendError> { let proof = bincode::serialize(&self.proof)?; @@ -42,22 +53,36 @@ impl CompressedProof { } } -/// Folding proof implementation impl FoldingProof, String> { /// Deserializes a `FoldingProof` from a stored or transmitted format back into its original form. 
/// + /// Converts the binary proof data back into a `CompressedSNARK` instance and + /// decodes the verifier digest from hexadecimal to its field element representation. + /// /// # Returns /// - /// A `FoldingProof` with a `CompressedSNARK` proof and a `F` verifier digest. + /// A `CompressedProof` with a `CompressedSNARK` proof and a `Scalar` verifier digest. + /// + /// # Errors + /// + /// Returns a `FrontendError` if deserialization fails or if the hex string cannot be converted + /// to the expected field element. pub fn deserialize(self) -> Result { let proof = bincode::deserialize(&self.proof[..])?; - Ok(FoldingProof { - proof, - verifier_digest: Scalar::from_bytes( - &hex::decode(&self.verifier_digest).unwrap().try_into().unwrap(), - ) - .unwrap(), - }) + // Decode the hex string to bytes + let digest_bytes = hex::decode(&self.verifier_digest)?; + + // Convert to fixed-size array safely + let digest_array: [u8; 32] = digest_bytes + .try_into() + .map_err(|_| FrontendError::Other("Invalid digest length".to_string()))?; + + // Convert to Scalar, handling the case where from_bytes returns CtOption + let verifier_digest = Scalar::from_bytes(&digest_array) + .into_option() + .ok_or_else(|| FrontendError::Other("Invalid scalar encoding".to_string()))?; + + Ok(FoldingProof { proof, verifier_digest }) } } From 749647e2ae53417f4f7f985b1157afb662b6bc71 Mon Sep 17 00:00:00 2001 From: Colin Roberts Date: Thu, 6 Mar 2025 15:31:56 -0700 Subject: [PATCH 39/51] docs + WIP end to end --- frontend/src/error.rs | 4 + frontend/src/noir.rs | 190 ++++++++++------ frontend/src/program.rs | 357 ++++++++++++++++++++----------- frontend/src/setup.rs | 153 +++++++++++-- frontend/tests/end_to_end/mod.rs | 90 +++++++- frontend/tests/ivc/mod.rs | 20 +- 6 files changed, 592 insertions(+), 222 deletions(-) diff --git a/frontend/src/error.rs b/frontend/src/error.rs index 3fbc23a..df4588e 100644 --- a/frontend/src/error.rs +++ b/frontend/src/error.rs @@ -73,4 +73,8 @@ pub enum 
FrontendError { /// The error is an invalid hex string #[error(transparent)] FromHex(#[from] hex::FromHexError), + + /// The error is a [`client_side_prover::fast_serde::SerdeByteError`] + #[error(transparent)] + FastSerde(#[from] client_side_prover::fast_serde::SerdeByteError), } diff --git a/frontend/src/noir.rs b/frontend/src/noir.rs index a701647..72efe1e 100644 --- a/frontend/src/noir.rs +++ b/frontend/src/noir.rs @@ -1,3 +1,16 @@ +//! # Noir Program Integration +//! +//! This module provides the integration between Noir programs and the NIVC system. +//! It handles the translation of Noir's ACIR (Abstract Circuit Intermediate Representation) +//! into constraints that can be used in the folding proof system. This allows Noir programs +//! to be used as circuit components in Non-uniform Incrementally Verifiable Computation. +//! +//! ## Key Components +//! +//! - `NoirProgram`: Represents a compiled Noir program with its bytecode and ABI +//! - `StepCircuit` implementation: Allows Noir programs to be used in the `SuperNova` NIVC system +//! - Field conversion functions: Convert between ACIR field representation and proof system fields + use std::collections::{BTreeMap, HashMap}; use acvm::{ @@ -11,60 +24,86 @@ use acvm::{ AcirField, }; use ark_bn254::Fr; -use bellpepper_core::{ - num::AllocatedNum, ConstraintSystem, Index, LinearCombination, SynthesisError, Variable, -}; +use bellpepper_core::{num::AllocatedNum, ConstraintSystem, LinearCombination, SynthesisError}; use client_side_prover::supernova::StepCircuit; use ff::PrimeField; use noirc_abi::{input_parser::InputValue, Abi, AbiType, InputMap}; -use tracing::{debug, error, info, trace, warn}; +use tracing::{error, trace}; use super::*; -// TODO: If we deserialize more here and get metadata, we could more easily look at witnesses, etc. -// Especially if we want to output a constraint to the PC. Using the abi would be handy for -// assigning inputs. 
+/// Represents a compiled Noir program ready for execution in the NIVC system +/// +/// A `NoirProgram` contains the compiled bytecode of a Noir program along with its ABI +/// (Application Binary Interface) which describes the program's inputs and outputs. +/// It can be used as a circuit component in the `SuperNova` NIVC system. #[derive(Clone, Serialize, Deserialize, Debug)] pub struct NoirProgram { - #[serde(rename = "noir_version")] - pub version: String, - pub hash: u64, - pub abi: Abi, + /// The program's ABI describing its inputs and outputs + pub abi: Abi, + + /// The program's bytecode in ACIR format, serialized as base64 #[serde( serialize_with = "Program::serialize_program_base64", deserialize_with = "Program::deserialize_program_base64" )] - pub bytecode: Program>, - // TODO: We likely don't need these. - pub debug_symbols: serde_json::Value, - // TODO: We likely don't need these. - pub file_map: serde_json::Value, - - pub names: Vec, - pub brillig_names: Vec, + pub bytecode: Program>, + + /// Optional witness inputs for the program (is used internally by the [`program::run`] function) #[serde(skip)] - pub witness: Option, + pub witness: Option, + + /// The index of this program in the switchboard's circuit list #[serde(skip)] - pub index: usize, + pub index: usize, } impl NoirProgram { + /// Creates a new `NoirProgram` from JSON bytecode + /// + /// # Arguments + /// + /// * `bin` - The JSON bytecode of a compiled Noir program + /// + /// # Returns + /// + /// A new `NoirProgram` instance pub fn new(bin: &[u8]) -> Self { serde_json::from_slice(bin).unwrap() } + /// Gets the main circuit from the program + /// + /// # Returns + /// + /// A reference to the main circuit function pub fn circuit(&self) -> &Circuit> { &self.bytecode.functions[0] } + /// Gets the unconstrained functions from the program + /// + /// Unconstrained functions are functions that are executed during witness generation + /// but do not contribute to the circuit's constraints. 
These are handled by the + /// [`StubbedBlackBoxSolver`]. + /// + /// # Returns + /// + /// A reference to the list of unconstrained functions pub fn unconstrained_functions(&self) -> &Vec>> { &self.bytecode.unconstrained_functions } - pub fn set_inputs(&mut self, switchboard_witness: InputMap) { - self.witness = Some(switchboard_witness); - } + /// Sets the witness inputs for the program + /// + /// # Arguments + /// + /// * `witness` - The input map containing witness values + pub fn set_inputs(&mut self, witness: InputMap) { self.witness = Some(witness); } } impl StepCircuit for NoirProgram { + /// Returns the number of registers in the folding state + /// + /// This is determined by examining the ABI to find the "registers" array + /// in the `FoldingVariables` struct. fn arity(&self) -> usize { - // Find input type with FoldingVariables type (regardless of parameter name) let input_type = self .abi .parameters @@ -78,10 +117,8 @@ impl StepCircuit for NoirProgram { }) .map(|param| ¶m.typ); - // Get the return type let return_type = self.abi.return_type.as_ref().map(|ret| &ret.abi_type); - // Extract register length from a FoldingVariables struct let get_register_length = |typ: &AbiType| -> usize { if let AbiType::Struct { fields, .. } = typ { if let Some((_, AbiType::Array { length, .. })) = @@ -96,10 +133,8 @@ impl StepCircuit for NoirProgram { } }; - // Check types and extract register length match (input_type, return_type) { (Some(input), Some(output)) => { - // Check that both are FoldingVariables if let (AbiType::Struct { path: in_path, .. }, AbiType::Struct { path: out_path, .. 
}) = (input, output) { @@ -107,12 +142,10 @@ impl StepCircuit for NoirProgram { let in_len = get_register_length(input); let out_len = get_register_length(output); - if in_len != out_len { - panic!( - "Input and output must have same number of registers: {} vs {}", - in_len, out_len - ); - } + assert!( + in_len == out_len, + "Input and output must have same number of registers: {in_len} vs {out_len}", + ); return in_len; } @@ -123,8 +156,25 @@ impl StepCircuit for NoirProgram { } } + /// Returns the index of this circuit in the switchboard fn circuit_index(&self) -> usize { self.index } + /// Synthesizes the Noir program into a constraint system + /// + /// This is the core method that translates the Noir program's ACIR representation + /// into constraints that can be used in the folding proof system. It processes + /// each gate in the ACIR circuit and creates corresponding constraints in the + /// target constraint system. + /// + /// # Arguments + /// + /// * `cs` - The constraint system to add constraints to + /// * `pc` - The program counter (next circuit to execute) + /// * `z` - The current folding state (register values) + /// + /// # Returns + /// + /// A tuple of the next program counter and updated register values #[allow(clippy::too_many_lines)] fn synthesize>( &self, @@ -145,10 +195,8 @@ impl StepCircuit for NoirProgram { &[], ); - // Prepare inputs with registers // TODO: Can we remove this clone since it may be a lot of data? 
let mut inputs_with_folding_variables = inputs.clone(); - // Create folding variables let folding_variables = InputValue::Struct(BTreeMap::from([ ( "registers".to_string(), @@ -176,7 +224,7 @@ impl StepCircuit for NoirProgram { } // Solve and get resulting witness map - trace!("Executing ACVM solve..."); + debug!("Executing ACVM solve..."); acvm.solve(); acvm.finalize() }); @@ -217,16 +265,16 @@ impl StepCircuit for NoirProgram { // Handle mul terms by creating intermediate variables for each product for mul_term in &gate.mul_terms { - let left_var = get_var(&mul_term.1, &mut allocated_vars, cs)?; - let right_var = get_var(&mul_term.2, &mut allocated_vars, cs)?; + let left_variable = get_var(&mul_term.1, &mut allocated_vars, cs)?; + let right_variable = get_var(&mul_term.2, &mut allocated_vars, cs)?; // Get the values if available - let left_val = acvm_witness_map + let left_value = acvm_witness_map .as_ref() .and_then(|map| map.get(&mul_term.1)) .map(|&v| convert_to_halo2_field(v)); - let right_val = acvm_witness_map + let right_value = acvm_witness_map .as_ref() .and_then(|map| map.get(&mul_term.2)) .map(|&v| convert_to_halo2_field(v)); @@ -235,8 +283,8 @@ impl StepCircuit for NoirProgram { let product = AllocatedNum::alloc( cs.namespace(|| format!("prod_g{idx}_t{}", mul_term.1.as_usize())), || { - let l = left_val.unwrap_or_else(Scalar::zero); - let r = right_val.unwrap_or_else(Scalar::zero); + let l = left_value.unwrap_or_else(Scalar::zero); + let r = right_value.unwrap_or_else(Scalar::zero); Ok(l * r) }, )?; @@ -244,8 +292,8 @@ impl StepCircuit for NoirProgram { // Enforce that this is indeed the product cs.enforce( || format!("prod_constraint_g{idx}_t{}", mul_term.1.as_usize()), - |lc| lc + left_var, - |lc| lc + right_var, + |lc| lc + left_variable, + |lc| lc + right_variable, |lc| lc + product.get_variable(), ); @@ -297,11 +345,6 @@ impl StepCircuit for NoirProgram { if let Some(noirc_abi::AbiReturnType { abi_type: AbiType::Struct { fields, .. }, .. 
}) = &self.abi.return_type { - // Print debug information - trace!("Return type fields: {:?}", fields.iter().map(|(name, _)| name).collect::>()); - trace!("Return values length: {}", return_values.len()); - - // Check if we have the expected FoldingVariables structure let registers_field = fields .iter() .find(|(name, _)| name == "registers") @@ -312,38 +355,34 @@ impl StepCircuit for NoirProgram { _ => panic!("Expected registers to be an array type"), }; - trace!("Registers length from ABI: {}", registers_length); - - // Find the index of the program_counter in the return values - let pc_index = fields - .iter() - .position(|(name, _)| name == "program_counter") - .unwrap_or_else(|| panic!("Missing 'program_counter' field")); - - trace!("Program counter index: {}", pc_index); - - // The Noir ABI returns fields in order, so we can directly map them to return_values - // First n values are the registers, followed by program_counter - if return_values.len() >= registers_length + 1 { + if return_values.len() > registers_length { let registers = return_values[0..registers_length].to_vec(); let next_pc = Some(return_values[registers_length].clone()); trace!("Extracted {} registers and program counter", registers.len()); return Ok((next_pc, registers)); - } else { - error!( - "Not enough return values. Expected at least {}, got {}", - registers_length + 1, - return_values.len() - ); - return Err(SynthesisError::Unsatisfiable); } + error!( + "Not enough return values. 
Expected at least {}, got {}", + registers_length + 1, + return_values.len() + ); + return Err(SynthesisError::Unsatisfiable); } Err(SynthesisError::Unsatisfiable) } } +/// Converts a field element from ACIR representation to Halo2 representation +/// +/// # Arguments +/// +/// * `f` - The field element in ACIR representation +/// +/// # Returns +/// +/// The field element in Halo2 representation fn convert_to_halo2_field(f: GenericFieldElement) -> Scalar { let bytes = f.to_be_bytes(); let mut arr = [0u8; 32]; @@ -352,6 +391,15 @@ fn convert_to_halo2_field(f: GenericFieldElement) -> Scalar { Scalar::from_repr(arr).unwrap() } +/// Converts a field element from Halo2 representation to ACIR representation +/// +/// # Arguments +/// +/// * `f` - The field element in Halo2 representation +/// +/// # Returns +/// +/// The field element in ACIR representation fn convert_to_acir_field(f: Scalar) -> GenericFieldElement { let mut bytes = f.to_bytes(); bytes.reverse(); diff --git a/frontend/src/program.rs b/frontend/src/program.rs index b8aabb6..301df80 100644 --- a/frontend/src/program.rs +++ b/frontend/src/program.rs @@ -1,3 +1,28 @@ +//! # Program Execution +//! +//! This module provides the core execution functionality for NIVC (Non-uniform Incrementally +//! Verifiable Computation) with Noir circuits. It defines the memory models, switchboard logic for +//! circuit coordination, and functions for running programs and compressing proofs. +//! +//! ## Memory Models +//! +//! Two memory models are supported: +//! - **ROM (Read-Only Memory)**: Programs with all inputs known in advance +//! - **RAM (Random-Access Memory)**: Programs that compute inputs dynamically during execution +//! +//! ## Switchboard +//! +//! The [`Switchboard`] struct manages a collection of Noir circuits and controls the execution flow +//! between them. It maintains: +//! - A list of circuits +//! - The current program counter (circuit index) +//! - Input data appropriate for the memory model +//! 
+//! ## Execution Functions +//! +//! - [`run`]: Executes a program with the appropriate memory model +//! - [`compress`]: Compresses a recursive SNARK into a more compact form for verification + use client_side_prover::supernova::{NonUniformCircuit, RecursiveSNARK}; use halo2curves::grumpkin; use noirc_abi::InputMap; @@ -10,46 +35,84 @@ use crate::{ setup::{Ready, Setup}, }; -// TODO: Consider moving contents of mod.rs files to a separate files. mod.rs -// files should only be used to adjust the visibility of exported items. - -/// Compressed proof type +/// Compressed proof type representing a folding proof with associated verifier digest +/// +/// This proof can be serialized for transmission or storage and later verified. pub type CompressedProof = FoldingProof, Scalar>; +/// Trait for memory models used in the NIVC system +/// +/// This trait is sealed, meaning it can only be implemented by the types in this crate +/// (specifically, `ROM` and `RAM`). pub trait Memory: private::Sealed { + /// The data type associated with this memory model type Data; } +/// Private module containing implementation details for sealing the Memory trait mod private { use super::{RAM, ROM}; + /// Seals the [`Memory`] trait pub trait Sealed {} impl Sealed for ROM {} impl Sealed for RAM {} } +/// Read-Only Memory model +/// +/// In ROM mode, all inputs for the computation are known in advance and provided +/// as a sequence of witness values. #[derive(Debug, Clone)] pub struct ROM {} impl Memory for ROM { + /// ROM uses a vector of `InputMaps` as its data type Data = Vec; } +/// Random-Access Memory model +/// +/// In RAM mode, inputs are computed dynamically during execution. Each circuit +/// can influence the execution path by setting the program counter for the next step. 
#[derive(Debug, Clone)] pub struct RAM {} impl Memory for RAM { + /// RAM doesn't require any additional input data type Data = (); } -// NOTE: These are `pub(crate)` to avoid exposing the `index` field to the -// outside world. +/// Manages a collection of circuits and controls execution flow +/// +/// The switchboard holds all the circuits that can be executed in a NIVC computation, +/// and maintains the program counter (current circuit index). It is parameterized by +/// a memory model that determines how inputs are handled. #[derive(Debug, Clone)] pub struct Switchboard { - pub(crate) circuits: Vec, - pub(crate) public_input: Vec, + /// The collection of Noir circuits that can be executed + pub(crate) circuits: Vec, + + /// Public input values (initial registers for the computation) + pub(crate) public_input: Vec, + + /// The initial circuit index to start execution from pub(crate) initial_circuit_index: usize, - pub(crate) switchboard_inputs: M::Data, + + /// Input data specific to the memory model + pub(crate) switchboard_inputs: M::Data, } impl Switchboard { + /// Creates a new switchboard with Read-Only Memory model + /// + /// # Arguments + /// + /// * `circuits` - Collection of Noir circuits that can be executed + /// * `switchboard_inputs` - Sequence of inputs for each execution step + /// * `public_input` - Initial register values + /// * `initial_circuit_index` - The starting circuit index + /// + /// # Returns + /// + /// A new `Switchboard` instance configured for ROM execution pub fn new( mut circuits: Vec, switchboard_inputs: Vec, @@ -64,6 +127,17 @@ impl Switchboard { } impl Switchboard { + /// Creates a new switchboard with Random-Access Memory model + /// + /// # Arguments + /// + /// * `circuits` - Collection of Noir circuits that can be executed + /// * `public_input` - Initial register values + /// * `initial_circuit_index` - The starting circuit index + /// + /// # Returns + /// + /// A new [`Switchboard`] instance configured for RAM 
execution pub fn new( mut circuits: Vec, public_input: Vec, @@ -80,17 +154,36 @@ impl NonUniformCircuit for Switchboard { type C1 = NoirProgram; type C2 = TrivialCircuit; + /// Returns the number of circuits in the switchboard fn num_circuits(&self) -> usize { self.circuits.len() } + /// Returns the primary circuit at the given index fn primary_circuit(&self, circuit_index: usize) -> Self::C1 { self.circuits[circuit_index].clone() } + /// Returns the secondary circuit (always trivial for NIVC with Noir) fn secondary_circuit(&self) -> Self::C2 { TrivialCircuit::default() } + /// Returns the initial circuit index to start execution from fn initial_circuit_index(&self) -> usize { self.initial_circuit_index } } +/// Executes a program with the appropriate memory model +/// +/// This function dispatches to either [`run_rom`] or [`run_ram`] based on the memory model. +/// +/// # Arguments +/// +/// * `setup` - The setup parameters for the program +/// +/// # Returns +/// +/// A [`RecursiveSNARK`] representing the execution trace +/// +/// # Errors +/// +/// Returns a [`FrontendError`] if execution fails pub fn run(setup: &Setup>) -> Result, FrontendError> { if std::any::type_name::() == std::any::type_name::() { // Safety: We've verified the type matches ROM @@ -111,155 +204,170 @@ pub fn run(setup: &Setup>) -> Result, Fro } } +/// Executes a program using the ROM memory model +/// +/// In ROM mode, all inputs are known in advance and provided as a sequence. +/// The program executes each step with the corresponding input. 
+/// +/// # Arguments +/// +/// * `setup` - The setup parameters for the program +/// +/// # Returns +/// +/// A [`RecursiveSNARK`] representing the execution trace +/// +/// # Errors +/// +/// Returns a [`FrontendError`] if execution fails pub fn run_rom(setup: &Setup>) -> Result, FrontendError> { - info!("Starting SuperNova program..."); + info!("Starting SuperNova program with ROM memory model..."); let z0_primary = &setup.switchboard.public_input; let z0_secondary = &[grumpkin::Fr::ZERO]; - let time = std::time::Instant::now(); - // Initialize recursive SNARK as None let mut recursive_snark: Option> = None; - for (idx, switchboard_witness) in setup.switchboard.switchboard_inputs.iter().enumerate() { + // ROM-specific: iterate through predefined sequence of inputs + for (idx, witness) in setup.switchboard.switchboard_inputs.iter().enumerate() { info!("Step {} of {} witnesses", idx + 1, setup.switchboard.switchboard_inputs.len()); - // Determine program counter based on current state - let program_counter = match &recursive_snark { - None => setup.switchboard.initial_circuit_index(), - Some(snark) => { - // TODO: I honestly am surprised that the prover chose to use a usize instead of a field - // element for the PC, it would be cleaner to do otherwise - let pc_bytes = snark.program_counter().to_bytes(); - - // Check if higher bytes are non-zero (which would be truncated in usize conversion) - let usize_size = std::mem::size_of::(); - if pc_bytes[usize_size..].iter().any(|&b| b != 0) { - return Err(FrontendError::Other("Program counter value too large for usize".into())); - } - - // Convert the relevant bytes to usize (using little-endian order) - let mut pc_value = 0usize; - for (i, &b) in pc_bytes.iter().take(usize_size).enumerate() { - pc_value |= (b as usize) << (i * 8); - } - - pc_value - }, - }; - - debug!("Program counter = {:?}", program_counter); - - // Prepare circuits for this step - let mut circuit_primary = 
setup.switchboard.primary_circuit(program_counter); - circuit_primary.witness = Some(switchboard_witness.clone()); - let circuit_secondary = setup.switchboard.secondary_circuit(); - - // Initialize or update the recursive SNARK - if recursive_snark.is_none() { - // Initialize a new recursive SNARK for the first step - recursive_snark = Some(RecursiveSNARK::new( - &setup.params, - &setup.switchboard, - &circuit_primary, - &circuit_secondary, - z0_primary, - z0_secondary, - )?); - } - - // Prove the next step - info!("Proving single step..."); - let snark = recursive_snark.as_mut().unwrap(); - snark.prove_step(&setup.params, &circuit_primary, &circuit_secondary)?; - info!("Done proving single step..."); + recursive_snark = + prove_single_step(setup, recursive_snark, Some(witness.clone()), z0_primary, z0_secondary)?; } trace!("Recursive loop of `program::run()` elapsed: {:?}", time.elapsed()); - - // Return the completed recursive SNARK Ok(recursive_snark.unwrap()) } +/// Executes a program using the RAM memory model +/// +/// In RAM mode, inputs are computed dynamically during execution. Each circuit +/// can influence the execution path by setting the program counter for the next step. 
+/// +/// # Arguments +/// +/// * `setup` - The setup parameters for the program +/// +/// # Returns +/// +/// A [`RecursiveSNARK`] representing the execution trace +/// +/// # Errors +/// +/// Returns a [`FrontendError`] if execution fails pub fn run_ram(setup: &Setup>) -> Result, FrontendError> { - info!("Starting SuperNova program..."); + info!("Starting SuperNova program with RAM memory model..."); let z0_primary = &setup.switchboard.public_input; let z0_secondary = &[grumpkin::Fr::ZERO]; - let time = std::time::Instant::now(); - // Initialize recursive SNARK as None let mut recursive_snark: Option> = None; let termination_pc = Scalar::ZERO - Scalar::ONE; + // RAM-specific: loop until termination condition is met loop { - // Determine program counter based on current state - let program_counter = match &recursive_snark { - None => setup.switchboard.initial_circuit_index(), - Some(snark) => { - dbg!(&snark.program_counter()); - let current_pc = snark.program_counter(); - if current_pc == termination_pc { - break; - } - - // Convert Scalar to usize for circuit indexing - let pc_bytes = current_pc.to_bytes(); - - // Check if higher bytes are non-zero (which would be truncated in usize conversion) - let usize_size = std::mem::size_of::(); - if pc_bytes[usize_size..].iter().any(|&b| b != 0) { - return Err(FrontendError::Other("Program counter value too large for usize".into())); - } - - // Convert the relevant bytes to usize (using little-endian order) - let mut pc_value = 0usize; - for (i, &b) in pc_bytes.iter().take(usize_size).enumerate() { - pc_value |= (b as usize) << (i * 8); - } - - pc_value - }, - }; + // Check termination condition if we have a SNARK + if let Some(snark) = &recursive_snark { + let current_pc = snark.program_counter(); + if current_pc == termination_pc { + break; + } + } - debug!("Program counter = {:?}", program_counter); + recursive_snark = prove_single_step( + setup, + recursive_snark, + None, // RAM doesn't use predefined witness values 
+ z0_primary, + z0_secondary, + )?; + } - // Prepare circuits for this step - dbg!(&program_counter); - let mut circuit_primary = setup.switchboard.primary_circuit(program_counter); - // TODO: This is a hack to get the witness to be non-empty so ACVM is spawned + trace!("Recursive loop of `program::run()` elapsed: {:?}", time.elapsed()); + Ok(recursive_snark.unwrap()) +} + +/// Helper function to prove a single step of execution +/// +/// This handles the common logic between ROM and RAM execution modes. +fn prove_single_step( + setup: &Setup>, + recursive_snark: Option>, + witness: Option, + z0_primary: &[Scalar], + z0_secondary: &[grumpkin::Fr], +) -> Result>, FrontendError> { + let program_counter = match &recursive_snark { + None => setup.switchboard.initial_circuit_index(), + Some(snark) => { + let pc_bytes = snark.program_counter().to_bytes(); + let usize_size = std::mem::size_of::(); + + // Check if higher bytes are non-zero + if pc_bytes[usize_size..].iter().any(|&b| b != 0) { + return Err(FrontendError::Other("Program counter value too large for usize".into())); + } + + // Convert to usize (little-endian) + let mut pc_value = 0usize; + for (i, &b) in pc_bytes.iter().take(usize_size).enumerate() { + pc_value |= (b as usize) << (i * 8); + } + + pc_value + }, + }; + + debug!("Program counter = {:?}", program_counter); + + let mut circuit_primary = setup.switchboard.primary_circuit(program_counter); + + if let Some(w) = witness { + circuit_primary.witness = Some(w); + } else { circuit_primary.witness = Some(InputMap::new()); - let circuit_secondary = setup.switchboard.secondary_circuit(); - - // Initialize or update the recursive SNARK - if recursive_snark.is_none() { - // Initialize a new recursive SNARK for the first step - recursive_snark = Some(RecursiveSNARK::new( - &setup.params, - &setup.switchboard, - &circuit_primary, - &circuit_secondary, - z0_primary, - z0_secondary, - )?); - } + } - // Prove the next step - info!("Proving single step..."); - let 
snark = recursive_snark.as_mut().unwrap(); - snark.prove_step(&setup.params, &circuit_primary, &circuit_secondary)?; - info!("Done proving single step..."); - dbg!(snark.program_counter()); + let circuit_secondary = setup.switchboard.secondary_circuit(); + + let mut result = recursive_snark; + if result.is_none() { + result = Some(RecursiveSNARK::new( + &setup.params, + &setup.switchboard, + &circuit_primary, + &circuit_secondary, + z0_primary, + z0_secondary, + )?); } - trace!("Recursive loop of `program::run()` elapsed: {:?}", time.elapsed()); + // Prove the next step + info!("Proving single step..."); + let snark = result.as_mut().unwrap(); + snark.prove_step(&setup.params, &circuit_primary, &circuit_secondary)?; + info!("Done proving single step..."); - // Return the completed recursive SNARK - Ok(recursive_snark.unwrap()) + Ok(result) } +/// Compresses a recursive SNARK into a compact proof for efficient verification +/// +/// # Arguments +/// +/// * `setup` - The setup parameters for the program +/// * `recursive_snark` - The recursive SNARK to compress +/// +/// # Returns +/// +/// A `CompressedProof` that can be serialized and later verified +/// +/// # Errors +/// +/// Returns a `FrontendError` if compression fails pub fn compress( setup: &Setup>, recursive_snark: &RecursiveSNARK, @@ -268,8 +376,7 @@ pub fn compress( &setup.params, setup.vk_digest_primary, setup.vk_digest_secondary, - ) - .unwrap(); + )?; debug!( "initialized pk pk_primary.digest={:?}, pk_secondary.digest={:?}", pk.pk_primary.vk_digest, pk.pk_secondary.vk_digest diff --git a/frontend/src/setup.rs b/frontend/src/setup.rs index 70a637f..7d07cdd 100644 --- a/frontend/src/setup.rs +++ b/frontend/src/setup.rs @@ -1,3 +1,24 @@ +//! # Setup and Parameter Management +//! +//! This module handles the setup and parameter management for the NIVC system. +//! It provides functionality for: +//! +//! - Creating and managing cryptographic parameters +//! - Storing and loading setup data +//! 
- Converting between different setup states +//! +//! ## Setup States +//! +//! The setup can be in one of two states: +//! - **Empty**: Contains only auxiliary parameters without a switchboard (can be serialized for +//! storage) +//! - **Ready**: Complete setup with a switchboard that's ready for program execution +//! +//! ## Storage +//! +//! Setup parameters can be serialized and stored to disk, then later deserialized and +//! combined with a switchboard to create a ready setup. + use std::io::Cursor; use client_side_prover::{ @@ -13,48 +34,79 @@ use crate::{ AuxParams, E1, S1, S2, }; +/// Trait that defines the status of a setup +/// +/// This sealed trait can only be implemented by the predefined status types: +/// - `Ready`: A setup that is ready for execution with a specific memory model +/// - `Empty`: A setup that only contains cryptographic parameters without a switchboard pub trait Status: private::Sealed { + /// The switchboard type associated with this status type Switchboard; + + /// The public parameters type associated with this status type PublicParams; } +/// Private module for sealing the Status trait mod private { use super::{Empty, Ready}; + + /// Sealed trait implementation to restrict Status implementations pub trait Sealed {} impl Sealed for Ready {} impl Sealed for Empty {} } +/// Represents a setup that is ready for execution with a specific memory model +/// +/// A `Ready` setup contains both the cryptographic parameters and a switchboard, +/// making it ready to execute programs. 
#[derive(Debug, Clone)] pub struct Ready { + /// Marker for the memory model type _marker: std::marker::PhantomData, } impl Status for Ready { + /// A ready setup uses a switchboard with the specified memory model type PublicParams = PublicParams; + /// A ready setup has a specific switchboard associated with it type Switchboard = Switchboard; } +/// Represents a setup that only contains cryptographic parameters without a switchboard +/// +/// An `Empty` setup can be serialized and stored, making it useful for saving +/// computationally expensive cryptographic parameters. #[derive(Debug, Clone)] pub struct Empty { + /// Marker for the memory model type _marker: std::marker::PhantomData, } impl Status for Empty { + /// An empty setup only contains auxiliary parameters type PublicParams = AuxParams; + /// An empty setup doesn't have a switchboard type Switchboard = (); } -// TODO: This could probably just store the programs with it +/// Setup parameters for NIVC computation +/// +/// This structure holds the cryptographic parameters, verification key digests, +/// and optionally a switchboard depending on its status. #[derive(Clone, Debug)] pub struct Setup { - /// Auxiliary parameters - pub params: S::PublicParams, + /// Cryptographic parameters (type depends on the status) + pub params: S::PublicParams, + /// Primary verification key digest - pub vk_digest_primary: ::Scalar, + pub vk_digest_primary: ::Scalar, + /// Secondary verification key digest pub vk_digest_secondary: as Engine>::Scalar, + /// Switchboard (if the setup is [`Ready`]) or unit (if [`Empty`]) pub switchboard: S::Switchboard, } @@ -67,18 +119,38 @@ impl PartialEq for Setup { } impl Setup> { - pub fn new(switchboard: Switchboard) -> Self { + /// Creates a new ready setup with the given switchboard + /// + /// This initializes the cryptographic parameters based on the circuits in the switchboard + /// and generates the verification key digests. 
+ /// + /// # Arguments + /// + /// * `switchboard` - The switchboard containing the circuits to be executed + /// + /// # Returns + /// + /// A new ready setup that can be used to execute programs + pub fn new(switchboard: Switchboard) -> Result { let public_params = PublicParams::setup(&switchboard, &*default_ck_hint(), &*default_ck_hint()); - let (pk, _vk) = CompressedSNARK::::setup(&public_params).unwrap(); + let (pk, _vk) = CompressedSNARK::::setup(&public_params)?; - Setup { + Ok(Self { params: public_params, vk_digest_primary: pk.pk_primary.vk_digest, vk_digest_secondary: pk.pk_secondary.vk_digest, switchboard, - } + }) } + /// Converts a ready setup to an empty setup + /// + /// This extracts the auxiliary parameters from the public parameters and + /// creates an empty setup without the switchboard, which can be serialized. + /// + /// # Returns + /// + /// An empty setup containing only the auxiliary parameters fn into_empty(self) -> Setup> { Setup { params: self.params.into_parts().1, @@ -88,6 +160,18 @@ impl Setup> { } } + /// Serializes the setup and stores it to a file + /// + /// This converts the setup to an empty setup, serializes it, and writes + /// the resulting bytes to the specified file path. + /// + /// # Arguments + /// + /// * `path` - The file path where the setup should be stored + /// + /// # Returns + /// + /// The serialized bytes on success, or a `FrontendError` on failure pub fn store_file(self, path: &std::path::PathBuf) -> Result, FrontendError> { let bytes = self.into_empty().to_bytes(); if let Some(parent) = path.parent() { @@ -102,6 +186,18 @@ impl Setup> { } impl Setup> { + /// Converts an empty setup to a ready setup + /// + /// This combines the auxiliary parameters with a switchboard to create + /// a ready setup that can be used to execute programs. 
+ /// + /// # Arguments + /// + /// * `switchboard` - The switchboard to be used for execution + /// + /// # Returns + /// + /// A ready setup containing the parameters and switchboard pub fn into_ready(self, switchboard: Switchboard) -> Setup> { Setup { params: PublicParams::from_parts(get_circuit_shapes(&switchboard), self.params), @@ -110,10 +206,33 @@ impl Setup> { switchboard, } } + + /// Deserializes a setup from a file + /// + /// # Arguments + /// + /// * `path` - The file path where the setup should be stored + /// + /// # Returns + /// + /// The deserialized setup, or a [`FrontendError`] on failure + pub fn load_file(path: &std::path::PathBuf) -> Result { + let bytes = std::fs::read(path)?; + Ok(Self::from_bytes(&bytes)?) + } } -// TODO: We may be able to just use rkyv + +// TODO: We should consider using `rkyv` for serialization and deserialization impl FastSerde for Setup> { - /// Initialize ProvingParams from an efficiently serializable data format. + /// Deserializes a setup from bytes + /// + /// # Arguments + /// + /// * `bytes` - The serialized setup data + /// + /// # Returns + /// + /// The deserialized empty setup, or a `SerdeByteError` on failure fn from_bytes(bytes: &[u8]) -> Result { let mut cursor = Cursor::new(bytes); Self::validate_header(&mut cursor, SerdeByteTypes::ProverParams, 3)?; @@ -133,10 +252,14 @@ impl FastSerde for Setup> { .into_option() .ok_or(SerdeByteError::G1DecodeError)?; - Ok(Setup { params, vk_digest_primary, vk_digest_secondary, switchboard: () }) + Ok(Self { params, vk_digest_primary, vk_digest_secondary, switchboard: () }) } - /// Convert ProvingParams to an efficient serialization. 
+ /// Serializes a setup to bytes + /// + /// # Returns + /// + /// The serialized setup data fn to_bytes(&self) -> Vec { let mut out = Vec::new(); out.extend_from_slice(&fast_serde::MAGIC_NUMBER); @@ -158,13 +281,13 @@ mod tests { #[test] fn test_setup_and_params() { - let setup = Setup::new(Switchboard::::new(vec![square_zeroth()], vec![], 0)); + let setup = Setup::new(Switchboard::::new(vec![square_zeroth()], vec![], 0)).unwrap(); assert_eq!(setup.params.num_constraints_and_variables(0), (10009, 10001)); } #[test] fn test_setup_serialize() { - let setup = Setup::new(Switchboard::::new(vec![square_zeroth()], vec![], 0)); + let setup = Setup::new(Switchboard::::new(vec![square_zeroth()], vec![], 0)).unwrap(); let empty_setup = setup.into_empty(); let serialized = empty_setup.to_bytes(); let deserialized = Setup::>::from_bytes(&serialized).unwrap(); @@ -174,7 +297,7 @@ mod tests { #[test] fn test_setup_store_file() { let switchboard = Switchboard::::new(vec![square_zeroth()], vec![], 0); - let setup = Setup::new(switchboard.clone()); + let setup = Setup::new(switchboard.clone()).unwrap(); let vk_digest_primary = setup.vk_digest_primary; let vk_digest_secondary = setup.vk_digest_secondary; let path = tempfile::tempdir().unwrap().into_path(); diff --git a/frontend/tests/end_to_end/mod.rs b/frontend/tests/end_to_end/mod.rs index 7cec84d..5c81872 100644 --- a/frontend/tests/end_to_end/mod.rs +++ b/frontend/tests/end_to_end/mod.rs @@ -1,3 +1,91 @@ -use client_side_prover_frontend::setup::Setup; +use std::fs; + +use client_side_prover_frontend::{ + demo, + program::{self, Switchboard, ROM}, + proof::FoldingProof, + setup::{Empty, Ready, Setup}, + Scalar, +}; +use noirc_abi::InputMap; +use tempfile::tempdir; use super::*; + +#[test] +#[traced_test] +fn test_end_to_end_workflow() { + // Step 1: Create demo programs for our test + let swap_memory_program = demo::swap_memory(); + let square_program = demo::square_zeroth(); + println!("1. 
Read programs"); + + // Step 2: Create switchboard with ROM memory model, no inputs are necessary since this is just + // creating the setup + let switchboard = Switchboard::::new( + vec![swap_memory_program.clone(), square_program.clone()], + vec![], + vec![], + 0, + ); + println!("2. Created switchboard"); + + // Step 3: Initialize the setup + let setup = Setup::>::new(switchboard).unwrap(); + println!("3. Initialized setup"); + + // Step 4: Save the setup to a file + let temp_dir = tempdir().unwrap(); + let file_path = temp_dir.path().join("test_setup.bytes"); + setup.store_file(&file_path).unwrap(); + println!("4. Saved setup to file"); + + // Step 5: Read the setup from the file + let setup = Setup::>::load_file(&file_path).unwrap(); + println!("5. Read setup from file"); + + // Step 6: Ready the setup for proving with the switchboard + let switchboard = Switchboard::::new( + vec![swap_memory_program, square_program], + vec![InputMap::new(), InputMap::new()], + vec![Scalar::from(3), Scalar::from(5)], + 0, + ); + let setup = setup.into_ready(switchboard); + println!("6. Ready the setup for proving with the switchboard"); + + // Step 7: Run a proof + let recursive_snark = program::run(&setup).unwrap(); + println!("7. Run a proof"); + + // Step 8: Compress the proof + let compressed_proof = program::compress(&setup, &recursive_snark).unwrap(); + println!("8. Compressed the proof"); + + // Step 9: Serialize the proof + let serialized_proof = compressed_proof.serialize().unwrap(); + println!("9. Serialized the proof"); + + // Step 10: Save the serialized proof to a file + let proof_file_path = temp_dir.path().join("test_proof.bytes"); + let proof_bytes = bincode::serialize(&serialized_proof).unwrap(); + fs::write(&proof_file_path, &proof_bytes).unwrap(); + println!("10. 
Saved the serialized proof to a file"); + + // Step 11: Read and deserialize the proof + let proof_bytes_from_file = fs::read(&proof_file_path).unwrap(); + let deserialized_proof: FoldingProof, String> = + bincode::deserialize(&proof_bytes_from_file).unwrap(); + println!("11. Read and deserialized the proof"); + + // Step 12: Convert back to compressed proof + let compressed_proof_from_file = deserialized_proof.deserialize().unwrap(); + println!("12. Converted back to compressed proof"); + // Step 15: Verify the proof + // Note: Verification would normally involve checking the proof against the verifier key + // from the setup, but I'll use a simplified check that the digests match + // assert_eq!( + // compressed_proof.verifier_digest, compressed_proof_from_file.verifier_digest, + // "Verifier digests don't match after serialization/deserialization" + // ); +} diff --git a/frontend/tests/ivc/mod.rs b/frontend/tests/ivc/mod.rs index 86869d7..d70f344 100644 --- a/frontend/tests/ivc/mod.rs +++ b/frontend/tests/ivc/mod.rs @@ -27,7 +27,7 @@ fn test_ivc() { vec![Scalar::from(2), Scalar::from(1)], 0, ); - let setup = Setup::new(switchboard); + let setup = Setup::new(switchboard).unwrap(); let snark = run(&setup).unwrap(); dbg!(&snark.zi_primary()); assert_eq!(snark.zi_primary()[0], Scalar::from(256)); @@ -66,7 +66,7 @@ fn test_ivc_private_inputs() { vec![Scalar::from(1), Scalar::from(2)], 0, ); - let setup = Setup::new(switchboard); + let setup = Setup::new(switchboard).unwrap(); let snark = run(&setup).unwrap(); let zi = snark.zi_primary(); dbg!(zi); @@ -101,7 +101,7 @@ fn test_nivc() { vec![Scalar::from(1), Scalar::from(2)], 0, ); - let setup = Setup::new(switchboard); + let setup = Setup::new(switchboard).unwrap(); let snark = run(&setup).unwrap(); let zi = snark.zi_primary(); dbg!(zi); @@ -132,7 +132,7 @@ fn test_ivc_verify() { vec![Scalar::from(2), Scalar::from(1)], 0, ); - let setup = Setup::new(switchboard); + let setup = Setup::new(switchboard).unwrap(); let 
snark = run(&setup).unwrap(); let (z1_primary, z1_secondary) = snark.verify(&setup.params, &snark.z0_primary(), &snark.z0_secondary()).unwrap(); @@ -157,7 +157,7 @@ fn test_ivc_compression() { vec![Scalar::from(2), Scalar::from(1)], 0, ); - let setup = Setup::new(switchboard); + let setup = Setup::new(switchboard).unwrap(); let snark = run(&setup).unwrap(); let compressed_proof = compress(&setup, &snark).unwrap(); @@ -177,7 +177,7 @@ fn test_ivc_verify_basic() { ("external_add".to_string(), InputValue::Field(GenericFieldElement::from(10_u64))), ])]; let switchboard = Switchboard::::new(programs, switchboard_inputs, vec![Scalar::from(2)], 0); - let setup = Setup::new(switchboard); + let setup = Setup::new(switchboard).unwrap(); let snark = run(&setup).unwrap(); let (z1_primary, z1_secondary) = snark.verify(&setup.params, &snark.z0_primary(), &snark.z0_secondary()).unwrap(); @@ -196,7 +196,7 @@ fn test_ivc_compression_basic() { ("external_add".to_string(), InputValue::Field(GenericFieldElement::from(10_u64))), ])]; let switchboard = Switchboard::::new(programs, switchboard_inputs, vec![Scalar::from(2)], 0); - let setup = Setup::new(switchboard); + let setup = Setup::new(switchboard).unwrap(); let snark = run(&setup).unwrap(); let compressed_proof = compress(&setup, &snark).unwrap(); let (_, vk) = CompressedSNARK::setup(&setup.params).unwrap(); @@ -217,7 +217,7 @@ fn test_ivc_verify_poseidon() { vec![Scalar::from(2), Scalar::from(1)], 0, ); - let setup = Setup::new(switchboard); + let setup = Setup::new(switchboard).unwrap(); let snark = run(&setup).unwrap(); let (z1_primary, z1_secondary) = snark.verify(&setup.params, &snark.z0_primary(), &snark.z0_secondary()).unwrap(); @@ -236,7 +236,7 @@ fn test_ivc_compression_poseidon() { vec![Scalar::from(2), Scalar::from(1)], 0, ); - let setup = Setup::new(switchboard); + let setup = Setup::new(switchboard).unwrap(); let snark = run(&setup).unwrap(); let compressed_proof = compress(&setup, &snark).unwrap(); @@ -258,7 +258,7 @@ 
fn test_collatz() { vec![Scalar::from(collatz_start)], initial_circuit_index as usize, ); - let setup = Setup::new(switchboard); + let setup = Setup::new(switchboard).unwrap(); let snark = run(&setup).unwrap(); let (z1_primary, z1_secondary) = snark.verify(&setup.params, &snark.z0_primary(), &snark.z0_secondary()).unwrap(); From 4e07069311a1923c0ccaf544687a7eac0e8eddaa Mon Sep 17 00:00:00 2001 From: Colin Roberts Date: Thu, 6 Mar 2025 15:53:02 -0700 Subject: [PATCH 40/51] fix: end to end --- frontend/src/noir.rs | 1 + frontend/src/program.rs | 1 + frontend/tests/end_to_end/mod.rs | 16 +++++++++++----- 3 files changed, 13 insertions(+), 5 deletions(-) diff --git a/frontend/src/noir.rs b/frontend/src/noir.rs index 72efe1e..1fef443 100644 --- a/frontend/src/noir.rs +++ b/frontend/src/noir.rs @@ -186,6 +186,7 @@ impl StepCircuit for NoirProgram { // Create variable tracker and initialize ACVM let mut allocated_vars: HashMap> = HashMap::new(); + let acvm_witness_map = self.witness.as_ref().map(|inputs| { let mut acvm = ACVM::new( &StubbedBlackBoxSolver(false), diff --git a/frontend/src/program.rs b/frontend/src/program.rs index 301df80..adb438d 100644 --- a/frontend/src/program.rs +++ b/frontend/src/program.rs @@ -233,6 +233,7 @@ pub fn run_rom(setup: &Setup>) -> Result, Frontend for (idx, witness) in setup.switchboard.switchboard_inputs.iter().enumerate() { info!("Step {} of {} witnesses", idx + 1, setup.switchboard.switchboard_inputs.len()); + // TODO: We should not clone the witness here recursive_snark = prove_single_step(setup, recursive_snark, Some(witness.clone()), z0_primary, z0_secondary)?; } diff --git a/frontend/tests/end_to_end/mod.rs b/frontend/tests/end_to_end/mod.rs index 5c81872..690fc01 100644 --- a/frontend/tests/end_to_end/mod.rs +++ b/frontend/tests/end_to_end/mod.rs @@ -1,5 +1,6 @@ use std::fs; +use acvm::acir::acir_field::GenericFieldElement; use client_side_prover_frontend::{ demo, program::{self, Switchboard, ROM}, @@ -7,7 +8,7 @@ use 
client_side_prover_frontend::{ setup::{Empty, Ready, Setup}, Scalar, }; -use noirc_abi::InputMap; +use noirc_abi::{input_parser::InputValue, InputMap}; use tempfile::tempdir; use super::*; @@ -45,9 +46,15 @@ fn test_end_to_end_workflow() { println!("5. Read setup from file"); // Step 6: Ready the setup for proving with the switchboard + let input1 = + InputMap::from([("next_pc".to_string(), InputValue::Field(GenericFieldElement::from(1_u64)))]); + let input2 = InputMap::from([( + "next_pc".to_string(), + InputValue::Field(GenericFieldElement::from(-1_i128)), + )]); let switchboard = Switchboard::::new( vec![swap_memory_program, square_program], - vec![InputMap::new(), InputMap::new()], + vec![input1, input2], vec![Scalar::from(3), Scalar::from(5)], 0, ); @@ -81,9 +88,8 @@ fn test_end_to_end_workflow() { // Step 12: Convert back to compressed proof let compressed_proof_from_file = deserialized_proof.deserialize().unwrap(); println!("12. Converted back to compressed proof"); - // Step 15: Verify the proof - // Note: Verification would normally involve checking the proof against the verifier key - // from the setup, but I'll use a simplified check that the digests match + + // Step 13: Verify the proof digests match // assert_eq!( // compressed_proof.verifier_digest, compressed_proof_from_file.verifier_digest, // "Verifier digests don't match after serialization/deserialization" From f255e7ca3486930c12626176a2c915bb22389818 Mon Sep 17 00:00:00 2001 From: Colin Roberts Date: Thu, 6 Mar 2025 16:07:23 -0700 Subject: [PATCH 41/51] bug: handle end case --- frontend/tests/end_to_end/mod.rs | 23 ++++++++++++++--------- frontend/tests/ivc/mod.rs | 7 +++++++ 2 files changed, 21 insertions(+), 9 deletions(-) diff --git a/frontend/tests/end_to_end/mod.rs b/frontend/tests/end_to_end/mod.rs index 690fc01..a837326 100644 --- a/frontend/tests/end_to_end/mod.rs +++ b/frontend/tests/end_to_end/mod.rs @@ -1,6 +1,7 @@ use std::fs; use acvm::acir::acir_field::GenericFieldElement; +use 
client_side_prover::supernova::snark::CompressedSNARK; use client_side_prover_frontend::{ demo, program::{self, Switchboard, ROM}, @@ -48,17 +49,15 @@ fn test_end_to_end_workflow() { // Step 6: Ready the setup for proving with the switchboard let input1 = InputMap::from([("next_pc".to_string(), InputValue::Field(GenericFieldElement::from(1_u64)))]); - let input2 = InputMap::from([( - "next_pc".to_string(), - InputValue::Field(GenericFieldElement::from(-1_i128)), - )]); + let input2 = + InputMap::from([("next_pc".to_string(), InputValue::Field(GenericFieldElement::from(1_i128)))]); let switchboard = Switchboard::::new( vec![swap_memory_program, square_program], vec![input1, input2], vec![Scalar::from(3), Scalar::from(5)], 0, ); - let setup = setup.into_ready(switchboard); + let setup = setup.into_ready(switchboard.clone()); println!("6. Ready the setup for proving with the switchboard"); // Step 7: Run a proof @@ -89,9 +88,15 @@ fn test_end_to_end_workflow() { let compressed_proof_from_file = deserialized_proof.deserialize().unwrap(); println!("12. 
Converted back to compressed proof"); + // TODO: Set up a verifier from file // Step 13: Verify the proof digests match - // assert_eq!( - // compressed_proof.verifier_digest, compressed_proof_from_file.verifier_digest, - // "Verifier digests don't match after serialization/deserialization" - // ); + let vsetup = Setup::>::load_file(&file_path).unwrap(); + let vsetup = vsetup.into_ready(switchboard); + let (_pk, vk) = CompressedSNARK::setup(&vsetup.params).unwrap(); + compressed_proof_from_file.proof.verify( + &vsetup.params, + &vk, + recursive_snark.z0_primary(), + recursive_snark.z0_secondary(), + ); } diff --git a/frontend/tests/ivc/mod.rs b/frontend/tests/ivc/mod.rs index d70f344..709fd13 100644 --- a/frontend/tests/ivc/mod.rs +++ b/frontend/tests/ivc/mod.rs @@ -266,4 +266,11 @@ fn test_collatz() { dbg!(&snark.program_counter()); assert_eq!(&z1_primary, snark.zi_primary()); assert_eq!(&z1_secondary, snark.zi_secondary()); + + let compressed_proof = compress(&setup, &snark).unwrap(); + let (_, vk) = CompressedSNARK::setup(&setup.params).unwrap(); + compressed_proof + .proof + .verify(&setup.params, &vk, &snark.z0_primary(), &snark.z0_secondary()) + .unwrap(); } From 9cc21c06ca505c31970bab15025f55441a267c0c Mon Sep 17 00:00:00 2001 From: Colin Roberts Date: Thu, 6 Mar 2025 16:12:53 -0700 Subject: [PATCH 42/51] fix: arity discrepancy --- prover/src/supernova/snark.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/prover/src/supernova/snark.rs b/prover/src/supernova/snark.rs index 363b26d..ca4d477 100644 --- a/prover/src/supernova/snark.rs +++ b/prover/src/supernova/snark.rs @@ -185,10 +185,9 @@ where z0_primary: &[E1::Scalar], z0_secondary: &[ as Engine>::Scalar], ) -> Result<(Vec, Vec< as Engine>::Scalar>), SuperNovaError> { - let last_circuit_idx = field_as_usize(self.program_counter); - + // Assumes that each circuit has the same arity, so we just use the first one' let num_field_primary_ro = 3 // params_next, i_new, program_counter_new 
- + 2 * pp[last_circuit_idx].F_arity // zo, z1 + + 2 * pp[0].F_arity // zo, z1 + (7 + 2 * pp.augmented_circuit_params_primary.get_n_limbs()); // # 1 * (7 + [X0, X1]*#num_limb) // secondary circuit From 715ae5304822867683fdbd920d10ea0a12bf963e Mon Sep 17 00:00:00 2001 From: Colin Roberts Date: Thu, 6 Mar 2025 16:38:52 -0700 Subject: [PATCH 43/51] cleanup + tests pass --- frontend/src/lib.rs | 6 ++- frontend/src/program.rs | 20 +++----- frontend/src/proof.rs | 88 -------------------------------- frontend/src/setup.rs | 14 +++-- frontend/tests/end_to_end/mod.rs | 55 +++++++++++--------- frontend/tests/ivc/mod.rs | 20 ++------ 6 files changed, 53 insertions(+), 150 deletions(-) delete mode 100644 frontend/src/proof.rs diff --git a/frontend/src/lib.rs b/frontend/src/lib.rs index c7e4bc1..948a968 100644 --- a/frontend/src/lib.rs +++ b/frontend/src/lib.rs @@ -35,7 +35,7 @@ use client_side_prover::{ provider::GrumpkinEngine, spartan::batched::BatchedRelaxedR1CSSNARK, - supernova::{snark::CompressedSNARK, TrivialCircuit}, + supernova::TrivialCircuit, traits::{Engine, Group}, }; use ff::Field; @@ -47,7 +47,6 @@ use crate::error::FrontendError; pub mod error; pub mod noir; pub mod program; -pub mod proof; pub mod setup; /// Represents the scalar field for the primary curve (bn254) @@ -71,6 +70,9 @@ pub type ProverKey = client_side_prover::supernova::snark::ProverKey /// This key is used by the verifier to validate cryptographic proofs. pub type VerifierKey = client_side_prover::supernova::snark::VerifierKey; +/// Represents the `CompressedSNARK` which is a succinct proof of a `RecursiveSNARK`. +pub type CompressedSNARK = client_side_prover::supernova::snark::CompressedSNARK; + /// Represents the first elliptic curve engine used in the proof system. /// /// The primary engine uses BN256 with KZG polynomial commitments. 
diff --git a/frontend/src/program.rs b/frontend/src/program.rs index adb438d..a6e88ad 100644 --- a/frontend/src/program.rs +++ b/frontend/src/program.rs @@ -26,7 +26,6 @@ use client_side_prover::supernova::{NonUniformCircuit, RecursiveSNARK}; use halo2curves::grumpkin; use noirc_abi::InputMap; -use proof::FoldingProof; use tracing::trace; use super::*; @@ -35,11 +34,6 @@ use crate::{ setup::{Ready, Setup}, }; -/// Compressed proof type representing a folding proof with associated verifier digest -/// -/// This proof can be serialized for transmission or storage and later verified. -pub type CompressedProof = FoldingProof, Scalar>; - /// Trait for memory models used in the NIVC system /// /// This trait is sealed, meaning it can only be implemented by the types in this crate @@ -372,22 +366,20 @@ fn prove_single_step( pub fn compress( setup: &Setup>, recursive_snark: &RecursiveSNARK, -) -> Result { - let pk = CompressedSNARK::::initialize_pk( +) -> Result { + let pk = CompressedSNARK::initialize_pk( &setup.params, setup.vk_digest_primary, setup.vk_digest_secondary, )?; - debug!( + trace!( "initialized pk pk_primary.digest={:?}, pk_secondary.digest={:?}", - pk.pk_primary.vk_digest, pk.pk_secondary.vk_digest + pk.pk_primary.vk_digest, + pk.pk_secondary.vk_digest ); debug!("`CompressedSNARK::prove STARTING PROVING!"); - let proof = FoldingProof { - proof: CompressedSNARK::::prove(&setup.params, &pk, recursive_snark)?, - verifier_digest: pk.pk_primary.vk_digest, - }; + let proof = CompressedSNARK::prove(&setup.params, &pk, recursive_snark)?; debug!("`CompressedSNARK::prove completed!"); Ok(proof) diff --git a/frontend/src/proof.rs b/frontend/src/proof.rs deleted file mode 100644 index d7333db..0000000 --- a/frontend/src/proof.rs +++ /dev/null @@ -1,88 +0,0 @@ -//! # Proof Module -//! -//! This module provides the implementation for generating and verifying proofs. -//! It includes functionalities for serializing and deserializing folding proofs, -//! 
which are used in the proof system to ensure the integrity and correctness of computations. -//! -//! ## Structs -//! -//! - `FoldingProof`: Represents a folding proof with a generic proof type `T` and verifier -//! digest type `V`. -//! -//! ## Functions -//! -//! - `serialize`: Serializes a `FoldingProof` into a format suitable for storage or transmission. -//! - `deserialize`: Deserializes a `FoldingProof` from a stored or transmitted format back into its -//! original form. - -use hex; - -use super::*; -use crate::program::CompressedProof; - -/// Represents a folding proof with associated verifier digest -/// -/// A folding proof contains the actual cryptographic proof data along with -/// the verifier digest needed for verification. This is a generic structure -/// that can work with different proof and digest formats. -#[derive(Debug, Serialize, Deserialize, Clone)] -pub struct FoldingProof { - /// The cryptographic proof data - pub proof: T, - /// Digest used by the verifier to check the proof's validity - pub verifier_digest: V, -} - -impl CompressedProof { - /// Serializes a `CompressedProof` into a format suitable for storage or transmission. - /// - /// Converts the internal proof representation to a binary format and - /// encodes the verifier digest as a hexadecimal string. - /// - /// # Returns - /// - /// A `FoldingProof` with a `Vec` proof and a `String` verifier digest. - /// - /// # Errors - /// - /// Returns a `FrontendError` if serialization fails. - pub fn serialize(self) -> Result, String>, FrontendError> { - let proof = bincode::serialize(&self.proof)?; - - Ok(FoldingProof { proof, verifier_digest: hex::encode(self.verifier_digest.to_bytes()) }) - } -} - -impl FoldingProof, String> { - /// Deserializes a `FoldingProof` from a stored or transmitted format back into its original form. 
- /// - /// Converts the binary proof data back into a `CompressedSNARK` instance and - /// decodes the verifier digest from hexadecimal to its field element representation. - /// - /// # Returns - /// - /// A `CompressedProof` with a `CompressedSNARK` proof and a `Scalar` verifier digest. - /// - /// # Errors - /// - /// Returns a `FrontendError` if deserialization fails or if the hex string cannot be converted - /// to the expected field element. - pub fn deserialize(self) -> Result { - let proof = bincode::deserialize(&self.proof[..])?; - - // Decode the hex string to bytes - let digest_bytes = hex::decode(&self.verifier_digest)?; - - // Convert to fixed-size array safely - let digest_array: [u8; 32] = digest_bytes - .try_into() - .map_err(|_| FrontendError::Other("Invalid digest length".to_string()))?; - - // Convert to Scalar, handling the case where from_bytes returns CtOption - let verifier_digest = Scalar::from_bytes(&digest_array) - .into_option() - .ok_or_else(|| FrontendError::Other("Invalid scalar encoding".to_string()))?; - - Ok(FoldingProof { proof, verifier_digest }) - } -} diff --git a/frontend/src/setup.rs b/frontend/src/setup.rs index 7d07cdd..6425148 100644 --- a/frontend/src/setup.rs +++ b/frontend/src/setup.rs @@ -28,11 +28,8 @@ use client_side_prover::{ }; use tracing::debug; -use crate::{ - error::FrontendError, - program::{Memory, Switchboard}, - AuxParams, E1, S1, S2, -}; +use super::*; +use crate::program::{Memory, Switchboard}; /// Trait that defines the status of a setup /// @@ -118,6 +115,8 @@ impl PartialEq for Setup { } } +// TODO: Possibly have a `get_vk` method that returns the verification key for the given setup + impl Setup> { /// Creates a new ready setup with the given switchboard /// @@ -183,6 +182,11 @@ impl Setup> { Ok(bytes) } + + pub fn verifier_key(&self) -> Result { + let (_, vk) = CompressedSNARK::setup(&self.params)?; + Ok(vk) + } } impl Setup> { diff --git a/frontend/tests/end_to_end/mod.rs 
b/frontend/tests/end_to_end/mod.rs index a837326..901ed07 100644 --- a/frontend/tests/end_to_end/mod.rs +++ b/frontend/tests/end_to_end/mod.rs @@ -1,22 +1,25 @@ use std::fs; use acvm::acir::acir_field::GenericFieldElement; -use client_side_prover::supernova::snark::CompressedSNARK; use client_side_prover_frontend::{ demo, program::{self, Switchboard, ROM}, - proof::FoldingProof, setup::{Empty, Ready, Setup}, - Scalar, + CompressedSNARK, Scalar, }; use noirc_abi::{input_parser::InputValue, InputMap}; use tempfile::tempdir; use super::*; +/// Note that this test goes through a flow that mimics the offline setup component, online proving +/// component, and a separate verification component. #[test] #[traced_test] fn test_end_to_end_workflow() { + // ----------------------------------------------------------------------------------------------------------------- // + // Offline Setup Phase + // ----------------------------------------------------------------------------------------------------------------- // // Step 1: Create demo programs for our test let swap_memory_program = demo::swap_memory(); let square_program = demo::square_zeroth(); @@ -41,7 +44,11 @@ fn test_end_to_end_workflow() { let file_path = temp_dir.path().join("test_setup.bytes"); setup.store_file(&file_path).unwrap(); println!("4. Saved setup to file"); + // ----------------------------------------------------------------------------------------------------------------- // + // ----------------------------------------------------------------------------------------------------------------- // + // Online Proving Phase + // ----------------------------------------------------------------------------------------------------------------- // // Step 5: Read the setup from the file let setup = Setup::>::load_file(&file_path).unwrap(); println!("5. 
Read setup from file"); @@ -49,8 +56,10 @@ fn test_end_to_end_workflow() { // Step 6: Ready the setup for proving with the switchboard let input1 = InputMap::from([("next_pc".to_string(), InputValue::Field(GenericFieldElement::from(1_u64)))]); - let input2 = - InputMap::from([("next_pc".to_string(), InputValue::Field(GenericFieldElement::from(1_i128)))]); + let input2 = InputMap::from([( + "next_pc".to_string(), + InputValue::Field(GenericFieldElement::from(-1_i128)), + )]); let switchboard = Switchboard::::new( vec![swap_memory_program, square_program], vec![input1, input2], @@ -68,35 +77,31 @@ fn test_end_to_end_workflow() { let compressed_proof = program::compress(&setup, &recursive_snark).unwrap(); println!("8. Compressed the proof"); - // Step 9: Serialize the proof - let serialized_proof = compressed_proof.serialize().unwrap(); - println!("9. Serialized the proof"); - - // Step 10: Save the serialized proof to a file + // Step 9: Serialize and store the proof in a file + let serialized_proof = bincode::serialize(&compressed_proof).unwrap(); let proof_file_path = temp_dir.path().join("test_proof.bytes"); - let proof_bytes = bincode::serialize(&serialized_proof).unwrap(); - fs::write(&proof_file_path, &proof_bytes).unwrap(); - println!("10. Saved the serialized proof to a file"); + fs::write(&proof_file_path, &serialized_proof).unwrap(); + println!("9. 
Saved the serialized proof to a file"); + // ----------------------------------------------------------------------------------------------------------------- // - // Step 11: Read and deserialize the proof + // ----------------------------------------------------------------------------------------------------------------- // + // Separate Verification Phase + // ----------------------------------------------------------------------------------------------------------------- // + // Step 10: Read and deserialize the proof let proof_bytes_from_file = fs::read(&proof_file_path).unwrap(); - let deserialized_proof: FoldingProof, String> = - bincode::deserialize(&proof_bytes_from_file).unwrap(); - println!("11. Read and deserialized the proof"); - - // Step 12: Convert back to compressed proof - let compressed_proof_from_file = deserialized_proof.deserialize().unwrap(); - println!("12. Converted back to compressed proof"); + let deserialized_proof: CompressedSNARK = bincode::deserialize(&proof_bytes_from_file).unwrap(); + println!("10. Read and deserialized the proof"); - // TODO: Set up a verifier from file - // Step 13: Verify the proof digests match + // Step 11: Verify the proof digests match by loading the setup from file as if we were a verifier let vsetup = Setup::>::load_file(&file_path).unwrap(); let vsetup = vsetup.into_ready(switchboard); - let (_pk, vk) = CompressedSNARK::setup(&vsetup.params).unwrap(); - compressed_proof_from_file.proof.verify( + let vk = vsetup.verifier_key().unwrap(); + deserialized_proof.verify( &vsetup.params, &vk, recursive_snark.z0_primary(), recursive_snark.z0_secondary(), ); + println!("11. 
Verified the proof"); + // ----------------------------------------------------------------------------------------------------------------- // } diff --git a/frontend/tests/ivc/mod.rs b/frontend/tests/ivc/mod.rs index 709fd13..d199d29 100644 --- a/frontend/tests/ivc/mod.rs +++ b/frontend/tests/ivc/mod.rs @@ -162,10 +162,7 @@ fn test_ivc_compression() { let compressed_proof = compress(&setup, &snark).unwrap(); let (_, vk) = CompressedSNARK::setup(&setup.params).unwrap(); - compressed_proof - .proof - .verify(&setup.params, &vk, &snark.z0_primary(), &snark.z0_secondary()) - .unwrap(); + compressed_proof.verify(&setup.params, &vk, &snark.z0_primary(), &snark.z0_secondary()).unwrap(); } #[test] @@ -200,10 +197,7 @@ fn test_ivc_compression_basic() { let snark = run(&setup).unwrap(); let compressed_proof = compress(&setup, &snark).unwrap(); let (_, vk) = CompressedSNARK::setup(&setup.params).unwrap(); - compressed_proof - .proof - .verify(&setup.params, &vk, &snark.z0_primary(), &snark.z0_secondary()) - .unwrap(); + compressed_proof.verify(&setup.params, &vk, &snark.z0_primary(), &snark.z0_secondary()).unwrap(); } #[test] @@ -241,10 +235,7 @@ fn test_ivc_compression_poseidon() { let compressed_proof = compress(&setup, &snark).unwrap(); let (_, vk) = CompressedSNARK::setup(&setup.params).unwrap(); - compressed_proof - .proof - .verify(&setup.params, &vk, &snark.z0_primary(), &snark.z0_secondary()) - .unwrap(); + compressed_proof.verify(&setup.params, &vk, &snark.z0_primary(), &snark.z0_secondary()).unwrap(); } #[test] @@ -269,8 +260,5 @@ fn test_collatz() { let compressed_proof = compress(&setup, &snark).unwrap(); let (_, vk) = CompressedSNARK::setup(&setup.params).unwrap(); - compressed_proof - .proof - .verify(&setup.params, &vk, &snark.z0_primary(), &snark.z0_secondary()) - .unwrap(); + compressed_proof.verify(&setup.params, &vk, &snark.z0_primary(), &snark.z0_secondary()).unwrap(); } From e61973e977d0f23d04d76f2d1e771152593540a7 Mon Sep 17 00:00:00 2001 From: Colin 
Roberts Date: Thu, 6 Mar 2025 16:47:19 -0700 Subject: [PATCH 44/51] cleanup --- frontend/src/program.rs | 22 ++++------------------ frontend/src/setup.rs | 7 +++++++ frontend/tests/end_to_end/mod.rs | 9 +++------ prover/src/supernova/snark.rs | 5 ----- 4 files changed, 14 insertions(+), 29 deletions(-) diff --git a/frontend/src/program.rs b/frontend/src/program.rs index a6e88ad..58c321e 100644 --- a/frontend/src/program.rs +++ b/frontend/src/program.rs @@ -24,7 +24,7 @@ //! - [`compress`]: Compresses a recursive SNARK into a more compact form for verification use client_side_prover::supernova::{NonUniformCircuit, RecursiveSNARK}; -use halo2curves::grumpkin; +use halo2curves::{ff::PrimeField, grumpkin}; use noirc_abi::InputMap; use tracing::trace; @@ -297,23 +297,9 @@ fn prove_single_step( ) -> Result>, FrontendError> { let program_counter = match &recursive_snark { None => setup.switchboard.initial_circuit_index(), - Some(snark) => { - let pc_bytes = snark.program_counter().to_bytes(); - let usize_size = std::mem::size_of::(); - - // Check if higher bytes are non-zero - if pc_bytes[usize_size..].iter().any(|&b| b != 0) { - return Err(FrontendError::Other("Program counter value too large for usize".into())); - } - - // Convert to usize (little-endian) - let mut pc_value = 0usize; - for (i, &b) in pc_bytes.iter().take(usize_size).enumerate() { - pc_value |= (b as usize) << (i * 8); - } - - pc_value - }, + Some(snark) => + u32::from_le_bytes(snark.program_counter().to_repr().as_ref()[0..4].try_into().unwrap()) + as usize, }; debug!("Program counter = {:?}", program_counter); diff --git a/frontend/src/setup.rs b/frontend/src/setup.rs index 6425148..bc7033a 100644 --- a/frontend/src/setup.rs +++ b/frontend/src/setup.rs @@ -183,6 +183,13 @@ impl Setup> { Ok(bytes) } + /// Returns the verifier key for the setup + /// + /// This method generates the verifier key for the setup using the public parameters. 
+ /// + /// # Returns + /// + /// The verifier key for the setup pub fn verifier_key(&self) -> Result { let (_, vk) = CompressedSNARK::setup(&self.params)?; Ok(vk) diff --git a/frontend/tests/end_to_end/mod.rs b/frontend/tests/end_to_end/mod.rs index 901ed07..51e0dd6 100644 --- a/frontend/tests/end_to_end/mod.rs +++ b/frontend/tests/end_to_end/mod.rs @@ -96,12 +96,9 @@ fn test_end_to_end_workflow() { let vsetup = Setup::>::load_file(&file_path).unwrap(); let vsetup = vsetup.into_ready(switchboard); let vk = vsetup.verifier_key().unwrap(); - deserialized_proof.verify( - &vsetup.params, - &vk, - recursive_snark.z0_primary(), - recursive_snark.z0_secondary(), - ); + deserialized_proof + .verify(&vsetup.params, &vk, recursive_snark.z0_primary(), recursive_snark.z0_secondary()) + .unwrap(); println!("11. Verified the proof"); // ----------------------------------------------------------------------------------------------------------------- // } diff --git a/prover/src/supernova/snark.rs b/prover/src/supernova/snark.rs index ca4d477..bae4e6f 100644 --- a/prover/src/supernova/snark.rs +++ b/prover/src/supernova/snark.rs @@ -1,6 +1,5 @@ //! 
This module defines a final compressing SNARK for supernova proofs -use ff::PrimeField; use serde::{Deserialize, Serialize}; use super::{error::SuperNovaError, PublicParams, RecursiveSNARK}; @@ -270,10 +269,6 @@ where } } -fn field_as_usize(x: F) -> usize { - u32::from_le_bytes(x.to_repr().as_ref()[0..4].try_into().unwrap()) as usize -} - #[cfg(test)] mod test { use std::marker::PhantomData; From 265dcc507000a2a667221ea6a5328f497fe63cb1 Mon Sep 17 00:00:00 2001 From: Colin Roberts Date: Thu, 6 Mar 2025 16:55:48 -0700 Subject: [PATCH 45/51] formatting + typo --- .github/workflows/check.yaml | 2 +- Nargo.toml | 16 ++++++++-------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/.github/workflows/check.yaml b/.github/workflows/check.yaml index 3c35b71..1621d78 100644 --- a/.github/workflows/check.yaml +++ b/.github/workflows/check.yaml @@ -1,4 +1,4 @@ -name: Checkta +name: Check on: pull_request: diff --git a/Nargo.toml b/Nargo.toml index d6dd3b6..10a7548 100644 --- a/Nargo.toml +++ b/Nargo.toml @@ -1,11 +1,11 @@ [workspace] members=[ - "nivc", - "examples/add_external", - "examples/square_zeroth", - "examples/swap_memory", - "examples/poseidon", - "examples/basic", - "examples/collatz_even", - "examples/collatz_odd", + "nivc", + "examples/add_external", + "examples/square_zeroth", + "examples/swap_memory", + "examples/poseidon", + "examples/basic", + "examples/collatz_even", + "examples/collatz_odd", ] From d09f7845aebf093920c528f7120b73c626601f55 Mon Sep 17 00:00:00 2001 From: Colin Roberts Date: Thu, 6 Mar 2025 17:02:21 -0700 Subject: [PATCH 46/51] Update check.yaml --- .github/workflows/check.yaml | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/.github/workflows/check.yaml b/.github/workflows/check.yaml index 1621d78..5a71863 100644 --- a/.github/workflows/check.yaml +++ b/.github/workflows/check.yaml @@ -11,13 +11,19 @@ jobs: name: Tests runs-on: ubuntu-latest steps: + - name: Install Nargo + uses: noir-lang/noirup@v0.1.3 + 
with: + toolchain: v1.0.0-beta.2 - uses: actions/checkout@v4 - name: Install Rust uses: dtolnay/rust-toolchain@master with: toolchain: nightly - name: Run tests - run: cargo test --all + run: | + nargo compile --workspace + cargo test --all format: name: Format From d894221e033e55fa91c3a15dfb23e30715e97b41 Mon Sep 17 00:00:00 2001 From: Colin Roberts Date: Fri, 7 Mar 2025 05:43:28 -0700 Subject: [PATCH 47/51] refactor --- frontend/src/program.rs | 51 +++++++++++++++++++++++- frontend/src/setup.rs | 66 +++++++++++++++----------------- frontend/tests/end_to_end/mod.rs | 36 ++++++++--------- 3 files changed, 96 insertions(+), 57 deletions(-) diff --git a/frontend/src/program.rs b/frontend/src/program.rs index 58c321e..fa36b3d 100644 --- a/frontend/src/program.rs +++ b/frontend/src/program.rs @@ -45,11 +45,12 @@ pub trait Memory: private::Sealed { /// Private module containing implementation details for sealing the Memory trait mod private { - use super::{RAM, ROM}; + use super::{Configuration, RAM, ROM}; /// Seals the [`Memory`] trait pub trait Sealed {} impl Sealed for ROM {} impl Sealed for RAM {} + impl Sealed for Configuration {} } /// Read-Only Memory model @@ -74,6 +75,16 @@ impl Memory for RAM { type Data = (); } +/// A memory model that doesn't require any additional input data. +/// +/// This is just a placeholder to allow the setup to be easily created then serialized and +/// deserialized. 
+#[derive(Debug, Clone)] +pub struct Configuration {} +impl Memory for Configuration { + type Data = (); +} + /// Manages a collection of circuits and controls execution flow /// /// The switchboard holds all the circuits that can be executed in a NIVC computation, @@ -94,6 +105,44 @@ pub struct Switchboard { pub(crate) switchboard_inputs: M::Data, } +impl Switchboard { + /// Creates a new switchboard with Blank memory model + /// + /// # Arguments + /// + /// * `circuits` - Collection of Noir circuits that can be executed + /// + /// # Returns + pub fn new(mut circuits: Vec) -> Self { + // Set the index of each circuit given the order they are passed in since this is skipped in + // serde + circuits.iter_mut().enumerate().for_each(|(i, c)| c.index = i); + Self { circuits, public_input: vec![], initial_circuit_index: 0, switchboard_inputs: () } + } + + pub fn into_rom( + self, + initial_circuit_index: usize, + switchboard_inputs: Vec, + public_input: Vec, + ) -> Switchboard { + Switchboard { circuits: self.circuits, public_input, initial_circuit_index, switchboard_inputs } + } + + pub fn into_ram( + self, + initial_circuit_index: usize, + public_input: Vec, + ) -> Switchboard { + Switchboard { + circuits: self.circuits, + public_input, + initial_circuit_index, + switchboard_inputs: self.switchboard_inputs, + } + } +} + impl Switchboard { /// Creates a new switchboard with Read-Only Memory model /// diff --git a/frontend/src/setup.rs b/frontend/src/setup.rs index bc7033a..0ff752d 100644 --- a/frontend/src/setup.rs +++ b/frontend/src/setup.rs @@ -10,7 +10,7 @@ //! ## Setup States //! //! The setup can be in one of two states: -//! - **Empty**: Contains only auxiliary parameters without a switchboard (can be serialized for +//! - **Offline**: Contains only auxiliary parameters without a switchboard (can be serialized for //! storage) //! - **Ready**: Complete setup with a switchboard that's ready for program execution //! 
@@ -35,7 +35,7 @@ use crate::program::{Memory, Switchboard}; /// /// This sealed trait can only be implemented by the predefined status types: /// - `Ready`: A setup that is ready for execution with a specific memory model -/// - `Empty`: A setup that only contains cryptographic parameters without a switchboard +/// - `Offline`: A setup that only contains cryptographic parameters without a switchboard pub trait Status: private::Sealed { /// The switchboard type associated with this status type Switchboard; @@ -46,12 +46,12 @@ pub trait Status: private::Sealed { /// Private module for sealing the Status trait mod private { - use super::{Empty, Ready}; + use super::{Offline, Ready}; /// Sealed trait implementation to restrict Status implementations pub trait Sealed {} impl Sealed for Ready {} - impl Sealed for Empty {} + impl Sealed for Offline {} } /// Represents a setup that is ready for execution with a specific memory model @@ -73,18 +73,15 @@ impl Status for Ready { /// Represents a setup that only contains cryptographic parameters without a switchboard /// -/// An `Empty` setup can be serialized and stored, making it useful for saving +/// An `Offline` setup can be serialized and stored, making it useful for saving /// computationally expensive cryptographic parameters. 
#[derive(Debug, Clone)] -pub struct Empty { - /// Marker for the memory model type - _marker: std::marker::PhantomData, -} +pub struct Offline; -impl Status for Empty { - /// An empty setup only contains auxiliary parameters +impl Status for Offline { + /// An offline setup only contains auxiliary parameters type PublicParams = AuxParams; - /// An empty setup doesn't have a switchboard + /// An offline setup doesn't have a switchboard type Switchboard = (); } @@ -103,7 +100,7 @@ pub struct Setup { /// Secondary verification key digest pub vk_digest_secondary: as Engine>::Scalar, - /// Switchboard (if the setup is [`Ready`]) or unit (if [`Empty`]) + /// Switchboard (if the setup is [`Ready`]) or unit (if [`Offline`]) pub switchboard: S::Switchboard, } @@ -142,15 +139,15 @@ impl Setup> { }) } - /// Converts a ready setup to an empty setup + /// Converts a ready setup to an offline setup /// /// This extracts the auxiliary parameters from the public parameters and - /// creates an empty setup without the switchboard, which can be serialized. + /// creates an offline setup without the switchboard, which can be serialized. /// /// # Returns /// - /// An empty setup containing only the auxiliary parameters - fn into_empty(self) -> Setup> { + /// An offline setup containing only the auxiliary parameters + fn into_offline(self) -> Setup { Setup { params: self.params.into_parts().1, vk_digest_primary: self.vk_digest_primary, @@ -161,7 +158,7 @@ impl Setup> { /// Serializes the setup and stores it to a file /// - /// This converts the setup to an empty setup, serializes it, and writes + /// This converts the setup to an offline setup, serializes it, and writes /// the resulting bytes to the specified file path. 
/// /// # Arguments @@ -172,7 +169,7 @@ impl Setup> { /// /// The serialized bytes on success, or a `FrontendError` on failure pub fn store_file(self, path: &std::path::PathBuf) -> Result, FrontendError> { - let bytes = self.into_empty().to_bytes(); + let bytes = self.into_offline().to_bytes(); if let Some(parent) = path.parent() { std::fs::create_dir_all(parent)?; } @@ -196,8 +193,8 @@ impl Setup> { } } -impl Setup> { - /// Converts an empty setup to a ready setup +impl Setup { + /// Converts an offline setup to a ready setup /// /// This combines the auxiliary parameters with a switchboard to create /// a ready setup that can be used to execute programs. @@ -209,7 +206,7 @@ impl Setup> { /// # Returns /// /// A ready setup containing the parameters and switchboard - pub fn into_ready(self, switchboard: Switchboard) -> Setup> { + pub fn into_ready(self, switchboard: Switchboard) -> Setup> { Setup { params: PublicParams::from_parts(get_circuit_shapes(&switchboard), self.params), vk_digest_primary: self.vk_digest_primary, @@ -234,7 +231,7 @@ impl Setup> { } // TODO: We should consider using `rkyv` for serialization and deserialization -impl FastSerde for Setup> { +impl FastSerde for Setup { /// Deserializes a setup from bytes /// /// # Arguments @@ -243,7 +240,7 @@ impl FastSerde for Setup> { /// /// # Returns /// - /// The deserialized empty setup, or a `SerdeByteError` on failure + /// The deserialized offline setup, or a `SerdeByteError` on failure fn from_bytes(bytes: &[u8]) -> Result { let mut cursor = Cursor::new(bytes); Self::validate_header(&mut cursor, SerdeByteTypes::ProverParams, 3)?; @@ -288,34 +285,33 @@ impl FastSerde for Setup> { #[cfg(test)] mod tests { use super::*; - use crate::{demo::square_zeroth, program::RAM}; + use crate::{demo::square_zeroth, program::Configuration}; #[test] fn test_setup_and_params() { - let setup = Setup::new(Switchboard::::new(vec![square_zeroth()], vec![], 0)).unwrap(); + let setup = 
Setup::new(Switchboard::::new(vec![square_zeroth()])).unwrap(); assert_eq!(setup.params.num_constraints_and_variables(0), (10009, 10001)); } #[test] fn test_setup_serialize() { - let setup = Setup::new(Switchboard::::new(vec![square_zeroth()], vec![], 0)).unwrap(); - let empty_setup = setup.into_empty(); - let serialized = empty_setup.to_bytes(); - let deserialized = Setup::>::from_bytes(&serialized).unwrap(); - assert_eq!(empty_setup, deserialized); + let setup = Setup::new(Switchboard::::new(vec![square_zeroth()])).unwrap(); + let offline_setup = setup.into_offline(); + let serialized = offline_setup.to_bytes(); + let deserialized = Setup::::from_bytes(&serialized).unwrap(); + assert_eq!(offline_setup, deserialized); } #[test] fn test_setup_store_file() { - let switchboard = Switchboard::::new(vec![square_zeroth()], vec![], 0); + let switchboard = Switchboard::::new(vec![square_zeroth()]); let setup = Setup::new(switchboard.clone()).unwrap(); let vk_digest_primary = setup.vk_digest_primary; let vk_digest_secondary = setup.vk_digest_secondary; let path = tempfile::tempdir().unwrap().into_path(); - let bytes = setup.store_file(&path.join("setup.bytes")).unwrap(); - assert!(!bytes.is_empty()); + let _bytes = setup.store_file(&path.join("setup.bytes")).unwrap(); let stored_bytes = std::fs::read(path.join("setup.bytes")).unwrap(); - let deserialized = Setup::>::from_bytes(&stored_bytes).unwrap(); + let deserialized = Setup::::from_bytes(&stored_bytes).unwrap(); let ready_setup = deserialized.into_ready(switchboard); assert_eq!(vk_digest_primary, ready_setup.vk_digest_primary); assert_eq!(vk_digest_secondary, ready_setup.vk_digest_secondary); diff --git a/frontend/tests/end_to_end/mod.rs b/frontend/tests/end_to_end/mod.rs index 51e0dd6..553bb28 100644 --- a/frontend/tests/end_to_end/mod.rs +++ b/frontend/tests/end_to_end/mod.rs @@ -3,8 +3,8 @@ use std::fs; use acvm::acir::acir_field::GenericFieldElement; use client_side_prover_frontend::{ demo, - program::{self, 
Switchboard, ROM}, - setup::{Empty, Ready, Setup}, + program::{self, Configuration, Switchboard}, + setup::Setup, CompressedSNARK, Scalar, }; use noirc_abi::{input_parser::InputValue, InputMap}; @@ -27,16 +27,12 @@ fn test_end_to_end_workflow() { // Step 2: Create switchboard with ROM memory model, no inputs are necessary since this is just // creating the setup - let switchboard = Switchboard::::new( - vec![swap_memory_program.clone(), square_program.clone()], - vec![], - vec![], - 0, - ); + let switchboard = + Switchboard::::new(vec![swap_memory_program.clone(), square_program.clone()]); println!("2. Created switchboard"); // Step 3: Initialize the setup - let setup = Setup::>::new(switchboard).unwrap(); + let setup = Setup::new(switchboard.clone()).unwrap(); println!("3. Initialized setup"); // Step 4: Save the setup to a file @@ -50,7 +46,7 @@ fn test_end_to_end_workflow() { // Online Proving Phase // ----------------------------------------------------------------------------------------------------------------- // // Step 5: Read the setup from the file - let setup = Setup::>::load_file(&file_path).unwrap(); + let psetup = Setup::load_file(&file_path).unwrap(); println!("5. Read setup from file"); // Step 6: Ready the setup for proving with the switchboard @@ -60,21 +56,18 @@ fn test_end_to_end_workflow() { "next_pc".to_string(), InputValue::Field(GenericFieldElement::from(-1_i128)), )]); - let switchboard = Switchboard::::new( - vec![swap_memory_program, square_program], - vec![input1, input2], - vec![Scalar::from(3), Scalar::from(5)], - 0, - ); - let setup = setup.into_ready(switchboard.clone()); + // Briefly test the switchboard into_rom method + let pswitchboard = + switchboard.into_rom(0, vec![input1, input2], vec![Scalar::from(3), Scalar::from(5)]); + let psetup = psetup.into_ready(pswitchboard); println!("6. 
Ready the setup for proving with the switchboard"); // Step 7: Run a proof - let recursive_snark = program::run(&setup).unwrap(); + let recursive_snark = program::run(&psetup).unwrap(); println!("7. Run a proof"); // Step 8: Compress the proof - let compressed_proof = program::compress(&setup, &recursive_snark).unwrap(); + let compressed_proof = program::compress(&psetup, &recursive_snark).unwrap(); println!("8. Compressed the proof"); // Step 9: Serialize and store the proof in a file @@ -93,8 +86,9 @@ fn test_end_to_end_workflow() { println!("10. Read and deserialized the proof"); // Step 11: Verify the proof digests match by loading the setup from file as if we were a verifier - let vsetup = Setup::>::load_file(&file_path).unwrap(); - let vsetup = vsetup.into_ready(switchboard); + let vsetup = Setup::load_file(&file_path).unwrap(); + let vswitchboard = Switchboard::::new(vec![swap_memory_program, square_program]); + let vsetup = vsetup.into_ready(vswitchboard); let vk = vsetup.verifier_key().unwrap(); deserialized_proof .verify(&vsetup.params, &vk, recursive_snark.z0_primary(), recursive_snark.z0_secondary()) From fb54c8f32d1d83c53f77fc8ac936caf613ae667c Mon Sep 17 00:00:00 2001 From: Colin Roberts Date: Fri, 7 Mar 2025 05:52:29 -0700 Subject: [PATCH 48/51] remove deps + error variants --- Cargo.lock | 143 -------------------------------------- frontend/Cargo.toml | 54 +++++--------- frontend/src/error.rs | 41 ----------- frontend/src/lib.rs | 2 +- frontend/src/noir.rs | 2 +- frontend/src/program.rs | 11 +-- frontend/tests/ivc/mod.rs | 3 +- 7 files changed, 28 insertions(+), 228 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 95a13b1..3a0e56e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -75,15 +75,6 @@ dependencies = [ "num-traits", ] -[[package]] -name = "addr2line" -version = "0.24.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" -dependencies = [ - 
"gimli", -] - [[package]] name = "adler2" version = "2.0.0" @@ -277,21 +268,6 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" -[[package]] -name = "backtrace" -version = "0.3.74" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" -dependencies = [ - "addr2line", - "cfg-if", - "libc", - "miniz_oxide", - "object", - "rustc-demangle", - "windows-targets", -] - [[package]] name = "base16ct" version = "0.1.1" @@ -496,12 +472,6 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" -[[package]] -name = "bytes" -version = "1.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f61dac84819c6588b558454b194026eb1f09c293b9036ae9b159e74e73ab6cf9" - [[package]] name = "cast" version = "0.3.0" @@ -634,27 +604,17 @@ dependencies = [ "ark-bn254", "bellpepper-core", "bincode", - "byteorder", "client-side-prover", "client-side-prover-frontend", - "ff 0.13.0", "halo2curves", - "hex", - "itertools 0.13.0", - "js-sys", "noirc_abi", - "num-bigint 0.4.6", "serde", - "serde-wasm-bindgen", "serde_json", "tempdir", "tempfile", "thiserror", - "tokio", "tracing", "tracing-test", - "wasm-bindgen", - "wasm-bindgen-futures", ] [[package]] @@ -1025,12 +985,6 @@ dependencies = [ "windows-targets", ] -[[package]] -name = "gimli" -version = "0.31.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" - [[package]] name = "glob" version = "0.3.1" @@ -1360,17 +1314,6 @@ dependencies = [ "adler2", ] -[[package]] -name = "mio" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" -dependencies = [ - "libc", - "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.52.0", -] - [[package]] name = "neptune" version = "13.0.0" @@ -1475,15 +1418,6 @@ dependencies = [ "libc", ] -[[package]] -name = "object" -version = "0.36.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" -dependencies = [ - "memchr", -] - [[package]] name = "once_cell" version = "1.20.2" @@ -1914,12 +1848,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "rustc-demangle" -version = "0.1.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" - [[package]] name = "rustix" version = "0.38.44" @@ -2014,17 +1942,6 @@ dependencies = [ "serde", ] -[[package]] -name = "serde-wasm-bindgen" -version = "0.6.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8302e169f0eddcc139c70f139d19d6467353af16f9fce27e8c30158036a1e16b" -dependencies = [ - "js-sys", - "serde", - "wasm-bindgen", -] - [[package]] name = "serde_arrays" version = "0.1.0" @@ -2102,15 +2019,6 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" -[[package]] -name = "signal-hook-registry" -version = "1.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" -dependencies = [ - "libc", -] - [[package]] name = "signature" version = "1.6.4" @@ -2127,16 +2035,6 @@ version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" -[[package]] -name = "socket2" -version = "0.5.8" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "c970269d99b64e60ec3bd6ad27270092a5394c4e309314b18ae3fe575695fbe8" -dependencies = [ - "libc", - "windows-sys 0.52.0", -] - [[package]] name = "spin" version = "0.9.8" @@ -2305,35 +2203,6 @@ dependencies = [ "serde_json", ] -[[package]] -name = "tokio" -version = "1.43.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d61fa4ffa3de412bfea335c6ecff681de2b609ba3c77ef3e00e521813a9ed9e" -dependencies = [ - "backtrace", - "bytes", - "libc", - "mio", - "parking_lot", - "pin-project-lite", - "signal-hook-registry", - "socket2", - "tokio-macros", - "windows-sys 0.52.0", -] - -[[package]] -name = "tokio-macros" -version = "2.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.98", -] - [[package]] name = "toml" version = "0.7.8" @@ -2570,18 +2439,6 @@ dependencies = [ "wasm-bindgen-shared", ] -[[package]] -name = "wasm-bindgen-futures" -version = "0.4.43" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61e9300f63a621e96ed275155c108eb6f843b6a26d053f122ab69724559dc8ed" -dependencies = [ - "cfg-if", - "js-sys", - "wasm-bindgen", - "web-sys", -] - [[package]] name = "wasm-bindgen-macro" version = "0.2.93" diff --git a/frontend/Cargo.toml b/frontend/Cargo.toml index 62029ea..d285530 100644 --- a/frontend/Cargo.toml +++ b/frontend/Cargo.toml @@ -1,44 +1,28 @@ [package] -name ="client-side-prover-frontend" -version="0.1.0" -edition="2021" +name = "client-side-prover-frontend" +version = "0.1.0" +edition = "2021" [dependencies] -client-side-prover={ path="../prover" } - -serde ={ workspace=true } -serde_json ={ workspace=true } -thiserror ={ workspace=true } -tracing ={ workspace=true } -hex ={ workspace=true } -ff ={ workspace=true } -bellpepper-core={ workspace=true } -halo2curves ={ workspace=true } - -byteorder ={ workspace=true } -num-bigint={ 
workspace=true } -itertools ={ workspace=true } -bincode ={ workspace=true } +client-side-prover = { path = "../prover" } +serde = { workspace = true } +serde_json = { workspace = true } +thiserror = { workspace = true } +tracing = { workspace = true } +bellpepper-core = { workspace = true } +halo2curves = { workspace = true } # noir -acvm ={ git="https://github.com/noir-lang/noir", rev="v1.0.0-beta.2" } -noirc_abi={ git="https://github.com/noir-lang/noir", rev="v1.0.0-beta.2" } -ark-bn254="0.5" - -#- Wasm target configuration ----------------------------------------------------------------------# -[target.'cfg(target_arch = "wasm32")'.dependencies] -serde-wasm-bindgen ="0.6.5" -wasm-bindgen ="0.2.87" -js-sys ="0.3.64" -wasm-bindgen-futures="0.4.37" +acvm = { git = "https://github.com/noir-lang/noir", rev = "v1.0.0-beta.2" } +noirc_abi = { git = "https://github.com/noir-lang/noir", rev = "v1.0.0-beta.2" } +ark-bn254 = "0.5" [dev-dependencies] -tracing-test ={ workspace=true } -tempdir ="0.3.7" -tokio ={ version="1.43", features=["full"] } -client-side-prover-frontend={ path=".", features=["demo"] } -tempfile ="3.17" +tracing-test = { workspace = true } +tempdir = "0.3.7" +client-side-prover-frontend = { path = ".", features = ["demo"] } +tempfile = "3.17" +bincode = { workspace = true } [features] -verify-steps=[] -demo =[] +demo = [] diff --git a/frontend/src/error.rs b/frontend/src/error.rs index df4588e..3a8042a 100644 --- a/frontend/src/error.rs +++ b/frontend/src/error.rs @@ -21,30 +21,10 @@ pub enum FrontendError { #[error(transparent)] Io(#[from] std::io::Error), - /// The error is a `serde_json::Error` - #[error(transparent)] - Serde(#[from] serde_json::Error), - /// The error is a custom error with a message #[error("Other error: {0}")] Other(String), - /// The error is a failed proof verification - #[error("Failed to verify proof: {0}")] - VerifyFailed(String), - - /// The error is a `num_bigint::ParseBigIntError` - #[error(transparent)] - Parse(#[from] 
num_bigint::ParseBigIntError), - - /// The error is a missing header section - #[error("Missing header section")] - MissingSection, - - /// The error is a `bincode::ErrorKind` - #[error(transparent)] - Bincode(#[from] Box), - /// The error is a `client_side_prover::errors::NovaError` #[error(transparent)] Nova(#[from] client_side_prover::errors::NovaError), @@ -53,27 +33,6 @@ pub enum FrontendError { #[error(transparent)] SuperNova(#[from] client_side_prover::supernova::error::SuperNovaError), - /// The error is a json key error - #[error("json key not found: {0}")] - JsonKeyError(String), - - /// The error is an invalid circuit size - #[error("Invalid circuit size")] - InvalidCircuitSize, - - /// The error is a serde_wasm_bindgen::Error - #[cfg(target_arch = "wasm32")] - #[error(transparent)] - SerdeWasmBindgen(#[from] serde_wasm_bindgen::Error), - - /// The error is an invalid manifest - #[error("Invalid manifest: {0}")] - InvalidManifest(String), - - /// The error is an invalid hex string - #[error(transparent)] - FromHex(#[from] hex::FromHexError), - /// The error is a [`client_side_prover::fast_serde::SerdeByteError`] #[error(transparent)] FastSerde(#[from] client_side_prover::fast_serde::SerdeByteError), diff --git a/frontend/src/lib.rs b/frontend/src/lib.rs index 948a968..9db7b17 100644 --- a/frontend/src/lib.rs +++ b/frontend/src/lib.rs @@ -38,7 +38,7 @@ use client_side_prover::{ supernova::TrivialCircuit, traits::{Engine, Group}, }; -use ff::Field; +use halo2curves::ff::Field; use serde::{Deserialize, Serialize}; use tracing::{debug, info}; diff --git a/frontend/src/noir.rs b/frontend/src/noir.rs index 1fef443..78a31c2 100644 --- a/frontend/src/noir.rs +++ b/frontend/src/noir.rs @@ -26,7 +26,7 @@ use acvm::{ use ark_bn254::Fr; use bellpepper_core::{num::AllocatedNum, ConstraintSystem, LinearCombination, SynthesisError}; use client_side_prover::supernova::StepCircuit; -use ff::PrimeField; +use halo2curves::ff::PrimeField; use 
noirc_abi::{input_parser::InputValue, Abi, AbiType, InputMap}; use tracing::{error, trace}; diff --git a/frontend/src/program.rs b/frontend/src/program.rs index fa36b3d..8e13171 100644 --- a/frontend/src/program.rs +++ b/frontend/src/program.rs @@ -344,12 +344,13 @@ fn prove_single_step( z0_primary: &[Scalar], z0_secondary: &[grumpkin::Fr], ) -> Result>, FrontendError> { - let program_counter = match &recursive_snark { - None => setup.switchboard.initial_circuit_index(), - Some(snark) => + let program_counter = recursive_snark.as_ref().map_or_else( + || setup.switchboard.initial_circuit_index(), + |snark| { u32::from_le_bytes(snark.program_counter().to_repr().as_ref()[0..4].try_into().unwrap()) - as usize, - }; + as usize + }, + ); debug!("Program counter = {:?}", program_counter); diff --git a/frontend/tests/ivc/mod.rs b/frontend/tests/ivc/mod.rs index d199d29..ce828a3 100644 --- a/frontend/tests/ivc/mod.rs +++ b/frontend/tests/ivc/mod.rs @@ -5,8 +5,7 @@ use client_side_prover_frontend::{ setup::Setup, Scalar, }; -use ff::Field; -use halo2curves::grumpkin; +use halo2curves::{ff::Field, grumpkin}; use noirc_abi::{input_parser::InputValue, InputMap}; use super::*; From 03e1d9d593ca8677b011c11b614194483fd85ce0 Mon Sep 17 00:00:00 2001 From: Colin Roberts Date: Fri, 7 Mar 2025 05:52:51 -0700 Subject: [PATCH 49/51] Update error.rs --- frontend/src/error.rs | 4 ---- 1 file changed, 4 deletions(-) diff --git a/frontend/src/error.rs b/frontend/src/error.rs index 3a8042a..6520aec 100644 --- a/frontend/src/error.rs +++ b/frontend/src/error.rs @@ -21,10 +21,6 @@ pub enum FrontendError { #[error(transparent)] Io(#[from] std::io::Error), - /// The error is a custom error with a message - #[error("Other error: {0}")] - Other(String), - /// The error is a `client_side_prover::errors::NovaError` #[error(transparent)] Nova(#[from] client_side_prover::errors::NovaError), From 2496afd362ad2a27594a19ca611303793ba2657f Mon Sep 17 00:00:00 2001 From: Colin Roberts Date: Fri, 7 Mar 2025 
05:58:52 -0700 Subject: [PATCH 50/51] revert arity check --- prover/src/supernova/mod.rs | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/prover/src/supernova/mod.rs b/prover/src/supernova/mod.rs index 7944679..5130f9c 100644 --- a/prover/src/supernova/mod.rs +++ b/prover/src/supernova/mod.rs @@ -584,14 +584,12 @@ where E1: CurveCycleEquipped } // check the arity of all the primary circuits match the initial input length - // pp.circuit_shapes.iter().try_for_each(|circuit| { - // if circuit.F_arity != z0_primary.len() { - // return Err(SuperNovaError::NovaError( - // NovaError::InvalidStepOutputLength, - // )); - // } - // Ok(()) - // })?; + pp.circuit_shapes.iter().try_for_each(|circuit| { + if circuit.F_arity != z0_primary.len() { + return Err(SuperNovaError::NovaError(NovaError::InvalidStepOutputLength)); + } + Ok(()) + })?; // base case for the primary let mut cs_primary = SatisfyingAssignment::::new(); From 4a739beb15a670234186c2ebb125a2c9aa982327 Mon Sep 17 00:00:00 2001 From: Colin Roberts Date: Fri, 7 Mar 2025 06:00:44 -0700 Subject: [PATCH 51/51] Update Cargo.toml --- frontend/Cargo.toml | 38 +++++++++++++++++++------------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/frontend/Cargo.toml b/frontend/Cargo.toml index d285530..3db7bfb 100644 --- a/frontend/Cargo.toml +++ b/frontend/Cargo.toml @@ -1,28 +1,28 @@ [package] -name = "client-side-prover-frontend" -version = "0.1.0" -edition = "2021" +name ="client-side-prover-frontend" +version="0.1.0" +edition="2021" [dependencies] -client-side-prover = { path = "../prover" } -serde = { workspace = true } -serde_json = { workspace = true } -thiserror = { workspace = true } -tracing = { workspace = true } -bellpepper-core = { workspace = true } -halo2curves = { workspace = true } +client-side-prover={ path="../prover" } +serde ={ workspace=true } +serde_json ={ workspace=true } +thiserror ={ workspace=true } +tracing ={ workspace=true } +bellpepper-core ={ 
workspace=true } +halo2curves ={ workspace=true } # noir -acvm = { git = "https://github.com/noir-lang/noir", rev = "v1.0.0-beta.2" } -noirc_abi = { git = "https://github.com/noir-lang/noir", rev = "v1.0.0-beta.2" } -ark-bn254 = "0.5" +acvm ={ git="https://github.com/noir-lang/noir", rev="v1.0.0-beta.2" } +noirc_abi={ git="https://github.com/noir-lang/noir", rev="v1.0.0-beta.2" } +ark-bn254="0.5" [dev-dependencies] -tracing-test = { workspace = true } -tempdir = "0.3.7" -client-side-prover-frontend = { path = ".", features = ["demo"] } -tempfile = "3.17" -bincode = { workspace = true } +tracing-test ={ workspace=true } +tempdir ="0.3.7" +client-side-prover-frontend={ path=".", features=["demo"] } +tempfile ="3.17" +bincode ={ workspace=true } [features] -demo = [] +demo=[]